1 /* 2 * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver 3 * 4 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 5 * Use is subject to license terms. 6 * 7 * Permission to use, copy, modify, and distribute this software and its 8 * documentation is hereby granted, provided that the above copyright 9 * notice appears in all copies. 10 * 11 * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF 12 * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 13 * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 14 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT. SUN SHALL NOT BE LIABLE FOR 15 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR 16 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES 17 * 18 * Copyright (c) 1994 The Australian National University. 19 * All rights reserved. 20 * 21 * Permission to use, copy, modify, and distribute this software and its 22 * documentation is hereby granted, provided that the above copyright 23 * notice appears in all copies. This software is provided without any 24 * warranty, express or implied. The Australian National University 25 * makes no representations about the suitability of this software for 26 * any purpose. 27 * 28 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY 29 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES 30 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF 31 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY 32 * OF SUCH DAMAGE. 33 * 34 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, 35 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 36 * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS 37 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO 38 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, 39 * OR MODIFICATIONS. 
40 * 41 * This driver is derived from the original SVR4 STREAMS PPP driver 42 * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>. 43 * 44 * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code 45 * for improved performance and scalability. 46 */ 47 48 #pragma ident "%Z%%M% %I% %E% SMI" 49 #define RCSID "$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $" 50 51 #include <sys/types.h> 52 #include <sys/debug.h> 53 #include <sys/param.h> 54 #include <sys/stat.h> 55 #include <sys/stream.h> 56 #include <sys/stropts.h> 57 #include <sys/sysmacros.h> 58 #include <sys/errno.h> 59 #include <sys/time.h> 60 #include <sys/cmn_err.h> 61 #include <sys/kmem.h> 62 #include <sys/conf.h> 63 #include <sys/dlpi.h> 64 #include <sys/ddi.h> 65 #include <sys/kstat.h> 66 #include <sys/strsun.h> 67 #include <sys/ethernet.h> 68 #include <sys/policy.h> 69 #include <net/ppp_defs.h> 70 #include <net/pppio.h> 71 #include "sppp.h" 72 #include "s_common.h" 73 74 /* 75 * This is used to tag official Solaris sources. Please do not define 76 * "INTERNAL_BUILD" when building this software outside of Sun Microsystems. 77 */ 78 #ifdef INTERNAL_BUILD 79 /* MODINFO is limited to 32 characters. 
 */
const char sppp_module_description[] = "PPP 4.0 mux v%I%";
#else /* INTERNAL_BUILD */
const char sppp_module_description[] = "ANU PPP mux $Revision: 1.0$";

/* LINTED */
static const char buildtime[] = "Built " __DATE__ " at " __TIME__
#ifdef DEBUG
    " DEBUG"
#endif
    "\n";
#endif /* INTERNAL_BUILD */

/* Forward declarations for routines defined later in this file. */
static void	sppp_inner_ioctl(queue_t *, mblk_t *);
static void	sppp_outer_ioctl(queue_t *, mblk_t *);
static queue_t	*sppp_send(queue_t *, mblk_t **, spppstr_t *);
static queue_t	*sppp_recv(queue_t *, mblk_t **, spppstr_t *);
static void	sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
static queue_t	*sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
static int	sppp_kstat_update(kstat_t *, int);
static void	sppp_release_pkts(sppa_t *, uint16_t);

/*
 * sps_list contains the list of active per-stream instance state structures
 * ordered on the minor device number (see sppp.h for details). All streams
 * opened to this driver are threaded together in this list.
 */
static spppstr_t *sps_list = NULL;
/*
 * ppa_list contains the list of active per-attachment instance state
 * structures ordered on the ppa id number (see sppp.h for details). All of
 * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
 * in this list. There is exactly one ppa structure for a given PPP interface,
 * and multiple sps streams (upper streams) may share a ppa by performing
 * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
 */
static sppa_t *ppa_list = NULL;

static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };

/*
 * map proto (which is an IANA defined ppp network protocol) to
 * a bit position indicated by NP_* in ppa_npflag
 */
static uint32_t
sppp_ppp2np(uint16_t proto)
{
	if (proto > 0x7fff)
		return (0);
	switch (proto) {
	case PPP_IP:
		return (NP_IP);
	case PPP_IPV6:
		return (NP_IPV6);
	default:
		printf("sppp: unknown protocol 0x%x\n", proto);
		return (0);
	}
}

/*
 * sppp_open()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Common open procedure for module.
 */
/* ARGSUSED */
int
sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	minor_t		mn;

	ASSERT(q != NULL && devp != NULL);
	ASSERT(sflag != MODOPEN);

	if (q->q_ptr != NULL) {
		return (0);		/* already open */
	}
	if (sflag != CLONEOPEN) {
		return (OPENFAIL);
	}
	/*
	 * The sps list is sorted using the minor number as the key. The
	 * following code walks the list to find the lowest valued minor
	 * number available to be used.
	 */
	mn = 0;
	for (nextmn = &sps_list; (sps = *nextmn) != NULL;
	    nextmn = &sps->sps_nextmn) {
		if (sps->sps_mn_id != mn) {
			break;
		}
		++mn;
	}
	sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
	ASSERT(sps != NULL);		/* KM_SLEEP must never return NULL */
	sps->sps_nextmn = *nextmn;	/* insert stream in global list */
	*nextmn = sps;
	sps->sps_mn_id = mn;		/* save minor id for this stream */
	sps->sps_rq = q;		/* save read queue pointer */
	sps->sps_sap = -1;		/* no sap bound to stream */
	sps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
	sps->sps_npmode = NPMODE_DROP;	/* drop all packets initially */
	q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
	/*
	 * We explicitly disable the automatic queue scheduling for the
	 * write-side to obtain complete control over queuing during transmit.
	 * Packets will be queued at the upper write queue and the service
	 * routine will not be called until it gets scheduled by having the
	 * lower write service routine call the qenable(WR(uq)) for all streams
	 * attached to the same ppa instance.
	 */
	noenable(WR(q));
	*devp = makedevice(getmajor(*devp), mn);
	qprocson(q);
	return (0);
}

/*
 * Free storage used by a PPA.  This is not called until the last PPA
 * user closes his connection or reattaches to a different PPA.
 */
static void
sppp_free_ppa(sppa_t *ppa)
{
	sppa_t **nextppa;

	ASSERT(ppa->ppa_refcnt == 1);
	if (ppa->ppa_kstats != NULL) {
		kstat_delete(ppa->ppa_kstats);
		ppa->ppa_kstats = NULL;
	}
	mutex_destroy(&ppa->ppa_sta_lock);
	mutex_destroy(&ppa->ppa_npmutex);
	rw_destroy(&ppa->ppa_sib_lock);
	/* Unlink this ppa from the global singly-linked ppa_list. */
	nextppa = &ppa_list;
	while (*nextppa != NULL) {
		if (*nextppa == ppa) {
			*nextppa = ppa->ppa_nextppa;
			break;
		}
		nextppa = &(*nextppa)->ppa_nextppa;
	}
	kmem_free(ppa, sizeof (*ppa));
}

/*
 * Create a new PPA.  Caller must be exclusive on outer perimeter.
 */
sppa_t *
sppp_create_ppa(uint32_t ppa_id)
{
	sppa_t *ppa;
	sppa_t *curppa;
	sppa_t **availppa;
	char unit[32];		/* Unit name */
	const char **cpp;
	kstat_t *ksp;
	kstat_named_t *knt;

	/*
	 * NOTE: unit *must* be named for the driver
	 * name plus the ppa number so that netstat
	 * can find the statistics.
	 */
	(void) sprintf(unit, "%s%d", PPP_DRV_NAME, ppa_id);
	/*
	 * Make sure we can allocate a buffer to
	 * contain the ppa to be sent upstream, as
	 * well as the actual ppa structure and its
	 * associated kstat structure.
	 */
	ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
	    KM_NOSLEEP);
	ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
	    sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);

	if (ppa == NULL || ksp == NULL) {
		/* Either allocation failed; release whichever succeeded. */
		if (ppa != NULL) {
			kmem_free(ppa, sizeof (sppa_t));
		}
		if (ksp != NULL) {
			kstat_delete(ksp);
		}
		return (NULL);
	}
	ppa->ppa_kstats = ksp;		/* chain kstat structure */
	ppa->ppa_ppa_id = ppa_id;	/* record ppa id */
	ppa->ppa_mtu = PPP_MAXMTU;	/* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
	ppa->ppa_mru = PPP_MAXMRU;	/* 65000 */

	mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);

	/*
	 * Prepare and install kstat counters.  Note that for netstat
	 * -i to work, there needs to be "ipackets", "opackets",
	 * "ierrors", and "oerrors" kstat named variables.
	 */
	knt = (kstat_named_t *)ksp->ks_data;
	for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
		knt++;
	}
	for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
		knt++;
	}
	ksp->ks_update = sppp_kstat_update;
	ksp->ks_private = (void *)ppa;
	kstat_install(ksp);

	/* link to the next ppa and insert into global list */
	availppa = &ppa_list;
	while ((curppa = *availppa) != NULL) {
		if (ppa_id < curppa->ppa_ppa_id)
			break;
		availppa = &curppa->ppa_nextppa;
	}
	ppa->ppa_nextppa = *availppa;
	*availppa = ppa;
	return (ppa);
}

/*
 * sppp_close()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Common close procedure for module.
 */
int
sppp_close(queue_t *q)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	spppstr_t	*sib;
	sppa_t		*ppa;
	mblk_t		*mp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	qprocsoff(q);

	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		goto close_unattached;
	}
	if (IS_SPS_CONTROL(sps)) {
		uint32_t	cnt = 0;

		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl == sps);
		ppa->ppa_ctl = NULL;
		/*
		 * STREAMS framework always issues I_UNLINK prior to close,
		 * since we only allow I_LINK under the control stream.
		 * A given ppa structure has at most one lower stream pointed
		 * by the ppa_lower_wq field, because we only allow a single
		 * linkage (I_LINK) to be done on the control stream.
		 */
		ASSERT(ppa->ppa_lower_wq == NULL);
		/*
		 * Walk through all of sibling streams attached to this ppa,
		 * and remove all references to this ppa.  We have exclusive
		 * access for the entire driver here, so there's no need
		 * to hold ppa_sib_lock.
		 */
		cnt++;		/* count the control stream itself */
		sib = ppa->ppa_streams;
		while (sib != NULL) {
			ASSERT(ppa == sib->sps_ppa);
			sib->sps_npmode = NPMODE_DROP;
			sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
			/*
			 * There should be a preallocated hangup
			 * message here.  Fetch it and send it up to
			 * the stream head.  This will cause IP to
			 * mark the interface as "down."
			 */
			if ((mp = sib->sps_hangup) != NULL) {
				sib->sps_hangup = NULL;
				/*
				 * M_HANGUP works with IP, but snoop
				 * is lame and requires M_ERROR.  Send
				 * up a clean error code instead.
				 *
				 * XXX if snoop is fixed, fix this, too.
				 */
				MTYPE(mp) = M_ERROR;
				*mp->b_wptr++ = ENXIO;
				putnext(sib->sps_rq, mp);
			}
			qenable(WR(sib->sps_rq));
			cnt++;
			sib = sib->sps_nextsib;
		}
		ASSERT(ppa->ppa_refcnt == cnt);
	} else {
		ASSERT(ppa->ppa_streams != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		mp = NULL;
		if (sps->sps_sap == PPP_IP) {
			ppa->ppa_ip_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
		} else if (sps->sps_sap == PPP_IPV6) {
			ppa->ppa_ip6_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
		}
		/* Tell the daemon the bad news. */
		if (mp != NULL && ppa->ppa_ctl != NULL &&
		    (sps->sps_npmode == NPMODE_PASS ||
		    sps->sps_npmode == NPMODE_QUEUE)) {
			putnext(ppa->ppa_ctl->sps_rq, mp);
		} else {
			freemsg(mp);
		}
		/*
		 * Walk through all of sibling streams attached to the
		 * same ppa, and remove this stream from the sibling
		 * streams list.  We have exclusive access for the
		 * entire driver here, so there's no need to hold
		 * ppa_sib_lock.
		 */
		sib = ppa->ppa_streams;
		if (sib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (sib->sps_nextsib != NULL) {
				if (sib->sps_nextsib == sps) {
					sib->sps_nextsib = sps->sps_nextsib;
					break;
				}
				sib = sib->sps_nextsib;
			}
		}
		sps->sps_nextsib = NULL;
		freemsg(sps->sps_hangup);
		sps->sps_hangup = NULL;
		/*
		 * Check if this is a promiscous stream. If the SPS_PROMISC bit
		 * is still set, it means that the stream is closed without
		 * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
		 * In this case, we simply decrement the promiscous counter,
		 * and it's safe to do it without holding ppa_sib_lock since
		 * we're exclusive (inner and outer) at this point.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
	}
	/* If we're the only one left, then delete now. */
	if (ppa->ppa_refcnt <= 1)
		sppp_free_ppa(ppa);
	else
		ppa->ppa_refcnt--;
close_unattached:
	q->q_ptr = WR(q)->q_ptr = NULL;
	/* Remove this stream from the global sps_list. */
	for (nextmn = &sps_list; *nextmn != NULL;
	    nextmn = &(*nextmn)->sps_nextmn) {
		if (*nextmn == sps) {
			*nextmn = sps->sps_nextmn;
			break;
		}
	}
	kmem_free(sps, sizeof (spppstr_t));
	return (0);
}

/*
 * sppp_ioctl()
 *
 * Description:
 *    Called by sppp_uwput for the subset of ioctls that need extra local
 *    state to handle (PPPIO_NPMODE, PPPIO_GIDLE, PPPIO_GTYPE,
 *    PPPIO_GETSTAT64, PPPIO_GETCSTAT).  Acks or naks the ioctl on return,
 *    except when the message is forwarded to the lower stream, in which
 *    case the reply comes from below.
 */
static void
sppp_ioctl(struct queue *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	enum NPmode	npmode;
	struct ppp_idle	*pip;
	struct ppp_stats64 *psp;
	struct ppp_comp_stats *pcsp;
	hrtime_t	hrtime;
	int		sap;
	int		count = 0;
	int		error = EINVAL;

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_NPMODE:
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    (mp->b_cont == NULL)) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
		npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
		/*
		 * Walk the sibling streams which belong to the same
		 * ppa, and try to find a stream with matching sap
		 * number.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (nextsib->sps_sap == sap) {
				break;	/* found it */
			}
		}
		if (nextsib == NULL) {
			rw_exit(&ppa->ppa_sib_lock);
			break;		/* return EINVAL */
		} else {
			nextsib->sps_npmode = npmode;
			/* Wake the stream if it has queued data to move. */
			if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
			    (WR(nextsib->sps_rq)->q_first != NULL)) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;	/* return success */
		break;
	case PPPIO_GIDLE:
		if (ppa == NULL) {
			ASSERT(!IS_SPS_CONTROL(sps));
			error = ENOLINK;
			break;
		} else if (!IS_PPA_TIMESTAMP(ppa)) {
			break;		/* return EINVAL */
		}
		if ((nmp = allocb(sizeof (struct ppp_idle),
		    BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pip = (struct ppp_idle *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_idle);
		/*
		 * Get current timestamp and subtract the tx and rx
		 * timestamps to get the actual idle time to be
		 * returned.  gethrtime() values are in nanoseconds,
		 * hence the division to convert to seconds.
		 */
		hrtime = gethrtime();
		pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
		pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
		count = msgsize(nmp);
		error = 0;
		break;		/* return success (error is 0) */
	case PPPIO_GTYPE:
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		/*
		 * Let the requestor know that we are the PPP
		 * multiplexer (PPPTYP_MUX).
		 */
		*(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
		nmp->b_wptr += sizeof (uint32_t);
		count = msgsize(nmp);
		error = 0;	/* return success */
		break;
	case PPPIO_GETSTAT64:
		if (ppa == NULL) {
			break;	/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_ioctlsfwd++;
			mutex_exit(&ppa->ppa_sta_lock);
			/*
			 * Record the ioctl CMD & ID - this will be
			 * used to check the ACK or NAK responses
			 * coming from below.
			 */
			sps->sps_ioc_id = iop->ioc_id;
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (*psp), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		psp = (struct ppp_stats64 *)nmp->b_wptr;
		/*
		 * Copy the contents of ppp_stats64 structure for this
		 * ppa and return them to the caller.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
		mutex_exit(&ppa->ppa_sta_lock);
		nmp->b_wptr += sizeof (*psp);
		count = sizeof (*psp);
		error = 0;	/* return success */
		break;
	case PPPIO_GETCSTAT:
		if (ppa == NULL) {
			break;	/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_ioctlsfwd++;
			mutex_exit(&ppa->ppa_sta_lock);
			/*
			 * Record the ioctl CMD & ID - this will be
			 * used to check the ACK or NAK responses
			 * coming from below.
			 */
			sps->sps_ioc_id = iop->ioc_id;
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_comp_stats);
		/* No compression stats kept at this level; return zeros. */
		bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
		count = msgsize(nmp);
		error = 0;	/* return success */
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}

/*
 * sppp_uwput()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Upper write-side put procedure. Messages from above arrive here.
669 */ 670 int 671 sppp_uwput(queue_t *q, mblk_t *mp) 672 { 673 queue_t *nextq; 674 spppstr_t *sps; 675 sppa_t *ppa; 676 struct iocblk *iop; 677 int error; 678 679 ASSERT(q != NULL && q->q_ptr != NULL); 680 ASSERT(mp != NULL && mp->b_rptr != NULL); 681 sps = (spppstr_t *)q->q_ptr; 682 ppa = sps->sps_ppa; 683 684 switch (MTYPE(mp)) { 685 case M_PCPROTO: 686 case M_PROTO: 687 if (IS_SPS_CONTROL(sps)) { 688 ASSERT(ppa != NULL); 689 /* 690 * Intentionally change this to a high priority 691 * message so it doesn't get queued up. M_PROTO is 692 * specifically used for signalling between pppd and its 693 * kernel-level component(s), such as ppptun, so we 694 * make sure that it doesn't get queued up behind 695 * data messages. 696 */ 697 MTYPE(mp) = M_PCPROTO; 698 if ((ppa->ppa_lower_wq != NULL) && 699 canputnext(ppa->ppa_lower_wq)) { 700 mutex_enter(&ppa->ppa_sta_lock); 701 ppa->ppa_mctlsfwd++; 702 mutex_exit(&ppa->ppa_sta_lock); 703 putnext(ppa->ppa_lower_wq, mp); 704 } else { 705 mutex_enter(&ppa->ppa_sta_lock); 706 ppa->ppa_mctlsfwderr++; 707 mutex_exit(&ppa->ppa_sta_lock); 708 freemsg(mp); 709 } 710 } else { 711 return (sppp_mproto(q, mp, sps)); 712 } 713 break; 714 case M_DATA: 715 if ((nextq = sppp_send(q, &mp, sps)) != NULL) 716 putnext(nextq, mp); 717 break; 718 case M_IOCTL: 719 error = EINVAL; 720 iop = (struct iocblk *)mp->b_rptr; 721 switch (iop->ioc_cmd) { 722 case DLIOCRAW: 723 case DL_IOC_HDR_INFO: 724 case PPPIO_ATTACH: 725 case PPPIO_DEBUG: 726 case PPPIO_DETACH: 727 case PPPIO_LASTMOD: 728 case PPPIO_MRU: 729 case PPPIO_MTU: 730 case PPPIO_USETIMESTAMP: 731 case PPPIO_BLOCKNP: 732 case PPPIO_UNBLOCKNP: 733 qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER); 734 return (0); 735 case I_LINK: 736 case I_UNLINK: 737 case PPPIO_NEWPPA: 738 qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER); 739 return (0); 740 case PPPIO_NPMODE: 741 case PPPIO_GIDLE: 742 case PPPIO_GTYPE: 743 case PPPIO_GETSTAT64: 744 case PPPIO_GETCSTAT: 745 /* 746 * These require additional auto 
variables to 747 * handle, so (for optimization reasons) 748 * they're moved off to a separate function. 749 */ 750 sppp_ioctl(q, mp); 751 return (0); 752 case PPPIO_GETSTAT: 753 break; /* 32 bit interface gone */ 754 default: 755 if (iop->ioc_cr == NULL || 756 secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) { 757 error = EPERM; 758 break; 759 } else if ((ppa == NULL) || 760 (ppa->ppa_lower_wq == NULL)) { 761 break; /* return EINVAL */ 762 } 763 mutex_enter(&ppa->ppa_sta_lock); 764 ppa->ppa_ioctlsfwd++; 765 mutex_exit(&ppa->ppa_sta_lock); 766 /* 767 * Record the ioctl CMD & ID - this will be used to 768 * check the ACK or NAK responses coming from below. 769 */ 770 sps->sps_ioc_id = iop->ioc_id; 771 putnext(ppa->ppa_lower_wq, mp); 772 return (0); /* don't ack or nak the request */ 773 } 774 /* Failure; send error back upstream. */ 775 miocnak(q, mp, 0, error); 776 break; 777 case M_FLUSH: 778 if (*mp->b_rptr & FLUSHW) { 779 flushq(q, FLUSHDATA); 780 } 781 if (*mp->b_rptr & FLUSHR) { 782 *mp->b_rptr &= ~FLUSHW; 783 qreply(q, mp); 784 } else { 785 freemsg(mp); 786 } 787 break; 788 default: 789 freemsg(mp); 790 break; 791 } 792 return (0); 793 } 794 795 /* 796 * sppp_uwsrv() 797 * 798 * MT-Perimeters: 799 * exclusive inner, shared outer. 800 * 801 * Description: 802 * Upper write-side service procedure. Note that this procedure does 803 * not get called when a message is placed on our write-side queue, since 804 * automatic queue scheduling has been turned off by noenable() when 805 * the queue was opened. We do this on purpose, as we explicitly control 806 * the write-side queue. Therefore, this procedure gets called when 807 * the lower write service procedure qenable() the upper write stream queue. 
808 */ 809 int 810 sppp_uwsrv(queue_t *q) 811 { 812 spppstr_t *sps; 813 mblk_t *mp; 814 queue_t *nextq; 815 816 ASSERT(q != NULL && q->q_ptr != NULL); 817 sps = (spppstr_t *)q->q_ptr; 818 while ((mp = getq(q)) != NULL) { 819 if ((nextq = sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) { 820 if (mp != NULL) { 821 if (putbq(q, mp) == 0) 822 freemsg(mp); 823 break; 824 } 825 } else { 826 putnext(nextq, mp); 827 } 828 } 829 return (0); 830 } 831 832 void 833 sppp_remove_ppa(spppstr_t *sps) 834 { 835 spppstr_t *nextsib; 836 sppa_t *ppa = sps->sps_ppa; 837 838 rw_enter(&ppa->ppa_sib_lock, RW_WRITER); 839 if (ppa->ppa_refcnt <= 1) { 840 rw_exit(&ppa->ppa_sib_lock); 841 sppp_free_ppa(ppa); 842 } else { 843 nextsib = ppa->ppa_streams; 844 if (nextsib == sps) { 845 ppa->ppa_streams = sps->sps_nextsib; 846 } else { 847 while (nextsib->sps_nextsib != NULL) { 848 if (nextsib->sps_nextsib == sps) { 849 nextsib->sps_nextsib = 850 sps->sps_nextsib; 851 break; 852 } 853 nextsib = nextsib->sps_nextsib; 854 } 855 } 856 ppa->ppa_refcnt--; 857 /* 858 * And if this stream was marked as promiscuous 859 * (SPS_PROMISC), then we need to update the 860 * promiscuous streams count. This should only happen 861 * when DL_DETACH_REQ is issued prior to marking the 862 * stream as non-promiscuous, through 863 * DL_PROMISCOFF_REQ request. 
864 */ 865 if (IS_SPS_PROMISC(sps)) { 866 ASSERT(ppa->ppa_promicnt > 0); 867 ppa->ppa_promicnt--; 868 } 869 rw_exit(&ppa->ppa_sib_lock); 870 } 871 sps->sps_nextsib = NULL; 872 sps->sps_ppa = NULL; 873 freemsg(sps->sps_hangup); 874 sps->sps_hangup = NULL; 875 } 876 877 sppa_t * 878 sppp_find_ppa(uint32_t ppa_id) 879 { 880 sppa_t *ppa; 881 882 for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) { 883 if (ppa->ppa_ppa_id == ppa_id) { 884 break; /* found the ppa */ 885 } 886 } 887 return (ppa); 888 } 889 890 /* 891 * sppp_inner_ioctl() 892 * 893 * MT-Perimeters: 894 * exclusive inner, shared outer 895 * 896 * Description: 897 * Called by sppp_uwput as a result of receiving ioctls which require 898 * an exclusive access at the inner perimeter. 899 */ 900 static void 901 sppp_inner_ioctl(queue_t *q, mblk_t *mp) 902 { 903 spppstr_t *sps; 904 sppa_t *ppa; 905 struct iocblk *iop; 906 mblk_t *nmp; 907 int error = EINVAL; 908 int count = 0; 909 int dbgcmd; 910 int mru, mtu; 911 uint32_t ppa_id; 912 hrtime_t hrtime; 913 uint16_t proto; 914 915 ASSERT(q != NULL && q->q_ptr != NULL); 916 ASSERT(mp != NULL && mp->b_rptr != NULL); 917 918 sps = (spppstr_t *)q->q_ptr; 919 ppa = sps->sps_ppa; 920 iop = (struct iocblk *)mp->b_rptr; 921 switch (iop->ioc_cmd) { 922 case DLIOCRAW: 923 if (IS_SPS_CONTROL(sps)) { 924 break; /* return EINVAL */ 925 } 926 sps->sps_flags |= SPS_RAWDATA; 927 error = 0; /* return success */ 928 break; 929 case DL_IOC_HDR_INFO: 930 if (IS_SPS_CONTROL(sps)) { 931 break; /* return EINVAL */ 932 } else if ((mp->b_cont == NULL) || 933 *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ || 934 (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) + 935 SPPP_ADDRL))) { 936 error = EPROTO; 937 break; 938 } else if (ppa == NULL) { 939 error = ENOLINK; 940 break; 941 } 942 if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) { 943 mutex_enter(&ppa->ppa_sta_lock); 944 ppa->ppa_allocbfail++; 945 mutex_exit(&ppa->ppa_sta_lock); 946 error = ENOMEM; 947 break; 948 } 949 
*(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS; 950 *(uchar_t *)nmp->b_wptr++ = PPP_UI; 951 *(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8; 952 *(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff; 953 ASSERT(MBLKL(nmp) == PPP_HDRLEN); 954 955 linkb(mp, nmp); 956 sps->sps_flags |= SPS_FASTPATH; 957 error = 0; /* return success */ 958 count = msgsize(nmp); 959 break; 960 case PPPIO_ATTACH: 961 if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) || 962 (sps->sps_dlstate != DL_UNATTACHED) || 963 (iop->ioc_count != sizeof (uint32_t))) { 964 break; /* return EINVAL */ 965 } else if (mp->b_cont == NULL) { 966 error = EPROTO; 967 break; 968 } 969 ASSERT(mp->b_cont->b_rptr != NULL); 970 /* If there's something here, it's detached. */ 971 if (ppa != NULL) { 972 sppp_remove_ppa(sps); 973 } 974 ppa_id = *(uint32_t *)mp->b_cont->b_rptr; 975 ppa = sppp_find_ppa(ppa_id); 976 /* 977 * If we can't find it, then it's either because the requestor 978 * has supplied a wrong ppa_id to be attached to, or because 979 * the control stream for the specified ppa_id has been closed 980 * before we get here. 981 */ 982 if (ppa == NULL) { 983 error = ENOENT; 984 break; 985 } 986 /* 987 * Preallocate the hangup message so that we're always 988 * able to send this upstream in the event of a 989 * catastrophic failure. 990 */ 991 if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) { 992 error = ENOSR; 993 break; 994 } 995 /* 996 * There are two ways to attach a stream to a ppa: one is 997 * through DLPI (DL_ATTACH_REQ) and the other is through 998 * PPPIO_ATTACH. This is why we need to distinguish whether or 999 * not a stream was allocated via PPPIO_ATTACH, so that we can 1000 * properly detach it when we receive PPPIO_DETACH ioctl 1001 * request. 1002 */ 1003 sps->sps_flags |= SPS_PIOATTACH; 1004 sps->sps_ppa = ppa; 1005 /* 1006 * Add this stream to the head of the list of sibling streams 1007 * which belong to the same ppa as specified. 
1008 */ 1009 rw_enter(&ppa->ppa_sib_lock, RW_WRITER); 1010 ppa->ppa_refcnt++; 1011 sps->sps_nextsib = ppa->ppa_streams; 1012 ppa->ppa_streams = sps; 1013 rw_exit(&ppa->ppa_sib_lock); 1014 error = 0; /* return success */ 1015 break; 1016 case PPPIO_BLOCKNP: 1017 case PPPIO_UNBLOCKNP: 1018 if (iop->ioc_cr == NULL || 1019 secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) { 1020 error = EPERM; 1021 break; 1022 } 1023 error = miocpullup(mp, sizeof (uint16_t)); 1024 if (error != 0) 1025 break; 1026 ASSERT(mp->b_cont->b_rptr != NULL); 1027 proto = *(uint16_t *)mp->b_cont->b_rptr; 1028 if (iop->ioc_cmd == PPPIO_BLOCKNP) { 1029 uint32_t npflagpos = sppp_ppp2np(proto); 1030 /* 1031 * Mark proto as blocked in ppa_npflag until the 1032 * corresponding queues for proto have been plumbed. 1033 */ 1034 if (npflagpos != 0) { 1035 mutex_enter(&ppa->ppa_npmutex); 1036 ppa->ppa_npflag |= (1 << npflagpos); 1037 mutex_exit(&ppa->ppa_npmutex); 1038 } else { 1039 error = EINVAL; 1040 } 1041 } else { 1042 /* 1043 * reset ppa_npflag and release proto 1044 * packets that were being held in control queue. 1045 */ 1046 sppp_release_pkts(ppa, proto); 1047 } 1048 break; 1049 case PPPIO_DEBUG: 1050 if (iop->ioc_cr == NULL || 1051 secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) { 1052 error = EPERM; 1053 break; 1054 } else if (iop->ioc_count != sizeof (uint32_t)) { 1055 break; /* return EINVAL */ 1056 } else if (mp->b_cont == NULL) { 1057 error = EPROTO; 1058 break; 1059 } 1060 ASSERT(mp->b_cont->b_rptr != NULL); 1061 dbgcmd = *(uint32_t *)mp->b_cont->b_rptr; 1062 /* 1063 * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication 1064 * that SPS_KDEBUG needs to be enabled for this upper stream. 
1065 */ 1066 if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) { 1067 sps->sps_flags |= SPS_KDEBUG; 1068 error = 0; /* return success */ 1069 break; 1070 } 1071 /* 1072 * Otherwise, for any other values, we send them down only if 1073 * there is an attachment and if the attachment has something 1074 * linked underneath it. 1075 */ 1076 if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) { 1077 error = ENOLINK; 1078 break; 1079 } 1080 mutex_enter(&ppa->ppa_sta_lock); 1081 ppa->ppa_ioctlsfwd++; 1082 mutex_exit(&ppa->ppa_sta_lock); 1083 /* 1084 * Record the ioctl CMD & ID - this will be used to check the 1085 * ACK or NAK responses coming from below. 1086 */ 1087 sps->sps_ioc_id = iop->ioc_id; 1088 putnext(ppa->ppa_lower_wq, mp); 1089 return; /* don't ack or nak the request */ 1090 case PPPIO_DETACH: 1091 if (!IS_SPS_PIOATTACH(sps)) { 1092 break; /* return EINVAL */ 1093 } 1094 /* 1095 * The SPS_PIOATTACH flag set on the stream tells us that 1096 * the ppa field is still valid. In the event that the control 1097 * stream be closed prior to this stream's detachment, the 1098 * SPS_PIOATTACH flag would have been cleared from this stream 1099 * during close; in that case we won't get here. 1100 */ 1101 ASSERT(ppa != NULL); 1102 ASSERT(ppa->ppa_ctl != sps); 1103 ASSERT(sps->sps_dlstate == DL_UNATTACHED); 1104 1105 /* 1106 * We don't actually detach anything until the stream is 1107 * closed or reattached. 
1108 */ 1109 1110 sps->sps_flags &= ~SPS_PIOATTACH; 1111 error = 0; /* return success */ 1112 break; 1113 case PPPIO_LASTMOD: 1114 if (!IS_SPS_CONTROL(sps)) { 1115 break; /* return EINVAL */ 1116 } 1117 ASSERT(ppa != NULL); 1118 ppa->ppa_flags |= PPA_LASTMOD; 1119 error = 0; /* return success */ 1120 break; 1121 case PPPIO_MRU: 1122 if (!IS_SPS_CONTROL(sps) || 1123 (iop->ioc_count != sizeof (uint32_t))) { 1124 break; /* return EINVAL */ 1125 } else if (mp->b_cont == NULL) { 1126 error = EPROTO; 1127 break; 1128 } 1129 ASSERT(ppa != NULL); 1130 ASSERT(mp->b_cont->b_rptr != NULL); 1131 mru = *(uint32_t *)mp->b_cont->b_rptr; 1132 if ((mru <= 0) || (mru > PPP_MAXMRU)) { 1133 error = EPROTO; 1134 break; 1135 } 1136 if (mru < PPP_MRU) { 1137 mru = PPP_MRU; 1138 } 1139 ppa->ppa_mru = (uint16_t)mru; 1140 /* 1141 * If there's something beneath this driver for the ppa, then 1142 * inform it (or them) of the MRU size. Only do this is we 1143 * are not the last PPP module on the stream. 1144 */ 1145 if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) { 1146 (void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU, 1147 mru); 1148 } 1149 error = 0; /* return success */ 1150 break; 1151 case PPPIO_MTU: 1152 if (!IS_SPS_CONTROL(sps) || 1153 (iop->ioc_count != sizeof (uint32_t))) { 1154 break; /* return EINVAL */ 1155 } else if (mp->b_cont == NULL) { 1156 error = EPROTO; 1157 break; 1158 } 1159 ASSERT(ppa != NULL); 1160 ASSERT(mp->b_cont->b_rptr != NULL); 1161 mtu = *(uint32_t *)mp->b_cont->b_rptr; 1162 if ((mtu <= 0) || (mtu > PPP_MAXMTU)) { 1163 error = EPROTO; 1164 break; 1165 } 1166 ppa->ppa_mtu = (uint16_t)mtu; 1167 /* 1168 * If there's something beneath this driver for the ppa, then 1169 * inform it (or them) of the MTU size. Only do this if we 1170 * are not the last PPP module on the stream. 
	 */
		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
			    mtu);
		}
		error = 0;	/* return success */
		break;
	case PPPIO_USETIMESTAMP:
		if (!IS_SPS_CONTROL(sps)) {
			break;	/* return EINVAL */
		}
		/*
		 * Prime both activity timestamps with the current hrtime,
		 * so that "time since last activity" is measured from the
		 * moment timestamping was enabled.
		 */
		if (!IS_PPA_TIMESTAMP(ppa)) {
			hrtime = gethrtime();
			ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
			ppa->ppa_flags |= PPA_TIMESTAMP;
		}
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream */
		miocnak(q, mp, 0, error);
	}
}

/*
 * sppp_outer_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer
 *
 * Description:
 *    Called by sppp_uwput as a result of receiving ioctls which require
 *    an exclusive access at the outer perimeter.
 */
static void
sppp_outer_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	queue_t		*lwq;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error = EINVAL;
	int		count = 0;
	uint32_t	ppa_id;
	mblk_t		*nmp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case I_LINK:
		if (!IS_SPS_CONTROL(sps)) {
			break;	/* return EINVAL */
		} else if (ppa->ppa_lower_wq != NULL) {
			error = EEXIST;
			break;
		}
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);

		lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
		ASSERT(lwq != NULL);

		ppa->ppa_lower_wq = lwq;
		lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
		/*
		 * Unblock upper network streams which now feed this lower
		 * stream. We don't need to hold ppa_sib_lock here, since we
		 * are writer at the outer perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			nextsib->sps_npmode = NPMODE_PASS;
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		/*
		 * Send useful information down to the modules which are now
		 * linked below this driver (for this particular ppa). Only
		 * do this if we are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa)) {
			(void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
			    ppa->ppa_ppa_id);
			(void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
			(void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
		}

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
			    "flags=0x%b\n", sps->sps_mn_id,
			    (void *)ppa->ppa_lower_wq, (void *)sps,
			    sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		error = 0;	/* return success */
		break;
	case I_UNLINK:
		ASSERT(IS_SPS_CONTROL(sps));
		ASSERT(ppa != NULL);
		lwq = ppa->ppa_lower_wq;
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
		ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
			    (void *)lwq, (void *)sps, sps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * While accessing the outer perimeter exclusively, we
		 * disassociate our ppa's lower_wq from the lower stream linked
		 * beneath us, and we also disassociate our control stream from
		 * the q_ptr of the lower stream.
		 */
		lwq->q_ptr = RD(lwq)->q_ptr = NULL;
		ppa->ppa_lower_wq = NULL;
		/*
		 * Unblock streams which now feed back up the control stream,
		 * and acknowledge the request. We don't need to hold
		 * ppa_sib_lock here, since we are writer at the outer
		 * perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		error = 0;	/* return success */
		break;
	case PPPIO_NEWPPA:
		/*
		 * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
		 * on a stream which DLPI is used (since certain DLPI messages
		 * will cause state transition reflected in sps_dlstate,
		 * changing it from its default DL_UNATTACHED value). In other
		 * words, we won't allow a network/snoop stream to become
		 * a control stream.
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_net_config(iop->ioc_cr, B_FALSE) != 0) {
			error = EPERM;
			break;
		} else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
			break;	/* return EINVAL */
		}
		/* Get requested unit number (if any) */
		if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
			ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		else
			ppa_id = 0;
		/* Get mblk to use for response message */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;	/* chain our response mblk */
		/*
		 * Walk the global ppa list and determine the lowest
		 * available ppa_id number to be used.  Two sentinel values
		 * are recognized (per the loop below): (uint32_t)-1 means
		 * "lowest available id" (reset to 0 before the walk), and
		 * (uint32_t)-2 means "reuse an existing ppa that currently
		 * has no control stream attached".
		 */
		if (ppa_id == (uint32_t)-1)
			ppa_id = 0;
		for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
			if (ppa_id == (uint32_t)-2) {
				if (ppa->ppa_ctl == NULL)
					break;
			} else {
				if (ppa_id < ppa->ppa_ppa_id)
					break;
				if (ppa_id == ppa->ppa_ppa_id)
					++ppa_id;
			}
		}
		if (ppa_id == (uint32_t)-2) {
			if (ppa == NULL) {
				error = ENXIO;
				break;
			}
			/* Clear timestamp and lastmod flags */
			ppa->ppa_flags = 0;
		} else {
			ppa = sppp_create_ppa(ppa_id);
			if (ppa == NULL) {
				error = ENOMEM;
				break;
			}
		}

		sps->sps_ppa = ppa;		/* chain the ppa structure */
		sps->sps_npmode = NPMODE_PASS;	/* network packets may travel */
		sps->sps_flags |= SPS_CONTROL;	/* this is the control stream */

		ppa->ppa_refcnt++;		/* new PPA reference */
		ppa->ppa_ctl = sps;		/* back ptr to upper stream */
		/*
		 * Return the newly created ppa_id to the requestor and
		 * acknowledge the request.
		 */
		*(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
		nmp->b_wptr += sizeof (uint32_t);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
			    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		count = msgsize(nmp);
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}

/*
 * sppp_send()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Called by sppp_uwput to handle M_DATA message type.  Returns
 *    queue_t for putnext, or NULL to mean that the packet was
 *    handled internally.
 */
static queue_t *
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
{
	mblk_t	*mp;
	sppa_t	*ppa;
	int	is_promisc;
	int	msize;
	int	error = 0;
	queue_t	*nextq;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ASSERT(q->q_ptr == sps);
	/*
	 * We only let M_DATA through if the sender is either the control
	 * stream (for PPP control packets) or one of the network streams
	 * (for IP packets) in IP fastpath mode. If this stream is not attached
	 * to any ppas, then discard data coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		error = ENOLINK;
	} else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
		error = EPROTO;
	}
	if (error != 0) {
		merror(q, mp, error);
		return (NULL);
	}
	/*
	 * The size checks below are advisory only: over- and under-sized
	 * packets are counted in the ppa statistics but still forwarded.
	 */
	msize = msgdsize(mp);
	if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	} else if (msize < PPP_HDRLEN) {
		/*
		 * Log, and send it anyway. We log it because we get things
		 * in M_DATA form here, which tells us that the sender is
		 * either IP in fastpath transmission mode, or pppd. In both
		 * cases, they are currently expected to send the 4-bytes
		 * PPP header in front of any possible payloads.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_orunts++;
		mutex_exit(&ppa->ppa_sta_lock);
	}

	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize. Make sure that
	 * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
	 * the control stream as we obviously never allow the control stream
	 * to become promiscuous and bind to PPP_ALLSAP.
	 *
	 * NOTE(review): mp is still used after this call, so presumably
	 * sppp_dlprsendup() (with B_TRUE) copies rather than consumes the
	 * message -- confirm against its definition.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = sps->sps_ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so. PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * If there's already a message in the write-side service queue,
	 * then queue this message there as well, otherwise, try to send
	 * it down to the module immediately below us.
	 */
	if (q->q_first != NULL ||
	    (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
		mp = *mpp;
		if (mp != NULL && putq(q, mp) == 0) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_oqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		return (NULL);
	}
	return (nextq);
}

/*
 * sppp_outpkt()
 *
 * MT-Perimeters:
 *    shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
 *    exclusive inner, shared outer (if called from sppp_wsrv).
 *
 * Description:
 *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
 *    or 2) sppp_uwsrv when processing the upper write-side service queue.
 *    For both cases, it prepares to send the data to the module below
 *    this driver if there is a lower stream linked underneath. If none, then
 *    the data will be sent upstream via the control channel to pppd.
 *
 * Returns:
 *    Non-NULL queue_t if message should be sent now, otherwise
 *    if *mpp == NULL, then message was freed, otherwise put *mpp
 *    (back) on the queue.  (Does not do putq/putbq, since it's
 *    called both from srv and put procedures.)
 */
static queue_t *
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
{
	mblk_t		*mp;
	sppa_t		*ppa;
	enum NPmode	npmode;
	mblk_t		*mpnew;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);

	ppa = sps->sps_ppa;
	npmode = sps->sps_npmode;

	if (npmode == NPMODE_QUEUE) {
		/* *mpp is left intact, so the caller will (re)queue it. */
		ASSERT(!IS_SPS_CONTROL(sps));
		return (NULL);	/* queue it for later */
	} else if (ppa == NULL || ppa->ppa_ctl == NULL ||
	    npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
		/*
		 * This can not be the control stream, as it must always have
		 * a valid ppa, and its npmode must always be NPMODE_PASS.
		 */
		ASSERT(!IS_SPS_CONTROL(sps));
		if (npmode == NPMODE_DROP) {
			freemsg(mp);
		} else {
			/*
			 * If we no longer have the control stream, or if the
			 * mode is set to NPMODE_ERROR, then we need to tell IP
			 * that the interface need to be marked as down. In
			 * other words, we tell IP to be quiescent.
			 */
			merror(q, mp, EPROTO);
		}
		*mpp = NULL;
		return (NULL);	/* don't queue it */
	}
	/*
	 * Do we have a driver stream linked underneath ? If not, we need to
	 * notify pppd that the link needs to be brought up and configure
	 * this upper stream to drop subsequent outgoing packets. This is
	 * for demand-dialing, in which case pppd has done the IP plumbing
	 * but hasn't linked the driver stream underneath us. Therefore, when
	 * a packet is sent down the IP interface, a notification message
	 * will be sent up the control stream to pppd in order for it to
	 * establish the physical link. The driver stream is then expected
	 * to be linked underneath after physical link establishment is done.
	 */
	if (ppa->ppa_lower_wq == NULL) {
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(ppa->ppa_ctl->sps_rq != NULL);

		*mpp = NULL;
		mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
		if (mpnew == NULL) {
			freemsg(mp);
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);	/* don't queue it */
		}
		/* Include the data in the message for logging. */
		mpnew->b_cont = mp;
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsneedup++;
		mutex_exit(&ppa->ppa_sta_lock);
		/*
		 * We need to set the mode to NPMODE_DROP, but should only
		 * do so when this stream is not the control stream.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			sps->sps_npmode = NPMODE_DROP;
		}
		putnext(ppa->ppa_ctl->sps_rq, mpnew);
		return (NULL);	/* don't queue it */
	}
	/*
	 * If so, then try to send it down. The lower queue is only ever
	 * detached while holding an exclusive lock on the whole driver,
	 * so we can be confident that the lower queue is still there.
	 */
	if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_stats.p.ppp_opackets++;
		if (IS_SPS_CONTROL(sps)) {
			ppa->ppa_opkt_ctl++;
		}
		ppa->ppa_stats.p.ppp_obytes += msize;
		mutex_exit(&ppa->ppa_sta_lock);
		return (ppa->ppa_lower_wq);	/* don't queue it */
	}
	return (NULL);	/* queue it for later */
}

/*
 * sppp_lwsrv()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer.
 *
 * Description:
 *    Lower write-side service procedure. No messages are ever placed on
 *    the write queue here, this just back-enables all upper write side
 *    service procedures.
 */
int
sppp_lwsrv(queue_t *q)
{
	sppa_t		*ppa;
	spppstr_t	*nextsib;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ppa = (sppa_t *)q->q_ptr;
	ASSERT(ppa != NULL);

	/* Back-enable the control stream first, then every sibling. */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	if ((nextsib = ppa->ppa_ctl) != NULL &&
	    WR(nextsib->sps_rq)->q_first != NULL)
		qenable(WR(nextsib->sps_rq));
	for (nextsib = ppa->ppa_streams; nextsib != NULL;
	    nextsib = nextsib->sps_nextsib) {
		if (WR(nextsib->sps_rq)->q_first != NULL) {
			qenable(WR(nextsib->sps_rq));
		}
	}
	rw_exit(&ppa->ppa_sib_lock);
	return (0);
}

/*
 * sppp_lrput()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Lower read-side put procedure. Messages from below get here.
 *    Data messages are handled separately to limit stack usage
 *    going into IP.
1695 */ 1696 int 1697 sppp_lrput(queue_t *q, mblk_t *mp) 1698 { 1699 sppa_t *ppa; 1700 spppstr_t *sps; 1701 1702 ppa = (sppa_t *)q->q_ptr; 1703 sps = ppa->ppa_ctl; 1704 1705 if (MTYPE(mp) != M_DATA) { 1706 sppp_recv_nondata(q, mp, sps); 1707 } else if (sps == NULL) { 1708 freemsg(mp); 1709 } else if ((q = sppp_recv(q, &mp, sps)) != NULL) { 1710 putnext(q, mp); 1711 } 1712 return (0); 1713 } 1714 1715 /* 1716 * sppp_recv_nondata() 1717 * 1718 * MT-Perimeters: 1719 * shared inner, shared outer. 1720 * 1721 * Description: 1722 * All received non-data messages come through here. 1723 */ 1724 static void 1725 sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps) 1726 { 1727 sppa_t *ppa; 1728 spppstr_t *destsps; 1729 struct iocblk *iop; 1730 1731 ppa = (sppa_t *)q->q_ptr; 1732 ctlsps = ppa->ppa_ctl; 1733 1734 switch (MTYPE(mp)) { 1735 case M_CTL: 1736 mutex_enter(&ppa->ppa_sta_lock); 1737 if (*mp->b_rptr == PPPCTL_IERROR) { 1738 ppa->ppa_stats.p.ppp_ierrors++; 1739 ppa->ppa_ierr_low++; 1740 ppa->ppa_mctlsknown++; 1741 } else if (*mp->b_rptr == PPPCTL_OERROR) { 1742 ppa->ppa_stats.p.ppp_oerrors++; 1743 ppa->ppa_oerr_low++; 1744 ppa->ppa_mctlsknown++; 1745 } else { 1746 ppa->ppa_mctlsunknown++; 1747 } 1748 mutex_exit(&ppa->ppa_sta_lock); 1749 freemsg(mp); 1750 break; 1751 case M_IOCTL: 1752 miocnak(q, mp, 0, EINVAL); 1753 break; 1754 case M_IOCACK: 1755 case M_IOCNAK: 1756 iop = (struct iocblk *)mp->b_rptr; 1757 ASSERT(iop != NULL); 1758 /* 1759 * Attempt to match up the response with the stream that the 1760 * request came from. If ioc_id doesn't match the one that we 1761 * recorded, then discard this message. 
1762 */ 1763 rw_enter(&ppa->ppa_sib_lock, RW_READER); 1764 if ((destsps = ctlsps) == NULL || 1765 destsps->sps_ioc_id != iop->ioc_id) { 1766 destsps = ppa->ppa_streams; 1767 while (destsps != NULL) { 1768 if (destsps->sps_ioc_id == iop->ioc_id) { 1769 break; /* found the upper stream */ 1770 } 1771 destsps = destsps->sps_nextsib; 1772 } 1773 } 1774 rw_exit(&ppa->ppa_sib_lock); 1775 if (destsps == NULL) { 1776 mutex_enter(&ppa->ppa_sta_lock); 1777 ppa->ppa_ioctlsfwderr++; 1778 mutex_exit(&ppa->ppa_sta_lock); 1779 freemsg(mp); 1780 break; 1781 } 1782 mutex_enter(&ppa->ppa_sta_lock); 1783 ppa->ppa_ioctlsfwdok++; 1784 mutex_exit(&ppa->ppa_sta_lock); 1785 putnext(destsps->sps_rq, mp); 1786 break; 1787 case M_HANGUP: 1788 /* 1789 * Free the original mblk_t. We don't really want to send 1790 * a M_HANGUP message upstream, so we need to translate this 1791 * message into something else. 1792 */ 1793 freemsg(mp); 1794 if (ctlsps == NULL) 1795 break; 1796 mp = create_lsmsg(PPP_LINKSTAT_HANGUP); 1797 if (mp == NULL) { 1798 mutex_enter(&ppa->ppa_sta_lock); 1799 ppa->ppa_allocbfail++; 1800 mutex_exit(&ppa->ppa_sta_lock); 1801 break; 1802 } 1803 mutex_enter(&ppa->ppa_sta_lock); 1804 ppa->ppa_lsdown++; 1805 mutex_exit(&ppa->ppa_sta_lock); 1806 putnext(ctlsps->sps_rq, mp); 1807 break; 1808 case M_FLUSH: 1809 if (*mp->b_rptr & FLUSHR) { 1810 flushq(q, FLUSHDATA); 1811 } 1812 if (*mp->b_rptr & FLUSHW) { 1813 *mp->b_rptr &= ~FLUSHR; 1814 qreply(q, mp); 1815 } else { 1816 freemsg(mp); 1817 } 1818 break; 1819 default: 1820 if (ctlsps != NULL && 1821 (queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq)) { 1822 putnext(ctlsps->sps_rq, mp); 1823 } else { 1824 mutex_enter(&ppa->ppa_sta_lock); 1825 ppa->ppa_iqdropped++; 1826 mutex_exit(&ppa->ppa_sta_lock); 1827 freemsg(mp); 1828 } 1829 break; 1830 } 1831 } 1832 1833 /* 1834 * sppp_recv() 1835 * 1836 * MT-Perimeters: 1837 * shared inner, shared outer. 1838 * 1839 * Description: 1840 * Receive function called by sppp_lrput. 
 *    Finds appropriate receive stream and does accounting.
 */
static queue_t *
sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
{
	mblk_t		*mp;
	int		len;
	sppa_t		*ppa;
	spppstr_t	*destsps;
	mblk_t		*zmp;
	uint32_t	npflagpos;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(ctlsps != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);

	len = msgdsize(mp);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ibytes += len;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it. We can't do much with such message anyway,
	 * since we can't really determine what the PPP protocol type is.
	 */
	if (len < PPP_HDRLEN) {
		/* Log, and free it */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_irunts++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	} else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
		/* Log, and accept it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_itoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	/*
	 * We need at least be able to read the PPP protocol from the header,
	 * so if the first message block is too small, then we concatenate the
	 * rest of the following blocks into one message.
	 */
	if (MBLKL(mp) < PPP_HDRLEN) {
		zmp = msgpullup(mp, PPP_HDRLEN);
		freemsg(mp);
		mp = zmp;
		if (mp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);
		}
		*mpp = mp;
	}
	/*
	 * Hold this packet in the control-queue while the matching
	 * network-layer upper stream for the PPP protocol (sap) has not
	 * yet been plumbed and configured.
	 */
	npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
	mutex_enter(&ppa->ppa_npmutex);
	if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
		/*
		 * proto is currently blocked; Hold up to 4 packets
		 * in the kernel.
		 */
		if (ppa->ppa_holdpkts[npflagpos] > 3 ||
		    putq(ctlsps->sps_rq, mp) == 0)
			freemsg(mp);
		else
			ppa->ppa_holdpkts[npflagpos]++;
		mutex_exit(&ppa->ppa_npmutex);
		return (NULL);
	}
	mutex_exit(&ppa->ppa_npmutex);
	/*
	 * Try to find a matching network-layer upper stream for the specified
	 * PPP protocol (sap), and if none is found, send this frame up the
	 * control stream.
	 */
	destsps = sppp_inpkt(q, mp, ctlsps);
	if (destsps == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_ipkt_ctl++;
		mutex_exit(&ppa->ppa_sta_lock);
		if (canputnext(ctlsps->sps_rq)) {
			if (IS_SPS_KDEBUG(ctlsps)) {
				SPDEBUG(PPP_DRV_NAME
				    "/%d: M_DATA recv (%d bytes) sps=0x%p "
				    "flags=0x%b ppa=0x%p flags=0x%b\n",
				    ctlsps->sps_mn_id, len, (void *)ctlsps,
				    ctlsps->sps_flags, SPS_FLAGS_STR,
				    (void *)ppa, ppa->ppa_flags,
				    PPA_FLAGS_STR);
			}
			return (ctlsps->sps_rq);
		} else {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			return (NULL);
		}
	}
	if (canputnext(destsps->sps_rq)) {
		if (IS_SPS_KDEBUG(destsps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
			    (void *)destsps, destsps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * If fastpath is enabled on the network-layer stream, then
		 * make sure we skip over the PPP header, otherwise, we wrap
		 * the message in a DLPI message.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
			return (destsps->sps_rq);
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp != NULL) {
				*mpp = mp;
				return (destsps->sps_rq);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				return (NULL);
			}
		}
	} else {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_iqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	}
}

/*
 * sppp_inpkt()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Find the destination upper stream for the received packet, called
 *    from sppp_recv.
 *
 * Returns:
 *    ptr to destination upper network stream, or NULL for control stream.
 */
/* ARGSUSED */
static spppstr_t *
sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
{
	spppstr_t	*destsps = NULL;
	sppa_t		*ppa;
	uint16_t	proto;
	int		is_promisc;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL);
	/*
	 * From RFC 1661 (Section 2):
	 *
	 * The Protocol field is one or two octets, and its value identifies
	 * the datagram encapsulated in the Information field of the packet.
	 * The field is transmitted and received most significant octet first.
	 *
	 * The structure of this field is consistent with the ISO 3309
	 * extension mechanism for address fields. All Protocols MUST be odd;
	 * the least significant bit of the least significant octet MUST equal
	 * "1".
	 * Also, all Protocols MUST be assigned such that the least
	 * significant bit of the most significant octet equals "0". Frames
	 * received which don't comply with these rules MUST be treated as
	 * having an unrecognized Protocol.
	 *
	 * Protocol field values in the "0***" to "3***" range identify the
	 * network-layer protocol of specific packets, and values in the
	 * "8***" to "b***" range identify packets belonging to the associated
	 * Network Control Protocols (NCPs), if any.
	 *
	 * Protocol field values in the "4***" to "7***" range are used for
	 * protocols with low volume traffic which have no associated NCP.
	 * Protocol field values in the "c***" to "f***" range identify packets
	 * as link-layer Control Protocols (such as LCP).
	 */
	proto = PPP_PROTOCOL(mp->b_rptr);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ipackets++;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * We check if this is not a network-layer protocol, and if so,
	 * then send this packet up the control stream.
	 */
	if (proto > 0x7fff) {
		goto inpkt_done;	/* send it up the control stream */
	}
	/*
	 * Try to grab the destination upper stream from the network-layer
	 * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
	 * protocol types. Otherwise, if the type is not known to the cache,
	 * or if its sap can't be matched with any of the upper streams, then
	 * send this packet up the control stream so that it can be rejected.
	 */
	if (proto == PPP_IP) {
		destsps = ppa->ppa_ip_cache;
	} else if (proto == PPP_IPV6) {
		destsps = ppa->ppa_ip6_cache;
	}
	/*
	 * Toss this one away up the control stream if there's no matching sap;
	 * this way the protocol can be rejected (destsps is NULL).
	 */

inpkt_done:
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lastrx = gethrtime();
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up for
	 * each promiscuous stream that we recognize. We skip the control
	 * stream as we obviously never allow the control stream to become
	 * promiscuous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	return (destsps);
}

/*
 * sppp_kstat_update()
 *
 * Description:
 *    Update per-ppa kstat interface statistics.
 */
static int
sppp_kstat_update(kstat_t *ksp, int rw)
{
	register sppa_t		*ppa;
	register sppp_kstats_t	*pppkp;
	register struct pppstat64 *sp;

	/* These kstats are read-only; refuse write requests. */
	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	ppa = (sppa_t *)ksp->ks_private;
	ASSERT(ppa != NULL);

	pppkp = (sppp_kstats_t *)ksp->ks_data;
	sp = &ppa->ppa_stats.p;

	/* Snapshot all counters under the statistics lock. */
	mutex_enter(&ppa->ppa_sta_lock);
	pppkp->allocbfail.value.ui32 = ppa->ppa_allocbfail;
	pppkp->mctlsfwd.value.ui32 = ppa->ppa_mctlsfwd;
	pppkp->mctlsfwderr.value.ui32 = ppa->ppa_mctlsfwderr;
	pppkp->rbytes.value.ui32 = sp->ppp_ibytes;
	pppkp->rbytes64.value.ui64 = sp->ppp_ibytes;
	pppkp->ierrors.value.ui32 = sp->ppp_ierrors;
	pppkp->ierrors_lower.value.ui32 = ppa->ppa_ierr_low;
	pppkp->ioctlsfwd.value.ui32 = ppa->ppa_ioctlsfwd;
	pppkp->ioctlsfwdok.value.ui32 = ppa->ppa_ioctlsfwdok;
	pppkp->ioctlsfwderr.value.ui32 = ppa->ppa_ioctlsfwderr;
	pppkp->ipackets.value.ui32 = sp->ppp_ipackets;
	pppkp->ipackets64.value.ui64 = sp->ppp_ipackets;
	pppkp->ipackets_ctl.value.ui32 =
	    ppa->ppa_ipkt_ctl;
	pppkp->iqdropped.value.ui32 = ppa->ppa_iqdropped;
	pppkp->irunts.value.ui32 = ppa->ppa_irunts;
	pppkp->itoolongs.value.ui32 = ppa->ppa_itoolongs;
	pppkp->lsneedup.value.ui32 = ppa->ppa_lsneedup;
	pppkp->lsdown.value.ui32 = ppa->ppa_lsdown;
	pppkp->mctlsknown.value.ui32 = ppa->ppa_mctlsknown;
	pppkp->mctlsunknown.value.ui32 = ppa->ppa_mctlsunknown;
	pppkp->obytes.value.ui32 = sp->ppp_obytes;
	pppkp->obytes64.value.ui64 = sp->ppp_obytes;
	pppkp->oerrors.value.ui32 = sp->ppp_oerrors;
	pppkp->oerrors_lower.value.ui32 = ppa->ppa_oerr_low;
	pppkp->opackets.value.ui32 = sp->ppp_opackets;
	pppkp->opackets64.value.ui64 = sp->ppp_opackets;
	pppkp->opackets_ctl.value.ui32 = ppa->ppa_opkt_ctl;
	pppkp->oqdropped.value.ui32 = ppa->ppa_oqdropped;
	pppkp->otoolongs.value.ui32 = ppa->ppa_otoolongs;
	pppkp->orunts.value.ui32 = ppa->ppa_orunts;
	mutex_exit(&ppa->ppa_sta_lock);

	return (0);
}

/*
 * Turn off proto in ppa_npflag to indicate that
 * the corresponding network protocol has been plumbed.
 * Release proto packets that were being held in the control
 * queue in anticipation of this event.
 */
static void
sppp_release_pkts(sppa_t *ppa, uint16_t proto)
{
	uint32_t	npflagpos = sppp_ppp2np(proto);
	int		count;
	mblk_t		*mp;
	uint16_t	mp_proto;
	queue_t		*q;
	spppstr_t	*destsps;

	ASSERT(ppa != NULL);

	/*
	 * NOTE(review): ppa_npflag is read here before taking ppa_npmutex;
	 * presumably this is safe because callers run from the ioctl path
	 * under the driver perimeter -- confirm against sppp_inner_ioctl.
	 */
	if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)
		return;

	mutex_enter(&ppa->ppa_npmutex);
	ppa->ppa_npflag &= ~(1 << npflagpos);
	count = ppa->ppa_holdpkts[npflagpos];
	ppa->ppa_holdpkts[npflagpos] = 0;
	mutex_exit(&ppa->ppa_npmutex);

	q = ppa->ppa_ctl->sps_rq;

	/*
	 * Drain exactly `count' held packets of this protocol from the
	 * control queue; packets of other protocols are rotated back to
	 * the tail of the queue.  The loop relies on the hold accounting
	 * above guaranteeing that `count' matching packets are queued.
	 */
	while (count > 0) {
		mp = getq(q);
		ASSERT(mp != NULL);	/* q must contain `count' pkts */

		mp_proto = PPP_PROTOCOL(mp->b_rptr);
		if (mp_proto != proto) {
			/* Not ours; put it back at the end of the queue. */
			(void) putq(q, mp);
			continue;
		}
		count--;
		destsps = NULL;
		if (mp_proto == PPP_IP) {
			destsps = ppa->ppa_ip_cache;
		} else if (mp_proto == PPP_IPV6) {
			destsps = ppa->ppa_ip6_cache;
		}
		/*
		 * NOTE(review): on non-DEBUG builds this ASSERT compiles
		 * away and destsps is dereferenced below; presumably the
		 * cache entry is guaranteed non-NULL once the protocol has
		 * been unblocked -- confirm.
		 */
		ASSERT(destsps != NULL);

		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp == NULL) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				continue;
			}
		}

		if (canputnext(destsps->sps_rq)) {
			putnext(destsps->sps_rq, mp);
		} else {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			continue;
		}
	}
}