1 /* 2 * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver 3 * 4 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 5 * Use is subject to license terms. 6 * 7 * Permission to use, copy, modify, and distribute this software and its 8 * documentation is hereby granted, provided that the above copyright 9 * notice appears in all copies. 10 * 11 * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF 12 * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 13 * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 14 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT. SUN SHALL NOT BE LIABLE FOR 15 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR 16 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES 17 * 18 * Copyright (c) 1994 The Australian National University. 19 * All rights reserved. 20 * 21 * Permission to use, copy, modify, and distribute this software and its 22 * documentation is hereby granted, provided that the above copyright 23 * notice appears in all copies. This software is provided without any 24 * warranty, express or implied. The Australian National University 25 * makes no representations about the suitability of this software for 26 * any purpose. 27 * 28 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY 29 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES 30 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF 31 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY 32 * OF SUCH DAMAGE. 33 * 34 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, 35 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 36 * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS 37 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO 38 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, 39 * OR MODIFICATIONS. 
40 * 41 * This driver is derived from the original SVR4 STREAMS PPP driver 42 * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>. 43 * 44 * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code 45 * for improved performance and scalability. 46 */ 47 48 #define RCSID "$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $" 49 50 #include <sys/types.h> 51 #include <sys/debug.h> 52 #include <sys/param.h> 53 #include <sys/stat.h> 54 #include <sys/stream.h> 55 #include <sys/stropts.h> 56 #include <sys/sysmacros.h> 57 #include <sys/errno.h> 58 #include <sys/time.h> 59 #include <sys/cmn_err.h> 60 #include <sys/kmem.h> 61 #include <sys/conf.h> 62 #include <sys/dlpi.h> 63 #include <sys/ddi.h> 64 #include <sys/kstat.h> 65 #include <sys/strsun.h> 66 #include <sys/ethernet.h> 67 #include <sys/policy.h> 68 #include <sys/zone.h> 69 #include <net/ppp_defs.h> 70 #include <net/pppio.h> 71 #include "sppp.h" 72 #include "s_common.h" 73 74 /* 75 * This is used to tag official Solaris sources. Please do not define 76 * "INTERNAL_BUILD" when building this software outside of Sun Microsystems. 77 */ 78 #ifdef INTERNAL_BUILD 79 /* MODINFO is limited to 32 characters. 
*/ 80 const char sppp_module_description[] = "PPP 4.0 mux"; 81 #else /* INTERNAL_BUILD */ 82 const char sppp_module_description[] = "ANU PPP mux"; 83 84 /* LINTED */ 85 static const char buildtime[] = "Built " __DATE__ " at " __TIME__ 86 #ifdef DEBUG 87 " DEBUG" 88 #endif 89 "\n"; 90 #endif /* INTERNAL_BUILD */ 91 92 static void sppp_inner_ioctl(queue_t *, mblk_t *); 93 static void sppp_outer_ioctl(queue_t *, mblk_t *); 94 static queue_t *sppp_send(queue_t *, mblk_t **, spppstr_t *); 95 static queue_t *sppp_recv(queue_t *, mblk_t **, spppstr_t *); 96 static void sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *); 97 static queue_t *sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *); 98 static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *); 99 static int sppp_kstat_update(kstat_t *, int); 100 static void sppp_release_pkts(sppa_t *, uint16_t); 101 102 /* 103 * sps_list contains the list of active per-stream instance state structures 104 * ordered on the minor device number (see sppp.h for details). All streams 105 * opened to this driver are threaded together in this list. 106 */ 107 static spppstr_t *sps_list = NULL; 108 /* 109 * ppa_list contains the list of active per-attachment instance state 110 * structures ordered on the ppa id number (see sppp.h for details). All of 111 * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together 112 * in this list. There is exactly one ppa structure for a given PPP interface, 113 * and multiple sps streams (upper streams) may share a ppa by performing 114 * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ). 
 */
static sppa_t *ppa_list = NULL;

/* Counter-name tables used to populate each PPA's kstats (see sppp.h). */
static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };

/*
 * map proto (which is an IANA defined ppp network protocol) to
 * a bit position indicated by NP_* in ppa_npflag
 *
 * Returns 0 for any protocol that has no corresponding NP_* flag.
 */
static uint32_t
sppp_ppp2np(uint16_t proto)
{
	switch (proto) {
	case PPP_IP:
		return (NP_IP);
	case PPP_IPV6:
		return (NP_IPV6);
	default:
		return (0);
	}
}

/*
 * sppp_open()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Common open procedure for module.  Allocates a per-stream state
 *    structure (spppstr_t), assigns it the lowest unused minor number,
 *    and links it into the global sps_list.  Only clone opens are
 *    accepted.
 */
/* ARGSUSED */
int
sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	minor_t		mn;

	ASSERT(q != NULL && devp != NULL);
	ASSERT(sflag != MODOPEN);

	if (q->q_ptr != NULL) {
		return (0);		/* already open */
	}
	if (sflag != CLONEOPEN) {
		return (OPENFAIL);
	}
	/*
	 * The sps list is sorted using the minor number as the key. The
	 * following code walks the list to find the lowest valued minor
	 * number available to be used.
	 */
	mn = 0;
	for (nextmn = &sps_list; (sps = *nextmn) != NULL;
	    nextmn = &sps->sps_nextmn) {
		if (sps->sps_mn_id != mn) {
			break;		/* found a gap in the minor numbers */
		}
		++mn;
	}
	sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
	ASSERT(sps != NULL);		/* KM_SLEEP must never return NULL */
	sps->sps_nextmn = *nextmn;	/* insert stream in global list */
	*nextmn = sps;
	sps->sps_mn_id = mn;		/* save minor id for this stream */
	sps->sps_rq = q;		/* save read queue pointer */
	sps->sps_sap = -1;		/* no sap bound to stream */
	sps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
	sps->sps_npmode = NPMODE_DROP;	/* drop all packets initially */
	sps->sps_zoneid = crgetzoneid(credp);
	q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
	/*
	 * We explicitly disable the automatic queue scheduling for the
	 * write-side to obtain complete control over queuing during transmit.
	 * Packets will be queued at the upper write queue and the service
	 * routine will not be called until it gets scheduled by having the
	 * lower write service routine call the qenable(WR(uq)) for all streams
	 * attached to the same ppa instance.
	 */
	noenable(WR(q));
	*devp = makedevice(getmajor(*devp), mn);
	qprocson(q);
	return (0);
}

/*
 * Free storage used by a PPA.  This is not called until the last PPA
 * user closes his connection or reattaches to a different PPA.
 */
static void
sppp_free_ppa(sppa_t *ppa)
{
	sppa_t	**nextppa;

	ASSERT(ppa->ppa_refcnt == 1);
	if (ppa->ppa_kstats != NULL) {
		kstat_delete(ppa->ppa_kstats);
		ppa->ppa_kstats = NULL;
	}
	mutex_destroy(&ppa->ppa_sta_lock);
	mutex_destroy(&ppa->ppa_npmutex);
	rw_destroy(&ppa->ppa_sib_lock);
	/* Unlink this ppa from the global ppa_list before freeing it. */
	nextppa = &ppa_list;
	while (*nextppa != NULL) {
		if (*nextppa == ppa) {
			*nextppa = ppa->ppa_nextppa;
			break;
		}
		nextppa = &(*nextppa)->ppa_nextppa;
	}
	kmem_free(ppa, sizeof (*ppa));
}

/*
 * Create a new PPA. Caller must be exclusive on outer perimeter.
 * Returns NULL if either the ppa structure or its kstat set cannot
 * be allocated.  The new ppa is inserted into ppa_list in ascending
 * ppa_id order.
 */
sppa_t *
sppp_create_ppa(uint32_t ppa_id, zoneid_t zoneid)
{
	sppa_t *ppa;
	sppa_t *curppa;
	sppa_t **availppa;
	char unit[32];		/* Unit name */
	const char **cpp;
	kstat_t *ksp;
	kstat_named_t *knt;

	/*
	 * NOTE: unit *must* be named for the driver
	 * name plus the ppa number so that netstat
	 * can find the statistics.
	 */
	(void) sprintf(unit, "%s" "%d", PPP_DRV_NAME, ppa_id);
	/*
	 * Make sure we can allocate a buffer to
	 * contain the ppa to be sent upstream, as
	 * well as the actual ppa structure and its
	 * associated kstat structure.
	 */
	ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
	    KM_NOSLEEP);
	ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
	    sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);

	if (ppa == NULL || ksp == NULL) {
		/* Either allocation failed; release whichever succeeded. */
		if (ppa != NULL) {
			kmem_free(ppa, sizeof (sppa_t));
		}
		if (ksp != NULL) {
			kstat_delete(ksp);
		}
		return (NULL);
	}
	ppa->ppa_kstats = ksp;		/* chain kstat structure */
	ppa->ppa_ppa_id = ppa_id;	/* record ppa id */
	ppa->ppa_zoneid = zoneid;	/* zone that owns this PPA */
	ppa->ppa_mtu = PPP_MAXMTU;	/* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
	ppa->ppa_mru = PPP_MAXMRU;	/* 65000 */

	mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);

	/*
	 * Prepare and install kstat counters.  Note that for netstat
	 * -i to work, there needs to be "ipackets", "opackets",
	 * "ierrors", and "oerrors" kstat named variables.
	 */
	knt = (kstat_named_t *)ksp->ks_data;
	for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
		knt++;
	}
	for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
	    cpp++) {
		kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
		knt++;
	}
	ksp->ks_update = sppp_kstat_update;
	ksp->ks_private = (void *)ppa;
	kstat_install(ksp);

	/* link to the next ppa and insert into global list */
	availppa = &ppa_list;
	while ((curppa = *availppa) != NULL) {
		if (ppa_id < curppa->ppa_ppa_id)
			break;
		availppa = &curppa->ppa_nextppa;
	}
	ppa->ppa_nextppa = *availppa;
	*availppa = ppa;
	return (ppa);
}

/*
 * sppp_close()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer.
 *
 * Description:
 *    Common close procedure for module.
 */
int
sppp_close(queue_t *q)
{
	spppstr_t	*sps;
	spppstr_t	**nextmn;
	spppstr_t	*sib;
	sppa_t		*ppa;
	mblk_t		*mp;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	qprocsoff(q);

	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		/* Never attached to a ppa; just unlink from sps_list. */
		ASSERT(!IS_SPS_CONTROL(sps));
		goto close_unattached;
	}
	if (IS_SPS_CONTROL(sps)) {
		uint32_t	cnt = 0;

		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl == sps);
		ppa->ppa_ctl = NULL;
		/*
		 * STREAMS framework always issues I_UNLINK prior to close,
		 * since we only allow I_LINK under the control stream.
		 * A given ppa structure has at most one lower stream pointed
		 * by the ppa_lower_wq field, because we only allow a single
		 * linkage (I_LINK) to be done on the control stream.
		 */
		ASSERT(ppa->ppa_lower_wq == NULL);
		/*
		 * Walk through all of sibling streams attached to this ppa,
		 * and remove all references to this ppa. We have exclusive
		 * access for the entire driver here, so there's no need
		 * to hold ppa_sib_lock.
		 */
		cnt++;			/* count the control stream itself */
		sib = ppa->ppa_streams;
		while (sib != NULL) {
			ASSERT(ppa == sib->sps_ppa);
			sib->sps_npmode = NPMODE_DROP;
			sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
			/*
			 * There should be a preallocated hangup
			 * message here. Fetch it and send it up to
			 * the stream head. This will cause IP to
			 * mark the interface as "down."
			 */
			if ((mp = sib->sps_hangup) != NULL) {
				sib->sps_hangup = NULL;
				/*
				 * M_HANGUP works with IP, but snoop
				 * is lame and requires M_ERROR. Send
				 * up a clean error code instead.
				 *
				 * XXX if snoop is fixed, fix this, too.
				 */
				MTYPE(mp) = M_ERROR;
				*mp->b_wptr++ = ENXIO;
				putnext(sib->sps_rq, mp);
			}
			qenable(WR(sib->sps_rq));
			cnt++;
			sib = sib->sps_nextsib;
		}
		ASSERT(ppa->ppa_refcnt == cnt);
	} else {
		ASSERT(ppa->ppa_streams != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		mp = NULL;
		if (sps->sps_sap == PPP_IP) {
			ppa->ppa_ip_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
		} else if (sps->sps_sap == PPP_IPV6) {
			ppa->ppa_ip6_cache = NULL;
			mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
		}
		/* Tell the daemon the bad news. */
		if (mp != NULL && ppa->ppa_ctl != NULL &&
		    (sps->sps_npmode == NPMODE_PASS ||
		    sps->sps_npmode == NPMODE_QUEUE)) {
			putnext(ppa->ppa_ctl->sps_rq, mp);
		} else {
			freemsg(mp);
		}
		/*
		 * Walk through all of sibling streams attached to the
		 * same ppa, and remove this stream from the sibling
		 * streams list. We have exclusive access for the
		 * entire driver here, so there's no need to hold
		 * ppa_sib_lock.
		 */
		sib = ppa->ppa_streams;
		if (sib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (sib->sps_nextsib != NULL) {
				if (sib->sps_nextsib == sps) {
					sib->sps_nextsib = sps->sps_nextsib;
					break;
				}
				sib = sib->sps_nextsib;
			}
		}
		sps->sps_nextsib = NULL;
		freemsg(sps->sps_hangup);
		sps->sps_hangup = NULL;
		/*
		 * Check if this is a promiscuous stream. If the SPS_PROMISC
		 * bit is still set, it means that the stream is closed without
		 * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
		 * In this case, we simply decrement the promiscuous counter,
		 * and it's safe to do it without holding ppa_sib_lock since
		 * we're exclusive (inner and outer) at this point.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
	}
	/* If we're the only one left, then delete now. */
	if (ppa->ppa_refcnt <= 1)
		sppp_free_ppa(ppa);
	else
		ppa->ppa_refcnt--;
close_unattached:
	q->q_ptr = WR(q)->q_ptr = NULL;
	for (nextmn = &sps_list; *nextmn != NULL;
	    nextmn = &(*nextmn)->sps_nextmn) {
		if (*nextmn == sps) {
			*nextmn = sps->sps_nextmn;
			break;
		}
	}
	kmem_free(sps, sizeof (spppstr_t));
	return (0);
}

/*
 * sppp_ioctl()
 *
 * Description:
 *    Handles the ioctls that need extra automatic variables
 *    (PPPIO_NPMODE, PPPIO_GIDLE, PPPIO_GTYPE, PPPIO_GETSTAT64 and
 *    PPPIO_GETCSTAT); called from sppp_uwput.  The message is either
 *    acked/naked here or forwarded down ppa_lower_wq, in which case
 *    the reply comes back later as an M_IOC{ACK,NAK}.
 */
static void
sppp_ioctl(struct queue *q, mblk_t *mp)
{
	spppstr_t	*sps;
	spppstr_t	*nextsib;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	enum NPmode	npmode;
	struct ppp_idle	*pip;
	struct ppp_stats64 *psp;
	struct ppp_comp_stats *pcsp;
	hrtime_t	hrtime;
	int		sap;
	int		count = 0;
	int		error = EINVAL;

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case PPPIO_NPMODE:
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
		    (mp->b_cont == NULL)) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
		npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
		/*
		 * Walk the sibling streams which belong to the same
		 * ppa, and try to find a stream with matching sap
		 * number.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (nextsib->sps_sap == sap) {
				break;	/* found it */
			}
		}
		if (nextsib == NULL) {
			rw_exit(&ppa->ppa_sib_lock);
			break;		/* return EINVAL */
		} else {
			nextsib->sps_npmode = npmode;
			/* Kick the queue if leaving QUEUE mode with data. */
			if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
			    (WR(nextsib->sps_rq)->q_first != NULL)) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;	/* return success */
		break;
	case PPPIO_GIDLE:
		if (ppa == NULL) {
			ASSERT(!IS_SPS_CONTROL(sps));
			error = ENOLINK;
			break;
		} else if (!IS_PPA_TIMESTAMP(ppa)) {
			break;		/* return EINVAL */
		}
		if ((nmp = allocb(sizeof (struct ppp_idle),
		    BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pip = (struct ppp_idle *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_idle);
		/*
		 * Get current timestamp and subtract the tx and rx
		 * timestamps to get the actual idle time to be
		 * returned.
		 */
		hrtime = gethrtime();
		/* 1000000000ul: nanoseconds per second */
		pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
		pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
		count = msgsize(nmp);
		error = 0;
		break;		/* return success (error is 0) */
	case PPPIO_GTYPE:
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		/*
		 * Let the requestor know that we are the PPP
		 * multiplexer (PPPTYP_MUX).
		 */
		*(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
		nmp->b_wptr += sizeof (uint32_t);
		count = msgsize(nmp);
		error = 0;	/* return success */
		break;
	case PPPIO_GETSTAT64:
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * We match sps_ioc_id on the M_IOC{ACK,NAK},
			 * so if the response hasn't come back yet,
			 * new ioctls must be queued instead.
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (*psp), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		psp = (struct ppp_stats64 *)nmp->b_wptr;
		/*
		 * Copy the contents of ppp_stats64 structure for this
		 * ppa and return them to the caller.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
		mutex_exit(&ppa->ppa_sta_lock);
		nmp->b_wptr += sizeof (*psp);
		count = sizeof (*psp);
		error = 0;	/* return success */
		break;
	case PPPIO_GETCSTAT:
		if (ppa == NULL) {
			break;		/* return EINVAL */
		} else if ((ppa->ppa_lower_wq != NULL) &&
		    !IS_PPA_LASTMOD(ppa)) {
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID - this will be
				 * used to check the ACK or NAK responses
				 * coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
		if (nmp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;
		pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
		nmp->b_wptr += sizeof (struct ppp_comp_stats);
		/* No compression in this mux; report all-zero counters. */
		bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
		count = msgsize(nmp);
		error = 0;	/* return success */
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}

/*
 * sppp_uwput()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Upper write-side put procedure. Messages from above arrive here.
 */
void
sppp_uwput(queue_t *q, mblk_t *mp)
{
	queue_t		*nextq;
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;

	switch (MTYPE(mp)) {
	case M_PCPROTO:
	case M_PROTO:
		if (IS_SPS_CONTROL(sps)) {
			ASSERT(ppa != NULL);
			/*
			 * Intentionally change this to a high priority
			 * message so it doesn't get queued up.
			 * M_PROTO is
			 * specifically used for signalling between pppd and its
			 * kernel-level component(s), such as ppptun, so we
			 * make sure that it doesn't get queued up behind
			 * data messages.
			 */
			MTYPE(mp) = M_PCPROTO;
			if ((ppa->ppa_lower_wq != NULL) &&
			    canputnext(ppa->ppa_lower_wq)) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwd++;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			} else {
				/* No lower stream or it's flow-controlled. */
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_mctlsfwderr++;
				mutex_exit(&ppa->ppa_sta_lock);
				freemsg(mp);
			}
		} else {
			(void) sppp_mproto(q, mp, sps);
			return;
		}
		break;
	case M_DATA:
		if ((nextq = sppp_send(q, &mp, sps)) != NULL)
			putnext(nextq, mp);
		break;
	case M_IOCTL:
		error = EINVAL;
		iop = (struct iocblk *)mp->b_rptr;
		switch (iop->ioc_cmd) {
		case DLIOCRAW:
		case DL_IOC_HDR_INFO:
		case PPPIO_ATTACH:
		case PPPIO_DEBUG:
		case PPPIO_DETACH:
		case PPPIO_LASTMOD:
		case PPPIO_MRU:
		case PPPIO_MTU:
		case PPPIO_USETIMESTAMP:
		case PPPIO_BLOCKNP:
		case PPPIO_UNBLOCKNP:
			qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
			return;
		case I_LINK:
		case I_UNLINK:
		case PPPIO_NEWPPA:
			qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
			return;
		case PPPIO_NPMODE:
		case PPPIO_GIDLE:
		case PPPIO_GTYPE:
		case PPPIO_GETSTAT64:
		case PPPIO_GETCSTAT:
			/*
			 * These require additional auto variables to
			 * handle, so (for optimization reasons)
			 * they're moved off to a separate function.
			 */
			sppp_ioctl(q, mp);
			return;
		case PPPIO_GETSTAT:
			break;			/* 32 bit interface gone */
		default:
			if (iop->ioc_cr == NULL ||
			    secpolicy_ppp_config(iop->ioc_cr) != 0) {
				error = EPERM;
				break;
			} else if ((ppa == NULL) ||
			    (ppa->ppa_lower_wq == NULL)) {
				break;		/* return EINVAL */
			}
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				mutex_exit(&ppa->ppa_sta_lock);
				if (!putq(q, mp)) {
					error = EAGAIN;
					break;
				}
				return;
			} else {
				ppa->ppa_ioctlsfwd++;
				/*
				 * Record the ioctl CMD & ID -
				 * this will be used to check the
				 * ACK or NAK responses coming from below.
				 */
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
			}
			putnext(ppa->ppa_lower_wq, mp);
			return;	/* don't ack or nak the request */
		}
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
		break;
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHDATA);
		}
		if (*mp->b_rptr & FLUSHR) {
			*mp->b_rptr &= ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		break;
	default:
		freemsg(mp);
		break;
	}
}

/*
 * sppp_uwsrv()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer.
 *
 * Description:
 *    Upper write-side service procedure. Note that this procedure does
 *    not get called when a message is placed on our write-side queue, since
 *    automatic queue scheduling has been turned off by noenable() when
 *    the queue was opened. We do this on purpose, as we explicitly control
 *    the write-side queue. Therefore, this procedure gets called when
 *    the lower write service procedure qenable()s the upper write stream
 *    queue.
 */
void
sppp_uwsrv(queue_t *q)
{
	spppstr_t	*sps;
	sppa_t		*ppa;
	mblk_t		*mp;
	queue_t		*nextq;
	struct iocblk	*iop;

	ASSERT(q != NULL && q->q_ptr != NULL);
	sps = (spppstr_t *)q->q_ptr;

	while ((mp = getq(q)) != NULL) {
		if (MTYPE(mp) == M_IOCTL) {
			ppa = sps->sps_ppa;
			if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
				miocnak(q, mp, 0, EINVAL);
				continue;
			}

			iop = (struct iocblk *)mp->b_rptr;
			mutex_enter(&ppa->ppa_sta_lock);
			/*
			 * See comments in PPPIO_GETSTAT64 case
			 * in sppp_ioctl().
			 */
			if (IS_SPS_IOCQ(sps)) {
				/* Previous ioctl still pending; requeue. */
				mutex_exit(&ppa->ppa_sta_lock);
				if (putbq(q, mp) == 0)
					miocnak(q, mp, 0, EAGAIN);
				break;
			} else {
				ppa->ppa_ioctlsfwd++;
				sps->sps_ioc_id = iop->ioc_id;
				sps->sps_flags |= SPS_IOCQ;
				mutex_exit(&ppa->ppa_sta_lock);
				putnext(ppa->ppa_lower_wq, mp);
			}
		} else if ((nextq =
		    sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
			if (mp != NULL) {
				/* Flow-controlled; put it back and stop. */
				if (putbq(q, mp) == 0)
					freemsg(mp);
				break;
			}
		} else {
			putnext(nextq, mp);
		}
	}
}

/*
 * Detach a stream from its ppa: unlink it from the ppa's sibling list
 * and drop the ppa reference count, freeing the ppa entirely when this
 * was the last reference.  Caller must be able to take ppa_sib_lock.
 */
void
sppp_remove_ppa(spppstr_t *sps)
{
	spppstr_t *nextsib;
	sppa_t *ppa = sps->sps_ppa;

	rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
	if (ppa->ppa_refcnt <= 1) {
		rw_exit(&ppa->ppa_sib_lock);
		sppp_free_ppa(ppa);
	} else {
		nextsib = ppa->ppa_streams;
		if (nextsib == sps) {
			ppa->ppa_streams = sps->sps_nextsib;
		} else {
			while (nextsib->sps_nextsib != NULL) {
				if (nextsib->sps_nextsib == sps) {
					nextsib->sps_nextsib =
					    sps->sps_nextsib;
					break;
				}
				nextsib = nextsib->sps_nextsib;
			}
		}
		ppa->ppa_refcnt--;
		/*
		 * And if this stream was marked as promiscuous
		 * (SPS_PROMISC), then we need to update the
		 * promiscuous streams count. This should only happen
		 * when DL_DETACH_REQ is issued prior to marking the
		 * stream as non-promiscuous, through
		 * DL_PROMISCOFF_REQ request.
		 */
		if (IS_SPS_PROMISC(sps)) {
			ASSERT(ppa->ppa_promicnt > 0);
			ppa->ppa_promicnt--;
		}
		rw_exit(&ppa->ppa_sib_lock);
	}
	sps->sps_nextsib = NULL;
	sps->sps_ppa = NULL;
	freemsg(sps->sps_hangup);
	sps->sps_hangup = NULL;
}

/*
 * Look up a ppa by id in the global ppa_list; returns NULL if no ppa
 * with that id exists.
 */
sppa_t *
sppp_find_ppa(uint32_t ppa_id)
{
	sppa_t *ppa;

	for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
		if (ppa->ppa_ppa_id == ppa_id) {
			break;	/* found the ppa */
		}
	}
	return (ppa);
}

/*
 * sppp_inner_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer
 *
 * Description:
 *    Called by sppp_uwput as a result of receiving ioctls which require
 *    an exclusive access at the inner perimeter.
 */
static void
sppp_inner_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	int		error = EINVAL;
	int		count = 0;
	int		dbgcmd;
	int		mru, mtu;
	uint32_t	ppa_id;
	hrtime_t	hrtime;
	uint16_t	proto;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case DLIOCRAW:
		if (IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		sps->sps_flags |= SPS_RAWDATA;
		error = 0;		/* return success */
		break;
	case DL_IOC_HDR_INFO:
		if (IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if ((mp->b_cont == NULL) ||
		    *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
		    (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
		    SPPP_ADDRL))) {
			error = EPROTO;
			break;
		} else if (ppa == NULL) {
			error = ENOLINK;
			break;
		}
		if ((nmp =
allocb(PPP_HDRLEN, BPRI_MED)) == NULL) { 1014 mutex_enter(&ppa->ppa_sta_lock); 1015 ppa->ppa_allocbfail++; 1016 mutex_exit(&ppa->ppa_sta_lock); 1017 error = ENOMEM; 1018 break; 1019 } 1020 *(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS; 1021 *(uchar_t *)nmp->b_wptr++ = PPP_UI; 1022 *(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8; 1023 *(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff; 1024 ASSERT(MBLKL(nmp) == PPP_HDRLEN); 1025 1026 linkb(mp, nmp); 1027 sps->sps_flags |= SPS_FASTPATH; 1028 error = 0; /* return success */ 1029 count = msgsize(nmp); 1030 break; 1031 case PPPIO_ATTACH: 1032 if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) || 1033 (sps->sps_dlstate != DL_UNATTACHED) || 1034 (iop->ioc_count != sizeof (uint32_t))) { 1035 break; /* return EINVAL */ 1036 } else if (mp->b_cont == NULL) { 1037 error = EPROTO; 1038 break; 1039 } 1040 ASSERT(mp->b_cont->b_rptr != NULL); 1041 /* If there's something here, it's detached. */ 1042 if (ppa != NULL) { 1043 sppp_remove_ppa(sps); 1044 } 1045 ppa_id = *(uint32_t *)mp->b_cont->b_rptr; 1046 ppa = sppp_find_ppa(ppa_id); 1047 /* 1048 * If we can't find it, then it's either because the requestor 1049 * has supplied a wrong ppa_id to be attached to, or because 1050 * the control stream for the specified ppa_id has been closed 1051 * before we get here. 1052 */ 1053 if (ppa == NULL) { 1054 error = ENOENT; 1055 break; 1056 } 1057 if (iop->ioc_cr == NULL || 1058 ppa->ppa_zoneid != crgetzoneid(iop->ioc_cr)) { 1059 error = EPERM; 1060 break; 1061 } 1062 /* 1063 * Preallocate the hangup message so that we're always 1064 * able to send this upstream in the event of a 1065 * catastrophic failure. 1066 */ 1067 if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) { 1068 error = ENOSR; 1069 break; 1070 } 1071 /* 1072 * There are two ways to attach a stream to a ppa: one is 1073 * through DLPI (DL_ATTACH_REQ) and the other is through 1074 * PPPIO_ATTACH. 
This is why we need to distinguish whether or 1075 * not a stream was allocated via PPPIO_ATTACH, so that we can 1076 * properly detach it when we receive PPPIO_DETACH ioctl 1077 * request. 1078 */ 1079 sps->sps_flags |= SPS_PIOATTACH; 1080 sps->sps_ppa = ppa; 1081 /* 1082 * Add this stream to the head of the list of sibling streams 1083 * which belong to the same ppa as specified. 1084 */ 1085 rw_enter(&ppa->ppa_sib_lock, RW_WRITER); 1086 ppa->ppa_refcnt++; 1087 sps->sps_nextsib = ppa->ppa_streams; 1088 ppa->ppa_streams = sps; 1089 rw_exit(&ppa->ppa_sib_lock); 1090 error = 0; /* return success */ 1091 break; 1092 case PPPIO_BLOCKNP: 1093 case PPPIO_UNBLOCKNP: 1094 if (iop->ioc_cr == NULL || 1095 secpolicy_ppp_config(iop->ioc_cr) != 0) { 1096 error = EPERM; 1097 break; 1098 } 1099 error = miocpullup(mp, sizeof (uint16_t)); 1100 if (error != 0) 1101 break; 1102 ASSERT(mp->b_cont->b_rptr != NULL); 1103 proto = *(uint16_t *)mp->b_cont->b_rptr; 1104 if (iop->ioc_cmd == PPPIO_BLOCKNP) { 1105 uint32_t npflagpos = sppp_ppp2np(proto); 1106 /* 1107 * Mark proto as blocked in ppa_npflag until the 1108 * corresponding queues for proto have been plumbed. 1109 */ 1110 if (npflagpos != 0) { 1111 mutex_enter(&ppa->ppa_npmutex); 1112 ppa->ppa_npflag |= (1 << npflagpos); 1113 mutex_exit(&ppa->ppa_npmutex); 1114 } else { 1115 error = EINVAL; 1116 } 1117 } else { 1118 /* 1119 * reset ppa_npflag and release proto 1120 * packets that were being held in control queue. 
1121 */ 1122 sppp_release_pkts(ppa, proto); 1123 } 1124 break; 1125 case PPPIO_DEBUG: 1126 if (iop->ioc_cr == NULL || 1127 secpolicy_ppp_config(iop->ioc_cr) != 0) { 1128 error = EPERM; 1129 break; 1130 } else if (iop->ioc_count != sizeof (uint32_t)) { 1131 break; /* return EINVAL */ 1132 } else if (mp->b_cont == NULL) { 1133 error = EPROTO; 1134 break; 1135 } 1136 ASSERT(mp->b_cont->b_rptr != NULL); 1137 dbgcmd = *(uint32_t *)mp->b_cont->b_rptr; 1138 /* 1139 * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication 1140 * that SPS_KDEBUG needs to be enabled for this upper stream. 1141 */ 1142 if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) { 1143 sps->sps_flags |= SPS_KDEBUG; 1144 error = 0; /* return success */ 1145 break; 1146 } 1147 /* 1148 * Otherwise, for any other values, we send them down only if 1149 * there is an attachment and if the attachment has something 1150 * linked underneath it. 1151 */ 1152 if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) { 1153 error = ENOLINK; 1154 break; 1155 } 1156 mutex_enter(&ppa->ppa_sta_lock); 1157 /* 1158 * See comments in PPPIO_GETSTAT64 case 1159 * in sppp_ioctl(). 1160 */ 1161 if (IS_SPS_IOCQ(sps)) { 1162 mutex_exit(&ppa->ppa_sta_lock); 1163 if (!putq(q, mp)) { 1164 error = EAGAIN; 1165 break; 1166 } 1167 return; 1168 } else { 1169 ppa->ppa_ioctlsfwd++; 1170 /* 1171 * Record the ioctl CMD & ID - 1172 * this will be used to check the 1173 * ACK or NAK responses coming from below. 1174 */ 1175 sps->sps_ioc_id = iop->ioc_id; 1176 sps->sps_flags |= SPS_IOCQ; 1177 mutex_exit(&ppa->ppa_sta_lock); 1178 } 1179 putnext(ppa->ppa_lower_wq, mp); 1180 return; /* don't ack or nak the request */ 1181 case PPPIO_DETACH: 1182 if (!IS_SPS_PIOATTACH(sps)) { 1183 break; /* return EINVAL */ 1184 } 1185 /* 1186 * The SPS_PIOATTACH flag set on the stream tells us that 1187 * the ppa field is still valid. 
In the event that the control 1188 * stream be closed prior to this stream's detachment, the 1189 * SPS_PIOATTACH flag would have been cleared from this stream 1190 * during close; in that case we won't get here. 1191 */ 1192 ASSERT(ppa != NULL); 1193 ASSERT(ppa->ppa_ctl != sps); 1194 ASSERT(sps->sps_dlstate == DL_UNATTACHED); 1195 1196 /* 1197 * We don't actually detach anything until the stream is 1198 * closed or reattached. 1199 */ 1200 1201 sps->sps_flags &= ~SPS_PIOATTACH; 1202 error = 0; /* return success */ 1203 break; 1204 case PPPIO_LASTMOD: 1205 if (!IS_SPS_CONTROL(sps)) { 1206 break; /* return EINVAL */ 1207 } 1208 ASSERT(ppa != NULL); 1209 ppa->ppa_flags |= PPA_LASTMOD; 1210 error = 0; /* return success */ 1211 break; 1212 case PPPIO_MRU: 1213 if (!IS_SPS_CONTROL(sps) || 1214 (iop->ioc_count != sizeof (uint32_t))) { 1215 break; /* return EINVAL */ 1216 } else if (mp->b_cont == NULL) { 1217 error = EPROTO; 1218 break; 1219 } 1220 ASSERT(ppa != NULL); 1221 ASSERT(mp->b_cont->b_rptr != NULL); 1222 mru = *(uint32_t *)mp->b_cont->b_rptr; 1223 if ((mru <= 0) || (mru > PPP_MAXMRU)) { 1224 error = EPROTO; 1225 break; 1226 } 1227 if (mru < PPP_MRU) { 1228 mru = PPP_MRU; 1229 } 1230 ppa->ppa_mru = (uint16_t)mru; 1231 /* 1232 * If there's something beneath this driver for the ppa, then 1233 * inform it (or them) of the MRU size. Only do this is we 1234 * are not the last PPP module on the stream. 
1235 */ 1236 if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) { 1237 (void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU, 1238 mru); 1239 } 1240 error = 0; /* return success */ 1241 break; 1242 case PPPIO_MTU: 1243 if (!IS_SPS_CONTROL(sps) || 1244 (iop->ioc_count != sizeof (uint32_t))) { 1245 break; /* return EINVAL */ 1246 } else if (mp->b_cont == NULL) { 1247 error = EPROTO; 1248 break; 1249 } 1250 ASSERT(ppa != NULL); 1251 ASSERT(mp->b_cont->b_rptr != NULL); 1252 mtu = *(uint32_t *)mp->b_cont->b_rptr; 1253 if ((mtu <= 0) || (mtu > PPP_MAXMTU)) { 1254 error = EPROTO; 1255 break; 1256 } 1257 ppa->ppa_mtu = (uint16_t)mtu; 1258 /* 1259 * If there's something beneath this driver for the ppa, then 1260 * inform it (or them) of the MTU size. Only do this if we 1261 * are not the last PPP module on the stream. 1262 */ 1263 if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) { 1264 (void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU, 1265 mtu); 1266 } 1267 error = 0; /* return success */ 1268 break; 1269 case PPPIO_USETIMESTAMP: 1270 if (!IS_SPS_CONTROL(sps)) { 1271 break; /* return EINVAL */ 1272 } 1273 if (!IS_PPA_TIMESTAMP(ppa)) { 1274 hrtime = gethrtime(); 1275 ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime; 1276 ppa->ppa_flags |= PPA_TIMESTAMP; 1277 } 1278 error = 0; 1279 break; 1280 } 1281 1282 if (error == 0) { 1283 /* Success; tell the user */ 1284 miocack(q, mp, count, 0); 1285 } else { 1286 /* Failure; send error back upstream */ 1287 miocnak(q, mp, 0, error); 1288 } 1289 } 1290 1291 /* 1292 * sppp_outer_ioctl() 1293 * 1294 * MT-Perimeters: 1295 * exclusive inner, exclusive outer 1296 * 1297 * Description: 1298 * Called by sppp_uwput as a result of receiving ioctls which require 1299 * an exclusive access at the outer perimeter. 
 */
static void
sppp_outer_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps = q->q_ptr;
	spppstr_t	*nextsib;
	queue_t		*lwq;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error = EINVAL;	/* default nak reason */
	int		count = 0;	/* bytes returned in the ack */
	uint32_t	ppa_id;
	mblk_t		*nmp;
	zoneid_t	zoneid;

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case I_LINK:
		/* Only the control stream may link a lower stream. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (ppa->ppa_lower_wq != NULL) {
			/* Already have a lower stream linked underneath. */
			error = EEXIST;
			break;
		}
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);

		lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
		ASSERT(lwq != NULL);

		ppa->ppa_lower_wq = lwq;
		lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
		/*
		 * Unblock upper network streams which now feed this lower
		 * stream. We don't need to hold ppa_sib_lock here, since we
		 * are writer at the outer perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			nextsib->sps_npmode = NPMODE_PASS;
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}

		/*
		 * Also unblock (run once) our lower read-side queue.  This is
		 * where packets received while doing the I_LINK may be
		 * languishing; see sppp_lrsrv.
		 */
		qenable(RD(lwq));

		/*
		 * Send useful information down to the modules which are now
		 * linked below this driver (for this particular ppa). Only
		 * do this if we are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa)) {
			(void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
			    ppa->ppa_ppa_id);
			(void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
			(void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
		}

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
			    "flags=0x%b\n", sps->sps_mn_id,
			    (void *)ppa->ppa_lower_wq, (void *)sps,
			    sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		error = 0;		/* return success */
		break;
	case I_UNLINK:
		ASSERT(IS_SPS_CONTROL(sps));
		ASSERT(ppa != NULL);
		lwq = ppa->ppa_lower_wq;
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
		ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
			    (void *)lwq, (void *)sps, sps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * While accessing the outer perimeter exclusively, we
		 * disassociate our ppa's lower_wq from the lower stream linked
		 * beneath us, and we also disassociate our control stream from
		 * the q_ptr of the lower stream.
		 */
		lwq->q_ptr = RD(lwq)->q_ptr = NULL;
		ppa->ppa_lower_wq = NULL;
		/*
		 * Unblock streams which now feed back up the control stream,
		 * and acknowledge the request.  We don't need to hold
		 * ppa_sib_lock here, since we are writer at the outer
		 * perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		error = 0;		/* return success */
		break;
	case PPPIO_NEWPPA:
		/*
		 * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
		 * on a stream which DLPI is used (since certain DLPI messages
		 * will cause state transition reflected in sps_dlstate,
		 * changing it from its default DL_UNATTACHED value). In other
		 * words, we won't allow a network/snoop stream to become
		 * a control stream.
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		} else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
			break;		/* return EINVAL */
		}
		/* Get requested unit number (if any) */
		if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
			ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		else
			ppa_id = 0;
		/* Get mblk to use for response message */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;		/* chain our response mblk */
		/*
		 * Walk the global ppa list and determine the lowest
		 * available ppa_id number to be used.  ppa_id == (uint32_t)-2
		 * requests reuse of an existing zone-matching ppa whose
		 * control stream has gone away.
		 */
		if (ppa_id == (uint32_t)-1)
			ppa_id = 0;
		zoneid = crgetzoneid(iop->ioc_cr);
		for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
			if (ppa_id == (uint32_t)-2) {
				if (ppa->ppa_ctl == NULL &&
				    ppa->ppa_zoneid == zoneid)
					break;
			} else {
				if (ppa_id < ppa->ppa_ppa_id)
					break;
				if (ppa_id == ppa->ppa_ppa_id)
					++ppa_id;
			}
		}
		if (ppa_id == (uint32_t)-2) {
			if (ppa == NULL) {
				error = ENXIO;
				break;
			}
			/* Clear timestamp and lastmod flags */
			ppa->ppa_flags = 0;
		} else {
			ppa = sppp_create_ppa(ppa_id, zoneid);
			if (ppa == NULL) {
				error = ENOMEM;
				break;
			}
		}

		sps->sps_ppa = ppa;		/* chain the ppa structure */
		sps->sps_npmode = NPMODE_PASS;	/* network packets may travel */
		sps->sps_flags |= SPS_CONTROL;	/* this is the control stream */

		ppa->ppa_refcnt++;		/* new PPA reference */
		ppa->ppa_ctl = sps;		/* back ptr to upper stream */
		/*
		 * Return the newly created ppa_id to the requestor and
		 * acknowledge the request.
		 */
		*(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
		nmp->b_wptr += sizeof (uint32_t);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
			    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		count = msgsize(nmp);
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}

/*
 * sppp_send()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Called by sppp_uwput to handle M_DATA message type.
 *    Returns
 *    queue_t for putnext, or NULL to mean that the packet was
 *    handled internally.
 */
static queue_t *
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
{
	mblk_t	*mp;
	sppa_t	*ppa;
	int	is_promisc;
	int	msize;
	int	error = 0;
	queue_t	*nextq;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ASSERT(q->q_ptr == sps);
	/*
	 * We only let M_DATA through if the sender is either the control
	 * stream (for PPP control packets) or one of the network streams
	 * (for IP packets) in IP fastpath mode. If this stream is not attached
	 * to any ppas, then discard data coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		error = ENOLINK;
	} else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
		error = EPROTO;
	}
	if (error != 0) {
		merror(q, mp, error);
		return (NULL);
	}
	msize = msgdsize(mp);
	if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	} else if (msize < PPP_HDRLEN) {
		/*
		 * Log, and send it anyway. We log it because we get things
		 * in M_DATA form here, which tells us that the sender is
		 * either IP in fastpath transmission mode, or pppd. In both
		 * cases, they are currently expected to send the 4-bytes
		 * PPP header in front of any possible payloads.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_orunts++;
		mutex_exit(&ppa->ppa_sta_lock);
	}

	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize. Make sure that
	 * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
	 * the control stream as we obviously never allow the control stream
	 * to become promiscous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = sps->sps_ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * If there's already a message in the write-side service queue,
	 * then queue this message there as well, otherwise, try to send
	 * it down to the module immediately below us.
	 */
	if (q->q_first != NULL ||
	    (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
		mp = *mpp;
		if (mp != NULL && putq(q, mp) == 0) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_oqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		return (NULL);
	}
	return (nextq);
}

/*
 * sppp_outpkt()
 *
 * MT-Perimeters:
 *    shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
 *    exclusive inner, shared outer (if called from sppp_wsrv).
 *
 * Description:
 *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
 *    or 2) sppp_uwsrv when processing the upper write-side service queue.
 *    For both cases, it prepares to send the data to the module below
 *    this driver if there is a lower stream linked underneath. If none, then
 *    the data will be sent upstream via the control channel to pppd.
 *
 * Returns:
 *	Non-NULL queue_t if message should be sent now, otherwise
 *	if *mpp == NULL, then message was freed, otherwise put *mpp
 *	(back) on the queue.  (Does not do putq/putbq, since it's
 *	called both from srv and put procedures.)
 */
static queue_t *
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
{
	mblk_t		*mp;
	sppa_t		*ppa;
	enum NPmode	npmode;
	mblk_t		*mpnew;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);

	ppa = sps->sps_ppa;
	npmode = sps->sps_npmode;

	if (npmode == NPMODE_QUEUE) {
		ASSERT(!IS_SPS_CONTROL(sps));
		return (NULL);	/* queue it for later */
	} else if (ppa == NULL || ppa->ppa_ctl == NULL ||
	    npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
		/*
		 * This can not be the control stream, as it must always have
		 * a valid ppa, and its npmode must always be NPMODE_PASS.
		 */
		ASSERT(!IS_SPS_CONTROL(sps));
		if (npmode == NPMODE_DROP) {
			freemsg(mp);
		} else {
			/*
			 * If we no longer have the control stream, or if the
			 * mode is set to NPMODE_ERROR, then we need to tell IP
			 * that the interface need to be marked as down. In
			 * other words, we tell IP to be quiescent.
			 */
			merror(q, mp, EPROTO);
		}
		*mpp = NULL;
		return (NULL);	/* don't queue it */
	}
	/*
	 * Do we have a driver stream linked underneath ? If not, we need to
	 * notify pppd that the link needs to be brought up and configure
	 * this upper stream to drop subsequent outgoing packets. This is
	 * for demand-dialing, in which case pppd has done the IP plumbing
	 * but hasn't linked the driver stream underneath us. Therefore, when
	 * a packet is sent down the IP interface, a notification message
	 * will be sent up the control stream to pppd in order for it to
	 * establish the physical link. The driver stream is then expected
	 * to be linked underneath after physical link establishment is done.
	 */
	if (ppa->ppa_lower_wq == NULL) {
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(ppa->ppa_ctl->sps_rq != NULL);

		*mpp = NULL;
		mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
		if (mpnew == NULL) {
			freemsg(mp);
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);	/* don't queue it */
		}
		/* Include the data in the message for logging. */
		mpnew->b_cont = mp;
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsneedup++;
		mutex_exit(&ppa->ppa_sta_lock);
		/*
		 * We need to set the mode to NPMODE_DROP, but should only
		 * do so when this stream is not the control stream.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			sps->sps_npmode = NPMODE_DROP;
		}
		putnext(ppa->ppa_ctl->sps_rq, mpnew);
		return (NULL);	/* don't queue it */
	}
	/*
	 * If so, then try to send it down.  The lower queue is only ever
	 * detached while holding an exclusive lock on the whole driver,
	 * so we can be confident that the lower queue is still there.
	 */
	if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_stats.p.ppp_opackets++;
		if (IS_SPS_CONTROL(sps)) {
			ppa->ppa_opkt_ctl++;
		}
		ppa->ppa_stats.p.ppp_obytes += msize;
		mutex_exit(&ppa->ppa_sta_lock);
		return (ppa->ppa_lower_wq);	/* don't queue it */
	}
	return (NULL);	/* queue it for later */
}

/*
 * sppp_lwsrv()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer.
 *
 * Description:
 *    Lower write-side service procedure. No messages are ever placed on
 *    the write queue here, this just back-enables all upper write side
 *    service procedures.
 */
void
sppp_lwsrv(queue_t *q)
{
	sppa_t		*ppa;
	spppstr_t	*nextsib;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ppa = (sppa_t *)q->q_ptr;
	ASSERT(ppa != NULL);

	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	if ((nextsib = ppa->ppa_ctl) != NULL &&
	    WR(nextsib->sps_rq)->q_first != NULL)
		qenable(WR(nextsib->sps_rq));
	for (nextsib = ppa->ppa_streams; nextsib != NULL;
	    nextsib = nextsib->sps_nextsib) {
		if (WR(nextsib->sps_rq)->q_first != NULL) {
			qenable(WR(nextsib->sps_rq));
		}
	}
	rw_exit(&ppa->ppa_sib_lock);
}

/*
 * sppp_lrput()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Lower read-side put procedure. Messages from below get here.
 *    Data messages are handled separately to limit stack usage
 *    going into IP.
 *
 *    Note that during I_UNLINK processing, it's possible for a downstream
 *    message to enable upstream data (due to pass_wput() removing the
 *    SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
 *    In this case, the only thing above us is passthru, and we might as well
 *    discard.
1799 */ 1800 void 1801 sppp_lrput(queue_t *q, mblk_t *mp) 1802 { 1803 sppa_t *ppa; 1804 spppstr_t *sps; 1805 1806 if ((ppa = q->q_ptr) == NULL) { 1807 freemsg(mp); 1808 return; 1809 } 1810 1811 sps = ppa->ppa_ctl; 1812 1813 if (MTYPE(mp) != M_DATA) { 1814 sppp_recv_nondata(q, mp, sps); 1815 } else if (sps == NULL) { 1816 freemsg(mp); 1817 } else if ((q = sppp_recv(q, &mp, sps)) != NULL) { 1818 putnext(q, mp); 1819 } 1820 } 1821 1822 /* 1823 * sppp_lrsrv() 1824 * 1825 * MT-Perimeters: 1826 * exclusive inner, shared outer. 1827 * 1828 * Description: 1829 * Lower read-side service procedure. This is run once after the I_LINK 1830 * occurs in order to clean up any packets that came in while we were 1831 * transferring in the lower stream. Otherwise, it's not used. 1832 */ 1833 void 1834 sppp_lrsrv(queue_t *q) 1835 { 1836 mblk_t *mp; 1837 1838 while ((mp = getq(q)) != NULL) 1839 sppp_lrput(q, mp); 1840 } 1841 1842 /* 1843 * sppp_recv_nondata() 1844 * 1845 * MT-Perimeters: 1846 * shared inner, shared outer. 1847 * 1848 * Description: 1849 * All received non-data messages come through here. 
1850 */ 1851 static void 1852 sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps) 1853 { 1854 sppa_t *ppa; 1855 spppstr_t *destsps; 1856 struct iocblk *iop; 1857 1858 ppa = (sppa_t *)q->q_ptr; 1859 ctlsps = ppa->ppa_ctl; 1860 1861 switch (MTYPE(mp)) { 1862 case M_CTL: 1863 mutex_enter(&ppa->ppa_sta_lock); 1864 if (*mp->b_rptr == PPPCTL_IERROR) { 1865 ppa->ppa_stats.p.ppp_ierrors++; 1866 ppa->ppa_ierr_low++; 1867 ppa->ppa_mctlsknown++; 1868 } else if (*mp->b_rptr == PPPCTL_OERROR) { 1869 ppa->ppa_stats.p.ppp_oerrors++; 1870 ppa->ppa_oerr_low++; 1871 ppa->ppa_mctlsknown++; 1872 } else { 1873 ppa->ppa_mctlsunknown++; 1874 } 1875 mutex_exit(&ppa->ppa_sta_lock); 1876 freemsg(mp); 1877 break; 1878 case M_IOCTL: 1879 miocnak(q, mp, 0, EINVAL); 1880 break; 1881 case M_IOCACK: 1882 case M_IOCNAK: 1883 iop = (struct iocblk *)mp->b_rptr; 1884 ASSERT(iop != NULL); 1885 /* 1886 * Attempt to match up the response with the stream that the 1887 * request came from. If ioc_id doesn't match the one that we 1888 * recorded, then discard this message. 1889 */ 1890 rw_enter(&ppa->ppa_sib_lock, RW_READER); 1891 if ((destsps = ctlsps) == NULL || 1892 destsps->sps_ioc_id != iop->ioc_id) { 1893 destsps = ppa->ppa_streams; 1894 while (destsps != NULL) { 1895 if (destsps->sps_ioc_id == iop->ioc_id) { 1896 break; /* found the upper stream */ 1897 } 1898 destsps = destsps->sps_nextsib; 1899 } 1900 } 1901 rw_exit(&ppa->ppa_sib_lock); 1902 if (destsps == NULL) { 1903 mutex_enter(&ppa->ppa_sta_lock); 1904 ppa->ppa_ioctlsfwderr++; 1905 mutex_exit(&ppa->ppa_sta_lock); 1906 freemsg(mp); 1907 break; 1908 } 1909 mutex_enter(&ppa->ppa_sta_lock); 1910 ppa->ppa_ioctlsfwdok++; 1911 1912 /* 1913 * Clear SPS_IOCQ and enable the lower write side queue, 1914 * this would allow the upper stream service routine 1915 * to start processing the queue for pending messages. 1916 * sppp_lwsrv -> sppp_uwsrv. 
1917 */ 1918 destsps->sps_flags &= ~SPS_IOCQ; 1919 mutex_exit(&ppa->ppa_sta_lock); 1920 qenable(WR(destsps->sps_rq)); 1921 1922 putnext(destsps->sps_rq, mp); 1923 break; 1924 case M_HANGUP: 1925 /* 1926 * Free the original mblk_t. We don't really want to send 1927 * a M_HANGUP message upstream, so we need to translate this 1928 * message into something else. 1929 */ 1930 freemsg(mp); 1931 if (ctlsps == NULL) 1932 break; 1933 mp = create_lsmsg(PPP_LINKSTAT_HANGUP); 1934 if (mp == NULL) { 1935 mutex_enter(&ppa->ppa_sta_lock); 1936 ppa->ppa_allocbfail++; 1937 mutex_exit(&ppa->ppa_sta_lock); 1938 break; 1939 } 1940 mutex_enter(&ppa->ppa_sta_lock); 1941 ppa->ppa_lsdown++; 1942 mutex_exit(&ppa->ppa_sta_lock); 1943 putnext(ctlsps->sps_rq, mp); 1944 break; 1945 case M_FLUSH: 1946 if (*mp->b_rptr & FLUSHR) { 1947 flushq(q, FLUSHDATA); 1948 } 1949 if (*mp->b_rptr & FLUSHW) { 1950 *mp->b_rptr &= ~FLUSHR; 1951 qreply(q, mp); 1952 } else { 1953 freemsg(mp); 1954 } 1955 break; 1956 default: 1957 if (ctlsps != NULL && 1958 (queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq)) { 1959 putnext(ctlsps->sps_rq, mp); 1960 } else { 1961 mutex_enter(&ppa->ppa_sta_lock); 1962 ppa->ppa_iqdropped++; 1963 mutex_exit(&ppa->ppa_sta_lock); 1964 freemsg(mp); 1965 } 1966 break; 1967 } 1968 } 1969 1970 /* 1971 * sppp_recv() 1972 * 1973 * MT-Perimeters: 1974 * shared inner, shared outer. 1975 * 1976 * Description: 1977 * Receive function called by sppp_lrput. Finds appropriate 1978 * receive stream and does accounting. 
 */
static queue_t *
sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
{
	mblk_t		*mp;
	int		len;
	sppa_t		*ppa;
	spppstr_t	*destsps;
	mblk_t		*zmp;
	uint32_t	npflagpos;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(ctlsps != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);

	len = msgdsize(mp);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ibytes += len;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it. We can't do much with such message anyway,
	 * since we can't really determine what the PPP protocol type is.
	 */
	if (len < PPP_HDRLEN) {
		/* Log, and free it */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_irunts++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	} else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
		/* Log, and accept it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_itoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	/*
	 * We need at least to be able to read the PPP protocol from the
	 * header, so if the first message block is too small, then we
	 * concatenate the rest of the following blocks into one message.
	 */
	if (MBLKL(mp) < PPP_HDRLEN) {
		zmp = msgpullup(mp, PPP_HDRLEN);
		freemsg(mp);
		mp = zmp;
		if (mp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);
		}
		*mpp = mp;
	}
	/*
	 * Hold this packet in the control-queue while the matching
	 * network-layer upper stream for the PPP protocol (sap) has not
	 * yet been plumbed and configured.
	 */
	npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
	mutex_enter(&ppa->ppa_npmutex);
	if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
		/*
		 * proto is currently blocked; Hold up to 4 packets
		 * in the kernel.
		 */
		if (ppa->ppa_holdpkts[npflagpos] > 3 ||
		    putq(ctlsps->sps_rq, mp) == 0)
			freemsg(mp);
		else
			ppa->ppa_holdpkts[npflagpos]++;
		mutex_exit(&ppa->ppa_npmutex);
		return (NULL);
	}
	mutex_exit(&ppa->ppa_npmutex);
	/*
	 * Try to find a matching network-layer upper stream for the specified
	 * PPP protocol (sap), and if none is found, send this frame up the
	 * control stream.
	 */
	destsps = sppp_inpkt(q, mp, ctlsps);
	if (destsps == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_ipkt_ctl++;
		mutex_exit(&ppa->ppa_sta_lock);
		if (canputnext(ctlsps->sps_rq)) {
			if (IS_SPS_KDEBUG(ctlsps)) {
				SPDEBUG(PPP_DRV_NAME
				    "/%d: M_DATA recv (%d bytes) sps=0x%p "
				    "flags=0x%b ppa=0x%p flags=0x%b\n",
				    ctlsps->sps_mn_id, len, (void *)ctlsps,
				    ctlsps->sps_flags, SPS_FLAGS_STR,
				    (void *)ppa, ppa->ppa_flags,
				    PPA_FLAGS_STR);
			}
			return (ctlsps->sps_rq);
		} else {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			return (NULL);
		}
	}
	if (canputnext(destsps->sps_rq)) {
		if (IS_SPS_KDEBUG(destsps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
			    (void *)destsps, destsps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * If fastpath is enabled on the network-layer stream, then
		 * make sure we skip over the PPP header, otherwise, we wrap
		 * the message in a DLPI message.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
			return (destsps->sps_rq);
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp != NULL) {
				*mpp = mp;
				return (destsps->sps_rq);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				return (NULL);
			}
		}
	} else {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_iqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	}
}

/*
 * sppp_inpkt()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Find the destination upper stream for the received packet, called
 *    from sppp_recv.
 *
 * Returns:
 *    ptr to destination upper network stream, or NULL for control stream.
 */
/* ARGSUSED */
static spppstr_t *
sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
{
	spppstr_t	*destsps = NULL;
	sppa_t		*ppa;
	uint16_t	proto;
	int		is_promisc;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL);
	/*
	 * From RFC 1661 (Section 2):
	 *
	 * The Protocol field is one or two octets, and its value identifies
	 * the datagram encapsulated in the Information field of the packet.
	 * The field is transmitted and received most significant octet first.
	 *
	 * The structure of this field is consistent with the ISO 3309
	 * extension mechanism for address fields. All Protocols MUST be odd;
	 * the least significant bit of the least significant octet MUST equal
	 * "1". Also, all Protocols MUST be assigned such that the least
	 * significant bit of the most significant octet equals "0". Frames
	 * received which don't comply with these rules MUST be treated as
	 * having an unrecognized Protocol.
	 *
	 * Protocol field values in the "0***" to "3***" range identify the
	 * network-layer protocol of specific packets, and values in the
	 * "8***" to "b***" range identify packets belonging to the associated
	 * Network Control Protocols (NCPs), if any.
	 *
	 * Protocol field values in the "4***" to "7***" range are used for
	 * protocols with low volume traffic which have no associated NCP.
	 * Protocol field values in the "c***" to "f***" range identify packets
	 * as link-layer Control Protocols (such as LCP).
	 */
	proto = PPP_PROTOCOL(mp->b_rptr);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ipackets++;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * We check if this is not a network-layer protocol, and if so,
	 * then send this packet up the control stream.
	 */
	if (proto > 0x7fff) {
		goto inpkt_done;	/* send it up the control stream */
	}
	/*
	 * Try to grab the destination upper stream from the network-layer
	 * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
	 * protocol types. Otherwise, if the type is not known to the cache,
	 * or if its sap can't be matched with any of the upper streams, then
	 * send this packet up the control stream so that it can be rejected.
	 */
	if (proto == PPP_IP) {
		destsps = ppa->ppa_ip_cache;
	} else if (proto == PPP_IPV6) {
		destsps = ppa->ppa_ip6_cache;
	}
	/*
	 * Toss this one away up the control stream if there's no matching sap;
	 * this way the protocol can be rejected (destsps is NULL).
	 */

inpkt_done:
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lastrx = gethrtime();
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up for
	 * each promiscuous stream that we recognize. We skip the control
	 * stream as we obviously never allow the control stream to become
	 * promiscous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	return (destsps);
}

/*
 * sppp_kstat_update()
 *
 * Description:
 *    Update per-ppa kstat interface statistics.
 */
static int
sppp_kstat_update(kstat_t *ksp, int rw)
{
	register sppa_t		*ppa;
	register sppp_kstats_t	*pppkp;
	register struct pppstat64 *sp;

	/* Statistics are read-only through the kstat interface. */
	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	ppa = (sppa_t *)ksp->ks_private;
	ASSERT(ppa != NULL);

	pppkp = (sppp_kstats_t *)ksp->ks_data;
	sp = &ppa->ppa_stats.p;

	/* Snapshot all counters under the stats lock for consistency. */
	mutex_enter(&ppa->ppa_sta_lock);
	pppkp->allocbfail.value.ui32	= ppa->ppa_allocbfail;
	pppkp->mctlsfwd.value.ui32	= ppa->ppa_mctlsfwd;
	pppkp->mctlsfwderr.value.ui32	= ppa->ppa_mctlsfwderr;
	pppkp->rbytes.value.ui32	= sp->ppp_ibytes;
	pppkp->rbytes64.value.ui64	= sp->ppp_ibytes;
	pppkp->ierrors.value.ui32	= sp->ppp_ierrors;
	pppkp->ierrors_lower.value.ui32	= ppa->ppa_ierr_low;
	pppkp->ioctlsfwd.value.ui32	= ppa->ppa_ioctlsfwd;
	pppkp->ioctlsfwdok.value.ui32	= ppa->ppa_ioctlsfwdok;
	pppkp->ioctlsfwderr.value.ui32	= ppa->ppa_ioctlsfwderr;
	pppkp->ipackets.value.ui32	= sp->ppp_ipackets;
	pppkp->ipackets64.value.ui64	= sp->ppp_ipackets;
	pppkp->ipackets_ctl.value.ui32	= ppa->ppa_ipkt_ctl;
	pppkp->iqdropped.value.ui32	= ppa->ppa_iqdropped;
	pppkp->irunts.value.ui32	= ppa->ppa_irunts;
	pppkp->itoolongs.value.ui32	= ppa->ppa_itoolongs;
	pppkp->lsneedup.value.ui32	= ppa->ppa_lsneedup;
	pppkp->lsdown.value.ui32	= ppa->ppa_lsdown;
	pppkp->mctlsknown.value.ui32	= ppa->ppa_mctlsknown;
	pppkp->mctlsunknown.value.ui32	= ppa->ppa_mctlsunknown;
	pppkp->obytes.value.ui32	= sp->ppp_obytes;
	pppkp->obytes64.value.ui64	= sp->ppp_obytes;
	pppkp->oerrors.value.ui32	= sp->ppp_oerrors;
	pppkp->oerrors_lower.value.ui32	= ppa->ppa_oerr_low;
	pppkp->opackets.value.ui32	= sp->ppp_opackets;
	pppkp->opackets64.value.ui64	= sp->ppp_opackets;
	pppkp->opackets_ctl.value.ui32	= ppa->ppa_opkt_ctl;
	pppkp->oqdropped.value.ui32	= ppa->ppa_oqdropped;
	pppkp->otoolongs.value.ui32	= ppa->ppa_otoolongs;
	pppkp->orunts.value.ui32	= ppa->ppa_orunts;
	mutex_exit(&ppa->ppa_sta_lock);

	return (0);
}

/*
 * Turn off proto in ppa_npflag to indicate that
 * the corresponding network protocol has been plumbed.
 * Release proto packets that were being held in the control
 * queue in anticipation of this event.
2299 */ 2300 static void 2301 sppp_release_pkts(sppa_t *ppa, uint16_t proto) 2302 { 2303 uint32_t npflagpos = sppp_ppp2np(proto); 2304 int count; 2305 mblk_t *mp; 2306 uint16_t mp_proto; 2307 queue_t *q; 2308 spppstr_t *destsps; 2309 2310 ASSERT(ppa != NULL); 2311 2312 if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0) 2313 return; 2314 2315 mutex_enter(&ppa->ppa_npmutex); 2316 ppa->ppa_npflag &= ~(1 << npflagpos); 2317 count = ppa->ppa_holdpkts[npflagpos]; 2318 ppa->ppa_holdpkts[npflagpos] = 0; 2319 mutex_exit(&ppa->ppa_npmutex); 2320 2321 q = ppa->ppa_ctl->sps_rq; 2322 2323 while (count > 0) { 2324 mp = getq(q); 2325 ASSERT(mp != NULL); 2326 2327 mp_proto = PPP_PROTOCOL(mp->b_rptr); 2328 if (mp_proto != proto) { 2329 (void) putq(q, mp); 2330 continue; 2331 } 2332 count--; 2333 destsps = NULL; 2334 if (mp_proto == PPP_IP) { 2335 destsps = ppa->ppa_ip_cache; 2336 } else if (mp_proto == PPP_IPV6) { 2337 destsps = ppa->ppa_ip6_cache; 2338 } 2339 ASSERT(destsps != NULL); 2340 2341 if (IS_SPS_FASTPATH(destsps)) { 2342 mp->b_rptr += PPP_HDRLEN; 2343 } else { 2344 spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr; 2345 ASSERT(uqs != NULL); 2346 mp->b_rptr += PPP_HDRLEN; 2347 mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE); 2348 if (mp == NULL) { 2349 mutex_enter(&ppa->ppa_sta_lock); 2350 ppa->ppa_allocbfail++; 2351 mutex_exit(&ppa->ppa_sta_lock); 2352 /* mp already freed by sppp_dladdud */ 2353 continue; 2354 } 2355 } 2356 2357 if (canputnext(destsps->sps_rq)) { 2358 putnext(destsps->sps_rq, mp); 2359 } else { 2360 mutex_enter(&ppa->ppa_sta_lock); 2361 ppa->ppa_iqdropped++; 2362 mutex_exit(&ppa->ppa_sta_lock); 2363 freemsg(mp); 2364 continue; 2365 } 2366 } 2367 } 2368