/*-
 * Copyright (c) 1998 Brian Somers <brian@Awfulhak.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <net/if_dl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include <errno.h>
#include <paths.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <termios.h>
#include <unistd.h>

#include "layer.h"
#ifndef NONAT
#include "nat_cmd.h"
#endif
#include "vjcomp.h"
#include "ua.h"
#include "defs.h"
#include "command.h"
#include "mbuf.h"
#include "log.h"
#include "timer.h"
#include "fsm.h"
#include "iplist.h"
#include "throughput.h"
#include "slcompress.h"
#include "lqr.h"
#include "hdlc.h"
#include "ncpaddr.h"
#include "ipcp.h"
#include "auth.h"
#include "lcp.h"
#include "async.h"
#include "ccp.h"
#include "link.h"
#include "descriptor.h"
#include "physical.h"
#include "chat.h"
#include "proto.h"
#include "filter.h"
#include "mp.h"
#include "chap.h"
#include "cbcp.h"
#include "datalink.h"
#ifndef NORADIUS
#include "radius.h"
#endif
#include "ipv6cp.h"
#include "ncp.h"
#include "bundle.h"
#include "prompt.h"
#include "id.h"
#include "arp.h"

void
peerid_Init(struct peerid *peer)
{
  peer->enddisc.class = 0;
  *peer->enddisc.address = '\0';
  peer->enddisc.len = 0;
  *peer->authname = '\0';
}

int
peerid_Equal(const struct peerid *p1, const struct peerid *p2)
{
  return !strcmp(p1->authname, p2->authname) &&
         p1->enddisc.class == p2->enddisc.class &&
         p1->enddisc.len == p2->enddisc.len &&
         !memcmp(p1->enddisc.address, p2->enddisc.address, p1->enddisc.len);
}
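
/*
 * RFC 1990 fragments carry either a 12-bit (short) or a 24-bit (long)
 * sequence number, depending on what was negotiated, so sequence
 * arithmetic below is done modulo 0x1000 or 0x1000000.  inc_seq()
 * wraps back to zero at the top of the relevant space, and isbefore()
 * orders two sequence numbers while allowing for that wrap (values
 * within 0x200 of the top of the space are taken to precede small
 * values).
 */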
static u_int32_t
inc_seq(unsigned is12bit, u_int32_t seq)
{
  seq++;
  if (is12bit) {
    if (seq & 0xfffff000)
      seq = 0;
  } else if (seq & 0xff000000)
    seq = 0;
  return seq;
}

static int
isbefore(unsigned is12bit, u_int32_t seq1, u_int32_t seq2)
{
  u_int32_t max = (is12bit ? 0xfff : 0xffffff) - 0x200;

  if (seq1 > max) {
    if (seq2 < 0x200 || seq2 > seq1)
      return 1;
  } else if ((seq1 > 0x200 || seq2 <= max) && seq1 < seq2)
    return 1;

  return 0;
}

static int
mp_ReadHeader(struct mp *mp, struct mbuf *m, struct mp_header *header)
{
  if (mp->local_is12bit) {
    u_int16_t val;

    ua_ntohs(MBUF_CTOP(m), &val);
    if (val & 0x3000) {
      log_Printf(LogWARN, "Oops - MP header without required zero bits\n");
      return 0;
    }
    header->begin = val & 0x8000 ? 1 : 0;
    header->end = val & 0x4000 ? 1 : 0;
    header->seq = val & 0x0fff;
    return 2;
  } else {
    ua_ntohl(MBUF_CTOP(m), &header->seq);
    if (header->seq & 0x3f000000) {
      log_Printf(LogWARN, "Oops - MP header without required zero bits\n");
      return 0;
    }
    header->begin = header->seq & 0x80000000 ? 1 : 0;
    header->end = header->seq & 0x40000000 ? 1 : 0;
    header->seq &= 0x00ffffff;
    return 4;
  }
}

static void
mp_LayerStart(void *v, struct fsm *fp)
{
  /* The given FSM (ccp) is about to start up ! */
}

static void
mp_LayerUp(void *v, struct fsm *fp)
{
  /* The given fsm (ccp) is now up */

  bundle_CalculateBandwidth(fp->bundle);  /* Against ccp_MTUOverhead */
}

static void
mp_LayerDown(void *v, struct fsm *fp)
{
  /* The given FSM (ccp) has been told to come down */
}

static void
mp_LayerFinish(void *v, struct fsm *fp)
{
  /* The given fsm (ccp) is now down */
  if (fp->state == ST_CLOSED && fp->open_mode == OPEN_PASSIVE)
    fsm_Open(fp);  /* CCP goes to ST_STOPPED */
}

static void
mp_UpDown(void *v)
{
  struct mp *mp = (struct mp *)v;
  int percent;

  percent = MAX(mp->link.stats.total.in.OctetsPerSecond,
                mp->link.stats.total.out.OctetsPerSecond) * 800 /
            mp->bundle->bandwidth;
  if (percent >= mp->cfg.autoload.max) {
    log_Printf(LogDEBUG, "%d%% saturation - bring a link up ?\n", percent);
    bundle_AutoAdjust(mp->bundle, percent, AUTO_UP);
  } else if (percent <= mp->cfg.autoload.min) {
    log_Printf(LogDEBUG, "%d%% saturation - bring a link down ?\n", percent);
    bundle_AutoAdjust(mp->bundle, percent, AUTO_DOWN);
  }
}

void
mp_StopAutoloadTimer(struct mp *mp)
{
  throughput_stop(&mp->link.stats.total);
}

void
mp_CheckAutoloadTimer(struct mp *mp)
{
  if (mp->link.stats.total.SamplePeriod != mp->cfg.autoload.period) {
    throughput_destroy(&mp->link.stats.total);
    throughput_init(&mp->link.stats.total, mp->cfg.autoload.period);
    throughput_callback(&mp->link.stats.total, mp_UpDown, mp);
  }

  if (bundle_WantAutoloadTimer(mp->bundle))
    throughput_start(&mp->link.stats.total, "MP throughput", 1);
  else
    mp_StopAutoloadTimer(mp);
}

void
mp_RestartAutoloadTimer(struct mp *mp)
{
  if (mp->link.stats.total.SamplePeriod != mp->cfg.autoload.period)
    mp_CheckAutoloadTimer(mp);
  else
    throughput_clear(&mp->link.stats.total, THROUGHPUT_OVERALL, NULL);
}

void
mp_Init(struct mp *mp, struct bundle *bundle)
{
  mp->peer_is12bit = mp->local_is12bit = 0;
  mp->peer_mrru = mp->local_mrru = 0;

  peerid_Init(&mp->peer);

  mp->out.seq = 0;
  mp->out.link = 0;
  mp->out.af = AF_INET;
  mp->seq.min_in = 0;
  mp->seq.next_in = 0;
  mp->inbufs = NULL;
  mp->bundle = bundle;

  mp->link.type = LOGICAL_LINK;
  mp->link.name = "mp";
  mp->link.len = sizeof *mp;

  mp->cfg.autoload.period = SAMPLE_PERIOD;
  mp->cfg.autoload.min = mp->cfg.autoload.max = 0;
  throughput_init(&mp->link.stats.total, mp->cfg.autoload.period);
  throughput_callback(&mp->link.stats.total, mp_UpDown, mp);
  mp->link.stats.parent = NULL;
  mp->link.stats.gather = 0;  /* Let the physical links gather stats */
  memset(mp->link.Queue, '\0', sizeof mp->link.Queue);
  memset(mp->link.proto_in, '\0', sizeof mp->link.proto_in);
  memset(mp->link.proto_out, '\0', sizeof mp->link.proto_out);

  mp->fsmp.LayerStart = mp_LayerStart;
  mp->fsmp.LayerUp = mp_LayerUp;
  mp->fsmp.LayerDown = mp_LayerDown;
  mp->fsmp.LayerFinish = mp_LayerFinish;
  mp->fsmp.object = mp;

  mpserver_Init(&mp->server);

  mp->cfg.mrru = 0;
  mp->cfg.shortseq = NEG_ENABLED|NEG_ACCEPTED;
  mp->cfg.negenddisc = NEG_ENABLED|NEG_ACCEPTED;
  mp->cfg.enddisc.class = 0;
  *mp->cfg.enddisc.address = '\0';
  mp->cfg.enddisc.len = 0;

  lcp_Init(&mp->link.lcp, mp->bundle, &mp->link, NULL);
  ccp_Init(&mp->link.ccp, mp->bundle, &mp->link, &mp->fsmp);

  link_EmptyStack(&mp->link);
  link_Stack(&mp->link, &protolayer);
  link_Stack(&mp->link, &ccplayer);
  link_Stack(&mp->link, &vjlayer);
#ifndef NONAT
  link_Stack(&mp->link, &natlayer);
#endif
}

int
mp_Up(struct mp *mp, struct datalink *dl)
{
  struct lcp *lcp = &dl->physical->link.lcp;

  if (mp->active) {
    /* We're adding a link - do a last validation on our parameters */
    if (!peerid_Equal(&dl->peer, &mp->peer)) {
      log_Printf(LogPHASE, "%s: Inappropriate peer !\n", dl->name);
      log_Printf(LogPHASE, " Attached to peer %s/%s\n", mp->peer.authname,
                 mp_Enddisc(mp->peer.enddisc.class, mp->peer.enddisc.address,
                            mp->peer.enddisc.len));
      log_Printf(LogPHASE, " New link is peer %s/%s\n", dl->peer.authname,
                 mp_Enddisc(dl->peer.enddisc.class, dl->peer.enddisc.address,
                            dl->peer.enddisc.len));
      return MP_FAILED;
    }
    if (mp->local_mrru != lcp->want_mrru ||
        mp->peer_mrru != lcp->his_mrru ||
        mp->local_is12bit != lcp->want_shortseq ||
        mp->peer_is12bit != lcp->his_shortseq) {
      log_Printf(LogPHASE, "%s: Invalid MRRU/SHORTSEQ MP parameters !\n",
                 dl->name);
      return MP_FAILED;
    }
    return MP_ADDED;
  } else {
    /* First link in multilink mode */

    mp->local_mrru = lcp->want_mrru;
    mp->peer_mrru = lcp->his_mrru;
    mp->local_is12bit = lcp->want_shortseq;
    mp->peer_is12bit = lcp->his_shortseq;
    mp->peer = dl->peer;

    throughput_destroy(&mp->link.stats.total);
    throughput_init(&mp->link.stats.total, mp->cfg.autoload.period);
    throughput_callback(&mp->link.stats.total, mp_UpDown, mp);
    memset(mp->link.Queue, '\0', sizeof mp->link.Queue);
    memset(mp->link.proto_in, '\0', sizeof mp->link.proto_in);
    memset(mp->link.proto_out, '\0', sizeof mp->link.proto_out);

    /* Tell the link who it belongs to */
    dl->physical->link.stats.parent = &mp->link.stats.total;

    mp->out.seq = 0;
    mp->out.link = 0;
    mp->out.af = AF_INET;
    mp->seq.min_in = 0;
    mp->seq.next_in = 0;

    /*
     * Now we create our server socket.
     * If it already exists, join it.  Otherwise, create and own it
     */
    switch (mpserver_Open(&mp->server, &mp->peer)) {
    case MPSERVER_CONNECTED:
      log_Printf(LogPHASE, "mp: Transfer link on %s\n",
                 mp->server.socket.sun_path);
      mp->server.send.dl = dl;  /* Defer 'till it's safe to send */
      return MP_LINKSENT;
    case MPSERVER_FAILED:
      return MP_FAILED;
    case MPSERVER_LISTENING:
      log_Printf(LogPHASE, "mp: Listening on %s\n", mp->server.socket.sun_path);
      log_Printf(LogPHASE, " First link: %s\n", dl->name);

      /* Re-point our NCP layers at our MP link */
      ncp_SetLink(&mp->bundle->ncp, &mp->link);

      /* Our lcp's already up 'cos of the NULL parent */
      if (ccp_SetOpenMode(&mp->link.ccp)) {
        fsm_Up(&mp->link.ccp.fsm);
        fsm_Open(&mp->link.ccp.fsm);
      }

      mp->active = 1;
      break;
    }
  }

  return MP_UP;
}

void
mp_Down(struct mp *mp)
{
  if (mp->active) {
    struct mbuf *next;

    /* Stop that ! */
    mp_StopAutoloadTimer(mp);

    /* Don't want any more of these */
    mpserver_Close(&mp->server);

    /* CCP goes down with a bang */
    fsm2initial(&mp->link.ccp.fsm);

    /* Received fragments go in the bit-bucket */
    while (mp->inbufs) {
      next = mp->inbufs->m_nextpkt;
      m_freem(mp->inbufs);
      mp->inbufs = next;
    }

    peerid_Init(&mp->peer);
    mp->active = 0;
  }
}

void
mp_linkInit(struct mp_link *mplink)
{
  mplink->seq = 0;
  mplink->bandwidth = 0;
}
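
/*
 * Incoming fragments are kept on the mp->inbufs list, ordered oldest
 * (lowest sequence number) first.  mp_Assemble() slots the fragment
 * that has just arrived on `p' into that list, delivers any packets
 * that are now complete and drops fragments that can no longer be
 * completed because every open link has already seen something newer
 * (seq.min_in).
 */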
static void
mp_Assemble(struct mp *mp, struct mbuf *m, struct physical *p)
{
  struct mp_header mh, h;
  struct mbuf *q, *last;
  int32_t seq;

  /*
   * When `m' and `p' are NULL, it means our oldest link has gone down.
   * We want to determine a new min, and process any intermediate stuff
   * as normal
   */

  if (m && mp_ReadHeader(mp, m, &mh) == 0) {
    m_freem(m);
    return;
  }

  if (p) {
    seq = p->dl->mp.seq;
    p->dl->mp.seq = mh.seq;
  } else
    seq = mp->seq.min_in;

  if (mp->seq.min_in == seq) {
    /*
     * We've received new data on the link that has our min (oldest) seq.
     * Figure out which link now has the smallest (oldest) seq.
     */
    struct datalink *dl;

    mp->seq.min_in = (u_int32_t)-1;
    for (dl = mp->bundle->links; dl; dl = dl->next)
      if (dl->state == DATALINK_OPEN &&
          (mp->seq.min_in == -1 ||
           isbefore(mp->local_is12bit, dl->mp.seq, mp->seq.min_in)))
        mp->seq.min_in = dl->mp.seq;
  }

  /*
   * Now process as many of our fragments as we can, adding our new
   * fragment in as we go, and ordering with the oldest at the top of
   * the queue.
   */

  last = NULL;
  seq = mp->seq.next_in;
  q = mp->inbufs;
  while (q || m) {
    if (!q) {
      if (last)
        last->m_nextpkt = m;
      else
        mp->inbufs = m;
      q = m;
      m = NULL;
      h = mh;
    } else {
      mp_ReadHeader(mp, q, &h);

      if (m && isbefore(mp->local_is12bit, mh.seq, h.seq)) {
        /* Our received fragment fits in before this one, so link it in */
        if (last)
          last->m_nextpkt = m;
        else
          mp->inbufs = m;
        m->m_nextpkt = q;
        q = m;
        h = mh;
        m = NULL;
      }
    }

    if (h.seq != seq) {
      /* we're missing something :-( */
      if (isbefore(mp->local_is12bit, seq, mp->seq.min_in)) {
        /* we're never gonna get it */
        struct mbuf *next;

        /* Zap all older fragments */
        while (mp->inbufs != q) {
          log_Printf(LogDEBUG, "Drop frag\n");
          next = mp->inbufs->m_nextpkt;
          m_freem(mp->inbufs);
          mp->inbufs = next;
        }

        /*
         * Zap everything until the next `end' fragment OR just before
         * the next `begin' fragment OR 'till seq.min_in - whichever
         * comes first.
         */
        do {
          mp_ReadHeader(mp, mp->inbufs, &h);
          if (h.begin) {
            /* We might be able to process this ! */
            h.seq--;  /* We're gonna look for fragment with h.seq+1 */
            break;
          }
          next = mp->inbufs->m_nextpkt;
          log_Printf(LogDEBUG, "Drop frag %u\n", h.seq);
          m_freem(mp->inbufs);
          mp->inbufs = next;
        } while (mp->inbufs && (isbefore(mp->local_is12bit, mp->seq.min_in,
                                         h.seq) || h.end));

        /*
         * Continue processing things from here.
         * This deals with the possibility that we received a fragment
         * on the slowest link that invalidates some of our data (because
         * of the hole at `q'), but where there are subsequent `whole'
         * packets that have already been received.
         */

        mp->seq.next_in = seq = inc_seq(mp->local_is12bit, h.seq);
        last = NULL;
        q = mp->inbufs;
      } else
        /* we may still receive the missing fragment */
        break;
    } else if (h.end) {
      /* We've got something, reassemble */
      struct mbuf **frag = &q;
      int len;
      u_long first = -1;

      do {
        *frag = mp->inbufs;
        mp->inbufs = mp->inbufs->m_nextpkt;
        len = mp_ReadHeader(mp, *frag, &h);
        if (first == -1)
          first = h.seq;
        if (frag == &q && !h.begin) {
          log_Printf(LogWARN, "Oops - MP frag %lu should have a begin flag\n",
                     (u_long)h.seq);
          m_freem(q);
          q = NULL;
        } else if (frag != &q && h.begin) {
          log_Printf(LogWARN, "Oops - MP frag %lu should have an end flag\n",
                     (u_long)h.seq - 1);
          /*
           * Stuff our fragment back at the front of the queue and zap
           * our half-assembled packet.
           */
          (*frag)->m_nextpkt = mp->inbufs;
          mp->inbufs = *frag;
          *frag = NULL;
          m_freem(q);
          q = NULL;
          frag = &q;
          h.end = 0;  /* just in case it's a whole packet */
        } else {
          (*frag)->m_offset += len;
          (*frag)->m_len -= len;
          (*frag)->m_nextpkt = NULL;
          do
            frag = &(*frag)->m_next;
          while (*frag != NULL);
        }
      } while (!h.end);

      if (q) {
        q = m_pullup(q);
        log_Printf(LogDEBUG, "MP: Reassembled frags %ld-%lu, length %d\n",
                   first, (u_long)h.seq, m_length(q));
        link_PullPacket(&mp->link, MBUF_CTOP(q), q->m_len, mp->bundle);
        m_freem(q);
      }

      mp->seq.next_in = seq = inc_seq(mp->local_is12bit, h.seq);
      last = NULL;
      q = mp->inbufs;
    } else {
      /* Look for the next fragment */
      seq = inc_seq(mp->local_is12bit, seq);
      last = q;
      q = q->m_nextpkt;
    }
  }

  if (m) {
    /* We still have to find a home for our new fragment */
    last = NULL;
    for (q = mp->inbufs; q; last = q, q = q->m_nextpkt) {
      mp_ReadHeader(mp, q, &h);
      if (isbefore(mp->local_is12bit, mh.seq, h.seq))
        break;
    }
    /* Our received fragment fits in here */
    if (last)
      last->m_nextpkt = m;
    else
      mp->inbufs = m;
    m->m_nextpkt = q;
  }
}

struct mbuf *
mp_Input(struct bundle *bundle, struct link *l, struct mbuf *bp)
{
  struct physical *p = link2physical(l);

  if (!bundle->ncp.mp.active)
    /* Let someone else deal with it ! */
    return bp;

  if (p == NULL) {
    log_Printf(LogWARN, "DecodePacket: Can't do MP inside MP !\n");
    m_freem(bp);
  } else {
    m_settype(bp, MB_MPIN);
    mp_Assemble(&bundle->ncp.mp, bp, p);
  }

  return NULL;
}
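
/*
 * mp_Output() prepends the multilink header that mp_ReadHeader() parses
 * on the receive side.  With short (12-bit) sequence numbers it's two
 * bytes:
 *
 *   B E 0 0 seq(12)
 *
 * and with long (24-bit) sequence numbers it's four bytes:
 *
 *   B E 0 0 0 0 0 0 seq(24)
 *
 * where B and E are the begin and end fragment flags.
 */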
static void
mp_Output(struct mp *mp, struct bundle *bundle, struct link *l,
          struct mbuf *m, u_int32_t begin, u_int32_t end)
{
  char prepend[4];

  /* Stuff an MP header on the front of our packet and send it */

  if (mp->peer_is12bit) {
    u_int16_t val;

    val = (begin << 15) | (end << 14) | (u_int16_t)mp->out.seq;
    ua_htons(&val, prepend);
    m = m_prepend(m, prepend, 2, 0);
  } else {
    u_int32_t val;

    val = (begin << 31) | (end << 30) | (u_int32_t)mp->out.seq;
    ua_htonl(&val, prepend);
    m = m_prepend(m, prepend, 4, 0);
  }
  if (log_IsKept(LogDEBUG))
    log_Printf(LogDEBUG, "MP[frag %d]: Send %d bytes on link `%s'\n",
               mp->out.seq, m_length(m), l->name);
  mp->out.seq = inc_seq(mp->peer_is12bit, mp->out.seq);

  if (l->ccp.fsm.state != ST_OPENED && ccp_Required(&l->ccp)) {
    log_Printf(LogPHASE, "%s: Not transmitting... waiting for CCP\n", l->name);
    return;
  }

  link_PushPacket(l, m, bundle, LINK_QUEUES(l) - 1, PROTO_MP);
}
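
/*
 * Dequeue packets from the MP link and hand them out to the physical
 * links, round-robin starting at mp->out.link.  Links that still have
 * output queued (or a pending short write) are skipped, packets larger
 * than a link's MRU are chopped into MRU-sized fragments, and when only
 * one link is open (and its MRU is no smaller than the peer's MRRU)
 * traffic is pushed straight down that link's stack as PROTO_IP or
 * PROTO_IPV6 instead of PROTO_MP.
 */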
int
mp_FillPhysicalQueues(struct bundle *bundle)
{
  struct mp *mp = &bundle->ncp.mp;
  struct datalink *dl, *fdl;
  size_t total, add, len;
  int thislink, nlinks, nopenlinks, sendasip;
  u_int32_t begin, end;
  struct mbuf *m, *mo;
  struct link *bestlink;

  thislink = nlinks = nopenlinks = 0;
  for (fdl = NULL, dl = bundle->links; dl; dl = dl->next) {
    /* Include non-open links here as mp->out.link will stay more correct */
    if (!fdl) {
      if (thislink == mp->out.link)
        fdl = dl;
      else
        thislink++;
    }
    nlinks++;
    if (dl->state == DATALINK_OPEN)
      nopenlinks++;
  }

  if (!fdl) {
    fdl = bundle->links;
    if (!fdl)
      return 0;
    thislink = 0;
  }

  total = 0;
  for (dl = fdl; nlinks > 0; dl = dl->next, nlinks--, thislink++) {
    if (!dl) {
      dl = bundle->links;
      thislink = 0;
    }

    if (dl->state != DATALINK_OPEN)
      continue;

    if (dl->physical->out)
      /* this link has suffered a short write. Let it continue */
      continue;

    add = link_QueueLen(&dl->physical->link);
    if (add) {
      /* this link has got stuff already queued. Let it continue */
      total += add;
      continue;
    }

    if (!mp_QueueLen(mp)) {
      int mrutoosmall;

      /*
       * If there's only a single open link in our bundle and we haven't got
       * MP level link compression, queue outbound traffic directly via that
       * link's protocol stack rather than using the MP link. This results
       * in the outbound traffic going out as PROTO_IP or PROTO_IPV6 rather
       * than PROTO_MP.
       */

      mrutoosmall = 0;
      sendasip = nopenlinks < 2;
      if (sendasip) {
        if (dl->physical->link.lcp.his_mru < mp->peer_mrru) {
          /*
           * Actually, forget it. This test is done against the MRRU rather
           * than the packet size so that we don't end up sending some data
           * in MP fragments and some data in PROTO_IP packets. That's just
           * too likely to upset some ppp implementations.
           */
          mrutoosmall = 1;
          sendasip = 0;
        }
      }

      bestlink = sendasip ? &dl->physical->link : &mp->link;
      if (!ncp_PushPacket(&bundle->ncp, &mp->out.af, bestlink))
        break;  /* Nothing else to send */

      if (mrutoosmall)
        log_Printf(LogDEBUG, "Don't send data as PROTO_IP, MRU < MRRU\n");
      else if (sendasip)
        log_Printf(LogDEBUG, "Sending data as PROTO_IP, not PROTO_MP\n");

      if (sendasip) {
        add = link_QueueLen(&dl->physical->link);
        if (add) {
          /* this link has got stuff already queued. Let it continue */
          total += add;
          continue;
        }
      }
    }

    m = link_Dequeue(&mp->link);
    if (m) {
      len = m_length(m);
      begin = 1;
      end = 0;

      while (!end) {
        if (dl->state == DATALINK_OPEN) {
          /* Write at most his_mru bytes to the physical link */
          if (len <= dl->physical->link.lcp.his_mru) {
            mo = m;
            end = 1;
            m_settype(mo, MB_MPOUT);
          } else {
            /* It's > his_mru, chop the packet (`m') into bits */
            mo = m_get(dl->physical->link.lcp.his_mru, MB_MPOUT);
            len -= mo->m_len;
            m = mbuf_Read(m, MBUF_CTOP(mo), mo->m_len);
          }
          mp_Output(mp, bundle, &dl->physical->link, mo, begin, end);
          begin = 0;
        }

        if (!end) {
          nlinks--;
          dl = dl->next;
          if (!dl) {
            dl = bundle->links;
            thislink = 0;
          } else
            thislink++;
        }
      }
    }
  }
  mp->out.link = thislink;  /* Start here next time */

  return total;
}

int
mp_SetDatalinkBandwidth(struct cmdargs const *arg)
{
  int val;

  if (arg->argc != arg->argn+1)
    return -1;

  val = atoi(arg->argv[arg->argn]);
  if (val <= 0) {
    log_Printf(LogWARN, "The link bandwidth must be greater than zero\n");
    return 1;
  }
  arg->cx->mp.bandwidth = val;

  if (arg->cx->state == DATALINK_OPEN)
    bundle_CalculateBandwidth(arg->bundle);

  return 0;
}

int
mp_ShowStatus(struct cmdargs const *arg)
{
  struct mp *mp = &arg->bundle->ncp.mp;

  prompt_Printf(arg->prompt, "Multilink is %sactive\n", mp->active ? "" : "in");
  if (mp->active) {
    struct mbuf *m, *lm;
    int bufs = 0;

    lm = NULL;
    prompt_Printf(arg->prompt, "Socket: %s\n",
                  mp->server.socket.sun_path);
    for (m = mp->inbufs; m; m = m->m_nextpkt) {
      bufs++;
      lm = m;
    }
    prompt_Printf(arg->prompt, "Pending frags: %d", bufs);
    if (bufs) {
      struct mp_header mh;
      unsigned long first, last;

      first = mp_ReadHeader(mp, mp->inbufs, &mh) ? mh.seq : 0;
      last = mp_ReadHeader(mp, lm, &mh) ? mh.seq : 0;
      prompt_Printf(arg->prompt, " (Have %lu - %lu, want %lu, lowest %lu)\n",
                    first, last, (unsigned long)mp->seq.next_in,
                    (unsigned long)mp->seq.min_in);
      prompt_Printf(arg->prompt, " First has %sbegin bit and "
                    "%send bit", mh.begin ? "" : "no ", mh.end ? "" : "no ");
    }
    prompt_Printf(arg->prompt, "\n");
  }

  prompt_Printf(arg->prompt, "\nMy Side:\n");
  if (mp->active) {
    prompt_Printf(arg->prompt, " Output SEQ: %u\n", mp->out.seq);
    prompt_Printf(arg->prompt, " MRRU: %u\n", mp->local_mrru);
    prompt_Printf(arg->prompt, " Short Seq: %s\n",
                  mp->local_is12bit ? "on" : "off");
  }
  prompt_Printf(arg->prompt, " Discriminator: %s\n",
                mp_Enddisc(mp->cfg.enddisc.class, mp->cfg.enddisc.address,
                           mp->cfg.enddisc.len));

  prompt_Printf(arg->prompt, "\nHis Side:\n");
  if (mp->active) {
    prompt_Printf(arg->prompt, " Auth Name: %s\n", mp->peer.authname);
    prompt_Printf(arg->prompt, " Input SEQ: %u\n", mp->seq.next_in);
    prompt_Printf(arg->prompt, " MRRU: %u\n", mp->peer_mrru);
    prompt_Printf(arg->prompt, " Short Seq: %s\n",
                  mp->peer_is12bit ? "on" : "off");
  }
  prompt_Printf(arg->prompt, " Discriminator: %s\n",
                mp_Enddisc(mp->peer.enddisc.class, mp->peer.enddisc.address,
                           mp->peer.enddisc.len));

  prompt_Printf(arg->prompt, "\nDefaults:\n");

  prompt_Printf(arg->prompt, " MRRU: ");
  if (mp->cfg.mrru)
    prompt_Printf(arg->prompt, "%d (multilink enabled)\n", mp->cfg.mrru);
  else
    prompt_Printf(arg->prompt, "disabled\n");
  prompt_Printf(arg->prompt, " Short Seq: %s\n",
                command_ShowNegval(mp->cfg.shortseq));
  prompt_Printf(arg->prompt, " Discriminator: %s\n",
                command_ShowNegval(mp->cfg.negenddisc));
  prompt_Printf(arg->prompt, " AutoLoad: min %d%%, max %d%%,"
                " period %d secs\n", mp->cfg.autoload.min,
                mp->cfg.autoload.max, mp->cfg.autoload.period);

  return 0;
}

const char *
mp_Enddisc(u_char c, const char *address, int len)
{
  static char result[100];  /* Used immediately after it's returned */
  int f, header;

  switch (c) {
  case ENDDISC_NULL:
    sprintf(result, "Null Class");
    break;

  case ENDDISC_LOCAL:
    snprintf(result, sizeof result, "Local Addr: %.*s", len, address);
    break;

  case ENDDISC_IP:
    if (len == 4)
      snprintf(result, sizeof result, "IP %s",
               inet_ntoa(*(const struct in_addr *)address));
    else
      sprintf(result, "IP[%d] ???", len);
    break;

  case ENDDISC_MAC:
    if (len == 6) {
      const u_char *m = (const u_char *)address;
      snprintf(result, sizeof result, "MAC %02x:%02x:%02x:%02x:%02x:%02x",
               m[0], m[1], m[2], m[3], m[4], m[5]);
    } else
      sprintf(result, "MAC[%d] ???", len);
    break;

  case ENDDISC_MAGIC:
    sprintf(result, "Magic: 0x");
    header = strlen(result);
    if (len > sizeof result - header - 1)
      len = sizeof result - header - 1;
    for (f = 0; f < len; f++)
      sprintf(result + header + 2 * f, "%02x", address[f]);
    break;

  case ENDDISC_PSN:
    snprintf(result, sizeof result, "PSN: %.*s", len, address);
    break;

  default:
    sprintf(result, "%d: ", (int)c);
    header = strlen(result);
    if (len > sizeof result - header - 1)
      len = sizeof result - header - 1;
    for (f = 0; f < len; f++)
      sprintf(result + header + 2 * f, "%02x", address[f]);
    break;
  }
  return result;
}

int
mp_SetEnddisc(struct cmdargs const *arg)
{
  struct mp *mp = &arg->bundle->ncp.mp;
  struct in_addr addr;

  switch (bundle_Phase(arg->bundle)) {
  case PHASE_DEAD:
    break;
  case PHASE_ESTABLISH:
    /* Make sure none of our links are DATALINK_LCP or greater */
    if (bundle_HighestState(arg->bundle) >= DATALINK_LCP) {
      log_Printf(LogWARN, "enddisc: Only changeable before"
                 " LCP negotiations\n");
      return 1;
    }
    break;
  default:
    log_Printf(LogWARN, "enddisc: Only changeable at phase DEAD/ESTABLISH\n");
    return 1;
  }

  if (arg->argc == arg->argn) {
    mp->cfg.enddisc.class = 0;
    *mp->cfg.enddisc.address = '\0';
    mp->cfg.enddisc.len = 0;
  } else if (arg->argc > arg->argn) {
    if (!strcasecmp(arg->argv[arg->argn], "label")) {
      mp->cfg.enddisc.class = ENDDISC_LOCAL;
      strcpy(mp->cfg.enddisc.address, arg->bundle->cfg.label);
      mp->cfg.enddisc.len = strlen(mp->cfg.enddisc.address);
    } else if (!strcasecmp(arg->argv[arg->argn], "ip")) {
      if (arg->bundle->ncp.ipcp.my_ip.s_addr == INADDR_ANY)
        ncprange_getip4addr(&arg->bundle->ncp.ipcp.cfg.my_range, &addr);
      else
        addr = arg->bundle->ncp.ipcp.my_ip;
      memcpy(mp->cfg.enddisc.address, &addr.s_addr, sizeof addr.s_addr);
      mp->cfg.enddisc.class = ENDDISC_IP;
      mp->cfg.enddisc.len = sizeof arg->bundle->ncp.ipcp.my_ip.s_addr;
    } else if (!strcasecmp(arg->argv[arg->argn], "mac")) {
      struct sockaddr_dl hwaddr;
      int s;

      if (arg->bundle->ncp.ipcp.my_ip.s_addr == INADDR_ANY)
        ncprange_getip4addr(&arg->bundle->ncp.ipcp.cfg.my_range, &addr);
      else
        addr = arg->bundle->ncp.ipcp.my_ip;

      s = ID0socket(PF_INET, SOCK_DGRAM, 0);
      if (s < 0) {
        log_Printf(LogERROR, "set enddisc: socket(): %s\n", strerror(errno));
        return 2;
      }
      if (arp_EtherAddr(s, addr, &hwaddr, 1)) {
        mp->cfg.enddisc.class = ENDDISC_MAC;
        memcpy(mp->cfg.enddisc.address, hwaddr.sdl_data + hwaddr.sdl_nlen,
               hwaddr.sdl_alen);
        mp->cfg.enddisc.len = hwaddr.sdl_alen;
      } else {
        log_Printf(LogWARN, "set enddisc: Can't locate MAC address for %s\n",
                   inet_ntoa(addr));
        close(s);
        return 4;
      }
      close(s);
    } else if (!strcasecmp(arg->argv[arg->argn], "magic")) {
      int f;

      randinit();
      for (f = 0; f < 20; f += sizeof(long))
        *(long *)(mp->cfg.enddisc.address + f) = random();
      mp->cfg.enddisc.class = ENDDISC_MAGIC;
      mp->cfg.enddisc.len = 20;
    } else if (!strcasecmp(arg->argv[arg->argn], "psn")) {
      if (arg->argc > arg->argn+1) {
        mp->cfg.enddisc.class = ENDDISC_PSN;
        strcpy(mp->cfg.enddisc.address, arg->argv[arg->argn+1]);
        mp->cfg.enddisc.len = strlen(mp->cfg.enddisc.address);
      } else {
        log_Printf(LogWARN, "PSN endpoint requires additional data\n");
        return 5;
      }
    } else {
      log_Printf(LogWARN, "%s: Unrecognised endpoint type\n",
                 arg->argv[arg->argn]);
      return 6;
    }
  }

  return 0;
}

static int
mpserver_UpdateSet(struct fdescriptor *d, fd_set *r, fd_set *w, fd_set *e,
                   int *n)
{
  struct mpserver *s = descriptor2mpserver(d);
  int result;

  result = 0;
  if (s->send.dl != NULL) {
    /* We've connect()ed */
    if (!link_QueueLen(&s->send.dl->physical->link) &&
        !s->send.dl->physical->out) {
      /* Only send if we've transmitted all our data (i.e. the ConfigAck) */
      result -= datalink_RemoveFromSet(s->send.dl, r, w, e);
      bundle_SendDatalink(s->send.dl, s->fd, &s->socket);
      s->send.dl = NULL;
      s->fd = -1;
    } else
      /* Never read from a datalink that's on death row ! */
      result -= datalink_RemoveFromSet(s->send.dl, r, NULL, NULL);
  } else if (r && s->fd >= 0) {
    if (*n < s->fd + 1)
      *n = s->fd + 1;
    FD_SET(s->fd, r);
    log_Printf(LogTIMER, "mp: fdset(r) %d\n", s->fd);
    result++;
  }
  return result;
}

static int
mpserver_IsSet(struct fdescriptor *d, const fd_set *fdset)
{
  struct mpserver *s = descriptor2mpserver(d);
  return s->fd >= 0 && FD_ISSET(s->fd, fdset);
}

static void
mpserver_Read(struct fdescriptor *d, struct bundle *bundle, const fd_set *fdset)
{
  struct mpserver *s = descriptor2mpserver(d);

  bundle_ReceiveDatalink(bundle, s->fd);
}

static int
mpserver_Write(struct fdescriptor *d, struct bundle *bundle,
               const fd_set *fdset)
{
  /* We never want to write here ! */
  log_Printf(LogALERT, "mpserver_Write: Internal error: Bad call !\n");
  return 0;
}

void
mpserver_Init(struct mpserver *s)
{
  s->desc.type = MPSERVER_DESCRIPTOR;
  s->desc.UpdateSet = mpserver_UpdateSet;
  s->desc.IsSet = mpserver_IsSet;
  s->desc.Read = mpserver_Read;
  s->desc.Write = mpserver_Write;
  s->send.dl = NULL;
  s->fd = -1;
  memset(&s->socket, '\0', sizeof s->socket);
}

int
mpserver_Open(struct mpserver *s, struct peerid *peer)
{
  int f, l;
  mode_t mask;

  if (s->fd != -1) {
    log_Printf(LogALERT, "Internal error ! mpserver already open\n");
    mpserver_Close(s);
  }

  l = snprintf(s->socket.sun_path, sizeof s->socket.sun_path, "%sppp-%s-%02x-",
               _PATH_VARRUN, peer->authname, peer->enddisc.class);
  if (l < 0) {
    log_Printf(LogERROR, "mpserver: snprintf(): %s\n", strerror(errno));
    return MPSERVER_FAILED;
  }

  for (f = 0; f < peer->enddisc.len && l < sizeof s->socket.sun_path - 2; f++) {
    snprintf(s->socket.sun_path + l, sizeof s->socket.sun_path - l,
             "%02x", *(u_char *)(peer->enddisc.address+f));
    l += 2;
  }

  s->socket.sun_family = AF_LOCAL;
  s->socket.sun_len = sizeof s->socket;
  s->fd = ID0socket(PF_LOCAL, SOCK_DGRAM, 0);
  if (s->fd < 0) {
    log_Printf(LogERROR, "mpserver: socket(): %s\n", strerror(errno));
    return MPSERVER_FAILED;
  }

  setsockopt(s->fd, SOL_SOCKET, SO_REUSEADDR, (struct sockaddr *)&s->socket,
             sizeof s->socket);
  mask = umask(0177);

  /*
   * Try to bind the socket.  If we succeed we play server, if we fail
   * we connect() and hand the link off.
   */

  if (ID0bind_un(s->fd, &s->socket) < 0) {
    if (errno != EADDRINUSE) {
      log_Printf(LogPHASE, "mpserver: can't create bundle socket %s (%s)\n",
                 s->socket.sun_path, strerror(errno));
      umask(mask);
      close(s->fd);
      s->fd = -1;
      return MPSERVER_FAILED;
    }

    /* So we're the sender */
    umask(mask);
    if (ID0connect_un(s->fd, &s->socket) < 0) {
      log_Printf(LogPHASE, "mpserver: can't connect to bundle socket %s (%s)\n",
                 s->socket.sun_path, strerror(errno));
      if (errno == ECONNREFUSED)
        log_Printf(LogPHASE, " The previous server died badly !\n");
      close(s->fd);
      s->fd = -1;
      return MPSERVER_FAILED;
    }

    /* Donate our link to the other guy */
    return MPSERVER_CONNECTED;
  }

  return MPSERVER_LISTENING;
}

void
mpserver_Close(struct mpserver *s)
{
  if (s->send.dl != NULL) {
    bundle_SendDatalink(s->send.dl, s->fd, &s->socket);
    s->send.dl = NULL;
    s->fd = -1;
  } else if (s->fd >= 0) {
    close(s->fd);
    if (ID0unlink(s->socket.sun_path) == -1)
      log_Printf(LogERROR, "%s: Failed to remove: %s\n", s->socket.sun_path,
                 strerror(errno));
    memset(&s->socket, '\0', sizeof s->socket);
    s->fd = -1;
  }
}

void
mp_LinkLost(struct mp *mp, struct datalink *dl)
{
  if (mp->seq.min_in == dl->mp.seq)
    /* We've lost the link that's holding everything up ! */
    mp_Assemble(mp, NULL, NULL);
}

size_t
mp_QueueLen(struct mp *mp)
{
  return link_QueueLen(&mp->link);
}