/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Dummynet portions related to packet handling.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/sysctl.h>

#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <net/if_var.h>	/* NET_EPOCH_... */
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>		/* ip_len, ip_off */
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/if_ether.h>	/* various ether_* routines */
#include <netinet/ip6.h>	/* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#ifdef NEW_AQM
#include <netpfil/ipfw/dn_aqm.h>
#endif
#include <netpfil/ipfw/dn_sched.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
 * instead of dn_cfg.curr_time.
 */

struct dn_parms dn_cfg;
//VNET_DEFINE(struct dn_parms, _base_dn_cfg);

static long tick_last;		/* Last tick duration (usec). */
static long tick_delta;		/* Last vs standard tick diff (usec). */
static long tick_delta_sum;	/* Accumulated tick difference (usec). */
static long tick_adjustment;	/* Tick adjustments done. */
static long tick_lost;		/* Number of lost (coalesced) ticks. */
/* Adjusted vs non-adjusted curr_time difference (ticks). */
static long tick_diff;
static unsigned long io_pkt;
static unsigned long io_pkt_fast;

#ifdef NEW_AQM
unsigned long io_pkt_drop;
#else
static unsigned long io_pkt_drop;
#endif

/*
 * We use a heap to store entities for which we have pending timer events.
 * The heap is checked at every tick and all entities with expired events
 * are extracted.
 */

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);

#ifdef SYSCTL_NODE

/*
 * Because of the way the SYSBEGIN/SYSEND macros work on other
 * platforms, there should not be functions between them.
 * So keep the handlers outside the block.
 */
static int
sysctl_hash_size(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = dn_cfg.hash_size;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 16 || value > 65536)
		return (EINVAL);
	dn_cfg.hash_size = value;
	return (0);
}

static int
sysctl_limits(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	if (arg2 != 0)
		value = dn_cfg.slot_limit;
	else
		value = dn_cfg.byte_limit;
	error = sysctl_handle_long(oidp, &value, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);
	if (arg2 != 0) {
		if (value < 1)
			return (EINVAL);
		dn_cfg.slot_limit = value;
	} else {
		if (value < 1500)
			return (EINVAL);
		dn_cfg.byte_limit = value;
	}
	return (0);
}
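/*
 * Illustrative usage (not part of the code): the handlers above back
 * ordinary sysctl(8) knobs, e.g.
 *
 *	sysctl net.inet.ip.dummynet.hash_size=1024
 *	sysctl net.inet.ip.dummynet.pipe_slot_limit=200
 *
 * Writes outside the ranges validated above fail with EINVAL.
 */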
SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
#ifdef NEW_AQM
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Dummynet");
#else
static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Dummynet");
#endif

/* wrapper to pass dn_cfg fields to SYSCTL_* */
//#define DC(x)	(&(VNET_NAME(_base_dn_cfg).x))
#define DC(x)	(&(dn_cfg.x))

/* parameters */

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_hash_size, "I",
    "Default hash table size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 1, sysctl_limits, "L",
    "Upper limit in slots for pipe queue.");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_limits, "L",
    "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW, DC(debug), 0, "Dummynet debug level");

/* RED parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD, DC(red_lookup_depth), 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD, DC(red_avg_pkt_size), 0, "RED medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD, DC(red_max_pkt_size), 0, "RED max packet size");

/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD, &tick_diff, 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD, &tick_lost, 0,
    "Number of ticks coalesced by dummynet taskqueue.");

/* Drain parameters */
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD, DC(expire_cycle), 0, "Expire cycle for queues/pipes");

/* statistics */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD, DC(si_count), 0, "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD, &io_pkt, 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD, &io_pkt_fast, 0,
    "Number of packets that bypassed the dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD, &io_pkt_drop, 0,
    "Number of packets dropped by dummynet.");
#undef DC
SYSEND

#endif

static void	dummynet_send(struct mbuf *);

/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);

#ifdef NEW_AQM
	/* XXX: to skip ts m_tag. For debugging only. */
	if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) {
		m_tag_delete(m, mtag);
		mtag = m_tag_first(m);
		D("skip TS tag");
	}
#endif
	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	return ((struct dn_pkt_tag *)(mtag + 1));
}
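/*
 * Layout note (illustrative): m_tag_get() allocates the m_tag header and
 * its payload in one chunk, so the dummynet state sits immediately after
 * the header; hence the (mtag + 1) pointer arithmetic above and in
 * tag_mbuf() below.
 *
 *	+----------------+---------------------+
 *	| struct m_tag   | struct dn_pkt_tag   |
 *	+----------------+---------------------+
 *	^ mtag           ^ (mtag + 1)
 */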
#ifndef NEW_AQM
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
	// buffers from netmap need to be copied
	// XXX note that the routine is not expected to fail
	ND("append %p to %p", m, q);
	if (m->m_flags & M_STACK) {
		struct mbuf *m_new;
		void *p;
		int l, ofs;

		ofs = m->m_data - m->__m_extbuf;
		// XXX allocate
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
		    m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
		p = m_new->__m_extbuf;	/* new pointer */
		l = m_new->__m_extlen;	/* new len */
		if (l <= m->__m_extlen) {
			panic("extlen too large");
		}

		*m_new = *m;	// copy
		m_new->m_flags &= ~M_STACK;
		m_new->__m_extbuf = p;	// point to new buffer
		_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
		m_new->m_data = p + ofs;
		m = m_new;
	}
#endif /* USERSPACE */
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->count++;
	q->tail = m;
	m->m_nextpkt = NULL;
}
#endif

/*
 * Dispose of a list of packets. Use a function so that, if we need to do
 * more work, there is a central point to do it.
 */
void
dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}

static int
red_drops(struct dn_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass
	 * filter with an exponential weighted (w_q) moving average:
	 *	avg <- (1 - w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg = (1 - w_q)^(idle / s)
	 * where s is the time needed for transmitting a medium-sized
	 * packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with a probability that is a function of avg.
	 */

	struct dn_fsk *fs = q->fs;
	int64_t p_b = 0;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty, compute how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time / s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = div64((dn_cfg.curr_time - q->q_time),
			    fs->lookup_step);

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}
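	/*
	 * Worked example for the update above (numbers assumed for
	 * illustration, and assuming w_q is stored in the same fixed
	 * point as SCALE()): with w_q = 0.002, q->avg = SCALE(10) and
	 * q_size = 20 packets, diff = SCALE(10) and the update adds
	 * SCALE_MUL(SCALE(10), w_q) ~ SCALE(0.02), nudging the average
	 * slowly toward the instantaneous queue length.
	 */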
	/* Should we drop the packet? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >= max threshold */
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	p_b = c_3 * avg - c_4
			 * where
			 *	c_3 = (1 - max_p) / max_th
			 *	c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		/*
		 * We compute p_b using the linear dropping function
		 *	p_b = c_1 * avg - c_2
		 * where
		 *	c_1 = max_p / (max_th - min_th)
		 *	c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64((p_b * len), fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since the last drop, so
		 * a greater value of q->count means a greater packet drop
		 * probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}

/*
 * ECN/ECT Processing (partially adopted from altq)
 */
#ifndef NEW_AQM
static
#endif
int
ecn_mark(struct mbuf *m)
{
	struct ip *ip;

	ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);

	switch (ip->ip_v) {
	case IPVERSION:
	{
		uint16_t old;

		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
			return (0);	/* not-ECT */
		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
			return (1);	/* already marked */

		/*
		 * ECN-capable but not marked:
		 * mark CE and update the checksum.
		 */
		old = *(uint16_t *)ip;
		ip->ip_tos |= IPTOS_ECN_CE;
		ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip);
		return (1);
	}
#ifdef INET6
	case (IPV6_VERSION >> 4):
	{
		struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return (0);	/* version mismatch! */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_NOTECT << 20))
			return (0);	/* not-ECT */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_CE << 20))
			return (1);	/* already marked */
		/*
		 * ECN-capable but not marked: mark CE.
		 */
		flowlabel |= (IPTOS_ECN_CE << 20);
		ip6->ip6_flow = htonl(flowlabel);
		return (1);
	}
#endif
	}
	return (0);
}
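/*
 * Note on the shifts above (illustrative): after ntohl(), the IPv6
 * Traffic Class occupies bits 20-27 of ip6_flow (the version sits in
 * bits 28-31, the flow label in bits 0-19), so the two ECN bits land
 * at bits 20-21. That is why the IPv4 IPTOS_ECN_* constants are
 * shifted left by 20.
 */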
/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyway.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf *m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
		    __FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return (1);
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	if (f->plr && random() < f->plr)
		goto drop;
#ifdef NEW_AQM
	/* Call AQM enqueue function */
	if (q->fs->aqmfp)
		return (q->fs->aqmfp->enqueue(q, m));
#endif
	if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) {
		if (!(f->flags & DN_IS_ECN) || !ecn_mark(m))
			goto drop;
	}
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return (0);

drop:
	io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return (1);
}

/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line in the heap.
 * Runs under scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0;	/* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;
		dline->mq.head = m->m_nextpkt;
		dline->mq.count--;
		mq_append(q, m);
	}
	if (m != NULL) {
		dline->oid.subtype = 1;	/* in heap */
		heap_insert(&dn_cfg.evheap, pkt->output_time, dline);
	}
}

/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return (0);
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		struct dn_pkt_tag *dt = dn_tag_get(m);

		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return (bits);
}

/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done, bw;

	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
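	/*
	 * Unit check (illustrative): si->credit is in (bits/s) * ticks.
	 * One tick lasts 1/hz seconds, so a packet of len bits costs
	 * len * hz in the same units; e.g. with bw = 1 Mbit/s and
	 * hz = 1000, a 1500-byte packet (12000 bits) costs
	 * 12000 * 1000 credit, i.e. 12 ticks of link time.
	 */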
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;

		done++;
		len_scaled = (bw == 0) ? 0 :
		    hz * (m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move packet in the delay line. */
		dn_tag_get(m)->output_time = dn_cfg.curr_time + s->link.delay;
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;

		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&dn_cfg.evheap, now + t, si);
	}
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return (q->head);
}

/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL };	/* queue to accumulate results */

	CURVNET_SET((struct vnet *)context);

	DN_BH_WLOCK();

	/* Update number of lost (coalesced) ticks. */
	tick_lost += pending - 1;

	getmicrouptime(&t);
	/* Last tick duration (usec). */
	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
	/* Last tick vs standard tick difference (usec). */
	tick_delta = (tick_last * hz - 1000000) / hz;
	/* Accumulated tick difference (usec). */
	tick_delta_sum += tick_delta;

	dn_cfg.prev_t = t;

	/*
	 * Adjust curr_time if the accumulated tick difference is
	 * greater than the 'standard' tick. Since curr_time should
	 * be monotonically increasing, we do positive adjustments
	 * as required, and throttle curr_time in case of negative
	 * adjustment.
	 */
	dn_cfg.curr_time++;
	if (tick_delta_sum - tick >= 0) {
		int diff = tick_delta_sum / tick;

		dn_cfg.curr_time += diff;
		tick_diff += diff;
		tick_delta_sum %= tick;
		tick_adjustment++;
	} else if (tick_delta_sum + tick <= 0) {
		dn_cfg.curr_time--;
		tick_diff--;
		tick_delta_sum += tick;
		tick_adjustment++;
	}

	/* Serve pending events, accumulate in q. */
	for (;;) {
		struct dn_id *p;	/* generic parameter to handler */

		if (dn_cfg.evheap.elements == 0 ||
		    DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
			break;
		p = HEAP_TOP(&dn_cfg.evheap)->object;
		heap_extract(&dn_cfg.evheap, NULL);

		if (p->type == DN_SCH_I) {
			serve_sched(&q, (struct dn_sch_inst *)p,
			    dn_cfg.curr_time);
		} else {	/* extracted a delay line */
			transmit_event(&q, (struct delay_line *)p,
			    dn_cfg.curr_time);
		}
	}
	if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) {
		dn_cfg.expire_cycle = 0;
		dn_drain_scheduler();
		dn_drain_queue();
	}

	dn_reschedule();
	DN_BH_WUNLOCK();
	if (q.head != NULL)
		dummynet_send(q.head);
	CURVNET_RESTORE();
}
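/*
 * Drift-correction example for the code above (values assumed): with
 * hz = 1000 the standard tick is 1000 usec. If a tick actually lasted
 * 1300 usec, tick_delta = 300; once tick_delta_sum reaches the standard
 * tick, curr_time advances by one extra tick and the remainder is kept,
 * so simulated time tracks wall-clock time despite callout jitter.
 */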
/*
 * Forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	NET_EPOCH_ASSERT();

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) {	/* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);

			/*
			 * Extract the dummynet info and rename the tag
			 * to carry reinject info.
			 */
			if (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2) &&
			    pkt->ifp == NULL) {
				dst = DIR_DROP;
			} else {
				dst = pkt->dn_dir;
				ifp = pkt->ifp;
				tag->m_tag_cookie = MTAG_IPFW_RULE;
				tag->m_tag_id = 0;
			}
		}

		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;

		case DIR_IN:
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB:	/* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				((*bridge_dn_p)(m, ifp));
			else
				printf("dummynet: if_bridge not loaded\n");

			break;

		case DIR_IN | PROTO_LAYER2:	/* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2:	/* DN_TO_ETH_OUT: */
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* Drop the packet after some time. */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}

static inline int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
	    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return (1);	/* Cannot allocate packet header. */
	m_tag_prepend(m, mtag);	/* Attach to mbuf chain. */
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	dt->rule.info &= IPFW_ONEPASS;	/* only keep this info */
	dt->dn_dir = dir;
	dt->ifp = fwa->flags & IPFW_ARGS_OUT ? fwa->ifp : NULL;
	/* dt->output_time is updated as the packet moves through. */
	dt->output_time = dn_cfg.curr_time;
	dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0;
	return (0);
}

/*
 * dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated with it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 */
int
dummynet_io(struct mbuf **m0, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */
	int fs_id, dir;

	fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
	    ((fwa->rule.info & IPFW_IS_PIPE) ? 2 * DN_MAX_ID : 0);
	/* XXXGL: convert args to dir */
	if (fwa->flags & IPFW_ARGS_IN)
		dir = DIR_IN;
	else
		dir = DIR_OUT;
	if (fwa->flags & IPFW_ARGS_ETHER)
		dir |= PROTO_LAYER2;
	else if (fwa->flags & IPFW_ARGS_IP6)
		dir |= PROTO_IPV6;
	DN_BH_WLOCK();
	io_pkt++;
	/* We could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	if (dn_cfg.busy) {
		/*
		 * If the upper half is busy doing something expensive,
		 * let's queue the packet and move forward.
		 */
		mq_append(&dn_cfg.pending, m);
		m = *m0 = NULL;	/* consumed */
		goto done;	/* already active, nothing to do */
	}
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* Find scheduler instance, possibly applying sched_mask. */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;

		/* dn_enqueue already increases io_pkt_drop */
		io_pkt_drop--;

		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL;	/* consumed */
		goto done;	/* already active, nothing to do */
	}

	/* Compute the initial allowance. */
	if (si->idle_time < dn_cfg.curr_time) {
		/* Do this only on the first packet on an idle pipe. */
		struct dn_link *p = &fs->sched->link;

		si->sched_time = dn_cfg.curr_time;
		si->credit = dn_cfg.io_fast ? p->bandwidth : 0;
		if (p->burst) {
			uint64_t burst = (dn_cfg.curr_time - si->idle_time) *
			    p->bandwidth;
			if (burst > p->burst)
				burst = p->burst;
			si->credit += burst;
		}
	}
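	/*
	 * Illustrative reading of the block above: a link that has been
	 * idle accumulates credit for the idle period, capped at the
	 * configured burst, so the first packets after an idle spell can
	 * go out back-to-back instead of being paced from zero.
	 */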
	/* Pass through scheduler and delay line. */
	m = serve_sched(NULL, si, dn_cfg.curr_time);

	/* Optimization -- pass it back to ipfw for immediate send. */
	/*
	 * XXX Don't call dummynet_send() if the scheduler returned the
	 * packet just enqueued. This avoids a lock order reversal.
	 */
	if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* Fast io: rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return (0);

dropit:
	io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return ((fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS);
}
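/*
 * Usage example (illustrative, not part of the code): the path above is
 * exercised by ipfw(8) rules such as
 *
 *	ipfw pipe 1 config bw 1Mbit/s delay 20ms
 *	ipfw add pipe 1 ip from any to any
 *
 * where each matching packet enters dummynet_io() and, after shaping,
 * is reinjected via dummynet_send().
 */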