/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Dummynet portions related to packet handling.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/sysctl.h>

#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <net/if_var.h>	/* NET_EPOCH_... */
#include <net/if_private.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>		/* ip_len, ip_off */
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/if_ether.h>	/* various ether_* routines */
#include <netinet/ip6.h>	/* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#ifdef NEW_AQM
#include <netpfil/ipfw/dn_aqm.h>
#endif
#include <netpfil/ipfw/dn_sched.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
 * instead of V_dn_cfg.curr_time.
 */
VNET_DEFINE(struct dn_parms, dn_cfg);
#define V_dn_cfg VNET(dn_cfg)

/*
 * We use a heap to store entities for which we have pending timer events.
 * The heap is checked at every tick and all entities with expired events
 * are extracted.
 */
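
/*
 * Illustrative sketch of the resulting event loop (it mirrors the actual
 * code in dummynet_task() below, using the same names):
 *
 *	while (V_dn_cfg.evheap.elements > 0 &&
 *	    DN_KEY_LEQ(HEAP_TOP(&V_dn_cfg.evheap)->key, V_dn_cfg.curr_time)) {
 *		p = HEAP_TOP(&V_dn_cfg.evheap)->object;
 *		heap_extract(&V_dn_cfg.evheap, NULL);
 *		... serve the scheduler instance or delay line 'p' ...
 *	}
 */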

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *);

#ifdef SYSCTL_NODE

/*
 * Because of the way the SYSBEGIN/SYSEND macros work on other
 * platforms, there should not be functions between them.
 * So keep the handlers outside the block.
 */
static int
sysctl_hash_size(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = V_dn_cfg.hash_size;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 16 || value > 65536)
		return (EINVAL);
	V_dn_cfg.hash_size = value;
	return (0);
}

static int
sysctl_limits(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	if (arg2 != 0)
		value = V_dn_cfg.slot_limit;
	else
		value = V_dn_cfg.byte_limit;
	error = sysctl_handle_long(oidp, &value, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);
	if (arg2 != 0) {
		if (value < 1)
			return (EINVAL);
		V_dn_cfg.slot_limit = value;
	} else {
		if (value < 1500)
			return (EINVAL);
		V_dn_cfg.byte_limit = value;
	}
	return (0);
}
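
/*
 * The handlers above are reached through sysctl(8); illustrative usage
 * (the values are examples, the accepted ranges come from the checks above):
 *
 *	sysctl net.inet.ip.dummynet.hash_size=1024          # 16..65536
 *	sysctl net.inet.ip.dummynet.pipe_slot_limit=200     # >= 1 slot
 *	sysctl net.inet.ip.dummynet.pipe_byte_limit=1048576 # >= 1500 bytes
 */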

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
#ifdef NEW_AQM
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Dummynet");
#else
static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Dummynet");
#endif

/* wrapper to pass V_dn_cfg fields to SYSCTL_* */
#define DC(x)	(&(VNET_NAME(dn_cfg).x))

/* parameters */

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_hash_size, "I",
    "Default hash table size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 1, sysctl_limits, "L",
    "Upper limit in slots for pipe queue.");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_limits, "L",
    "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW | CTLFLAG_VNET, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_VNET, DC(debug), 0, "Dummynet debug level");

/* RED parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_lookup_depth), 0,
    "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_avg_pkt_size), 0,
    "RED medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_max_pkt_size), 0,
    "RED max packet size");

/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_delta), 0,
    "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_delta_sum), 0,
    "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_adjustment), 0,
    "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_diff), 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_lost), 0,
    "Number of ticks coalesced by dummynet taskqueue.");

/* Drain parameters */
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW | CTLFLAG_VNET, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD | CTLFLAG_VNET, DC(expire_cycle), 0,
    "Expire cycle for queues/pipes");

/* statistics */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(si_count), 0,
    "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt), 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt_fast), 0,
    "Number of packets that bypassed the dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt_drop), 0,
    "Number of packets dropped by dummynet.");
#undef DC
SYSEND

#endif

static void dummynet_send(struct mbuf *);

/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);
#ifdef NEW_AQM
	/* XXX: skip the timestamp m_tag; for debugging only */
	if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) {
		m_tag_delete(m, mtag);
		mtag = m_tag_first(m);
		D("skip TS tag");
	}
#endif
	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	return ((struct dn_pkt_tag *)(mtag + 1));
}
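
/*
 * Note: m_tag_get() allocates the m_tag header and its payload in one
 * chunk, so the dn_pkt_tag payload lives immediately after the header;
 * this is why the function above returns (mtag + 1). The tag itself is
 * prepended by tag_mbuf() below, which keeps it first on the list.
 */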

#ifndef NEW_AQM
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
	// buffers from netmap need to be copied
	// XXX note that the routine is not expected to fail
	ND("append %p to %p", m, q);
	if (m->m_flags & M_STACK) {
		struct mbuf *m_new;
		void *p;
		int l, ofs;

		ofs = m->m_data - m->__m_extbuf;
		// XXX allocate
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
		    m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
		p = m_new->__m_extbuf;	/* new pointer */
		l = m_new->__m_extlen;	/* new len */
		if (l <= m->__m_extlen) {
			panic("extlen too large");
		}

		*m_new = *m;	// copy
		m_new->m_flags &= ~M_STACK;
		m_new->__m_extbuf = p;	// point to new buffer
		_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
		m_new->m_data = p + ofs;
		m = m_new;
	}
#endif /* USERSPACE */
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->count++;
	q->tail = m;
	m->m_nextpkt = NULL;
}
#endif

/*
 * Dispose of a list of packets. Use a function so if we need to do
 * more work, this is a central point to do it.
 */
void
dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}

static int
red_drops(struct dn_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponential weighted (w_q) moving average:
	 *	avg <- (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg = (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with probability P function of avg.
	 */

	struct dn_fsk *fs = q->fs;
	int64_t p_b = 0;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty, find for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = div64((V_dn_cfg.curr_time - q->q_time),
			    fs->lookup_step);

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}

	/* Should we drop? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >= max threshold */
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	p_b = c_3 * avg - c_4
			 * where
			 *	c_3 = (1 - max_p) / max_th
			 *	c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		/*
		 * We compute p_b using the linear dropping function
		 *	p_b = c_1 * avg - c_2
		 * where
		 *	c_1 = max_p / (max_th - min_th)
		 *	c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64((p_b * len), fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since last drop, so a
		 * greater value of q->count means a greater packet drop
		 * probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}
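
/*
 * Worked example for the linear region above (illustrative numbers):
 * with min_th = 5, max_th = 15 (packets) and max_p = 0.1 we get
 *	c_1 = max_p / (max_th - min_th) = 0.01
 *	c_2 = max_p * min_th / (max_th - min_th) = 0.05
 * so avg = 10 yields p_b = 0.01 * 10 - 0.05 = 0.05, i.e. a 5% base drop
 * probability, further scaled by q->count before the random test.
 */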

/*
 * ECN/ECT Processing (partially adopted from altq)
 */
#ifndef NEW_AQM
static
#endif
int
ecn_mark(struct mbuf *m)
{
	struct ip *ip;
	ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);

	switch (ip->ip_v) {
	case IPVERSION:
	    {
		uint16_t old;

		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
			return (0);	/* not-ECT */
		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
			return (1);	/* already marked */

		/*
		 * ecn-capable but not marked,
		 * mark CE and update checksum
		 */
		old = *(uint16_t *)ip;
		ip->ip_tos |= IPTOS_ECN_CE;
		ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip);
		return (1);
	    }
#ifdef INET6
	case (IPV6_VERSION >> 4):
	    {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return (0);	/* version mismatch! */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_NOTECT << 20))
			return (0);	/* not-ECT */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_CE << 20))
			return (1);	/* already marked */
		/*
		 * ecn-capable but not marked, mark CE
		 */
		flowlabel |= (IPTOS_ECN_CE << 20);
		ip6->ip6_flow = htonl(flowlabel);
		return (1);
	    }
#endif
	}
	return (0);
}
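
/*
 * The IPv4 branch above updates ip_sum incrementally, in the style of
 * RFC 1624 (HC' = ~(~HC + ~m + m') for a 16-bit header word changing
 * from m to m'), so the checksum stays valid without recomputing it
 * over the whole header; cksum_adjust() is expected to implement this
 * rule.
 */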

/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyway.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf *m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
		    __FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return (1);
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	if (f->plr && random() < f->plr)
		goto drop;
	if (m->m_pkthdr.rcvif != NULL)
		m_rcvif_serialize(m);
#ifdef NEW_AQM
	/* Call AQM enqueue function */
	if (q->fs->aqmfp)
		return (q->fs->aqmfp->enqueue(q, m));
#endif
	if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) {
		if (!(f->flags & DN_IS_ECN) || !ecn_mark(m))
			goto drop;
	}
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return (0);

drop:
	V_dn_cfg.io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return (1);
}

/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line in the heap.
 * Runs under scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0;	/* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;
		dline->mq.head = m->m_nextpkt;
		dline->mq.count--;
		if (m->m_pkthdr.rcvif != NULL &&
		    __predict_false(m_rcvif_restore(m) == NULL))
			m_freem(m);
		else
			mq_append(q, m);
	}
	if (m != NULL) {
		dline->oid.subtype = 1;	/* in heap */
		heap_insert(&V_dn_cfg.evheap, pkt->output_time, dline);
	}
}

/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return (0);
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		struct dn_pkt_tag *dt = dn_tag_get(m);
		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return (bits);
}

/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done;
	uint32_t bw;

	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;

		done++;
		len_scaled = (bw == 0) ? 0 : hz *
		    (m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move packet in the delay line */
		dn_tag_get(m)->output_time = V_dn_cfg.curr_time + s->link.delay;
		if (m->m_pkthdr.rcvif != NULL)
			m_rcvif_serialize(m);
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;
		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&V_dn_cfg.evheap, now + t, si);
	}
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return (q->head);
}
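
/*
 * Credit arithmetic example (illustrative): with hz = 1000 and
 * link.bandwidth = 1000000 (1 Mbit/s), a 1500-byte packet costs
 * len_scaled = hz * 1500 * 8 = 12000000 units, while each elapsed tick
 * earns bw = 1000000 units of credit, so the instance pays the packet
 * off in 12 ticks (12 ms), matching the serialization time of
 * 12000 bits at 1 Mbit/s.
 */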

/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL };	/* queue to accumulate results */
	struct epoch_tracker et;

	VNET_ITERATOR_DECL(vnet_iter);
	VNET_LIST_RLOCK();
	NET_EPOCH_ENTER(et);

	VNET_FOREACH(vnet_iter) {
		memset(&q, 0, sizeof(struct mq));
		CURVNET_SET(vnet_iter);

		if (!V_dn_cfg.init_done) {
			CURVNET_RESTORE();
			continue;
		}

		DN_BH_WLOCK();

		/* Update number of lost (coalesced) ticks. */
		V_dn_cfg.tick_lost += pending - 1;

		getmicrouptime(&t);
		/* Last tick duration (usec). */
		V_dn_cfg.tick_last = (t.tv_sec - V_dn_cfg.prev_t.tv_sec) * 1000000 +
		    (t.tv_usec - V_dn_cfg.prev_t.tv_usec);
		/* Last tick vs standard tick difference (usec). */
		V_dn_cfg.tick_delta = (V_dn_cfg.tick_last * hz - 1000000) / hz;
		/* Accumulated tick difference (usec). */
		V_dn_cfg.tick_delta_sum += V_dn_cfg.tick_delta;

		V_dn_cfg.prev_t = t;

		/*
		 * Adjust curr_time if the accumulated tick difference is
		 * greater than the 'standard' tick. Since curr_time should
		 * be monotonically increasing, we do positive adjustments
		 * as required, and throttle curr_time in case of negative
		 * adjustment.
		 */
		V_dn_cfg.curr_time++;
		if (V_dn_cfg.tick_delta_sum - tick >= 0) {
			int diff = V_dn_cfg.tick_delta_sum / tick;

			V_dn_cfg.curr_time += diff;
			V_dn_cfg.tick_diff += diff;
			V_dn_cfg.tick_delta_sum %= tick;
			V_dn_cfg.tick_adjustment++;
		} else if (V_dn_cfg.tick_delta_sum + tick <= 0) {
			V_dn_cfg.curr_time--;
			V_dn_cfg.tick_diff--;
			V_dn_cfg.tick_delta_sum += tick;
			V_dn_cfg.tick_adjustment++;
		}

		/* serve pending events, accumulate in q */
		for (;;) {
			struct dn_id *p;	/* generic parameter to handler */

			if (V_dn_cfg.evheap.elements == 0 ||
			    DN_KEY_LT(V_dn_cfg.curr_time, HEAP_TOP(&V_dn_cfg.evheap)->key))
				break;
			p = HEAP_TOP(&V_dn_cfg.evheap)->object;
			heap_extract(&V_dn_cfg.evheap, NULL);
			if (p->type == DN_SCH_I) {
				serve_sched(&q, (struct dn_sch_inst *)p, V_dn_cfg.curr_time);
			} else {	/* extracted a delay line */
				transmit_event(&q, (struct delay_line *)p, V_dn_cfg.curr_time);
			}
		}
		if (V_dn_cfg.expire && ++V_dn_cfg.expire_cycle >= V_dn_cfg.expire) {
			V_dn_cfg.expire_cycle = 0;
			dn_drain_scheduler();
			dn_drain_queue();
		}
		DN_BH_WUNLOCK();
		if (q.head != NULL)
			dummynet_send(q.head);

		CURVNET_RESTORE();
	}
	NET_EPOCH_EXIT(et);
	VNET_LIST_RUNLOCK();

	/* Schedule our next run. */
	dn_reschedule();
}
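
/*
 * Drift example (illustrative): with hz = 1000 the standard tick is
 * tick = 1000 us. If the last tick actually lasted 1007 us, then
 * tick_delta = (1007 * 1000 - 1000000) / 1000 = 7 us is added to
 * tick_delta_sum; once the sum reaches a full tick, curr_time gains one
 * extra unit and tick_delta_sum is reduced modulo tick, keeping the
 * simulation clock aligned with wall-clock time.
 */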

/*
 * Forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	NET_EPOCH_ASSERT();

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) {	/* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);
			/*
			 * Extract the dummynet info, rename the tag
			 * to carry reinject info.
			 */
			ifp = ifnet_byindexgen(pkt->if_index, pkt->if_idxgen);
			if (((pkt->dn_dir == (DIR_OUT | PROTO_LAYER2)) ||
			    (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2 | PROTO_IPV6))) &&
			    ifp == NULL) {
				dst = DIR_DROP;
			} else {
				dst = pkt->dn_dir;
				tag->m_tag_cookie = MTAG_IPFW_RULE;
				tag->m_tag_id = 0;
			}
		}

		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;

		case DIR_IN:
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB:	/* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				((*bridge_dn_p)(m, ifp));
			else
				printf("dummynet: if_bridge not loaded\n");

			break;

		case DIR_IN | PROTO_LAYER2 | PROTO_IPV6:
		case DIR_IN | PROTO_LAYER2:	/* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2 | PROTO_IPV6:
		case DIR_OUT | PROTO_LAYER2:	/* DN_TO_ETH_OUT: */
			MPASS(ifp != NULL);
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}

static inline int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
	    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return (1);	/* Cannot allocate packet header. */
	m_tag_prepend(m, mtag);	/* Attach to mbuf chain. */
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	/* only keep this info */
	dt->rule.info &= (IPFW_ONEPASS | IPFW_IS_DUMMYNET);
	dt->dn_dir = dir;
	if (fwa->flags & IPFW_ARGS_OUT && fwa->ifp != NULL) {
		NET_EPOCH_ASSERT();
		dt->if_index = fwa->ifp->if_index;
		dt->if_idxgen = fwa->ifp->if_idxgen;
	}
	/* dt->output_time is updated as we move through */
	dt->output_time = V_dn_cfg.curr_time;
	dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0;
	return (0);
}
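
/*
 * Note the tag life cycle: tag_mbuf() prepends the dummynet state tag on
 * entry (so dn_tag_get() can assume it is first on the list), and on
 * exit dummynet_send() renames the same tag to MTAG_IPFW_RULE so the
 * reinjected packet carries the information ipfw needs to resume rule
 * processing after the pipe.
 */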

/*
 * dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated to it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 */
int
dummynet_io(struct mbuf **m0, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */
	int fs_id, dir;

	fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
	    ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
	/* XXXGL: convert args to dir */
	if (fwa->flags & IPFW_ARGS_IN)
		dir = DIR_IN;
	else
		dir = DIR_OUT;
	if (fwa->flags & IPFW_ARGS_ETHER)
		dir |= PROTO_LAYER2;
	else if (fwa->flags & IPFW_ARGS_IP6)
		dir |= PROTO_IPV6;
	DN_BH_WLOCK();
	V_dn_cfg.io_pkt++;
	/* we could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(V_dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* find scheduler instance, possibly applying sched_mask */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;

		/* dn_enqueue already increases io_pkt_drop */
		V_dn_cfg.io_pkt_drop--;

		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL;	/* consumed */
		goto done;	/* already active, nothing to do */
	}

	/* compute the initial allowance */
	if (si->idle_time < V_dn_cfg.curr_time) {
		/* Do this only on the first packet on an idle pipe */
		struct dn_link *p = &fs->sched->link;

		si->sched_time = V_dn_cfg.curr_time;
		si->credit = V_dn_cfg.io_fast ? p->bandwidth : 0;
		if (p->burst) {
			uint64_t burst = (V_dn_cfg.curr_time - si->idle_time) * p->bandwidth;
			if (burst > p->burst)
				burst = p->burst;
			si->credit += burst;
		}
	}
	/* pass through scheduler and delay line */
	m = serve_sched(NULL, si, V_dn_cfg.curr_time);

	/*
	 * Optimization: pass the packet back to ipfw for immediate send.
	 * XXX Don't call dummynet_send() if the scheduler returned the
	 * packet just enqueued; this avoids a lock order reversal.
	 */
	if (/*V_dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* fast io, rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		V_dn_cfg.io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return (0);

dropit:
	V_dn_cfg.io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return ((fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS);
}
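
/*
 * Example configuration exercising this path (illustrative, ipfw(8)
 * syntax; delay is in milliseconds):
 *
 *	ipfw pipe 1 config bw 1Mbit/s delay 20 queue 50
 *	ipfw add 100 pipe 1 ip from any to any
 *
 * Matching packets enter through dummynet_io(), are shaped by
 * serve_sched() and the delay line, and leave via dummynet_send().
 */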