/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2010 University of Zagreb
 * Copyright (c) 2007-2008 FreeBSD Foundation
 *
 * This software was developed by the University of Zagreb and the
 * FreeBSD Foundation under sponsorship by the Stichting NLnet and the
 * FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * This node permits simple traffic shaping by emulating bandwidth
 * and delay, as well as random packet losses.
 * The node has two hooks, upper and lower. Traffic flowing from upper to
 * lower hook is referenced as downstream, and vice versa. Parameters for
 * both directions can be set separately, except for delay.
 */
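
/*
 * As a rough, illustrative example (field names follow struct ng_pipe_cfg
 * and struct ng_pipe_hookcfg below; see ng_pipe(4) for authoritative
 * usage), a node inserted between two other nodes might be configured
 * from userland along these lines:
 *
 *	ngctl msg <node>: setcfg { bandwidth=1000000 delay=20000 }
 *	ngctl msg <node>: setcfg { downstream={ ber=10000000 } }
 *
 * which would emulate a 1 Mbit/s link with 20 ms of propagation delay
 * and a downstream bit error rate of 1 in 10^7.
 */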

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>

#include <vm/uma.h>

#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#include <netgraph/ng_pipe.h>

static MALLOC_DEFINE(M_NG_PIPE, "ng_pipe", "ng_pipe");

/* Packet header struct */
struct ngp_hdr {
	TAILQ_ENTRY(ngp_hdr)	ngp_link;	/* next pkt in queue */
	struct timeval		when;		/* this packet's due time */
	struct mbuf		*m;		/* ptr to the packet data */
};
TAILQ_HEAD(p_head, ngp_hdr);

/* FIFO queue struct */
struct ngp_fifo {
	TAILQ_ENTRY(ngp_fifo)	fifo_le;	/* list of active queues only */
	struct p_head		packet_head;	/* FIFO queue head */
	u_int32_t		hash;		/* flow signature */
	struct timeval		vtime;		/* virtual time, for WFQ */
	u_int32_t		rr_deficit;	/* for DRR */
	u_int32_t		packets;	/* # of packets in this queue */
};

/* Per hook info */
struct hookinfo {
	hook_p			hook;
	int			noqueue;	/* bypass any processing */
	TAILQ_HEAD(, ngp_fifo)	fifo_head;	/* FIFO queues */
	TAILQ_HEAD(, ngp_hdr)	qout_head;	/* delay queue head */
	struct timeval		qin_utime;
	struct ng_pipe_hookcfg	cfg;
	struct ng_pipe_hookrun	run;
	struct ng_pipe_hookstat	stats;
	uint64_t		*ber_p;		/* loss_p(BER,psize) map */
};

/* Per node info */
struct node_priv {
	u_int64_t		delay;
	u_int32_t		overhead;
	u_int32_t		header_offset;
	struct hookinfo		lower;
	struct hookinfo		upper;
	struct callout		timer;
	int			timer_scheduled;
};
typedef struct node_priv *priv_p;

/* Macro for calculating the virtual time for packet dequeueing in WFQ */
#define FIFO_VTIME_SORT(plen)						\
	if (hinfo->cfg.wfq && hinfo->cfg.bandwidth) {			\
		ngp_f->vtime.tv_usec = now->tv_usec + ((uint64_t) (plen) \
		    + priv->overhead ) * hinfo->run.fifo_queues *	\
		    8000000 / hinfo->cfg.bandwidth;			\
		ngp_f->vtime.tv_sec = now->tv_sec +			\
		    ngp_f->vtime.tv_usec / 1000000;			\
		ngp_f->vtime.tv_usec = ngp_f->vtime.tv_usec % 1000000;	\
		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)	\
			if (ngp_f1->vtime.tv_sec > ngp_f->vtime.tv_sec || \
			    (ngp_f1->vtime.tv_sec == ngp_f->vtime.tv_sec && \
			    ngp_f1->vtime.tv_usec > ngp_f->vtime.tv_usec)) \
				break;					\
		if (ngp_f1 == NULL)					\
			TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le); \
		else							\
			TAILQ_INSERT_BEFORE(ngp_f1, ngp_f, fifo_le);	\
	} else								\
		TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le);
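
/*
 * Illustrative numbers only: with WFQ enabled, a 1500-byte packet plus 24
 * bytes of per-frame overhead arriving on a hook with 3 active queues and
 * a configured bandwidth of 10 Mbit/s gets a virtual finish time of "now"
 * plus (1500 + 24) * 3 * 8000000 / 10000000 = 3657 microseconds (integer
 * division), which determines where the queue is re-inserted in the
 * vtime-sorted list maintained by the macro above.
 */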

static void	parse_cfg(struct ng_pipe_hookcfg *, struct ng_pipe_hookcfg *,
			struct hookinfo *, priv_p);
static void	pipe_dequeue(struct hookinfo *, struct timeval *);
static void	ngp_callout(node_p, hook_p, void *, int);
static int	ngp_modevent(module_t, int, void *);

/* zone for storing ngp_hdr-s */
static uma_zone_t ngp_zone;

/* Netgraph methods */
static ng_constructor_t	ngp_constructor;
static ng_rcvmsg_t	ngp_rcvmsg;
static ng_shutdown_t	ngp_shutdown;
static ng_newhook_t	ngp_newhook;
static ng_rcvdata_t	ngp_rcvdata;
static ng_disconnect_t	ngp_disconnect;

/* Parse type for struct ng_pipe_hookstat */
static const struct ng_parse_struct_field
    ng_pipe_hookstat_type_fields[] = NG_PIPE_HOOKSTAT_INFO;
static const struct ng_parse_type ng_pipe_hookstat_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookstat_type_fields
};

/* Parse type for struct ng_pipe_stats */
static const struct ng_parse_struct_field ng_pipe_stats_type_fields[] =
    NG_PIPE_STATS_INFO(&ng_pipe_hookstat_type);
static const struct ng_parse_type ng_pipe_stats_type = {
	&ng_parse_struct_type,
	&ng_pipe_stats_type_fields
};

/* Parse type for struct ng_pipe_hookrun */
static const struct ng_parse_struct_field
    ng_pipe_hookrun_type_fields[] = NG_PIPE_HOOKRUN_INFO;
static const struct ng_parse_type ng_pipe_hookrun_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookrun_type_fields
};

/* Parse type for struct ng_pipe_run */
static const struct ng_parse_struct_field
    ng_pipe_run_type_fields[] = NG_PIPE_RUN_INFO(&ng_pipe_hookrun_type);
static const struct ng_parse_type ng_pipe_run_type = {
	&ng_parse_struct_type,
	&ng_pipe_run_type_fields
};

/* Parse type for struct ng_pipe_hookcfg */
static const struct ng_parse_struct_field
    ng_pipe_hookcfg_type_fields[] = NG_PIPE_HOOKCFG_INFO;
static const struct ng_parse_type ng_pipe_hookcfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookcfg_type_fields
};

/* Parse type for struct ng_pipe_cfg */
static const struct ng_parse_struct_field
    ng_pipe_cfg_type_fields[] = NG_PIPE_CFG_INFO(&ng_pipe_hookcfg_type);
static const struct ng_parse_type ng_pipe_cfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_cfg_type_fields
};

/* List of commands and how to convert arguments to/from ASCII */
static const struct ng_cmdlist ngp_cmds[] = {
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_STATS,
		.name =		"getstats",
		.respType =	&ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_CLR_STATS,
		.name =		"clrstats"
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GETCLR_STATS,
		.name =		"getclrstats",
		.respType =	&ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_RUN,
		.name =		"getrun",
		.respType =	&ng_pipe_run_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_CFG,
		.name =		"getcfg",
		.respType =	&ng_pipe_cfg_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_SET_CFG,
		.name =		"setcfg",
		.mesgType =	&ng_pipe_cfg_type,
	},
	{ 0 }
};

/* Netgraph type descriptor */
static struct ng_type ng_pipe_typestruct = {
	.version =	NG_ABI_VERSION,
	.name =		NG_PIPE_NODE_TYPE,
	.mod_event =	ngp_modevent,
	.constructor =	ngp_constructor,
	.shutdown =	ngp_shutdown,
	.rcvmsg =	ngp_rcvmsg,
	.newhook =	ngp_newhook,
	.rcvdata =	ngp_rcvdata,
	.disconnect =	ngp_disconnect,
	.cmdlist =	ngp_cmds
};
NETGRAPH_INIT(pipe, &ng_pipe_typestruct);

/* Node constructor */
static int
ngp_constructor(node_p node)
{
	priv_p priv;

	priv = malloc(sizeof(*priv), M_NG_PIPE, M_ZERO | M_WAITOK);
	NG_NODE_SET_PRIVATE(node, priv);

	/* Mark node as single-threaded */
	NG_NODE_FORCE_WRITER(node);

	ng_callout_init(&priv->timer);

	return (0);
}
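
/*
 * Because the node is forced into single-threaded (writer) mode in the
 * constructor above, netgraph serializes all data and control traffic
 * entering the node, which is why the queue structures used below can be
 * manipulated without any additional locking.
 */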

/* Add a hook */
static int
ngp_newhook(node_p node, hook_p hook, const char *name)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct hookinfo *hinfo;

	if (strcmp(name, NG_PIPE_HOOK_UPPER) == 0) {
		bzero(&priv->upper, sizeof(priv->upper));
		priv->upper.hook = hook;
		NG_HOOK_SET_PRIVATE(hook, &priv->upper);
	} else if (strcmp(name, NG_PIPE_HOOK_LOWER) == 0) {
		bzero(&priv->lower, sizeof(priv->lower));
		priv->lower.hook = hook;
		NG_HOOK_SET_PRIVATE(hook, &priv->lower);
	} else
		return (EINVAL);

	/* Load non-zero initial cfg values */
	hinfo = NG_HOOK_PRIVATE(hook);
	hinfo->cfg.qin_size_limit = 50;
	hinfo->cfg.fifo = 1;
	hinfo->cfg.droptail = 1;
	TAILQ_INIT(&hinfo->fifo_head);
	TAILQ_INIT(&hinfo->qout_head);
	return (0);
}

/* Receive a control message */
static int
ngp_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *resp = NULL;
	struct ng_mesg *msg, *flow_msg;
	struct ng_pipe_stats *stats;
	struct ng_pipe_run *run;
	struct ng_pipe_cfg *cfg;
	int error = 0;
	int prev_down, now_down, cmd;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_PIPE_COOKIE:
		switch (msg->header.cmd) {
		case NGM_PIPE_GET_STATS:
		case NGM_PIPE_CLR_STATS:
		case NGM_PIPE_GETCLR_STATS:
			if (msg->header.cmd != NGM_PIPE_CLR_STATS) {
				NG_MKRESPONSE(resp, msg,
				    sizeof(*stats), M_NOWAIT);
				if (resp == NULL) {
					error = ENOMEM;
					break;
				}
				stats = (struct ng_pipe_stats *) resp->data;
				bcopy(&priv->upper.stats, &stats->downstream,
				    sizeof(stats->downstream));
				bcopy(&priv->lower.stats, &stats->upstream,
				    sizeof(stats->upstream));
			}
			if (msg->header.cmd != NGM_PIPE_GET_STATS) {
				bzero(&priv->upper.stats,
				    sizeof(priv->upper.stats));
				bzero(&priv->lower.stats,
				    sizeof(priv->lower.stats));
			}
			break;
		case NGM_PIPE_GET_RUN:
			NG_MKRESPONSE(resp, msg, sizeof(*run), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			run = (struct ng_pipe_run *) resp->data;
			bcopy(&priv->upper.run, &run->downstream,
			    sizeof(run->downstream));
			bcopy(&priv->lower.run, &run->upstream,
			    sizeof(run->upstream));
			break;
		case NGM_PIPE_GET_CFG:
			NG_MKRESPONSE(resp, msg, sizeof(*cfg), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			cfg = (struct ng_pipe_cfg *) resp->data;
			bcopy(&priv->upper.cfg, &cfg->downstream,
			    sizeof(cfg->downstream));
			bcopy(&priv->lower.cfg, &cfg->upstream,
			    sizeof(cfg->upstream));
			cfg->delay = priv->delay;
			cfg->overhead = priv->overhead;
			cfg->header_offset = priv->header_offset;
			if (cfg->upstream.bandwidth ==
			    cfg->downstream.bandwidth) {
				cfg->bandwidth = cfg->upstream.bandwidth;
				cfg->upstream.bandwidth = 0;
				cfg->downstream.bandwidth = 0;
			} else
				cfg->bandwidth = 0;
			break;
		case NGM_PIPE_SET_CFG:
			cfg = (struct ng_pipe_cfg *) msg->data;
			if (msg->header.arglen != sizeof(*cfg)) {
				error = EINVAL;
				break;
			}

			if (cfg->delay == -1)
				priv->delay = 0;
			else if (cfg->delay > 0 && cfg->delay < 10000000)
				priv->delay = cfg->delay;

			if (cfg->bandwidth == -1) {
				priv->upper.cfg.bandwidth = 0;
				priv->lower.cfg.bandwidth = 0;
				priv->overhead = 0;
			} else if (cfg->bandwidth >= 100 &&
			    cfg->bandwidth <= 1000000000) {
				priv->upper.cfg.bandwidth = cfg->bandwidth;
				priv->lower.cfg.bandwidth = cfg->bandwidth;
				if (cfg->bandwidth >= 10000000)
					priv->overhead = 8+4+12; /* Ethernet */
				else
					priv->overhead = 10; /* HDLC */
			}

			if (cfg->overhead == -1)
				priv->overhead = 0;
			else if (cfg->overhead > 0 &&
			    cfg->overhead < MAX_OHSIZE)
				priv->overhead = cfg->overhead;

			if (cfg->header_offset == -1)
				priv->header_offset = 0;
			else if (cfg->header_offset > 0 &&
			    cfg->header_offset < 64)
				priv->header_offset = cfg->header_offset;

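			/*
			 * A BER of 1 means that every bit, and therefore
			 * every frame, is corrupted, so the link is
			 * effectively down.  Track transitions of this
			 * condition and notify both peers with
			 * NGM_LINK_IS_DOWN / NGM_LINK_IS_UP flow-control
			 * messages.
			 */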
			prev_down = priv->upper.cfg.ber == 1 ||
			    priv->lower.cfg.ber == 1;
			parse_cfg(&priv->upper.cfg, &cfg->downstream,
			    &priv->upper, priv);
			parse_cfg(&priv->lower.cfg, &cfg->upstream,
			    &priv->lower, priv);
			now_down = priv->upper.cfg.ber == 1 ||
			    priv->lower.cfg.ber == 1;

			if (prev_down != now_down) {
				if (now_down)
					cmd = NGM_LINK_IS_DOWN;
				else
					cmd = NGM_LINK_IS_UP;

				if (priv->lower.hook != NULL) {
					NG_MKMESSAGE(flow_msg, NGM_FLOW_COOKIE,
					    cmd, 0, M_NOWAIT);
					if (flow_msg != NULL)
						NG_SEND_MSG_HOOK(error, node,
						    flow_msg, priv->lower.hook,
						    0);
				}
				if (priv->upper.hook != NULL) {
					NG_MKMESSAGE(flow_msg, NGM_FLOW_COOKIE,
					    cmd, 0, M_NOWAIT);
					if (flow_msg != NULL)
						NG_SEND_MSG_HOOK(error, node,
						    flow_msg, priv->upper.hook,
						    0);
				}
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);

	return (error);
}

static void
parse_cfg(struct ng_pipe_hookcfg *current, struct ng_pipe_hookcfg *new,
	struct hookinfo *hinfo, priv_p priv)
{

	if (new->ber == -1) {
		current->ber = 0;
		if (hinfo->ber_p) {
			free(hinfo->ber_p, M_NG_PIPE);
			hinfo->ber_p = NULL;
		}
	} else if (new->ber >= 1 && new->ber <= 1000000000000) {
		static const uint64_t one = 0x1000000000000; /* = 2^48 */
		uint64_t p0, p;
		uint32_t fsize, i;

		if (hinfo->ber_p == NULL)
			hinfo->ber_p =
			    malloc((MAX_FSIZE + MAX_OHSIZE) * sizeof(uint64_t),
			    M_NG_PIPE, M_WAITOK);
		current->ber = new->ber;

		/*
		 * For given BER and each frame size N (in bytes) calculate
		 * the probability P_OK that the frame is clean:
		 *
		 * P_OK(BER,N) = (1 - 1/BER)^(N*8)
		 *
		 * We use a 64-bit fixed-point format with decimal point
		 * positioned between bits 47 and 48.
		 */
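		/*
		 * The loop below raises p0 = 1 - 1/BER to the (fsize * 8)-th
		 * power one bit at a time.  Each multiplication splits p0
		 * into three 16-bit limbs so that every partial product of
		 * the two 48-bit fixed-point operands fits into 64 bits,
		 * with the result renormalized back to 48 fractional bits.
		 * As a rough illustration: for BER = 10^7 and a 1524-byte
		 * frame, P_OK = (1 - 10^-7)^(1524*8) is approximately
		 * 0.9988, i.e. about 0.12% of such frames get dropped.
		 */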
		p0 = one - one / new->ber;
		p = one;
		for (fsize = 0; fsize < MAX_FSIZE + MAX_OHSIZE; fsize++) {
			hinfo->ber_p[fsize] = p;
			for (i = 0; i < 8; i++)
				p = (p * (p0 & 0xffff) >> 48) +
				    (p * ((p0 >> 16) & 0xffff) >> 32) +
				    (p * (p0 >> 32) >> 16);
		}
	}

	if (new->qin_size_limit == -1)
		current->qin_size_limit = 0;
	else if (new->qin_size_limit >= 5)
		current->qin_size_limit = new->qin_size_limit;

	if (new->qout_size_limit == -1)
		current->qout_size_limit = 0;
	else if (new->qout_size_limit >= 5)
		current->qout_size_limit = new->qout_size_limit;

	if (new->duplicate == -1)
		current->duplicate = 0;
	else if (new->duplicate > 0 && new->duplicate <= 50)
		current->duplicate = new->duplicate;

	if (new->fifo) {
		current->fifo = 1;
		current->wfq = 0;
		current->drr = 0;
	}

	if (new->wfq) {
		current->fifo = 0;
		current->wfq = 1;
		current->drr = 0;
	}

	if (new->drr) {
		current->fifo = 0;
		current->wfq = 0;
		/* DRR quantum */
		if (new->drr >= 32)
			current->drr = new->drr;
		else
			current->drr = 2048;	/* default quantum */
	}

	if (new->droptail) {
		current->droptail = 1;
		current->drophead = 0;
	}

	if (new->drophead) {
		current->droptail = 0;
		current->drophead = 1;
	}

	if (new->bandwidth == -1) {
		current->bandwidth = 0;
		current->fifo = 1;
		current->wfq = 0;
		current->drr = 0;
	} else if (new->bandwidth >= 100 && new->bandwidth <= 1000000000)
		current->bandwidth = new->bandwidth;

	if (current->bandwidth | priv->delay |
	    current->duplicate | current->ber)
		hinfo->noqueue = 0;
	else
		hinfo->noqueue = 1;
}

/*
 * Compute a hash signature for a packet. This function suffers from the
 * NIH syndrome, so it would probably be wise to look around at what other
 * folks have found to be a good and efficient IP hash function...
 */
static int
ip_hash(struct mbuf *m, int offset)
{
	u_int64_t i;
	struct ip *ip = (struct ip *)(mtod(m, u_char *) + offset);

	if (m->m_len < sizeof(struct ip) + offset ||
	    ip->ip_v != 4 || ip->ip_hl << 2 != sizeof(struct ip))
		return 0;

	i = ((u_int64_t) ip->ip_src.s_addr ^
	    ((u_int64_t) ip->ip_src.s_addr << 13) ^
	    ((u_int64_t) ip->ip_dst.s_addr << 7) ^
	    ((u_int64_t) ip->ip_dst.s_addr << 19));
	return (i ^ (i >> 32));
}
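
/*
 * Note that frames which are not plain IPv4 (or that carry IP options, or
 * are too short to hold a full IP header at the configured offset) all
 * hash to 0 above, and therefore end up sharing a single FIFO queue.
 */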

/*
 * Receive data on a hook - both in upstream and downstream direction.
 * We put the frame on the inbound queue, and try to initiate the dequeuing
 * sequence immediately. If the inbound queue is full, discard one frame,
 * depending on the dropping policy (from the head or from the tail of the
 * queue).
 */
static int
ngp_rcvdata(hook_p hook, item_p item)
{
	struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	struct timeval uuptime;
	struct timeval *now = &uuptime;
	struct ngp_fifo *ngp_f = NULL, *ngp_f1;
	struct ngp_hdr *ngp_h = NULL;
	struct mbuf *m;
	int hash, plen;
	int error = 0;

	/*
	 * Shortcut from the inbound to the outbound hook when none of
	 * bandwidth, delay, BER or duplication probability is configured,
	 * and we have no queued frames to drain.
	 */
	if (hinfo->run.qin_frames == 0 && hinfo->run.qout_frames == 0 &&
	    hinfo->noqueue) {
		struct hookinfo *dest;
		if (hinfo == &priv->lower)
			dest = &priv->upper;
		else
			dest = &priv->lower;

		/* Send the frame. */
		plen = NGI_M(item)->m_pkthdr.len;
		NG_FWD_ITEM_HOOK(error, item, dest->hook);

		/* Update stats. */
		if (error) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += plen;
		} else {
			hinfo->stats.fwd_frames++;
			hinfo->stats.fwd_octets += plen;
		}

		return (error);
	}

	microuptime(now);

	/*
	 * If this was an empty queue, update service deadline time.
	 */
	if (hinfo->run.qin_frames == 0) {
		struct timeval *when = &hinfo->qin_utime;
		if (when->tv_sec < now->tv_sec || (when->tv_sec == now->tv_sec
		    && when->tv_usec < now->tv_usec)) {
			when->tv_sec = now->tv_sec;
			when->tv_usec = now->tv_usec;
		}
	}

	/* Populate the packet header */
	ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
	KASSERT((ngp_h != NULL), ("ngp_h zalloc failed (1)"));
	NGI_GET_M(item, m);
	KASSERT(m != NULL, ("NGI_GET_M failed"));
	ngp_h->m = m;
	NG_FREE_ITEM(item);

	if (hinfo->cfg.fifo)
		hash = 0;	/* all packets go into a single FIFO queue */
	else
		hash = ip_hash(m, priv->header_offset);

	/* Find the appropriate FIFO queue for the packet and enqueue it */
	TAILQ_FOREACH(ngp_f, &hinfo->fifo_head, fifo_le)
		if (hash == ngp_f->hash)
			break;
	if (ngp_f == NULL) {
		ngp_f = uma_zalloc(ngp_zone, M_NOWAIT);
		KASSERT(ngp_f != NULL, ("ngp_f zalloc failed (2)"));
		TAILQ_INIT(&ngp_f->packet_head);
		ngp_f->hash = hash;
		ngp_f->packets = 1;
		ngp_f->rr_deficit = hinfo->cfg.drr;	/* DRR quantum */
		hinfo->run.fifo_queues++;
		TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
		FIFO_VTIME_SORT(m->m_pkthdr.len);
	} else {
		TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
		ngp_f->packets++;
	}
	hinfo->run.qin_frames++;
	hinfo->run.qin_octets += m->m_pkthdr.len;

	/* Discard a frame if the inbound queue limit has been reached */
	if (hinfo->run.qin_frames > hinfo->cfg.qin_size_limit) {
		struct mbuf *m1;
		int longest = 0;

		/* Find the longest queue */
		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)
			if (ngp_f1->packets > longest) {
				longest = ngp_f1->packets;
				ngp_f = ngp_f1;
			}

		/* Drop a frame from the queue head/tail, depending on cfg */
		if (hinfo->cfg.drophead)
			ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
		else
			ngp_h = TAILQ_LAST(&ngp_f->packet_head, p_head);
		TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
		m1 = ngp_h->m;
		uma_zfree(ngp_zone, ngp_h);
		hinfo->run.qin_octets -= m1->m_pkthdr.len;
		hinfo->stats.in_disc_octets += m1->m_pkthdr.len;
		m_freem(m1);
		if (--(ngp_f->packets) == 0) {
			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
			uma_zfree(ngp_zone, ngp_f);
			hinfo->run.fifo_queues--;
		}
		hinfo->run.qin_frames--;
		hinfo->stats.in_disc_frames++;
	}

	/*
	 * Try to start the dequeuing process immediately.
	 */
	pipe_dequeue(hinfo, now);

	return (0);
}

/*
 * Dequeueing sequence - we basically do the following:
 * 1) Try to extract the frame from the inbound (bandwidth) queue;
 * 2) In accordance with the specified BER, discard the frame randomly;
 * 3) If the frame survives BER, prepend it with delay info and move it
 *    to the outbound (delay) queue;
 * 4) Loop to 2) until the bandwidth quota for this timeslice is reached,
 *    or the inbound queue is completely flushed;
 * 5) Dequeue frames from the outbound queue and send them downstream until
 *    the outbound queue is completely flushed, or the next frame in the
 *    queue is not yet due to be dequeued.
 */
static void
pipe_dequeue(struct hookinfo *hinfo, struct timeval *now)
{
	static uint64_t rand, oldrand;
	const node_p node = NG_HOOK_NODE(hinfo->hook);
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct hookinfo *dest;
	struct ngp_fifo *ngp_f, *ngp_f1;
	struct ngp_hdr *ngp_h;
	struct timeval *when;
	struct mbuf *m;
	int plen, error = 0;

	/* Which one is the destination hook? */
	if (hinfo == &priv->lower)
		dest = &priv->upper;
	else
		dest = &priv->lower;

	/* Bandwidth queue processing */
	while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
		when = &hinfo->qin_utime;
		if (when->tv_sec > now->tv_sec || (when->tv_sec == now->tv_sec
		    && when->tv_usec > now->tv_usec))
			break;

		ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
		m = ngp_h->m;

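		/*
		 * With DRR, each active queue holds a byte credit
		 * (rr_deficit).  A queue may only transmit its head packet
		 * if the credit covers the packet length; otherwise the
		 * queue is granted another quantum (cfg.drr bytes) and
		 * rotated to the tail of the round-robin list, as done
		 * below.
		 */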
		/* Deficit Round Robin (DRR) processing */
		if (hinfo->cfg.drr) {
			if (ngp_f->rr_deficit >= m->m_pkthdr.len) {
				ngp_f->rr_deficit -= m->m_pkthdr.len;
			} else {
				ngp_f->rr_deficit += hinfo->cfg.drr;
				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
				TAILQ_INSERT_TAIL(&hinfo->fifo_head,
				    ngp_f, fifo_le);
				continue;
			}
		}

		/*
		 * Either create a duplicate and pass it on, or dequeue
		 * the original packet...
		 */
		if (hinfo->cfg.duplicate &&
		    random() % 100 <= hinfo->cfg.duplicate) {
			ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
			KASSERT(ngp_h != NULL, ("ngp_h zalloc failed (3)"));
			m = m_dup(m, M_NOWAIT);
			KASSERT(m != NULL, ("m_dup failed"));
			ngp_h->m = m;
		} else {
			TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
			hinfo->run.qin_frames--;
			hinfo->run.qin_octets -= m->m_pkthdr.len;
			ngp_f->packets--;
		}

		/* Calculate the serialization delay */
		if (hinfo->cfg.bandwidth) {
			hinfo->qin_utime.tv_usec +=
			    ((uint64_t) m->m_pkthdr.len + priv->overhead ) *
			    8000000 / hinfo->cfg.bandwidth;
			hinfo->qin_utime.tv_sec +=
			    hinfo->qin_utime.tv_usec / 1000000;
			hinfo->qin_utime.tv_usec =
			    hinfo->qin_utime.tv_usec % 1000000;
		}
		when = &ngp_h->when;
		when->tv_sec = hinfo->qin_utime.tv_sec;
		when->tv_usec = hinfo->qin_utime.tv_usec;

		/* Sort / rearrange inbound queues */
		if (ngp_f->packets) {
			if (hinfo->cfg.wfq) {
				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
				FIFO_VTIME_SORT(TAILQ_FIRST(
				    &ngp_f->packet_head)->m->m_pkthdr.len)
			}
		} else {
			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
			uma_zfree(ngp_zone, ngp_f);
			hinfo->run.fifo_queues--;
		}

		/*
		 * Randomly discard the frame, according to the BER setting:
		 * a 48-bit pseudo-random value is compared against the
		 * precomputed fixed-point P_OK for this frame size.
		 */
		if (hinfo->cfg.ber) {
			oldrand = rand;
			rand = random();
			if (((oldrand ^ rand) << 17) >=
			    hinfo->ber_p[priv->overhead + m->m_pkthdr.len]) {
				hinfo->stats.out_disc_frames++;
				hinfo->stats.out_disc_octets += m->m_pkthdr.len;
				uma_zfree(ngp_zone, ngp_h);
				m_freem(m);
				continue;
			}
		}

		/* Discard the frame if the outbound queue limit is exceeded */
		if (hinfo->cfg.qout_size_limit &&
		    hinfo->run.qout_frames >= hinfo->cfg.qout_size_limit) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += m->m_pkthdr.len;
			uma_zfree(ngp_zone, ngp_h);
			m_freem(m);
			continue;
		}

		/* Calculate the propagation delay */
		when->tv_usec += priv->delay;
		when->tv_sec += when->tv_usec / 1000000;
		when->tv_usec = when->tv_usec % 1000000;

		/* Put the frame into the delay queue */
		TAILQ_INSERT_TAIL(&hinfo->qout_head, ngp_h, ngp_link);
		hinfo->run.qout_frames++;
		hinfo->run.qout_octets += m->m_pkthdr.len;
	}

	/* Delay queue processing */
	while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
		when = &ngp_h->when;
		m = ngp_h->m;
		if (when->tv_sec > now->tv_sec ||
		    (when->tv_sec == now->tv_sec &&
		    when->tv_usec > now->tv_usec))
			break;

		/* Update outbound queue stats */
		plen = m->m_pkthdr.len;
		hinfo->run.qout_frames--;
		hinfo->run.qout_octets -= plen;

		/* Dequeue the packet from qout */
		TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
		uma_zfree(ngp_zone, ngp_h);

		NG_SEND_DATA(error, dest->hook, m, meta);
		if (error) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += plen;
		} else {
			hinfo->stats.fwd_frames++;
			hinfo->stats.fwd_octets += plen;
		}
	}

	if ((hinfo->run.qin_frames != 0 || hinfo->run.qout_frames != 0) &&
	    !priv->timer_scheduled) {
		ng_callout(&priv->timer, node, NULL, 1, ngp_callout, NULL, 0);
		priv->timer_scheduled = 1;
	}
}

/*
 * This routine is called on every clock tick. We poll connected hooks
 * for queued frames by calling pipe_dequeue().
 */
static void
ngp_callout(node_p node, hook_p hook, void *arg1, int arg2)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct timeval now;

	priv->timer_scheduled = 0;
	microuptime(&now);
	if (priv->upper.hook != NULL)
		pipe_dequeue(&priv->upper, &now);
	if (priv->lower.hook != NULL)
		pipe_dequeue(&priv->lower, &now);
}

/*
 * Shutdown processing
 *
 * This is tricky. If we have both a lower and upper hook, then we
 * probably want to extricate ourselves and leave the two peers
 * still linked to each other. Otherwise we should just shut down as
 * a normal node would.
 */
static int
ngp_shutdown(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);

	if (priv->timer_scheduled)
		ng_uncallout(&priv->timer, node);
	if (priv->lower.hook && priv->upper.hook)
		ng_bypass(priv->lower.hook, priv->upper.hook);
	else {
		if (priv->upper.hook != NULL)
			ng_rmhook_self(priv->upper.hook);
		if (priv->lower.hook != NULL)
			ng_rmhook_self(priv->lower.hook);
	}
	NG_NODE_UNREF(node);
	free(priv, M_NG_PIPE);
	return (0);
}

/*
 * Hook disconnection
 */
static int
ngp_disconnect(hook_p hook)
{
	struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
	struct ngp_fifo *ngp_f;
	struct ngp_hdr *ngp_h;

	KASSERT(hinfo != NULL, ("%s: null info", __FUNCTION__));
	hinfo->hook = NULL;

	/* Flush all fifo queues associated with the hook */
	while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
		while ((ngp_h = TAILQ_FIRST(&ngp_f->packet_head))) {
			TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
			m_freem(ngp_h->m);
			uma_zfree(ngp_zone, ngp_h);
		}
		TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
		uma_zfree(ngp_zone, ngp_f);
	}

	/* Flush the delay queue */
	while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
		TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
		m_freem(ngp_h->m);
		uma_zfree(ngp_zone, ngp_h);
	}

	/* Release the packet loss probability table (BER) */
	if (hinfo->ber_p)
		free(hinfo->ber_p, M_NG_PIPE);

	return (0);
}

static int
ngp_modevent(module_t mod, int type, void *unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		ngp_zone = uma_zcreate("ng_pipe", max(sizeof(struct ngp_hdr),
		    sizeof (struct ngp_fifo)), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		if (ngp_zone == NULL)
			panic("ng_pipe: couldn't allocate descriptor zone");
		break;
	case MOD_UNLOAD:
		uma_zdestroy(ngp_zone);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}