/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004-2010 University of Zagreb
 * Copyright (c) 2007-2008 FreeBSD Foundation
 *
 * This software was developed by the University of Zagreb and the
 * FreeBSD Foundation under sponsorship by the Stichting NLnet and the
 * FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This node permits simple traffic shaping by emulating bandwidth
 * and delay, as well as random packet losses.
 * The node has two hooks, upper and lower.  Traffic flowing from upper to
 * lower hook is referred to as downstream, and vice versa.  Parameters for
 * both directions can be set separately, except for delay.
 */
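
/*
 * A hypothetical configuration sketch (illustration only, not part of the
 * original source): the node is normally spliced between two other
 * netgraph nodes and then configured via control messages, e.g. with
 * ngctl(8):
 *
 *   ngctl msg <nodename>: setcfg { bandwidth=1000000 delay=20000 }
 *   ngctl msg <nodename>: setcfg { downstream={ ber=1000000 } }
 *
 * The message names and the ASCII argument layout follow the ngp_cmds
 * table and the ng_pipe_cfg parse types defined further below; delay is
 * applied to the packet due time in microseconds.
 */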

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>

#include <vm/uma.h>

#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#include <netgraph/ng_pipe.h>

static MALLOC_DEFINE(M_NG_PIPE, "ng_pipe", "ng_pipe");

/* Packet header struct */
struct ngp_hdr {
    TAILQ_ENTRY(ngp_hdr) ngp_link;  /* next pkt in queue */
    struct timeval when;            /* this packet's due time */
    struct mbuf *m;                 /* ptr to the packet data */
};
TAILQ_HEAD(p_head, ngp_hdr);

/* FIFO queue struct */
struct ngp_fifo {
    TAILQ_ENTRY(ngp_fifo) fifo_le;  /* list of active queues only */
    struct p_head packet_head;      /* FIFO queue head */
    u_int32_t hash;                 /* flow signature */
    struct timeval vtime;           /* virtual time, for WFQ */
    u_int32_t rr_deficit;           /* for DRR */
    u_int32_t packets;              /* # of packets in this queue */
};

/* Per hook info */
struct hookinfo {
    hook_p hook;
    int noqueue;                    /* bypass any processing */
    TAILQ_HEAD(, ngp_fifo) fifo_head;   /* FIFO queues */
    TAILQ_HEAD(, ngp_hdr) qout_head;    /* delay queue head */
    struct timeval qin_utime;
    struct ng_pipe_hookcfg cfg;
    struct ng_pipe_hookrun run;
    struct ng_pipe_hookstat stats;
    uint64_t *ber_p;                /* loss_p(BER,psize) map */
};

/* Per node info */
struct node_priv {
    u_int64_t delay;
    u_int32_t overhead;
    u_int32_t header_offset;
    struct hookinfo lower;
    struct hookinfo upper;
    struct callout timer;
    int timer_scheduled;
};
typedef struct node_priv *priv_p;

/* Macro for calculating the virtual time for packet dequeueing in WFQ */
#define FIFO_VTIME_SORT(plen)                                           \
    if (hinfo->cfg.wfq && hinfo->cfg.bandwidth) {                       \
        ngp_f->vtime.tv_usec = now->tv_usec + ((uint64_t) (plen)        \
            + priv->overhead) * hinfo->run.fifo_queues *                \
            8000000 / hinfo->cfg.bandwidth;                             \
        ngp_f->vtime.tv_sec = now->tv_sec +                             \
            ngp_f->vtime.tv_usec / 1000000;                             \
        ngp_f->vtime.tv_usec = ngp_f->vtime.tv_usec % 1000000;          \
        TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)               \
            if (ngp_f1->vtime.tv_sec > ngp_f->vtime.tv_sec ||           \
                (ngp_f1->vtime.tv_sec == ngp_f->vtime.tv_sec &&         \
                ngp_f1->vtime.tv_usec > ngp_f->vtime.tv_usec))          \
                break;                                                  \
        if (ngp_f1 == NULL)                                             \
            TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le);       \
        else                                                            \
            TAILQ_INSERT_BEFORE(ngp_f1, ngp_f, fifo_le);                \
    } else                                                              \
        TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le);
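
/*
 * Illustrative numbers only (not from the original source): with WFQ
 * enabled, a 1500-byte packet arriving on a hook configured for
 * bandwidth=10000000 (10 Mbit/s) while 3 FIFO queues are active and
 * overhead=0 gets a virtual due time of roughly
 *
 *   1500 * 3 * 8000000 / 10000000 = 3600 microseconds
 *
 * past "now".  The active queues are kept sorted by this value, so flows
 * sending smaller or less frequent packets are served proportionally
 * sooner.
 */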

static void parse_cfg(struct ng_pipe_hookcfg *, struct ng_pipe_hookcfg *,
    struct hookinfo *, priv_p);
static void pipe_dequeue(struct hookinfo *, struct timeval *);
static void ngp_callout(node_p, hook_p, void *, int);
static int ngp_modevent(module_t, int, void *);

/* zone for storing ngp_hdr-s */
static uma_zone_t ngp_zone;

/* Netgraph methods */
static ng_constructor_t ngp_constructor;
static ng_rcvmsg_t ngp_rcvmsg;
static ng_shutdown_t ngp_shutdown;
static ng_newhook_t ngp_newhook;
static ng_rcvdata_t ngp_rcvdata;
static ng_disconnect_t ngp_disconnect;

/* Parse type for struct ng_pipe_hookstat */
static const struct ng_parse_struct_field
    ng_pipe_hookstat_type_fields[] = NG_PIPE_HOOKSTAT_INFO;
static const struct ng_parse_type ng_pipe_hookstat_type = {
    &ng_parse_struct_type,
    &ng_pipe_hookstat_type_fields
};

/* Parse type for struct ng_pipe_stats */
static const struct ng_parse_struct_field ng_pipe_stats_type_fields[] =
    NG_PIPE_STATS_INFO(&ng_pipe_hookstat_type);
static const struct ng_parse_type ng_pipe_stats_type = {
    &ng_parse_struct_type,
    &ng_pipe_stats_type_fields
};

/* Parse type for struct ng_pipe_hookrun */
static const struct ng_parse_struct_field
    ng_pipe_hookrun_type_fields[] = NG_PIPE_HOOKRUN_INFO;
static const struct ng_parse_type ng_pipe_hookrun_type = {
    &ng_parse_struct_type,
    &ng_pipe_hookrun_type_fields
};

/* Parse type for struct ng_pipe_run */
static const struct ng_parse_struct_field
    ng_pipe_run_type_fields[] = NG_PIPE_RUN_INFO(&ng_pipe_hookrun_type);
static const struct ng_parse_type ng_pipe_run_type = {
    &ng_parse_struct_type,
    &ng_pipe_run_type_fields
};

/* Parse type for struct ng_pipe_hookcfg */
static const struct ng_parse_struct_field
    ng_pipe_hookcfg_type_fields[] = NG_PIPE_HOOKCFG_INFO;
static const struct ng_parse_type ng_pipe_hookcfg_type = {
    &ng_parse_struct_type,
    &ng_pipe_hookcfg_type_fields
};

/* Parse type for struct ng_pipe_cfg */
static const struct ng_parse_struct_field
    ng_pipe_cfg_type_fields[] = NG_PIPE_CFG_INFO(&ng_pipe_hookcfg_type);
static const struct ng_parse_type ng_pipe_cfg_type = {
    &ng_parse_struct_type,
    &ng_pipe_cfg_type_fields
};

/* List of commands and how to convert arguments to/from ASCII */
static const struct ng_cmdlist ngp_cmds[] = {
    {
        .cookie = NGM_PIPE_COOKIE,
        .cmd = NGM_PIPE_GET_STATS,
        .name = "getstats",
        .respType = &ng_pipe_stats_type
    },
    {
        .cookie = NGM_PIPE_COOKIE,
        .cmd = NGM_PIPE_CLR_STATS,
        .name = "clrstats"
    },
    {
        .cookie = NGM_PIPE_COOKIE,
        .cmd = NGM_PIPE_GETCLR_STATS,
        .name = "getclrstats",
        .respType = &ng_pipe_stats_type
    },
    {
        .cookie = NGM_PIPE_COOKIE,
        .cmd = NGM_PIPE_GET_RUN,
        .name = "getrun",
        .respType = &ng_pipe_run_type
    },
    {
        .cookie = NGM_PIPE_COOKIE,
        .cmd = NGM_PIPE_GET_CFG,
        .name = "getcfg",
        .respType = &ng_pipe_cfg_type
    },
    {
        .cookie = NGM_PIPE_COOKIE,
        .cmd = NGM_PIPE_SET_CFG,
        .name = "setcfg",
        .mesgType = &ng_pipe_cfg_type,
    },
    { 0 }
};

/* Netgraph type descriptor */
static struct ng_type ng_pipe_typestruct = {
    .version = NG_ABI_VERSION,
    .name = NG_PIPE_NODE_TYPE,
    .mod_event = ngp_modevent,
    .constructor = ngp_constructor,
    .shutdown = ngp_shutdown,
    .rcvmsg = ngp_rcvmsg,
    .newhook = ngp_newhook,
    .rcvdata = ngp_rcvdata,
    .disconnect = ngp_disconnect,
    .cmdlist = ngp_cmds
};
NETGRAPH_INIT(pipe, &ng_pipe_typestruct);

/* Node constructor */
static int
ngp_constructor(node_p node)
{
    priv_p priv;

    priv = malloc(sizeof(*priv), M_NG_PIPE, M_ZERO | M_WAITOK);
    NG_NODE_SET_PRIVATE(node, priv);

    /* Mark node as single-threaded */
    NG_NODE_FORCE_WRITER(node);

    ng_callout_init(&priv->timer);

    return (0);
}
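
/*
 * Since the constructor marks the node as a forced writer (single-
 * threaded) node, netgraph serializes its data, control-message and
 * callout entry points; this is presumably why the per-hook queue
 * structures below are manipulated without any additional locking.
 */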

/* Add a hook */
static int
ngp_newhook(node_p node, hook_p hook, const char *name)
{
    const priv_p priv = NG_NODE_PRIVATE(node);
    struct hookinfo *hinfo;

    if (strcmp(name, NG_PIPE_HOOK_UPPER) == 0) {
        bzero(&priv->upper, sizeof(priv->upper));
        priv->upper.hook = hook;
        NG_HOOK_SET_PRIVATE(hook, &priv->upper);
    } else if (strcmp(name, NG_PIPE_HOOK_LOWER) == 0) {
        bzero(&priv->lower, sizeof(priv->lower));
        priv->lower.hook = hook;
        NG_HOOK_SET_PRIVATE(hook, &priv->lower);
    } else
        return (EINVAL);

    /* Load non-zero initial cfg values */
    hinfo = NG_HOOK_PRIVATE(hook);
    hinfo->cfg.qin_size_limit = 50;
    hinfo->cfg.fifo = 1;
    hinfo->cfg.droptail = 1;
    TAILQ_INIT(&hinfo->fifo_head);
    TAILQ_INIT(&hinfo->qout_head);
    return (0);
}

/* Receive a control message */
static int
ngp_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
    const priv_p priv = NG_NODE_PRIVATE(node);
    struct ng_mesg *resp = NULL;
    struct ng_mesg *msg, *flow_msg;
    struct ng_pipe_stats *stats;
    struct ng_pipe_run *run;
    struct ng_pipe_cfg *cfg;
    int error = 0;
    int prev_down, now_down, cmd;

    NGI_GET_MSG(item, msg);
    switch (msg->header.typecookie) {
    case NGM_PIPE_COOKIE:
        switch (msg->header.cmd) {
        case NGM_PIPE_GET_STATS:
        case NGM_PIPE_CLR_STATS:
        case NGM_PIPE_GETCLR_STATS:
            if (msg->header.cmd != NGM_PIPE_CLR_STATS) {
                NG_MKRESPONSE(resp, msg,
                    sizeof(*stats), M_NOWAIT);
                if (resp == NULL) {
                    error = ENOMEM;
                    break;
                }
                stats = (struct ng_pipe_stats *) resp->data;
                bcopy(&priv->upper.stats, &stats->downstream,
                    sizeof(stats->downstream));
                bcopy(&priv->lower.stats, &stats->upstream,
                    sizeof(stats->upstream));
            }
            if (msg->header.cmd != NGM_PIPE_GET_STATS) {
                bzero(&priv->upper.stats,
                    sizeof(priv->upper.stats));
                bzero(&priv->lower.stats,
                    sizeof(priv->lower.stats));
            }
            break;
        case NGM_PIPE_GET_RUN:
            NG_MKRESPONSE(resp, msg, sizeof(*run), M_NOWAIT);
            if (resp == NULL) {
                error = ENOMEM;
                break;
            }
            run = (struct ng_pipe_run *) resp->data;
            bcopy(&priv->upper.run, &run->downstream,
                sizeof(run->downstream));
            bcopy(&priv->lower.run, &run->upstream,
                sizeof(run->upstream));
            break;
        case NGM_PIPE_GET_CFG:
            NG_MKRESPONSE(resp, msg, sizeof(*cfg), M_NOWAIT);
            if (resp == NULL) {
                error = ENOMEM;
                break;
            }
            cfg = (struct ng_pipe_cfg *) resp->data;
            bcopy(&priv->upper.cfg, &cfg->downstream,
                sizeof(cfg->downstream));
            bcopy(&priv->lower.cfg, &cfg->upstream,
                sizeof(cfg->upstream));
            cfg->delay = priv->delay;
            cfg->overhead = priv->overhead;
            cfg->header_offset = priv->header_offset;
            if (cfg->upstream.bandwidth ==
                cfg->downstream.bandwidth) {
                cfg->bandwidth = cfg->upstream.bandwidth;
                cfg->upstream.bandwidth = 0;
                cfg->downstream.bandwidth = 0;
            } else
                cfg->bandwidth = 0;
            break;
        case NGM_PIPE_SET_CFG:
            cfg = (struct ng_pipe_cfg *) msg->data;
            if (msg->header.arglen != sizeof(*cfg)) {
                error = EINVAL;
                break;
            }

            if (cfg->delay == -1)
                priv->delay = 0;
            else if (cfg->delay > 0 && cfg->delay < 10000000)
                priv->delay = cfg->delay;

            if (cfg->bandwidth == -1) {
                priv->upper.cfg.bandwidth = 0;
                priv->lower.cfg.bandwidth = 0;
                priv->overhead = 0;
            } else if (cfg->bandwidth >= 100 &&
                cfg->bandwidth <= 1000000000) {
                priv->upper.cfg.bandwidth = cfg->bandwidth;
                priv->lower.cfg.bandwidth = cfg->bandwidth;
                if (cfg->bandwidth >= 10000000)
                    priv->overhead = 8+4+12;    /* Ethernet */
                else
                    priv->overhead = 10;        /* HDLC */
            }
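
            /*
             * The automatic overhead chosen above is only a rough
             * per-frame framing estimate: 8+4+12 bytes roughly matches
             * Ethernet preamble/SFD, FCS and minimum inter-frame gap,
             * while 10 bytes is a ballpark figure for HDLC framing.
             * An explicit "overhead" value in the message, handled
             * below, overrides either default.
             */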

            if (cfg->overhead == -1)
                priv->overhead = 0;
            else if (cfg->overhead > 0 &&
                cfg->overhead < MAX_OHSIZE)
                priv->overhead = cfg->overhead;

            if (cfg->header_offset == -1)
                priv->header_offset = 0;
            else if (cfg->header_offset > 0 &&
                cfg->header_offset < 64)
                priv->header_offset = cfg->header_offset;

            prev_down = priv->upper.cfg.ber == 1 ||
                priv->lower.cfg.ber == 1;
            parse_cfg(&priv->upper.cfg, &cfg->downstream,
                &priv->upper, priv);
            parse_cfg(&priv->lower.cfg, &cfg->upstream,
                &priv->lower, priv);
            now_down = priv->upper.cfg.ber == 1 ||
                priv->lower.cfg.ber == 1;

            if (prev_down != now_down) {
                if (now_down)
                    cmd = NGM_LINK_IS_DOWN;
                else
                    cmd = NGM_LINK_IS_UP;

                if (priv->lower.hook != NULL) {
                    NG_MKMESSAGE(flow_msg, NGM_FLOW_COOKIE,
                        cmd, 0, M_NOWAIT);
                    if (flow_msg != NULL)
                        NG_SEND_MSG_HOOK(error, node,
                            flow_msg, priv->lower.hook,
                            0);
                }
                if (priv->upper.hook != NULL) {
                    NG_MKMESSAGE(flow_msg, NGM_FLOW_COOKIE,
                        cmd, 0, M_NOWAIT);
                    if (flow_msg != NULL)
                        NG_SEND_MSG_HOOK(error, node,
                            flow_msg, priv->upper.hook,
                            0);
                }
            }
            break;
        default:
            error = EINVAL;
            break;
        }
        break;
    default:
        error = EINVAL;
        break;
    }
    NG_RESPOND_MSG(error, node, item, resp);
    NG_FREE_MSG(msg);

    return (error);
}

static void
parse_cfg(struct ng_pipe_hookcfg *current, struct ng_pipe_hookcfg *new,
    struct hookinfo *hinfo, priv_p priv)
{

    if (new->ber == -1) {
        current->ber = 0;
        if (hinfo->ber_p) {
            free(hinfo->ber_p, M_NG_PIPE);
            hinfo->ber_p = NULL;
        }
    } else if (new->ber >= 1 && new->ber <= 1000000000000) {
        static const uint64_t one = 0x1000000000000;    /* = 2^48 */
        uint64_t p0, p;
        uint32_t fsize, i;

        if (hinfo->ber_p == NULL)
            hinfo->ber_p =
                malloc((MAX_FSIZE + MAX_OHSIZE) * sizeof(uint64_t),
                M_NG_PIPE, M_WAITOK);
        current->ber = new->ber;

        /*
         * For a given BER and each frame size N (in bytes) calculate
         * the probability P_OK that the frame is clean:
         *
         * P_OK(BER,N) = (1 - 1/BER)^(N*8)
         *
         * We use a 64-bit fixed-point format with the decimal point
         * positioned between bits 47 and 48.
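         *
         * As a rough illustration (numbers not from the original
         * comment): with BER = 10^6, a 1500-byte frame is error-free
         * with probability (1 - 10^-6)^12000, i.e. about 0.988, so
         * roughly 1.2% of such frames end up being discarded.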
         */
        p0 = one - one / new->ber;
        p = one;
        for (fsize = 0; fsize < MAX_FSIZE + MAX_OHSIZE; fsize++) {
            hinfo->ber_p[fsize] = p;
            for (i = 0; i < 8; i++)
                p = (p * (p0 & 0xffff) >> 48) +
                    (p * ((p0 >> 16) & 0xffff) >> 32) +
                    (p * (p0 >> 32) >> 16);
        }
    }

    if (new->qin_size_limit == -1)
        current->qin_size_limit = 0;
    else if (new->qin_size_limit >= 5)
        current->qin_size_limit = new->qin_size_limit;

    if (new->qout_size_limit == -1)
        current->qout_size_limit = 0;
    else if (new->qout_size_limit >= 5)
        current->qout_size_limit = new->qout_size_limit;

    if (new->duplicate == -1)
        current->duplicate = 0;
    else if (new->duplicate > 0 && new->duplicate <= 50)
        current->duplicate = new->duplicate;

    if (new->fifo) {
        current->fifo = 1;
        current->wfq = 0;
        current->drr = 0;
    }

    if (new->wfq) {
        current->fifo = 0;
        current->wfq = 1;
        current->drr = 0;
    }

    if (new->drr) {
        current->fifo = 0;
        current->wfq = 0;
        /* DRR quantum */
        if (new->drr >= 32)
            current->drr = new->drr;
        else
            current->drr = 2048;    /* default quantum */
    }

    if (new->droptail) {
        current->droptail = 1;
        current->drophead = 0;
    }

    if (new->drophead) {
        current->droptail = 0;
        current->drophead = 1;
    }

    if (new->bandwidth == -1) {
        current->bandwidth = 0;
        current->fifo = 1;
        current->wfq = 0;
        current->drr = 0;
    } else if (new->bandwidth >= 100 && new->bandwidth <= 1000000000)
        current->bandwidth = new->bandwidth;

    if (current->bandwidth | priv->delay |
        current->duplicate | current->ber)
        hinfo->noqueue = 0;
    else
        hinfo->noqueue = 1;
}

/*
 * Compute a hash signature for a packet.  This function suffers from the
 * NIH syndrome, so it would probably be wise to look at what other folks
 * have found to be a good and efficient IP hash function...
 */
static int
ip_hash(struct mbuf *m, int offset)
{
    u_int64_t i;
    struct ip *ip = (struct ip *)(mtod(m, u_char *) + offset);

    if (m->m_len < sizeof(struct ip) + offset ||
        ip->ip_v != 4 || ip->ip_hl << 2 != sizeof(struct ip))
        return 0;

    i = ((u_int64_t) ip->ip_src.s_addr ^
        ((u_int64_t) ip->ip_src.s_addr << 13) ^
        ((u_int64_t) ip->ip_dst.s_addr << 7) ^
        ((u_int64_t) ip->ip_dst.s_addr << 19));
    return (i ^ (i >> 32));
}

/*
 * Receive data on a hook - in both the upstream and downstream direction.
 * We put the frame on the inbound queue, and try to initiate the dequeuing
 * sequence immediately.  If the inbound queue is full, discard one frame
 * depending on the dropping policy (from the head or from the tail of the
 * queue).
 */
static int
ngp_rcvdata(hook_p hook, item_p item)
{
    struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
    const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
    struct timeval uuptime;
    struct timeval *now = &uuptime;
    struct ngp_fifo *ngp_f = NULL, *ngp_f1;
    struct ngp_hdr *ngp_h = NULL;
    struct mbuf *m;
    int hash, plen;
    int error = 0;

    /*
     * Shortcut from inbound to outbound hook when none of bandwidth,
     * delay, BER or duplication probability is configured and there
     * are no queued frames left to drain.
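     *
     * (hinfo->noqueue is maintained by parse_cfg(): it is nonzero only
     * while bandwidth, delay, duplication and BER are all disabled for
     * this hook, which keeps this fast path safe to take.)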
     */
    if (hinfo->run.qin_frames == 0 && hinfo->run.qout_frames == 0 &&
        hinfo->noqueue) {
        struct hookinfo *dest;
        if (hinfo == &priv->lower)
            dest = &priv->upper;
        else
            dest = &priv->lower;

        /* Send the frame. */
        plen = NGI_M(item)->m_pkthdr.len;
        NG_FWD_ITEM_HOOK(error, item, dest->hook);

        /* Update stats. */
        if (error) {
            hinfo->stats.out_disc_frames++;
            hinfo->stats.out_disc_octets += plen;
        } else {
            hinfo->stats.fwd_frames++;
            hinfo->stats.fwd_octets += plen;
        }

        return (error);
    }

    microuptime(now);

    /*
     * If this was an empty queue, update service deadline time.
     */
    if (hinfo->run.qin_frames == 0) {
        struct timeval *when = &hinfo->qin_utime;
        if (when->tv_sec < now->tv_sec || (when->tv_sec == now->tv_sec
            && when->tv_usec < now->tv_usec)) {
            when->tv_sec = now->tv_sec;
            when->tv_usec = now->tv_usec;
        }
    }

    /* Populate the packet header */
    ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
    KASSERT((ngp_h != NULL), ("ngp_h zalloc failed (1)"));
    NGI_GET_M(item, m);
    KASSERT(m != NULL, ("NGI_GET_M failed"));
    ngp_h->m = m;
    NG_FREE_ITEM(item);

    if (hinfo->cfg.fifo)
        hash = 0;   /* all packets go into a single FIFO queue */
    else
        hash = ip_hash(m, priv->header_offset);

    /* Find the appropriate FIFO queue for the packet and enqueue it */
    TAILQ_FOREACH(ngp_f, &hinfo->fifo_head, fifo_le)
        if (hash == ngp_f->hash)
            break;
    if (ngp_f == NULL) {
        ngp_f = uma_zalloc(ngp_zone, M_NOWAIT);
        KASSERT(ngp_f != NULL, ("ngp_f zalloc failed (2)"));
        TAILQ_INIT(&ngp_f->packet_head);
        ngp_f->hash = hash;
        ngp_f->packets = 1;
        ngp_f->rr_deficit = hinfo->cfg.drr; /* DRR quantum */
        hinfo->run.fifo_queues++;
        TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
        FIFO_VTIME_SORT(m->m_pkthdr.len);
    } else {
        TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
        ngp_f->packets++;
    }
    hinfo->run.qin_frames++;
    hinfo->run.qin_octets += m->m_pkthdr.len;

    /* Discard a frame if inbound queue limit has been reached */
    if (hinfo->run.qin_frames > hinfo->cfg.qin_size_limit) {
        struct mbuf *m1;
        int longest = 0;

        /* Find the longest queue */
        TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)
            if (ngp_f1->packets > longest) {
                longest = ngp_f1->packets;
                ngp_f = ngp_f1;
            }

        /* Drop a frame from the queue head/tail, depending on cfg */
        if (hinfo->cfg.drophead)
            ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
        else
            ngp_h = TAILQ_LAST(&ngp_f->packet_head, p_head);
        TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
        m1 = ngp_h->m;
        uma_zfree(ngp_zone, ngp_h);
        hinfo->run.qin_octets -= m1->m_pkthdr.len;
        hinfo->stats.in_disc_octets += m1->m_pkthdr.len;
        m_freem(m1);
        if (--(ngp_f->packets) == 0) {
            TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
            uma_zfree(ngp_zone, ngp_f);
            hinfo->run.fifo_queues--;
        }
        hinfo->run.qin_frames--;
        hinfo->stats.in_disc_frames++;
    }

    /*
     * Try to start the dequeuing process immediately.
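     * Whatever cannot be sent right away stays queued; pipe_dequeue()
     * then arranges a one-tick callout, so the remainder is drained
     * from ngp_callout() on subsequent clock ticks.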
     */
    pipe_dequeue(hinfo, now);

    return (0);
}

/*
 * Dequeueing sequence - we basically do the following:
 *  1) Try to extract the frame from the inbound (bandwidth) queue;
 *  2) In accordance with the specified BER, discard the frame randomly;
 *  3) If the frame survives BER, prepend it with delay info and move it
 *     to the outbound (delay) queue;
 *  4) Loop to 2) until the bandwidth quota for this timeslice is reached,
 *     or the inbound queue is flushed completely;
 *  5) Dequeue frames from the outbound queue and send them downstream until
 *     the outbound queue is flushed completely, or the next frame in the
 *     queue is not due to be dequeued yet.
 */
static void
pipe_dequeue(struct hookinfo *hinfo, struct timeval *now)
{
    static uint64_t rand, oldrand;
    const node_p node = NG_HOOK_NODE(hinfo->hook);
    const priv_p priv = NG_NODE_PRIVATE(node);
    struct hookinfo *dest;
    struct ngp_fifo *ngp_f, *ngp_f1;
    struct ngp_hdr *ngp_h;
    struct timeval *when;
    struct mbuf *m;
    int plen, error = 0;

    /* Which one is the destination hook? */
    if (hinfo == &priv->lower)
        dest = &priv->upper;
    else
        dest = &priv->lower;

    /* Bandwidth queue processing */
    while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
        when = &hinfo->qin_utime;
        if (when->tv_sec > now->tv_sec || (when->tv_sec == now->tv_sec
            && when->tv_usec > now->tv_usec))
            break;

        ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
        m = ngp_h->m;

        /* Deficit Round Robin (DRR) processing */
        if (hinfo->cfg.drr) {
            if (ngp_f->rr_deficit >= m->m_pkthdr.len) {
                ngp_f->rr_deficit -= m->m_pkthdr.len;
            } else {
                ngp_f->rr_deficit += hinfo->cfg.drr;
                TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
                TAILQ_INSERT_TAIL(&hinfo->fifo_head,
                    ngp_f, fifo_le);
                continue;
            }
        }

        /*
         * Either create a duplicate and pass it on, or dequeue
         * the original packet...
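         *
         * (When a duplicate is sent, the original frame is left at the
         * head of its FIFO queue, so it will be processed again on a
         * later pass through this loop.)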
         */
        if (hinfo->cfg.duplicate &&
            random() % 100 <= hinfo->cfg.duplicate) {
            ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
            KASSERT(ngp_h != NULL, ("ngp_h zalloc failed (3)"));
            m = m_dup(m, M_NOWAIT);
            KASSERT(m != NULL, ("m_dup failed"));
            ngp_h->m = m;
        } else {
            TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
            hinfo->run.qin_frames--;
            hinfo->run.qin_octets -= m->m_pkthdr.len;
            ngp_f->packets--;
        }

        /* Calculate the serialization delay */
        if (hinfo->cfg.bandwidth) {
            hinfo->qin_utime.tv_usec +=
                ((uint64_t) m->m_pkthdr.len + priv->overhead) *
                8000000 / hinfo->cfg.bandwidth;
            hinfo->qin_utime.tv_sec +=
                hinfo->qin_utime.tv_usec / 1000000;
            hinfo->qin_utime.tv_usec =
                hinfo->qin_utime.tv_usec % 1000000;
        }
        when = &ngp_h->when;
        when->tv_sec = hinfo->qin_utime.tv_sec;
        when->tv_usec = hinfo->qin_utime.tv_usec;

        /* Sort / rearrange inbound queues */
        if (ngp_f->packets) {
            if (hinfo->cfg.wfq) {
                TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
                FIFO_VTIME_SORT(TAILQ_FIRST(
                    &ngp_f->packet_head)->m->m_pkthdr.len)
            }
        } else {
            TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
            uma_zfree(ngp_zone, ngp_f);
            hinfo->run.fifo_queues--;
        }

        /* Randomly discard the frame, according to BER setting */
        if (hinfo->cfg.ber) {
            oldrand = rand;
            rand = random();
            if (((oldrand ^ rand) << 17) >=
                hinfo->ber_p[priv->overhead + m->m_pkthdr.len]) {
                hinfo->stats.out_disc_frames++;
                hinfo->stats.out_disc_octets += m->m_pkthdr.len;
                uma_zfree(ngp_zone, ngp_h);
                m_freem(m);
                continue;
            }
        }

        /* Discard frame if outbound queue size limit exceeded */
        if (hinfo->cfg.qout_size_limit &&
            hinfo->run.qout_frames >= hinfo->cfg.qout_size_limit) {
            hinfo->stats.out_disc_frames++;
            hinfo->stats.out_disc_octets += m->m_pkthdr.len;
            uma_zfree(ngp_zone, ngp_h);
            m_freem(m);
            continue;
        }

        /* Calculate the propagation delay */
        when->tv_usec += priv->delay;
        when->tv_sec += when->tv_usec / 1000000;
        when->tv_usec = when->tv_usec % 1000000;

        /* Put the frame into the delay queue */
        TAILQ_INSERT_TAIL(&hinfo->qout_head, ngp_h, ngp_link);
        hinfo->run.qout_frames++;
        hinfo->run.qout_octets += m->m_pkthdr.len;
    }

    /* Delay queue processing */
    while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
        when = &ngp_h->when;
        m = ngp_h->m;
        if (when->tv_sec > now->tv_sec ||
            (when->tv_sec == now->tv_sec &&
            when->tv_usec > now->tv_usec))
            break;

        /* Update outbound queue stats */
        plen = m->m_pkthdr.len;
        hinfo->run.qout_frames--;
        hinfo->run.qout_octets -= plen;

        /* Dequeue the packet from qout */
        TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
        uma_zfree(ngp_zone, ngp_h);

        NG_SEND_DATA(error, dest->hook, m, meta);
        if (error) {
            hinfo->stats.out_disc_frames++;
            hinfo->stats.out_disc_octets += plen;
        } else {
            hinfo->stats.fwd_frames++;
            hinfo->stats.fwd_octets += plen;
        }
    }

    if ((hinfo->run.qin_frames != 0 || hinfo->run.qout_frames != 0) &&
        !priv->timer_scheduled) {
        ng_callout(&priv->timer, node, NULL, 1, ngp_callout, NULL, 0);
        priv->timer_scheduled = 1;
    }
}

/*
 * This routine is called on every clock tick.  We poll connected hooks
 * for queued frames by calling pipe_dequeue().
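 *
 * Between data arrivals the queues are thus drained at system clock
 * resolution (the callout fires one tick after it is scheduled), in
 * addition to the opportunistic dequeueing done directly from
 * ngp_rcvdata().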
 */
static void
ngp_callout(node_p node, hook_p hook, void *arg1, int arg2)
{
    const priv_p priv = NG_NODE_PRIVATE(node);
    struct timeval now;

    priv->timer_scheduled = 0;
    microuptime(&now);
    if (priv->upper.hook != NULL)
        pipe_dequeue(&priv->upper, &now);
    if (priv->lower.hook != NULL)
        pipe_dequeue(&priv->lower, &now);
}

/*
 * Shutdown processing
 *
 * This is tricky.  If we have both a lower and upper hook, then we
 * probably want to extricate ourselves and leave the two peers
 * still linked to each other.  Otherwise we should just shut down as
 * a normal node would.
 */
static int
ngp_shutdown(node_p node)
{
    const priv_p priv = NG_NODE_PRIVATE(node);

    if (priv->timer_scheduled)
        ng_uncallout(&priv->timer, node);
    if (priv->lower.hook && priv->upper.hook)
        ng_bypass(priv->lower.hook, priv->upper.hook);
    else {
        if (priv->upper.hook != NULL)
            ng_rmhook_self(priv->upper.hook);
        if (priv->lower.hook != NULL)
            ng_rmhook_self(priv->lower.hook);
    }
    NG_NODE_UNREF(node);
    free(priv, M_NG_PIPE);
    return (0);
}

/*
 * Hook disconnection
 */
static int
ngp_disconnect(hook_p hook)
{
    struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
    struct ngp_fifo *ngp_f;
    struct ngp_hdr *ngp_h;

    KASSERT(hinfo != NULL, ("%s: null info", __FUNCTION__));
    hinfo->hook = NULL;

    /* Flush all fifo queues associated with the hook */
    while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
        while ((ngp_h = TAILQ_FIRST(&ngp_f->packet_head))) {
            TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
            m_freem(ngp_h->m);
            uma_zfree(ngp_zone, ngp_h);
        }
        TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
        uma_zfree(ngp_zone, ngp_f);
    }

    /* Flush the delay queue */
    while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
        TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
        m_freem(ngp_h->m);
        uma_zfree(ngp_zone, ngp_h);
    }

    /* Release the packet loss probability table (BER) */
    if (hinfo->ber_p)
        free(hinfo->ber_p, M_NG_PIPE);

    return (0);
}

static int
ngp_modevent(module_t mod, int type, void *unused)
{
    int error = 0;

    switch (type) {
    case MOD_LOAD:
        ngp_zone = uma_zcreate("ng_pipe", max(sizeof(struct ngp_hdr),
            sizeof(struct ngp_fifo)), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
        if (ngp_zone == NULL)
            panic("ng_pipe: couldn't allocate descriptor zone");
        break;
    case MOD_UNLOAD:
        uma_zdestroy(ngp_zone);
        break;
    default:
        error = EOPNOTSUPP;
        break;
    }

    return (error);
}