/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004-2010 University of Zagreb
 * Copyright (c) 2007-2008 FreeBSD Foundation
 *
 * This software was developed by the University of Zagreb and the
 * FreeBSD Foundation under sponsorship by the Stichting NLnet and the
 * FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * This node permits simple traffic shaping by emulating bandwidth
 * and delay, as well as random packet losses.
 * The node has two hooks, upper and lower.  Traffic flowing from the upper
 * to the lower hook is referred to as downstream, and vice versa.  Parameters
 * for both directions can be set separately, except for delay.
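 *
 * For illustration only (the exact ASCII field names are defined by the
 * parse info macros in ng_pipe.h and are assumed here to be "bandwidth"
 * and "delay"), a node named "pipe" could be limited to 1 Mbit/s with
 * 50 ms of extra delay by sending the "setcfg" control message from
 * ngctl(8):
 *
 *	ngctl msg pipe: setcfg { bandwidth=1000000 delay=50000 }
 *
 * Bandwidth is expressed in bits per second and delay in microseconds.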
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>

#include <vm/uma.h>

#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#include <netgraph/ng_pipe.h>

static MALLOC_DEFINE(M_NG_PIPE, "ng_pipe", "ng_pipe");

/* Packet header struct */
struct ngp_hdr {
	TAILQ_ENTRY(ngp_hdr)	ngp_link;	/* next pkt in queue */
	struct timeval		when;		/* this packet's due time */
	struct mbuf		*m;		/* ptr to the packet data */
};
TAILQ_HEAD(p_head, ngp_hdr);

/* FIFO queue struct */
struct ngp_fifo {
	TAILQ_ENTRY(ngp_fifo)	fifo_le;	/* list of active queues only */
	struct p_head		packet_head;	/* FIFO queue head */
	u_int32_t		hash;		/* flow signature */
	struct timeval		vtime;		/* virtual time, for WFQ */
	u_int32_t		rr_deficit;	/* for DRR */
	u_int32_t		packets;	/* # of packets in this queue */
};

/* Per hook info */
struct hookinfo {
	hook_p			hook;
	int			noqueue;	/* bypass any processing */
	TAILQ_HEAD(, ngp_fifo)	fifo_head;	/* FIFO queues */
	TAILQ_HEAD(, ngp_hdr)	qout_head;	/* delay queue head */
	struct timeval		qin_utime;
	struct ng_pipe_hookcfg	cfg;
	struct ng_pipe_hookrun	run;
	struct ng_pipe_hookstat	stats;
	uint64_t		*ber_p;		/* loss_p(BER,psize) map */
};

/* Per node info */
struct node_priv {
	u_int64_t		delay;
	u_int32_t		overhead;
	u_int32_t		header_offset;
	struct hookinfo		lower;
	struct hookinfo		upper;
	struct callout		timer;
	int			timer_scheduled;
};
typedef struct node_priv *priv_p;

/* Macro for calculating the virtual time for packet dequeueing in WFQ */
#define FIFO_VTIME_SORT(plen)						\
	if (hinfo->cfg.wfq && hinfo->cfg.bandwidth) {			\
		ngp_f->vtime.tv_usec = now->tv_usec + ((uint64_t) (plen) \
		    + priv->overhead) * hinfo->run.fifo_queues *	\
		    8000000 / hinfo->cfg.bandwidth;			\
		ngp_f->vtime.tv_sec = now->tv_sec +			\
		    ngp_f->vtime.tv_usec / 1000000;			\
		ngp_f->vtime.tv_usec = ngp_f->vtime.tv_usec % 1000000;	\
		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)	\
			if (ngp_f1->vtime.tv_sec > ngp_f->vtime.tv_sec || \
			    (ngp_f1->vtime.tv_sec == ngp_f->vtime.tv_sec && \
			    ngp_f1->vtime.tv_usec > ngp_f->vtime.tv_usec)) \
				break;					\
		if (ngp_f1 == NULL)					\
			TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le); \
		else							\
			TAILQ_INSERT_BEFORE(ngp_f1, ngp_f, fifo_le);	\
	} else								\
		TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le);

static void	parse_cfg(struct ng_pipe_hookcfg *, struct ng_pipe_hookcfg *,
			struct hookinfo *, priv_p);
static void	pipe_dequeue(struct hookinfo *, struct timeval *);
static void	ngp_callout(node_p, hook_p, void *, int);
static int	ngp_modevent(module_t, int, void *);

/* zone for storing ngp_hdr-s */
static uma_zone_t ngp_zone;

/* Netgraph methods */
static ng_constructor_t	ngp_constructor;
static ng_rcvmsg_t	ngp_rcvmsg;
static ng_shutdown_t	ngp_shutdown;
static ng_newhook_t	ngp_newhook;
static ng_rcvdata_t	ngp_rcvdata;
static ng_disconnect_t	ngp_disconnect;

/* Parse type for struct ng_pipe_hookstat */
static const struct ng_parse_struct_field
	ng_pipe_hookstat_type_fields[] = NG_PIPE_HOOKSTAT_INFO;
static const struct ng_parse_type ng_pipe_hookstat_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookstat_type_fields
};

/* Parse type for struct ng_pipe_stats */
static const struct ng_parse_struct_field ng_pipe_stats_type_fields[] =
	NG_PIPE_STATS_INFO(&ng_pipe_hookstat_type);
static const struct ng_parse_type ng_pipe_stats_type = {
	&ng_parse_struct_type,
	&ng_pipe_stats_type_fields
};

/* Parse type for struct ng_pipe_hookrun */
static const struct ng_parse_struct_field
	ng_pipe_hookrun_type_fields[] = NG_PIPE_HOOKRUN_INFO;
static const struct ng_parse_type ng_pipe_hookrun_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookrun_type_fields
};

/* Parse type for struct ng_pipe_run */
static const struct ng_parse_struct_field
	ng_pipe_run_type_fields[] = NG_PIPE_RUN_INFO(&ng_pipe_hookrun_type);
static const struct ng_parse_type ng_pipe_run_type = {
	&ng_parse_struct_type,
	&ng_pipe_run_type_fields
};

/* Parse type for struct ng_pipe_hookcfg */
static const struct ng_parse_struct_field
	ng_pipe_hookcfg_type_fields[] = NG_PIPE_HOOKCFG_INFO;
static const struct ng_parse_type ng_pipe_hookcfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookcfg_type_fields
};

/* Parse type for struct ng_pipe_cfg */
static const struct ng_parse_struct_field
	ng_pipe_cfg_type_fields[] = NG_PIPE_CFG_INFO(&ng_pipe_hookcfg_type);
static const struct ng_parse_type ng_pipe_cfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_cfg_type_fields
};

/* List of commands and how to convert arguments to/from ASCII */
static const struct ng_cmdlist ngp_cmds[] = {
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_STATS,
		.name =		"getstats",
		.respType =	&ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_CLR_STATS,
		.name =		"clrstats"
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GETCLR_STATS,
		.name =		"getclrstats",
		.respType =	&ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_RUN,
		.name =		"getrun",
		.respType =	&ng_pipe_run_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_CFG,
		.name =		"getcfg",
		.respType =	&ng_pipe_cfg_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_SET_CFG,
		.name =		"setcfg",
		.mesgType =	&ng_pipe_cfg_type,
	},
	{ 0 }
};

/* Netgraph type descriptor */
static struct ng_type ng_pipe_typestruct = {
	.version =	NG_ABI_VERSION,
	.name =		NG_PIPE_NODE_TYPE,
	.mod_event =	ngp_modevent,
	.constructor =	ngp_constructor,
	.shutdown =	ngp_shutdown,
	.rcvmsg =	ngp_rcvmsg,
	.newhook =	ngp_newhook,
	.rcvdata =	ngp_rcvdata,
	.disconnect =	ngp_disconnect,
	.cmdlist =	ngp_cmds
};
NETGRAPH_INIT(pipe, &ng_pipe_typestruct);

/* Node constructor */
static int
ngp_constructor(node_p node)
{
	priv_p priv;

	priv = malloc(sizeof(*priv), M_NG_PIPE, M_ZERO | M_WAITOK);
	NG_NODE_SET_PRIVATE(node, priv);

	/* Mark node as single-threaded */
	NG_NODE_FORCE_WRITER(node);

	ng_callout_init(&priv->timer);

	return (0);
}

/* Add a hook */
static int
ngp_newhook(node_p node, hook_p hook, const char *name)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct hookinfo *hinfo;

	if (strcmp(name, NG_PIPE_HOOK_UPPER) == 0) {
		bzero(&priv->upper, sizeof(priv->upper));
		priv->upper.hook = hook;
		NG_HOOK_SET_PRIVATE(hook, &priv->upper);
	} else if (strcmp(name, NG_PIPE_HOOK_LOWER) == 0) {
		bzero(&priv->lower, sizeof(priv->lower));
		priv->lower.hook = hook;
		NG_HOOK_SET_PRIVATE(hook, &priv->lower);
	} else
		return (EINVAL);

	/* Load non-zero initial cfg values */
	hinfo = NG_HOOK_PRIVATE(hook);
	hinfo->cfg.qin_size_limit = 50;
	hinfo->cfg.fifo = 1;
	hinfo->cfg.droptail = 1;
	TAILQ_INIT(&hinfo->fifo_head);
	TAILQ_INIT(&hinfo->qout_head);
	return (0);
}

/* Receive a control message */
static int
ngp_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *resp = NULL;
	struct ng_mesg *msg, *flow_msg;
	struct ng_pipe_stats *stats;
	struct ng_pipe_run *run;
	struct ng_pipe_cfg *cfg;
	int error = 0;
	int prev_down, now_down, cmd;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_PIPE_COOKIE:
		switch (msg->header.cmd) {
		case NGM_PIPE_GET_STATS:
		case NGM_PIPE_CLR_STATS:
		case NGM_PIPE_GETCLR_STATS:
			if (msg->header.cmd != NGM_PIPE_CLR_STATS) {
				NG_MKRESPONSE(resp, msg,
				    sizeof(*stats), M_NOWAIT);
				if (resp == NULL) {
					error = ENOMEM;
					break;
				}
				stats = (struct ng_pipe_stats *) resp->data;
				bcopy(&priv->upper.stats, &stats->downstream,
				    sizeof(stats->downstream));
				bcopy(&priv->lower.stats, &stats->upstream,
				    sizeof(stats->upstream));
			}
			if (msg->header.cmd != NGM_PIPE_GET_STATS) {
				bzero(&priv->upper.stats,
				    sizeof(priv->upper.stats));
				bzero(&priv->lower.stats,
				    sizeof(priv->lower.stats));
			}
			break;
		case NGM_PIPE_GET_RUN:
			NG_MKRESPONSE(resp, msg, sizeof(*run), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			run = (struct ng_pipe_run *) resp->data;
			bcopy(&priv->upper.run, &run->downstream,
			    sizeof(run->downstream));
			bcopy(&priv->lower.run, &run->upstream,
			    sizeof(run->upstream));
			break;
		case NGM_PIPE_GET_CFG:
			NG_MKRESPONSE(resp, msg, sizeof(*cfg), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			cfg = (struct ng_pipe_cfg *) resp->data;
			bcopy(&priv->upper.cfg, &cfg->downstream,
			    sizeof(cfg->downstream));
			bcopy(&priv->lower.cfg, &cfg->upstream,
			    sizeof(cfg->upstream));
			cfg->delay = priv->delay;
			cfg->overhead = priv->overhead;
			cfg->header_offset = priv->header_offset;
			if (cfg->upstream.bandwidth ==
			    cfg->downstream.bandwidth) {
				cfg->bandwidth = cfg->upstream.bandwidth;
				cfg->upstream.bandwidth = 0;
				cfg->downstream.bandwidth = 0;
			} else
				cfg->bandwidth = 0;
			break;
		case NGM_PIPE_SET_CFG:
			cfg = (struct ng_pipe_cfg *) msg->data;
			if (msg->header.arglen != sizeof(*cfg)) {
				error = EINVAL;
				break;
			}

			if (cfg->delay == -1)
				priv->delay = 0;
			else if (cfg->delay > 0 && cfg->delay < 10000000)
				priv->delay = cfg->delay;

			if (cfg->bandwidth == -1) {
				priv->upper.cfg.bandwidth = 0;
				priv->lower.cfg.bandwidth = 0;
				priv->overhead = 0;
			} else if (cfg->bandwidth >= 100 &&
			    cfg->bandwidth <= 1000000000) {
				priv->upper.cfg.bandwidth = cfg->bandwidth;
				priv->lower.cfg.bandwidth = cfg->bandwidth;
				if (cfg->bandwidth >= 10000000)
					priv->overhead = 8+4+12; /* Ethernet */
				else
					priv->overhead = 10; /* HDLC */
			}

			if (cfg->overhead == -1)
				priv->overhead = 0;
			else if (cfg->overhead > 0 &&
			    cfg->overhead < MAX_OHSIZE)
				priv->overhead = cfg->overhead;

			if (cfg->header_offset == -1)
				priv->header_offset = 0;
			else if (cfg->header_offset > 0 &&
			    cfg->header_offset < 64)
				priv->header_offset = cfg->header_offset;

			prev_down = priv->upper.cfg.ber == 1 ||
			    priv->lower.cfg.ber == 1;
			parse_cfg(&priv->upper.cfg, &cfg->downstream,
			    &priv->upper, priv);
			parse_cfg(&priv->lower.cfg, &cfg->upstream,
			    &priv->lower, priv);
			now_down = priv->upper.cfg.ber == 1 ||
			    priv->lower.cfg.ber == 1;

			if (prev_down != now_down) {
				if (now_down)
					cmd = NGM_LINK_IS_DOWN;
				else
					cmd = NGM_LINK_IS_UP;

				if (priv->lower.hook != NULL) {
					NG_MKMESSAGE(flow_msg, NGM_FLOW_COOKIE,
					    cmd, 0, M_NOWAIT);
					if (flow_msg != NULL)
						NG_SEND_MSG_HOOK(error, node,
						    flow_msg, priv->lower.hook,
						    0);
				}
				if (priv->upper.hook != NULL) {
					NG_MKMESSAGE(flow_msg, NGM_FLOW_COOKIE,
					    cmd, 0, M_NOWAIT);
					if (flow_msg != NULL)
						NG_SEND_MSG_HOOK(error, node,
						    flow_msg, priv->upper.hook,
						    0);
				}
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);

	return (error);
}

static void
parse_cfg(struct ng_pipe_hookcfg *current, struct ng_pipe_hookcfg *new,
	struct hookinfo *hinfo, priv_p priv)
{

	if (new->ber == -1) {
		current->ber = 0;
		if (hinfo->ber_p) {
			free(hinfo->ber_p, M_NG_PIPE);
			hinfo->ber_p = NULL;
		}
	} else if (new->ber >= 1 && new->ber <= 1000000000000) {
		static const uint64_t one = 0x1000000000000; /* = 2^48 */
		uint64_t p0, p;
		uint32_t fsize, i;

		if (hinfo->ber_p == NULL)
			hinfo->ber_p =
			    malloc((MAX_FSIZE + MAX_OHSIZE) * sizeof(uint64_t),
			    M_NG_PIPE, M_WAITOK);
		current->ber = new->ber;

		/*
		 * For a given BER and each frame size N (in bytes), calculate
		 * the probability P_OK that the frame is clean:
		 *
		 * P_OK(BER,N) = (1 - 1/BER)^(N*8)
		 *
		 * We use a 64-bit fixed-point format with the decimal point
		 * positioned between bits 47 and 48.
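		 *
		 * The loop below multiplies p by p0 eight times per byte,
		 * using three 16-bit partial products, so ber_p[N] ends up
		 * holding P_OK(BER,N) in Q48 form, indexed by frame size
		 * including the link-layer overhead.  As a rough worked
		 * example, with ber = 1000000 a 1524-byte frame (1500 bytes
		 * of payload plus 24 bytes of Ethernet overhead) survives
		 * with probability (1 - 1/10^6)^(1524*8), i.e. about 0.988,
		 * so close to 1.2% of such frames get dropped at dequeue
		 * time.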
		 */
		p0 = one - one / new->ber;
		p = one;
		for (fsize = 0; fsize < MAX_FSIZE + MAX_OHSIZE; fsize++) {
			hinfo->ber_p[fsize] = p;
			for (i = 0; i < 8; i++)
				p = (p * (p0 & 0xffff) >> 48) +
				    (p * ((p0 >> 16) & 0xffff) >> 32) +
				    (p * (p0 >> 32) >> 16);
		}
	}

	if (new->qin_size_limit == -1)
		current->qin_size_limit = 0;
	else if (new->qin_size_limit >= 5)
		current->qin_size_limit = new->qin_size_limit;

	if (new->qout_size_limit == -1)
		current->qout_size_limit = 0;
	else if (new->qout_size_limit >= 5)
		current->qout_size_limit = new->qout_size_limit;

	if (new->duplicate == -1)
		current->duplicate = 0;
	else if (new->duplicate > 0 && new->duplicate <= 50)
		current->duplicate = new->duplicate;

	if (new->fifo) {
		current->fifo = 1;
		current->wfq = 0;
		current->drr = 0;
	}

	if (new->wfq) {
		current->fifo = 0;
		current->wfq = 1;
		current->drr = 0;
	}

	if (new->drr) {
		current->fifo = 0;
		current->wfq = 0;
		/* DRR quantum */
		if (new->drr >= 32)
			current->drr = new->drr;
		else
			current->drr = 2048;	/* default quantum */
	}

	if (new->droptail) {
		current->droptail = 1;
		current->drophead = 0;
	}

	if (new->drophead) {
		current->droptail = 0;
		current->drophead = 1;
	}

	if (new->bandwidth == -1) {
		current->bandwidth = 0;
		current->fifo = 1;
		current->wfq = 0;
		current->drr = 0;
	} else if (new->bandwidth >= 100 && new->bandwidth <= 1000000000)
		current->bandwidth = new->bandwidth;

	if (current->bandwidth | priv->delay |
	    current->duplicate | current->ber)
		hinfo->noqueue = 0;
	else
		hinfo->noqueue = 1;
}

/*
 * Compute a hash signature for a packet.  This function suffers from the
 * NIH syndrome, so it would probably be wise to look around for what other
 * folks have found out to be a good and efficient IP hash function...
 */
static int
ip_hash(struct mbuf *m, int offset)
{
	u_int64_t i;
	struct ip *ip = (struct ip *)(mtod(m, u_char *) + offset);

	if (m->m_len < sizeof(struct ip) + offset ||
	    ip->ip_v != 4 || ip->ip_hl << 2 != sizeof(struct ip))
		return 0;

	i = ((u_int64_t) ip->ip_src.s_addr ^
	    ((u_int64_t) ip->ip_src.s_addr << 13) ^
	    ((u_int64_t) ip->ip_dst.s_addr << 7) ^
	    ((u_int64_t) ip->ip_dst.s_addr << 19));
	return (i ^ (i >> 32));
}

/*
 * Receive data on a hook - both in upstream and downstream direction.
 * We put the frame on the inbound queue, and try to initiate the dequeuing
 * sequence immediately.  If the inbound queue is full, discard one frame
 * depending on the dropping policy (from the head or from the tail of the
 * queue).
 */
static int
ngp_rcvdata(hook_p hook, item_p item)
{
	struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	struct timeval uuptime;
	struct timeval *now = &uuptime;
	struct ngp_fifo *ngp_f = NULL, *ngp_f1;
	struct ngp_hdr *ngp_h = NULL;
	struct mbuf *m;
	int hash, plen;
	int error = 0;

	/*
	 * Shortcut from inbound to outbound hook when none of bandwidth,
	 * delay, BER or duplication probability is configured, and we
	 * have no queued frames to drain.
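	 * The hinfo->noqueue flag is precomputed in parse_cfg() whenever
	 * bandwidth, delay, duplication and BER are all unset, so this
	 * fast path costs only a couple of integer tests per frame.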
	 */
	if (hinfo->run.qin_frames == 0 && hinfo->run.qout_frames == 0 &&
	    hinfo->noqueue) {
		struct hookinfo *dest;
		if (hinfo == &priv->lower)
			dest = &priv->upper;
		else
			dest = &priv->lower;

		/* Send the frame. */
		plen = NGI_M(item)->m_pkthdr.len;
		NG_FWD_ITEM_HOOK(error, item, dest->hook);

		/* Update stats. */
		if (error) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += plen;
		} else {
			hinfo->stats.fwd_frames++;
			hinfo->stats.fwd_octets += plen;
		}

		return (error);
	}

	microuptime(now);

	/*
	 * If this was an empty queue, update service deadline time.
	 */
	if (hinfo->run.qin_frames == 0) {
		struct timeval *when = &hinfo->qin_utime;
		if (when->tv_sec < now->tv_sec || (when->tv_sec == now->tv_sec
		    && when->tv_usec < now->tv_usec)) {
			when->tv_sec = now->tv_sec;
			when->tv_usec = now->tv_usec;
		}
	}

	/* Populate the packet header */
	ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
	KASSERT((ngp_h != NULL), ("ngp_h zalloc failed (1)"));
	NGI_GET_M(item, m);
	KASSERT(m != NULL, ("NGI_GET_M failed"));
	ngp_h->m = m;
	NG_FREE_ITEM(item);

	if (hinfo->cfg.fifo)
		hash = 0;	/* all packets go into a single FIFO queue */
	else
		hash = ip_hash(m, priv->header_offset);

	/* Find the appropriate FIFO queue for the packet and enqueue it */
	TAILQ_FOREACH(ngp_f, &hinfo->fifo_head, fifo_le)
		if (hash == ngp_f->hash)
			break;
	if (ngp_f == NULL) {
		ngp_f = uma_zalloc(ngp_zone, M_NOWAIT);
		KASSERT(ngp_f != NULL, ("ngp_f zalloc failed (2)"));
		TAILQ_INIT(&ngp_f->packet_head);
		ngp_f->hash = hash;
		ngp_f->packets = 1;
		ngp_f->rr_deficit = hinfo->cfg.drr;	/* DRR quantum */
		hinfo->run.fifo_queues++;
		TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
		FIFO_VTIME_SORT(m->m_pkthdr.len);
	} else {
		TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
		ngp_f->packets++;
	}
	hinfo->run.qin_frames++;
	hinfo->run.qin_octets += m->m_pkthdr.len;

	/* Discard a frame if inbound queue limit has been reached */
	if (hinfo->run.qin_frames > hinfo->cfg.qin_size_limit) {
		struct mbuf *m1;
		int longest = 0;

		/* Find the longest queue */
		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)
			if (ngp_f1->packets > longest) {
				longest = ngp_f1->packets;
				ngp_f = ngp_f1;
			}

		/* Drop a frame from the queue head/tail, depending on cfg */
		if (hinfo->cfg.drophead)
			ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
		else
			ngp_h = TAILQ_LAST(&ngp_f->packet_head, p_head);
		TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
		m1 = ngp_h->m;
		uma_zfree(ngp_zone, ngp_h);
		hinfo->run.qin_octets -= m1->m_pkthdr.len;
		hinfo->stats.in_disc_octets += m1->m_pkthdr.len;
		m_freem(m1);
		if (--(ngp_f->packets) == 0) {
			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
			uma_zfree(ngp_zone, ngp_f);
			hinfo->run.fifo_queues--;
		}
		hinfo->run.qin_frames--;
		hinfo->stats.in_disc_frames++;
	}

	/*
	 * Try to start the dequeuing process immediately.
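	 * If frames remain queued afterwards, pipe_dequeue() arranges for
	 * the per-tick callout to fire, so draining continues even when no
	 * further traffic arrives on this hook.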
	 */
	pipe_dequeue(hinfo, now);

	return (0);
}

/*
 * Dequeueing sequence - we basically do the following:
 *  1) Try to extract the frame from the inbound (bandwidth) queue;
 *  2) In accordance with the specified BER, discard the frame randomly;
 *  3) If the frame survives BER, prepend it with delay info and move it
 *     to the outbound (delay) queue;
 *  4) Loop to 2) until the bandwidth quota for this timeslice is reached,
 *     or the inbound queue is flushed completely;
 *  5) Dequeue frames from the outbound queue and send them downstream until
 *     the outbound queue is flushed completely, or the next frame in the
 *     queue is not yet due to be dequeued.
 */
static void
pipe_dequeue(struct hookinfo *hinfo, struct timeval *now)
{
	static uint64_t rand, oldrand;
	const node_p node = NG_HOOK_NODE(hinfo->hook);
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct hookinfo *dest;
	struct ngp_fifo *ngp_f, *ngp_f1;
	struct ngp_hdr *ngp_h;
	struct timeval *when;
	struct mbuf *m;
	int plen, error = 0;

	/* Which one is the destination hook? */
	if (hinfo == &priv->lower)
		dest = &priv->upper;
	else
		dest = &priv->lower;

	/* Bandwidth queue processing */
	while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
		when = &hinfo->qin_utime;
		if (when->tv_sec > now->tv_sec || (when->tv_sec == now->tv_sec
		    && when->tv_usec > now->tv_usec))
			break;

		ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
		m = ngp_h->m;

		/* Deficit Round Robin (DRR) processing */
		if (hinfo->cfg.drr) {
			if (ngp_f->rr_deficit >= m->m_pkthdr.len) {
				ngp_f->rr_deficit -= m->m_pkthdr.len;
			} else {
				ngp_f->rr_deficit += hinfo->cfg.drr;
				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
				TAILQ_INSERT_TAIL(&hinfo->fifo_head,
				    ngp_f, fifo_le);
				continue;
			}
		}

		/*
		 * Either create a duplicate and pass it on, or dequeue
		 * the original packet...
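		 * cfg.duplicate is a percentage: roughly that fraction of
		 * dequeued frames is copied with m_dup() and sent on, while
		 * the original stays at the head of its FIFO queue and is
		 * charged bandwidth again on the next pass, so a single
		 * frame may be duplicated more than once.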
		 */
		if (hinfo->cfg.duplicate &&
		    random() % 100 <= hinfo->cfg.duplicate) {
			ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
			KASSERT(ngp_h != NULL, ("ngp_h zalloc failed (3)"));
			m = m_dup(m, M_NOWAIT);
			KASSERT(m != NULL, ("m_dup failed"));
			ngp_h->m = m;
		} else {
			TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
			hinfo->run.qin_frames--;
			hinfo->run.qin_octets -= m->m_pkthdr.len;
			ngp_f->packets--;
		}

		/* Calculate the serialization delay */
		if (hinfo->cfg.bandwidth) {
			hinfo->qin_utime.tv_usec +=
			    ((uint64_t) m->m_pkthdr.len + priv->overhead) *
			    8000000 / hinfo->cfg.bandwidth;
			hinfo->qin_utime.tv_sec +=
			    hinfo->qin_utime.tv_usec / 1000000;
			hinfo->qin_utime.tv_usec =
			    hinfo->qin_utime.tv_usec % 1000000;
		}
		when = &ngp_h->when;
		when->tv_sec = hinfo->qin_utime.tv_sec;
		when->tv_usec = hinfo->qin_utime.tv_usec;

		/* Sort / rearrange inbound queues */
		if (ngp_f->packets) {
			if (hinfo->cfg.wfq) {
				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
				FIFO_VTIME_SORT(TAILQ_FIRST(
				    &ngp_f->packet_head)->m->m_pkthdr.len)
			}
		} else {
			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
			uma_zfree(ngp_zone, ngp_f);
			hinfo->run.fifo_queues--;
		}

		/* Randomly discard the frame, according to BER setting */
		if (hinfo->cfg.ber) {
			oldrand = rand;
			rand = random();
			if (((oldrand ^ rand) << 17) >=
			    hinfo->ber_p[priv->overhead + m->m_pkthdr.len]) {
				hinfo->stats.out_disc_frames++;
				hinfo->stats.out_disc_octets += m->m_pkthdr.len;
				uma_zfree(ngp_zone, ngp_h);
				m_freem(m);
				continue;
			}
		}

		/* Discard frame if outbound queue size limit exceeded */
		if (hinfo->cfg.qout_size_limit &&
		    hinfo->run.qout_frames >= hinfo->cfg.qout_size_limit) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += m->m_pkthdr.len;
			uma_zfree(ngp_zone, ngp_h);
			m_freem(m);
			continue;
		}

		/* Calculate the propagation delay */
		when->tv_usec += priv->delay;
		when->tv_sec += when->tv_usec / 1000000;
		when->tv_usec = when->tv_usec % 1000000;

		/* Put the frame into the delay queue */
		TAILQ_INSERT_TAIL(&hinfo->qout_head, ngp_h, ngp_link);
		hinfo->run.qout_frames++;
		hinfo->run.qout_octets += m->m_pkthdr.len;
	}

	/* Delay queue processing */
	while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
		when = &ngp_h->when;
		m = ngp_h->m;
		if (when->tv_sec > now->tv_sec ||
		    (when->tv_sec == now->tv_sec &&
		    when->tv_usec > now->tv_usec))
			break;

		/* Update outbound queue stats */
		plen = m->m_pkthdr.len;
		hinfo->run.qout_frames--;
		hinfo->run.qout_octets -= plen;

		/* Dequeue the packet from qout */
		TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
		uma_zfree(ngp_zone, ngp_h);

		NG_SEND_DATA(error, dest->hook, m, meta);
		if (error) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += plen;
		} else {
			hinfo->stats.fwd_frames++;
			hinfo->stats.fwd_octets += plen;
		}
	}

	if ((hinfo->run.qin_frames != 0 || hinfo->run.qout_frames != 0) &&
	    !priv->timer_scheduled) {
		ng_callout(&priv->timer, node, NULL, 1, ngp_callout, NULL, 0);
		priv->timer_scheduled = 1;
	}
}

/*
 * This routine is called on every clock tick.  We poll connected hooks
 * for queued frames by calling pipe_dequeue().
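 * The callout is armed from pipe_dequeue() with a one-tick delay whenever
 * frames are left in either queue, so the achievable delay and bandwidth
 * resolution is bounded by the kernel timer frequency (hz).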
 */
static void
ngp_callout(node_p node, hook_p hook, void *arg1, int arg2)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct timeval now;

	priv->timer_scheduled = 0;
	microuptime(&now);
	if (priv->upper.hook != NULL)
		pipe_dequeue(&priv->upper, &now);
	if (priv->lower.hook != NULL)
		pipe_dequeue(&priv->lower, &now);
}

/*
 * Shutdown processing
 *
 * This is tricky.  If we have both a lower and upper hook, then we
 * probably want to extricate ourselves and leave the two peers
 * still linked to each other.  Otherwise we should just shut down as
 * a normal node would.
 */
static int
ngp_shutdown(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);

	if (priv->timer_scheduled)
		ng_uncallout(&priv->timer, node);
	if (priv->lower.hook && priv->upper.hook)
		ng_bypass(priv->lower.hook, priv->upper.hook);
	else {
		if (priv->upper.hook != NULL)
			ng_rmhook_self(priv->upper.hook);
		if (priv->lower.hook != NULL)
			ng_rmhook_self(priv->lower.hook);
	}
	NG_NODE_UNREF(node);
	free(priv, M_NG_PIPE);
	return (0);
}

/*
 * Hook disconnection
 */
static int
ngp_disconnect(hook_p hook)
{
	struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
	struct ngp_fifo *ngp_f;
	struct ngp_hdr *ngp_h;

	KASSERT(hinfo != NULL, ("%s: null info", __FUNCTION__));
	hinfo->hook = NULL;

	/* Flush all fifo queues associated with the hook */
	while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
		while ((ngp_h = TAILQ_FIRST(&ngp_f->packet_head))) {
			TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
			m_freem(ngp_h->m);
			uma_zfree(ngp_zone, ngp_h);
		}
		TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
		uma_zfree(ngp_zone, ngp_f);
	}

	/* Flush the delay queue */
	while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
		TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
		m_freem(ngp_h->m);
		uma_zfree(ngp_zone, ngp_h);
	}

	/* Release the packet loss probability table (BER) */
	if (hinfo->ber_p)
		free(hinfo->ber_p, M_NG_PIPE);

	return (0);
}

static int
ngp_modevent(module_t mod, int type, void *unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		ngp_zone = uma_zcreate("ng_pipe", max(sizeof(struct ngp_hdr),
		    sizeof(struct ngp_fifo)), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		if (ngp_zone == NULL)
			panic("ng_pipe: couldn't allocate descriptor zone");
		break;
	case MOD_UNLOAD:
		uma_zdestroy(ngp_zone);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}