/*-
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2018-2020
 * Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/**
 * Author: Randall Stewart <rrs@netflix.com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>
#include <sys/mutex.h>
#include <sys/ck.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#define TCPSTATES		/* for logging */
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_ratelimit.h>
#ifndef USECS_IN_SECOND
#define USECS_IN_SECOND 1000000
#endif
/*
 * For the purposes of each send, what is the size
 * of an ethernet frame.
 */
MALLOC_DEFINE(M_TCPPACE, "tcp_hwpace", "TCP Hardware pacing memory");
#ifdef RATELIMIT
/*
 * The following preferred table will seem weird to
 * the casual viewer. Why do we not have any rates below
 * 1Mbps? Why do we have a rate at 1.44Mbps called common?
 * Why do the rates cluster in the 1-100Mbps range more
 * than others? Why does the table jump around at the beginning
 * and then rise more consistently?
 *
 * Let me try to answer those questions. A lot of
 * this is dependent on the hardware. We have three basic
 * supporters of rate limiting:
 *
 * Chelsio - Supporting 16 configurable rates.
 * Mlx - c4 supporting 13 fixed rates.
 * Mlx - c5 & c6 supporting 127 configurable rates.
 *
 * The c4 is why we have a common rate that is available
 * in all rate tables. This is a selected rate from the
 * c4 table and we assure it is available in all ratelimit
 * tables. This way the tcp_ratelimit code has an assured
 * rate it should always be able to get. This answers a
 * couple of the questions above.
 *
 * So what about the rest? Well, the table is built to
 * try to get the most out of a joint hardware/software
 * pacing system. The software pacer will always pick
 * a rate higher than the b/w that it is estimating
 * on the path. This is done for two reasons:
 * a) So we can discover more b/w
 * and
 * b) So we can send a block of MSS's down and then
 *    have the software timer go off after the previous
 *    send is completely out of the hardware.
 *
 * But when we do <b> we don't want the delay
 * between the last packet sent by the hardware to be
 * excessively long (to reach our desired rate).
 *
 * So let me give an example for clarity.
 *
 * Lets assume that the tcp stack sees that 29,110,000 bps is
 * what the bw of the path is. The stack would select the
 * rate 31Mbps. 31Mbps means that each send that is done
 * by the hardware will cause a 390 micro-second gap between
 * the packets sent at that rate. For 29,110,000 bps we
 * would need a 416 micro-second gap between each send.
 *
 * Note that we are calculating a complete time for pacing
 * which includes the ethernet, IP and TCP overhead. So
 * a full 1514 bytes is used for the above calculations.
 * My testing has shown that both cards are also using this
 * as their basis i.e. full payload size of the ethernet frame.
 * The TCP stack caller needs to be aware of this and make the
 * appropriate overhead calculations be included in its choices.
 *
 * Now, continuing our example, we pick an MSS size based on the
 * delta between the two rates (416 - 390) divided into the rate
 * we really wish to send at rounded up. That results in an MSS
 * send of 17 mss's at once. The hardware then will
 * run out of data in a single 17MSS send in 6,630 micro-seconds.
 *
 * On the other hand the software pacer will send more data
 * in 7,072 micro-seconds. This means that we will refill
 * the hardware 52 microseconds after it would have sent
 * next if it had not run out of data. This is a win since we are
 * only sending every 7ms or so and yet all the packets are spaced on
 * the wire with 94% of what they should be and only
 * the last packet is delayed extra to make up for the
 * difference.
 *
 * Note that the above formula has two important caveats.
 * If we are above (b/w wise) over 100Mbps we double the result
 * of the MSS calculation. The second caveat is if we are 500Mbps
 * or more we just send the maximum MSS at once i.e. 43MSS
 * (MAX_MSS_SENT). At the higher b/w's even the cards have limits
 * to what times (timer granularity) they can insert between
 * packets and start to send more than one packet at a time on
 * the wire.
 */
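/*
 * A worked sketch of the example above (numbers only, no new logic;
 * 1514 is ETHERNET_SEGMENT_SIZE and 1000000 is USECS_IN_SECOND, as
 * used by the code later in this file):
 *
 *	31Mbps         = 3,875,000 bytes/sec
 *	29,110,000 bps = 3,638,750 bytes/sec
 *	hw gap = (1514 * 1000000) / 3875000 ~= 390 usecs/frame
 *	sw gap = (1514 * 1000000) / 3638750 ~= 416 usecs/frame
 *
 * A 17 MSS burst drains from the hardware in 17 * 390 = 6,630 usecs,
 * and the hardware would have sent frame 18 at 6,630 + 390 = 7,020
 * usecs. The software timer refills at 17 * 416 = 7,072 usecs, i.e.
 * only 52 usecs after the hardware went idle, while the on-wire
 * spacing stays at 390/416 ~= 94% of the ideal gap.
 */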
#define COMMON_RATE 180500
const uint64_t desired_rates[] = {
	122500,			/* 1Mbps    - rate 1 */
	180500,			/* 1.44Mbps - rate 2, common rate */
	375000,			/* 3Mbps    - rate 3 */
	625000,			/* 5Mbps    - rate 4 */
	1250000,		/* 10Mbps   - rate 5 */
	1875000,		/* 15Mbps   - rate 6 */
	2500000,		/* 20Mbps   - rate 7 */
	3125000,		/* 25Mbps   - rate 8 */
	3750000,		/* 30Mbps   - rate 9 */
	4375000,		/* 35Mbps   - rate 10 */
	5000000,		/* 40Mbps   - rate 11 */
	6250000,		/* 50Mbps   - rate 12 */
	12500000,		/* 100Mbps  - rate 13 */
	25000000,		/* 200Mbps  - rate 14 */
	50000000,		/* 400Mbps  - rate 15 */
	100000000,		/* 800Mbps  - rate 16 */
	5625000,		/* 45Mbps   - rate 17 */
	6875000,		/* 55Mbps   - rate 18 */
	7500000,		/* 60Mbps   - rate 19 */
	8125000,		/* 65Mbps   - rate 20 */
	8750000,		/* 70Mbps   - rate 21 */
	9375000,		/* 75Mbps   - rate 22 */
	10000000,		/* 80Mbps   - rate 23 */
	10625000,		/* 85Mbps   - rate 24 */
	11250000,		/* 90Mbps   - rate 25 */
	11875000,		/* 95Mbps   - rate 26 */
	12500000,		/* 100Mbps  - rate 27 */
	13750000,		/* 110Mbps  - rate 28 */
	15000000,		/* 120Mbps  - rate 29 */
	16250000,		/* 130Mbps  - rate 30 */
	17500000,		/* 140Mbps  - rate 31 */
	18750000,		/* 150Mbps  - rate 32 */
	20000000,		/* 160Mbps  - rate 33 */
	21250000,		/* 170Mbps  - rate 34 */
	22500000,		/* 180Mbps  - rate 35 */
	23750000,		/* 190Mbps  - rate 36 */
	26250000,		/* 210Mbps  - rate 37 */
	27500000,		/* 220Mbps  - rate 38 */
	28750000,		/* 230Mbps  - rate 39 */
	30000000,		/* 240Mbps  - rate 40 */
	31250000,		/* 250Mbps  - rate 41 */
	34375000,		/* 275Mbps  - rate 42 */
	37500000,		/* 300Mbps  - rate 43 */
	40625000,		/* 325Mbps  - rate 44 */
	43750000,		/* 350Mbps  - rate 45 */
	46875000,		/* 375Mbps  - rate 46 */
	53125000,		/* 425Mbps  - rate 47 */
	56250000,		/* 450Mbps  - rate 48 */
	59375000,		/* 475Mbps  - rate 49 */
	62500000,		/* 500Mbps  - rate 50 */
	68750000,		/* 550Mbps  - rate 51 */
	75000000,		/* 600Mbps  - rate 52 */
	81250000,		/* 650Mbps  - rate 53 */
	87500000,		/* 700Mbps  - rate 54 */
	93750000,		/* 750Mbps  - rate 55 */
	106250000,		/* 850Mbps  - rate 56 */
	112500000,		/* 900Mbps  - rate 57 */
	125000000,		/* 1Gbps    - rate 58 */
	156250000,		/* 1.25Gbps - rate 59 */
	187500000,		/* 1.5Gbps  - rate 60 */
	218750000,		/* 1.75Gbps - rate 61 */
	250000000,		/* 2Gbps    - rate 62 */
	281250000,		/* 2.25Gbps - rate 63 */
	312500000,		/* 2.5Gbps  - rate 64 */
	343750000,		/* 2.75Gbps - rate 65 */
	375000000,		/* 3Gbps    - rate 66 */
	500000000,		/* 4Gbps    - rate 67 */
	625000000,		/* 5Gbps    - rate 68 */
	750000000,		/* 6Gbps    - rate 69 */
	875000000,		/* 7Gbps    - rate 70 */
	1000000000,		/* 8Gbps    - rate 71 */
	1125000000,		/* 9Gbps    - rate 72 */
	1250000000,		/* 10Gbps   - rate 73 */
	1875000000,		/* 15Gbps   - rate 74 */
	2500000000		/* 20Gbps   - rate 75 */
};

#define MAX_HDWR_RATES (sizeof(desired_rates)/sizeof(uint64_t))
#define RS_ORDERED_COUNT 16	/*
				 * Number that are in order
				 * at the beginning of the table;
				 * beyond this a sort is required.
				 */
#define RS_NEXT_ORDER_GROUP 16	/*
				 * The index in our table where
				 * the second ordered group begins.
				 */
#define ALL_HARDWARE_RATES 1004	/*
				 * 1Meg - 1Gig in 1 Meg steps
				 * plus 100k, 200k and 500k and
				 * 10Gig
				 */

#define RS_ONE_MEGABIT_PERSEC	1000000
#define RS_ONE_GIGABIT_PERSEC	1000000000
#define RS_TEN_GIGABIT_PERSEC	10000000000
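/*
 * A sketch of how an ALL_HARDWARE_RATES table is laid out once
 * rt_setup_new_rs() below populates it (rates in bytes/sec):
 *
 *	index 0        12500              (100kbps)
 *	index 1        25000              (200kbps)
 *	index 2        62500              (500kbps)
 *	index 3..1002  125000 * (i - 2)   (1Mbps .. 1Gbps, 1Mbps steps)
 *	index 1003     1250000000         (10Gbps)
 *
 * This is why the index calculations in tcp_int_find_suitable_rate()
 * below add 2 to the megabit count: index = mbps + 2 across the
 * 1Meg - 1Gig span.
 */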
static struct head_tcp_rate_set int_rs;
static struct mtx rs_mtx;
uint32_t rs_number_alive;
uint32_t rs_number_dead;
static uint32_t rs_floor_mss = 0;
static uint32_t wait_time_floor = 8000;	/* 8 ms */
static uint32_t rs_hw_floor_mss = 16;
static uint32_t num_of_waits_allowed = 1; /* How many time blocks are we willing to wait */

static uint32_t mss_divisor = RL_DEFAULT_DIVISOR;
static uint32_t even_num_segs = 1;
static uint32_t even_threshold = 4;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, rl, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Ratelimit stats");
SYSCTL_UINT(_net_inet_tcp_rl, OID_AUTO, alive, CTLFLAG_RW,
    &rs_number_alive, 0,
    "Number of interfaces initialized for ratelimiting");
SYSCTL_UINT(_net_inet_tcp_rl, OID_AUTO, dead, CTLFLAG_RW,
    &rs_number_dead, 0,
    "Number of interfaces departing from ratelimiting");
SYSCTL_UINT(_net_inet_tcp_rl, OID_AUTO, floor_mss, CTLFLAG_RW,
    &rs_floor_mss, 0,
    "Number of MSS that will override the normal minimums (0 means don't enforce)");
SYSCTL_UINT(_net_inet_tcp_rl, OID_AUTO, wait_floor, CTLFLAG_RW,
    &wait_time_floor, 2000,
    "As b/w increases, what is the wait floor (usecs) we are willing to wait at the end?");
SYSCTL_UINT(_net_inet_tcp_rl, OID_AUTO, time_blocks, CTLFLAG_RW,
    &num_of_waits_allowed, 1,
    "How many time blocks on the end should software pacing be willing to wait?");

SYSCTL_UINT(_net_inet_tcp_rl, OID_AUTO, hw_floor_mss, CTLFLAG_RW,
    &rs_hw_floor_mss, 16,
    "Number of mss that is the minimum for hardware pacing?");

SYSCTL_INT(_net_inet_tcp_rl, OID_AUTO, divisor, CTLFLAG_RW,
    &mss_divisor, RL_DEFAULT_DIVISOR,
    "The value divided into bytes per second to help establish mss size");
SYSCTL_INT(_net_inet_tcp_rl, OID_AUTO, even, CTLFLAG_RW,
    &even_num_segs, 1,
    "Do we round mss size up to an even number of segments for delayed ack");
SYSCTL_INT(_net_inet_tcp_rl, OID_AUTO, eventhresh, CTLFLAG_RW,
    &even_threshold, 4,
    "At what number of mss do we start rounding up to an even number of mss?");
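/*
 * These knobs surface under net.inet.tcp.rl.*. A minimal userland
 * sketch of reading one of them (illustrative only, not part of this
 * file; error handling elided):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint32_t alive;
 *		size_t len = sizeof(alive);
 *
 *		if (sysctlbyname("net.inet.tcp.rl.alive", &alive, &len,
 *		    NULL, 0) == 0)
 *			printf("ratelimit interfaces alive: %u\n", alive);
 *		return (0);
 *	}
 */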
static void
rl_add_syctl_entries(struct sysctl_oid *rl_sysctl_root, struct tcp_rate_set *rs)
{
	/*
	 * Add sysctl entries for this interface.
	 */
	if (rs->rs_flags & RS_INTF_NO_SUP) {
		SYSCTL_ADD_S32(&rs->sysctl_ctx,
		    SYSCTL_CHILDREN(rl_sysctl_root),
		    OID_AUTO, "disable", CTLFLAG_RD,
		    &rs->rs_disable, 0,
		    "Disable this interface from new hdwr limiting?");
	} else {
		SYSCTL_ADD_S32(&rs->sysctl_ctx,
		    SYSCTL_CHILDREN(rl_sysctl_root),
		    OID_AUTO, "disable", CTLFLAG_RW,
		    &rs->rs_disable, 0,
		    "Disable this interface from new hdwr limiting?");
	}
	SYSCTL_ADD_S32(&rs->sysctl_ctx,
	    SYSCTL_CHILDREN(rl_sysctl_root),
	    OID_AUTO, "minseg", CTLFLAG_RW,
	    &rs->rs_min_seg, 0,
	    "What is the minimum we need to send on this interface?");
	SYSCTL_ADD_U64(&rs->sysctl_ctx,
	    SYSCTL_CHILDREN(rl_sysctl_root),
	    OID_AUTO, "flow_limit", CTLFLAG_RW,
	    &rs->rs_flow_limit, 0,
	    "What is the limit for number of flows (0=unlimited)?");
	SYSCTL_ADD_S32(&rs->sysctl_ctx,
	    SYSCTL_CHILDREN(rl_sysctl_root),
	    OID_AUTO, "highest", CTLFLAG_RD,
	    &rs->rs_highest_valid, 0,
	    "Highest valid rate");
	SYSCTL_ADD_S32(&rs->sysctl_ctx,
	    SYSCTL_CHILDREN(rl_sysctl_root),
	    OID_AUTO, "lowest", CTLFLAG_RD,
	    &rs->rs_lowest_valid, 0,
	    "Lowest valid rate");
	SYSCTL_ADD_S32(&rs->sysctl_ctx,
	    SYSCTL_CHILDREN(rl_sysctl_root),
	    OID_AUTO, "flags", CTLFLAG_RD,
	    &rs->rs_flags, 0,
	    "What flags are on the entry?");
	SYSCTL_ADD_S32(&rs->sysctl_ctx,
	    SYSCTL_CHILDREN(rl_sysctl_root),
	    OID_AUTO, "numrates", CTLFLAG_RD,
	    &rs->rs_rate_cnt, 0,
	    "How many rates are there?");
	SYSCTL_ADD_U64(&rs->sysctl_ctx,
	    SYSCTL_CHILDREN(rl_sysctl_root),
	    OID_AUTO, "flows_using", CTLFLAG_RD,
	    &rs->rs_flows_using, 0,
	    "How many flows are using this interface now?");
#ifdef DETAILED_RATELIMIT_SYSCTL
	if (rs->rs_rlt && rs->rs_rate_cnt > 0) {
		/* Lets display the rates */
		int i;
		struct sysctl_oid *rl_rates;
		struct sysctl_oid *rl_rate_num;
		char rate_num[16];

		rl_rates = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
		    SYSCTL_CHILDREN(rl_sysctl_root),
		    OID_AUTO,
		    "rate",
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "Ratelist");
		for (i = 0; i < rs->rs_rate_cnt; i++) {
			sprintf(rate_num, "%d", i);
			rl_rate_num = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
			    SYSCTL_CHILDREN(rl_rates),
			    OID_AUTO,
			    rate_num,
			    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
			    "Individual Rate");
			SYSCTL_ADD_U32(&rs->sysctl_ctx,
			    SYSCTL_CHILDREN(rl_rate_num),
			    OID_AUTO, "flags", CTLFLAG_RD,
			    &rs->rs_rlt[i].flags, 0,
			    "Flags on this rate");
			SYSCTL_ADD_U32(&rs->sysctl_ctx,
			    SYSCTL_CHILDREN(rl_rate_num),
			    OID_AUTO, "pacetime", CTLFLAG_RD,
			    &rs->rs_rlt[i].time_between, 0,
			    "Time hardware inserts between 1500 byte sends");
			SYSCTL_ADD_LONG(&rs->sysctl_ctx,
			    SYSCTL_CHILDREN(rl_rate_num),
			    OID_AUTO, "rate", CTLFLAG_RD,
			    &rs->rs_rlt[i].rate,
			    "Rate in bytes per second");
			SYSCTL_ADD_LONG(&rs->sysctl_ctx,
			    SYSCTL_CHILDREN(rl_rate_num),
			    OID_AUTO, "using", CTLFLAG_RD,
			    &rs->rs_rlt[i].using,
			    "Number of flows using");
			SYSCTL_ADD_LONG(&rs->sysctl_ctx,
			    SYSCTL_CHILDREN(rl_rate_num),
			    OID_AUTO, "enobufs", CTLFLAG_RD,
			    &rs->rs_rlt[i].rs_num_enobufs,
			    "Number of enobufs logged on this rate");
		}
	}
#endif
}
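/*
 * The resulting per-interface tree hangs off net.inet.tcp.rl by
 * interface name (if_xname, see rt_setup_new_rs() below). A
 * hypothetical layout for an interface "mce0" (the name is only an
 * example), with DETAILED_RATELIMIT_SYSCTL compiled in:
 *
 *	net.inet.tcp.rl.mce0.disable
 *	net.inet.tcp.rl.mce0.minseg
 *	net.inet.tcp.rl.mce0.flow_limit
 *	net.inet.tcp.rl.mce0.highest
 *	net.inet.tcp.rl.mce0.lowest
 *	net.inet.tcp.rl.mce0.flags
 *	net.inet.tcp.rl.mce0.numrates
 *	net.inet.tcp.rl.mce0.flows_using
 *	net.inet.tcp.rl.mce0.rate.<N>.{flags,pacetime,rate,using,enobufs}
 */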
static void
rs_destroy(epoch_context_t ctx)
{
	struct tcp_rate_set *rs;
	bool do_free_rs;

	rs = __containerof(ctx, struct tcp_rate_set, rs_epoch_ctx);

	mtx_lock(&rs_mtx);
	rs->rs_flags &= ~RS_FUNERAL_SCHD;
	/*
	 * In theory it's possible (but unlikely)
	 * that while the delete was occurring
	 * and we were applying the DEAD flag
	 * someone slipped in and found the
	 * interface in a lookup. While we
	 * decided rs_flows_using was 0 and
	 * were scheduling the epoch_call, the other
	 * thread incremented rs_flows_using. This
	 * is because users have a pointer and
	 * we only use the rs_flows_using in an
	 * atomic fashion, i.e. the other entities
	 * are not protected. To assure this did
	 * not occur, we check rs_flows_using here
	 * before deleting.
	 */
	do_free_rs = (rs->rs_flows_using == 0);
	rs_number_dead--;
	mtx_unlock(&rs_mtx);

	if (do_free_rs) {
		sysctl_ctx_free(&rs->sysctl_ctx);
		free(rs->rs_rlt, M_TCPPACE);
		free(rs, M_TCPPACE);
	}
}

static void
rs_defer_destroy(struct tcp_rate_set *rs)
{

	mtx_assert(&rs_mtx, MA_OWNED);

	/* Check if already pending. */
	if (rs->rs_flags & RS_FUNERAL_SCHD)
		return;

	rs_number_dead++;

	/* Set flag to only defer once. */
	rs->rs_flags |= RS_FUNERAL_SCHD;
	NET_EPOCH_CALL(rs_destroy, &rs->rs_epoch_ctx);
}
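/*
 * The teardown sequence, sketched (this restates the code above and
 * in tcp_rl_ifnet_departure()/tcp_rel_pacing_rate() below; it is not
 * new behavior):
 *
 *	1. The ifnet departs: the rate set is unlinked from int_rs,
 *	   marked RS_IS_DEAD and its send tags are released.
 *	2. If no flows hold a reference (rs_flows_using == 0),
 *	   rs_defer_destroy() schedules rs_destroy() via
 *	   NET_EPOCH_CALL so concurrent epoch-protected readers
 *	   finish first.
 *	3. Otherwise the last flow to drop its reference (see
 *	   tcp_rel_pacing_rate()) triggers the deferred destroy.
 *	4. rs_destroy() re-checks rs_flows_using under rs_mtx before
 *	   freeing, closing the race described in the comment above.
 */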
#ifdef INET
extern counter_u64_t rate_limit_new;
extern counter_u64_t rate_limit_chg;
extern counter_u64_t rate_limit_set_ok;
extern counter_u64_t rate_limit_active;
extern counter_u64_t rate_limit_alloc_fail;
#endif

static int
rl_attach_txrtlmt(struct ifnet *ifp,
    uint32_t flowtype,
    int flowid,
    uint64_t cfg_rate,
    struct m_snd_tag **tag)
{
	int error;
	union if_snd_tag_alloc_params params = {
		.rate_limit.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT,
		.rate_limit.hdr.flowid = flowid,
		.rate_limit.hdr.flowtype = flowtype,
		.rate_limit.max_rate = cfg_rate,
		.rate_limit.flags = M_NOWAIT,
	};

	error = m_snd_tag_alloc(ifp, &params, tag);
#ifdef INET
	if (error == 0) {
		counter_u64_add(rate_limit_set_ok, 1);
		counter_u64_add(rate_limit_active, 1);
	} else if (error != EOPNOTSUPP)
		counter_u64_add(rate_limit_alloc_fail, 1);
#endif
	return (error);
}

static void
populate_canned_table(struct tcp_rate_set *rs, const uint64_t *rate_table_act)
{
	/*
	 * The internal table is "special", it
	 * is two separate ordered tables that
	 * must be merged. We get here when the
	 * adapter specifies a number of rates that
	 * covers both ranges in the table in some
	 * form.
	 */
	int i, at_low, at_high;
	uint8_t low_disabled = 0, high_disabled = 0;

	for (i = 0, at_low = 0, at_high = RS_NEXT_ORDER_GROUP; i < rs->rs_rate_cnt; i++) {
		rs->rs_rlt[i].flags = 0;
		rs->rs_rlt[i].time_between = 0;
		if ((low_disabled == 0) &&
		    (high_disabled ||
		     (rate_table_act[at_low] < rate_table_act[at_high]))) {
			rs->rs_rlt[i].rate = rate_table_act[at_low];
			at_low++;
			if (at_low == RS_NEXT_ORDER_GROUP)
				low_disabled = 1;
		} else if (high_disabled == 0) {
			rs->rs_rlt[i].rate = rate_table_act[at_high];
			at_high++;
			if (at_high == MAX_HDWR_RATES)
				high_disabled = 1;
		}
	}
}
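/*
 * A short trace of the two-pointer merge above against desired_rates
 * (illustrative only, values in bytes/sec):
 *
 *	group 1 (index 0..15): 122500, 180500, ... 6250000, 12500000, ...
 *	group 2 (index 16..):  5625000, 6875000, 7500000, ...
 *
 *	take 122500 .. 5000000 from group 1 (1Mbps - 40Mbps), then
 *	6250000 (50M) vs 5625000 (45M) -> take 5625000 from group 2,
 *	6250000 (50M) vs 6875000 (55M) -> take 6250000 from group 1,
 *	12500000 (100M) vs 6875000 (55M) -> take 6875000, and so on,
 *
 * producing a single sorted table; whichever group runs out first
 * is "disabled" and the other is drained.
 */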
static struct tcp_rate_set *
rt_setup_new_rs(struct ifnet *ifp, int *error)
{
	struct tcp_rate_set *rs;
	const uint64_t *rate_table_act;
	uint64_t lentim, res;
	size_t sz;
	uint32_t hash_type;
	int i;
	struct if_ratelimit_query_results rl;
	struct sysctl_oid *rl_sysctl_root;
	struct epoch_tracker et;
	/*
	 * We expect to enter with the
	 * mutex locked.
	 */

	if (ifp->if_ratelimit_query == NULL) {
		/*
		 * We can do nothing if we cannot
		 * get a query back from the driver.
		 */
		printf("Warning: no query functions for %s:%d -- failed\n",
		    ifp->if_dname, ifp->if_dunit);
		return (NULL);
	}
	rs = malloc(sizeof(struct tcp_rate_set), M_TCPPACE, M_NOWAIT | M_ZERO);
	if (rs == NULL) {
		if (error)
			*error = ENOMEM;
		printf("Warning: no memory for malloc of tcp_rate_set\n");
		return (NULL);
	}
	memset(&rl, 0, sizeof(rl));
	rl.flags = RT_NOSUPPORT;
	ifp->if_ratelimit_query(ifp, &rl);
	if (rl.flags & RT_IS_UNUSABLE) {
		/*
		 * The interface does not really support
		 * the rate-limiting.
		 */
		memset(rs, 0, sizeof(struct tcp_rate_set));
		rs->rs_ifp = ifp;
		rs->rs_if_dunit = ifp->if_dunit;
		rs->rs_flags = RS_INTF_NO_SUP;
		rs->rs_disable = 1;
		rs_number_alive++;
		sysctl_ctx_init(&rs->sysctl_ctx);
		rl_sysctl_root = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_rl),
		    OID_AUTO,
		    rs->rs_ifp->if_xname,
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		rl_add_syctl_entries(rl_sysctl_root, rs);
		NET_EPOCH_ENTER(et);
		mtx_lock(&rs_mtx);
		CK_LIST_INSERT_HEAD(&int_rs, rs, next);
		mtx_unlock(&rs_mtx);
		NET_EPOCH_EXIT(et);
		return (rs);
	} else if ((rl.flags & RT_IS_INDIRECT) == RT_IS_INDIRECT) {
		memset(rs, 0, sizeof(struct tcp_rate_set));
		rs->rs_ifp = ifp;
		rs->rs_if_dunit = ifp->if_dunit;
		rs->rs_flags = RS_IS_DEFF;
		rs_number_alive++;
		sysctl_ctx_init(&rs->sysctl_ctx);
		rl_sysctl_root = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_rl),
		    OID_AUTO,
		    rs->rs_ifp->if_xname,
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		rl_add_syctl_entries(rl_sysctl_root, rs);
		NET_EPOCH_ENTER(et);
		mtx_lock(&rs_mtx);
		CK_LIST_INSERT_HEAD(&int_rs, rs, next);
		mtx_unlock(&rs_mtx);
		NET_EPOCH_EXIT(et);
		return (rs);
	} else if ((rl.flags & RT_IS_FIXED_TABLE) == RT_IS_FIXED_TABLE) {
		/* Mellanox C4 likely */
		rs->rs_ifp = ifp;
		rs->rs_if_dunit = ifp->if_dunit;
		rs->rs_rate_cnt = rl.number_of_rates;
		rs->rs_min_seg = rl.min_segment_burst;
		rs->rs_highest_valid = 0;
		rs->rs_flow_limit = rl.max_flows;
		rs->rs_flags = RS_IS_INTF | RS_NO_PRE;
		rs->rs_disable = 0;
		rate_table_act = rl.rate_table;
	} else if ((rl.flags & RT_IS_SELECTABLE) == RT_IS_SELECTABLE) {
		/* Chelsio, C5 and C6 of Mellanox? */
		rs->rs_ifp = ifp;
		rs->rs_if_dunit = ifp->if_dunit;
		rs->rs_rate_cnt = rl.number_of_rates;
		rs->rs_min_seg = rl.min_segment_burst;
		rs->rs_disable = 0;
		rs->rs_flow_limit = rl.max_flows;
		rate_table_act = desired_rates;
		if ((rs->rs_rate_cnt > MAX_HDWR_RATES) &&
		    (rs->rs_rate_cnt < ALL_HARDWARE_RATES)) {
			/*
			 * Our desired table is not big
			 * enough, do what we can.
			 */
			rs->rs_rate_cnt = MAX_HDWR_RATES;
		}
		if (rs->rs_rate_cnt <= RS_ORDERED_COUNT)
			rs->rs_flags = RS_IS_INTF;
		else
			rs->rs_flags = RS_IS_INTF | RS_INT_TBL;
		if (rs->rs_rate_cnt >= ALL_HARDWARE_RATES)
			rs->rs_rate_cnt = ALL_HARDWARE_RATES;
	} else {
		free(rs, M_TCPPACE);
		return (NULL);
	}
	sz = sizeof(struct tcp_hwrate_limit_table) * rs->rs_rate_cnt;
	rs->rs_rlt = malloc(sz, M_TCPPACE, M_NOWAIT);
	if (rs->rs_rlt == NULL) {
		if (error)
			*error = ENOMEM;
bail:
		free(rs, M_TCPPACE);
		return (NULL);
	}
	if (rs->rs_rate_cnt >= ALL_HARDWARE_RATES) {
		/*
		 * The interface supports all
		 * the rates we could possibly want.
		 */
		uint64_t rat;

		rs->rs_rlt[0].rate = 12500;	/* 100k */
		rs->rs_rlt[1].rate = 25000;	/* 200k */
		rs->rs_rlt[2].rate = 62500;	/* 500k */
		/*
		 * Note 125000 == 1Megabit,
		 * populate 1Meg - 1000Meg.
		 */
		for (i = 3, rat = 125000; i < (ALL_HARDWARE_RATES-1); i++) {
			rs->rs_rlt[i].rate = rat;
			rat += 125000;
		}
		rs->rs_rlt[(ALL_HARDWARE_RATES-1)].rate = 1250000000;
	} else if (rs->rs_flags & RS_INT_TBL) {
		/* We populate this in a special way */
		populate_canned_table(rs, rate_table_act);
	} else {
		/*
		 * Just copy in the rates from
		 * the table, it is in order.
		 */
		for (i = 0; i < rs->rs_rate_cnt; i++) {
			rs->rs_rlt[i].rate = rate_table_act[i];
			rs->rs_rlt[i].time_between = 0;
			rs->rs_rlt[i].flags = 0;
		}
	}
	for (i = (rs->rs_rate_cnt - 1); i >= 0; i--) {
		/*
		 * We go backwards through the list so that if we can't get
		 * a rate and fail to init one, we have at least a chance of
		 * getting the highest one.
		 */
		rs->rs_rlt[i].ptbl = rs;
		rs->rs_rlt[i].tag = NULL;
		rs->rs_rlt[i].using = 0;
		rs->rs_rlt[i].rs_num_enobufs = 0;
		/*
		 * Calculate the time between.
		 */
		lentim = ETHERNET_SEGMENT_SIZE * USECS_IN_SECOND;
		res = lentim / rs->rs_rlt[i].rate;
		if (res > 0)
			rs->rs_rlt[i].time_between = res;
		else
			rs->rs_rlt[i].time_between = 1;
		if (rs->rs_flags & RS_NO_PRE) {
			rs->rs_rlt[i].flags = HDWRPACE_INITED;
			rs->rs_lowest_valid = i;
		} else {
			int err;

			if ((rl.flags & RT_IS_SETUP_REQ) &&
			    (ifp->if_ratelimit_setup)) {
				err = ifp->if_ratelimit_setup(ifp,
				    rs->rs_rlt[i].rate, i);
				if (err)
					goto handle_err;
			}
#ifdef RSS
			hash_type = M_HASHTYPE_RSS_TCP_IPV4;
#else
			hash_type = M_HASHTYPE_OPAQUE_HASH;
#endif
			err = rl_attach_txrtlmt(ifp,
			    hash_type,
			    (i + 1),
			    rs->rs_rlt[i].rate,
			    &rs->rs_rlt[i].tag);
			if (err) {
handle_err:
				if (i == (rs->rs_rate_cnt - 1)) {
					/*
					 * Huh - first rate and we can't get
					 * it?
					 */
					free(rs->rs_rlt, M_TCPPACE);
					if (error)
						*error = err;
					goto bail;
				} else {
					if (error)
						*error = err;
				}
				break;
			} else {
				rs->rs_rlt[i].flags = HDWRPACE_INITED | HDWRPACE_TAGPRESENT;
				rs->rs_lowest_valid = i;
			}
		}
	}
	/* Did we get at least 1 rate? */
	if (rs->rs_rlt[(rs->rs_rate_cnt - 1)].flags & HDWRPACE_INITED)
		rs->rs_highest_valid = rs->rs_rate_cnt - 1;
	else {
		free(rs->rs_rlt, M_TCPPACE);
		goto bail;
	}
	rs_number_alive++;
	sysctl_ctx_init(&rs->sysctl_ctx);
	rl_sysctl_root = SYSCTL_ADD_NODE(&rs->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_rl),
	    OID_AUTO,
	    rs->rs_ifp->if_xname,
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "");
	rl_add_syctl_entries(rl_sysctl_root, rs);
	NET_EPOCH_ENTER(et);
	mtx_lock(&rs_mtx);
	CK_LIST_INSERT_HEAD(&int_rs, rs, next);
	mtx_unlock(&rs_mtx);
	NET_EPOCH_EXIT(et);
	return (rs);
}
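/*
 * The time_between computed in the loop above is the per-1514-byte
 * frame gap in microseconds. Two illustrative evaluations of the
 * same formula (a sketch, nothing new):
 *
 *	1Mbps slot (125,000 bytes/sec):
 *		(1514 * 1000000) / 125000    = 12112 usecs/frame
 *	1Gbps slot (125,000,000 bytes/sec):
 *		(1514 * 1000000) / 125000000 = 12 usecs/frame
 *
 * At the high end the gap approaches the timer granularity the
 * header comment warns about, which is one reason the burst sizing
 * in tcp_get_pacing_burst_size_w_divisor() below sends several MSS
 * per hardware slot.
 */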
/*
 * For an explanation of why the argument is volatile please
 * look at the comments around rt_setup_rate().
 */
static const struct tcp_hwrate_limit_table *
tcp_int_find_suitable_rate(const volatile struct tcp_rate_set *rs,
    uint64_t bytes_per_sec, uint32_t flags, uint64_t *lower_rate)
{
	struct tcp_hwrate_limit_table *arte = NULL, *rte = NULL;
	uint64_t mbits_per_sec, ind_calc, previous_rate = 0;
	int i;

	mbits_per_sec = (bytes_per_sec * 8);
	if (flags & RS_PACING_LT) {
		if ((mbits_per_sec < RS_ONE_MEGABIT_PERSEC) &&
		    (rs->rs_lowest_valid <= 2)) {
			/*
			 * Smaller than 1Meg, only
			 * 3 entries can match it.
			 */
			previous_rate = 0;
			for (i = rs->rs_lowest_valid; i < 3; i++) {
				if (bytes_per_sec <= rs->rs_rlt[i].rate) {
					rte = &rs->rs_rlt[i];
					break;
				} else if (rs->rs_rlt[i].flags & HDWRPACE_INITED) {
					arte = &rs->rs_rlt[i];
				}
				previous_rate = rs->rs_rlt[i].rate;
			}
			goto done;
		} else if ((mbits_per_sec > RS_ONE_GIGABIT_PERSEC) &&
		    (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED)) {
			/*
			 * Larger than 1G (the majority of
			 * our table).
			 */
			if (mbits_per_sec < RS_TEN_GIGABIT_PERSEC)
				rte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
			else
				arte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
			previous_rate = rs->rs_rlt[(ALL_HARDWARE_RATES-2)].rate;
			goto done;
		}
		/*
		 * If we reach here it's in our table (between 1Meg - 1000Meg);
		 * just take the rounded down mbits per second, and add
		 * 1Megabit to it; from this we can calculate
		 * the index in the table.
		 */
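		/*
		 * A worked instance of the index math below (sketch):
		 * a request of 687,500 bytes/sec is 5,500,000 bits/sec;
		 * 5,500,000 / 1,000,000 = 5 with a remainder, so
		 * ind_calc rounds up to 6 and the +2 offset gives
		 * index 8, which holds 125000 * (8 - 2) = 750,000
		 * bytes/sec, i.e. the 6Mbps slot.
		 */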
		ind_calc = mbits_per_sec/RS_ONE_MEGABIT_PERSEC;
		if ((ind_calc * RS_ONE_MEGABIT_PERSEC) != mbits_per_sec)
			ind_calc++;
		/* our table is offset by 3, we add 2 */
		ind_calc += 2;
		if (ind_calc > (ALL_HARDWARE_RATES-1)) {
			/* This should not happen */
			ind_calc = ALL_HARDWARE_RATES-1;
		}
		if ((ind_calc >= rs->rs_lowest_valid) &&
		    (ind_calc <= rs->rs_highest_valid)) {
			rte = &rs->rs_rlt[ind_calc];
			if (ind_calc >= 1)
				previous_rate = rs->rs_rlt[(ind_calc-1)].rate;
		}
	} else if (flags & RS_PACING_EXACT_MATCH) {
		if ((mbits_per_sec < RS_ONE_MEGABIT_PERSEC) &&
		    (rs->rs_lowest_valid <= 2)) {
			for (i = rs->rs_lowest_valid; i < 3; i++) {
				if (bytes_per_sec == rs->rs_rlt[i].rate) {
					rte = &rs->rs_rlt[i];
					break;
				}
			}
		} else if ((mbits_per_sec > RS_ONE_GIGABIT_PERSEC) &&
		    (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED)) {
			/* > 1Gbps only one rate */
			if (bytes_per_sec == rs->rs_rlt[(ALL_HARDWARE_RATES-1)].rate) {
				/* It's 10G, wow */
				rte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
			}
		} else {
			/* Ok it must be an exact meg (it's between 1Meg and 1G) */
			ind_calc = mbits_per_sec/RS_ONE_MEGABIT_PERSEC;
			if ((ind_calc * RS_ONE_MEGABIT_PERSEC) == mbits_per_sec) {
				/* It's an exact Mbps */
				ind_calc += 2;
				if (ind_calc > (ALL_HARDWARE_RATES-1)) {
					/* This should not happen */
					ind_calc = ALL_HARDWARE_RATES-1;
				}
				if (rs->rs_rlt[ind_calc].flags & HDWRPACE_INITED)
					rte = &rs->rs_rlt[ind_calc];
			}
		}
	} else {
		/* we want greater than the requested rate */
		if ((mbits_per_sec < RS_ONE_MEGABIT_PERSEC) &&
		    (rs->rs_lowest_valid <= 2)) {
			arte = &rs->rs_rlt[3]; /* set alternate to 1Meg */
			for (i = 2; i >= rs->rs_lowest_valid; i--) {
				if (bytes_per_sec < rs->rs_rlt[i].rate) {
					rte = &rs->rs_rlt[i];
					if (i >= 1) {
						previous_rate = rs->rs_rlt[(i-1)].rate;
					}
					break;
				} else if ((flags & RS_PACING_GEQ) &&
				    (bytes_per_sec == rs->rs_rlt[i].rate)) {
					rte = &rs->rs_rlt[i];
					if (i >= 1) {
						previous_rate = rs->rs_rlt[(i-1)].rate;
					}
					break;
				} else {
					arte = &rs->rs_rlt[i]; /* new alternate */
				}
			}
		} else if (mbits_per_sec > RS_ONE_GIGABIT_PERSEC) {
			if ((bytes_per_sec < rs->rs_rlt[(ALL_HARDWARE_RATES-1)].rate) &&
			    (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED)) {
				/* Our top rate is larger than the request */
				rte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
			} else if ((flags & RS_PACING_GEQ) &&
			    (bytes_per_sec == rs->rs_rlt[(ALL_HARDWARE_RATES-1)].rate) &&
			    (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED)) {
				/* It matches our top rate */
				rte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
			} else if (rs->rs_rlt[(ALL_HARDWARE_RATES-1)].flags & HDWRPACE_INITED) {
				/* The top rate is an alternative */
				arte = &rs->rs_rlt[(ALL_HARDWARE_RATES-1)];
			}
			previous_rate = rs->rs_rlt[(ALL_HARDWARE_RATES-2)].rate;
		} else {
			/* It's in our range 1Meg - 1Gig */
			if (flags & RS_PACING_GEQ) {
				ind_calc = mbits_per_sec/RS_ONE_MEGABIT_PERSEC;
				if ((ind_calc * RS_ONE_MEGABIT_PERSEC) == mbits_per_sec) {
					if (ind_calc > (ALL_HARDWARE_RATES-1)) {
						/* This should not happen */
						ind_calc = (ALL_HARDWARE_RATES-1);
					}
					rte = &rs->rs_rlt[ind_calc];
					if (ind_calc >= 1)
						previous_rate = rs->rs_rlt[(ind_calc-1)].rate;
				}
				goto done;
			}
			ind_calc = (mbits_per_sec + (RS_ONE_MEGABIT_PERSEC-1))/RS_ONE_MEGABIT_PERSEC;
			ind_calc += 2;
			if (ind_calc > (ALL_HARDWARE_RATES-1)) {
				/* This should not happen */
				ind_calc = ALL_HARDWARE_RATES-1;
			}
			if (rs->rs_rlt[ind_calc].flags & HDWRPACE_INITED) {
				rte = &rs->rs_rlt[ind_calc];
				if (ind_calc >= 1)
					previous_rate = rs->rs_rlt[(ind_calc-1)].rate;
			}
		}
	}
done:
	if ((rte == NULL) &&
	    (arte != NULL) &&
	    (flags & RS_PACING_SUB_OK)) {
		/* We can use the substitute */
		rte = arte;
	}
	if (lower_rate)
		*lower_rate = previous_rate;
	return (rte);
}
/*
 * For an explanation of why the argument is volatile please
 * look at the comments around rt_setup_rate().
 */
static const struct tcp_hwrate_limit_table *
tcp_find_suitable_rate(const volatile struct tcp_rate_set *rs, uint64_t bytes_per_sec, uint32_t flags, uint64_t *lower_rate)
{
	/**
	 * Hunt the rate table with the restrictions in flags and find a
	 * suitable rate if possible.
	 * RS_PACING_EXACT_MATCH - look for an exact match to rate.
	 * RS_PACING_GT     - must be greater than.
	 * RS_PACING_GEQ    - must be greater than or equal.
	 * RS_PACING_LT     - must be less than.
	 * RS_PACING_SUB_OK - If we don't meet criteria a
	 *                    substitute is ok.
	 */
	int i, matched;
	struct tcp_hwrate_limit_table *rte = NULL;
	uint64_t previous_rate = 0;

	if ((rs->rs_flags & RS_INT_TBL) &&
	    (rs->rs_rate_cnt >= ALL_HARDWARE_RATES)) {
		/*
		 * Here we don't want to paw through
		 * a big table, we have everything
		 * from 1Meg - 1000Meg in 1Meg increments.
		 * Use an alternate method to "lookup".
		 */
		return (tcp_int_find_suitable_rate(rs, bytes_per_sec, flags, lower_rate));
	}
	if ((flags & RS_PACING_LT) ||
	    (flags & RS_PACING_EXACT_MATCH)) {
		/*
		 * For exact and less than we go forward through the table.
		 * This way when we find one larger we stop (exact was a
		 * toss up).
		 */
		for (i = rs->rs_lowest_valid, matched = 0; i <= rs->rs_highest_valid; i++) {
			if ((flags & RS_PACING_EXACT_MATCH) &&
			    (bytes_per_sec == rs->rs_rlt[i].rate)) {
				rte = &rs->rs_rlt[i];
				matched = 1;
				if (lower_rate != NULL)
					*lower_rate = previous_rate;
				break;
			} else if ((flags & RS_PACING_LT) &&
			    (bytes_per_sec <= rs->rs_rlt[i].rate)) {
				rte = &rs->rs_rlt[i];
				matched = 1;
				if (lower_rate != NULL)
					*lower_rate = previous_rate;
				break;
			}
			previous_rate = rs->rs_rlt[i].rate;
			if (bytes_per_sec > rs->rs_rlt[i].rate)
				break;
		}
		if ((matched == 0) &&
		    (flags & RS_PACING_LT) &&
		    (flags & RS_PACING_SUB_OK)) {
			/* Kick in a substitute (the lowest) */
			rte = &rs->rs_rlt[rs->rs_lowest_valid];
		}
	} else {
		/*
		 * Here we go backward through the table so that we can find
		 * the one greater in theory faster (but it's probably a
		 * wash).
		 */
		for (i = rs->rs_highest_valid, matched = 0; i >= rs->rs_lowest_valid; i--) {
			if (rs->rs_rlt[i].rate > bytes_per_sec) {
				/* A possible candidate */
				rte = &rs->rs_rlt[i];
			}
			if ((flags & RS_PACING_GEQ) &&
			    (bytes_per_sec == rs->rs_rlt[i].rate)) {
				/* An exact match and we want equal */
				matched = 1;
				rte = &rs->rs_rlt[i];
				break;
			} else if (rte) {
				/*
				 * Found one that is larger than, but don't
				 * stop, there may be a closer match.
				 */
				matched = 1;
			}
			if (rs->rs_rlt[i].rate < bytes_per_sec) {
				/*
				 * We found a table entry that is smaller;
				 * stop, there will be none greater or equal.
				 */
				if (lower_rate != NULL)
					*lower_rate = rs->rs_rlt[i].rate;
				break;
			}
		}
		if ((matched == 0) &&
		    (flags & RS_PACING_SUB_OK)) {
			/* Kick in a substitute (the highest) */
			rte = &rs->rs_rlt[rs->rs_highest_valid];
		}
	}
	return (rte);
}
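/*
 * A hedged sketch of how a caller might drive the finder above (the
 * flags are the ones documented in the function; the rate set would
 * come from find_rs_for_ifp() below, under the net epoch):
 *
 *	const struct tcp_hwrate_limit_table *rte;
 *	uint64_t lower = 0;
 *
 *	// Prefer a rate >= 2,000,000 bytes/sec, but accept the
 *	// closest substitute if nothing qualifies.
 *	rte = tcp_find_suitable_rate(rs, 2000000,
 *	    RS_PACING_GEQ | RS_PACING_SUB_OK, &lower);
 *
 * On success rte->rate is the selected hardware rate and lower is
 * the next slower table rate (0 if there is none).
 */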
static struct ifnet *
rt_find_real_interface(struct ifnet *ifp, struct inpcb *inp, int *error)
{
	struct ifnet *tifp;
	struct m_snd_tag *tag, *ntag;
	union if_snd_tag_alloc_params params = {
		.rate_limit.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT,
		.rate_limit.hdr.flowid = inp->inp_flowid,
		.rate_limit.hdr.numa_domain = inp->inp_numa_domain,
		.rate_limit.max_rate = COMMON_RATE,
		.rate_limit.flags = M_NOWAIT,
	};
	int err;
#ifdef RSS
	params.rate_limit.hdr.flowtype = ((inp->inp_vflag & INP_IPV6) ?
	    M_HASHTYPE_RSS_TCP_IPV6 : M_HASHTYPE_RSS_TCP_IPV4);
#else
	params.rate_limit.hdr.flowtype = M_HASHTYPE_OPAQUE_HASH;
#endif
	err = m_snd_tag_alloc(ifp, &params, &tag);
	if (err) {
		/* Failed to setup a tag? */
		if (error)
			*error = err;
		return (NULL);
	}
	ntag = tag;
	while (ntag->sw->next_snd_tag != NULL) {
		ntag = ntag->sw->next_snd_tag(ntag);
	}
	tifp = ntag->ifp;
	m_snd_tag_rele(tag);
	return (tifp);
}

static void
rl_increment_using(const struct tcp_hwrate_limit_table *rte)
{
	struct tcp_hwrate_limit_table *decon_rte;

	decon_rte = __DECONST(struct tcp_hwrate_limit_table *, rte);
	atomic_add_long(&decon_rte->using, 1);
}

static void
rl_decrement_using(const struct tcp_hwrate_limit_table *rte)
{
	struct tcp_hwrate_limit_table *decon_rte;

	decon_rte = __DECONST(struct tcp_hwrate_limit_table *, rte);
	atomic_subtract_long(&decon_rte->using, 1);
}

void
tcp_rl_log_enobuf(const struct tcp_hwrate_limit_table *rte)
{
	struct tcp_hwrate_limit_table *decon_rte;

	decon_rte = __DECONST(struct tcp_hwrate_limit_table *, rte);
	atomic_add_long(&decon_rte->rs_num_enobufs, 1);
}

/*
 * Do NOT take the __noinline out of the
 * find_rs_for_ifp() function. If you inline
 * it into rt_setup_rate() you will trigger a
 * compiler bug. For some reason the compiler thinks
 * the list can never be empty. The consequence of
 * this will be a crash when we dereference NULL
 * if an ifp is removed just as a hw rate limit
 * is attempted. If you are working on the compiler
 * and want to "test" this go ahead and take the noinline
 * out; otherwise let sleeping dogs lie until such time
 * as we get a compiler fix 10/2/20 -- RRS
 */
static __noinline struct tcp_rate_set *
find_rs_for_ifp(struct ifnet *ifp)
{
	struct tcp_rate_set *rs;

	CK_LIST_FOREACH(rs, &int_rs, next) {
		if ((rs->rs_ifp == ifp) &&
		    (rs->rs_if_dunit == ifp->if_dunit)) {
			/* Ok we found it */
			return (rs);
		}
	}
	return (NULL);
}

static const struct tcp_hwrate_limit_table *
rt_setup_rate(struct inpcb *inp, struct ifnet *ifp, uint64_t bytes_per_sec,
    uint32_t flags, int *error, uint64_t *lower_rate)
{
	/* First lets find the interface if it exists */
	const struct tcp_hwrate_limit_table *rte;
	/*
	 * So why is rs volatile? This is to defeat a
	 * compiler bug wherein the compiler is convinced
	 * that rs can never be NULL (which is not true). Because
	 * of its conviction it nicely optimizes out the (rs == NULL)
	 * check below, which means if you get a NULL back you dereference it.
	 */
	volatile struct tcp_rate_set *rs;
	struct epoch_tracker et;
	struct ifnet *oifp = ifp;
	int err;

	NET_EPOCH_ENTER(et);
use_real_interface:
	rs = find_rs_for_ifp(ifp);
	if ((rs == NULL) ||
	    (rs->rs_flags & RS_INTF_NO_SUP) ||
	    (rs->rs_flags & RS_IS_DEAD)) {
		/*
		 * This means we got a packet *before*
		 * the IF-UP was processed below, <or>
		 * while or after we already received an interface
		 * departed event. In either case we really don't
		 * want to do anything with pacing; in
		 * the departing case the packet is not
		 * going to go very far. The new case
		 * might be arguable, but it's impossible
		 * to tell from the departing case.
		 */
		if (error)
			*error = ENODEV;
		NET_EPOCH_EXIT(et);
		return (NULL);
	}

	if ((rs == NULL) || (rs->rs_disable != 0)) {
		if (error)
			*error = ENOSPC;
		NET_EPOCH_EXIT(et);
		return (NULL);
	}
	if (rs->rs_flags & RS_IS_DEFF) {
		/* We need to find the real interface */
		struct ifnet *tifp;

		tifp = rt_find_real_interface(ifp, inp, error);
		if (tifp == NULL) {
			if (rs->rs_disable && error)
				*error = ENOTSUP;
			NET_EPOCH_EXIT(et);
			return (NULL);
		}
		KASSERT((tifp != ifp),
		    ("Lookup failure ifp:%p inp:%p rt_find_real_interface() returns the same interface tifp:%p?\n",
		     ifp, inp, tifp));
		ifp = tifp;
		goto use_real_interface;
	}
	if (rs->rs_flow_limit &&
	    ((rs->rs_flows_using + 1) > rs->rs_flow_limit)) {
		if (error)
			*error = ENOSPC;
		NET_EPOCH_EXIT(et);
		return (NULL);
	}
	rte = tcp_find_suitable_rate(rs, bytes_per_sec, flags, lower_rate);
	if (rte) {
		err = in_pcbattach_txrtlmt(inp, oifp,
		    inp->inp_flowtype,
		    inp->inp_flowid,
		    rte->rate,
		    &inp->inp_snd_tag);
		if (err) {
			/* Failed to attach */
			if (error)
				*error = err;
			rte = NULL;
		} else {
			KASSERT((inp->inp_snd_tag != NULL),
			    ("Setup rate has no snd_tag inp:%p rte:%p rate:%llu rs:%p",
			     inp, rte, (unsigned long long)rte->rate, rs));
#ifdef INET
			counter_u64_add(rate_limit_new, 1);
#endif
		}
	}
	if (rte) {
		/*
		 * We use an atomic here for accounting so we don't have to
		 * use locks when freeing.
		 */
		atomic_add_64(&rs->rs_flows_using, 1);
	}
	NET_EPOCH_EXIT(et);
	return (rte);
}
static void
tcp_rl_ifnet_link(void *arg __unused, struct ifnet *ifp, int link_state)
{
	int error;
	struct tcp_rate_set *rs;
	struct epoch_tracker et;

	if (((ifp->if_capenable & IFCAP_TXRTLMT) == 0) ||
	    (link_state != LINK_STATE_UP)) {
		/*
		 * We only care about an interface going up that is
		 * rate-limit capable.
		 */
		return;
	}
	NET_EPOCH_ENTER(et);
	mtx_lock(&rs_mtx);
	rs = find_rs_for_ifp(ifp);
	if (rs) {
		/* We already have initialized this guy */
		mtx_unlock(&rs_mtx);
		NET_EPOCH_EXIT(et);
		return;
	}
	mtx_unlock(&rs_mtx);
	NET_EPOCH_EXIT(et);
	rt_setup_new_rs(ifp, &error);
}

static void
tcp_rl_ifnet_departure(void *arg __unused, struct ifnet *ifp)
{
	struct tcp_rate_set *rs;
	struct epoch_tracker et;
	int i;

	NET_EPOCH_ENTER(et);
	mtx_lock(&rs_mtx);
	rs = find_rs_for_ifp(ifp);
	if (rs) {
		CK_LIST_REMOVE(rs, next);
		rs_number_alive--;
		rs->rs_flags |= RS_IS_DEAD;
		for (i = 0; i < rs->rs_rate_cnt; i++) {
			if (rs->rs_rlt[i].flags & HDWRPACE_TAGPRESENT) {
				in_pcbdetach_tag(rs->rs_rlt[i].tag);
				rs->rs_rlt[i].tag = NULL;
			}
			rs->rs_rlt[i].flags = HDWRPACE_IFPDEPARTED;
		}
		if (rs->rs_flows_using == 0)
			rs_defer_destroy(rs);
	}
	mtx_unlock(&rs_mtx);
	NET_EPOCH_EXIT(et);
}

static void
tcp_rl_shutdown(void *arg __unused, int howto __unused)
{
	struct tcp_rate_set *rs, *nrs;
	struct epoch_tracker et;
	int i;

	NET_EPOCH_ENTER(et);
	mtx_lock(&rs_mtx);
	CK_LIST_FOREACH_SAFE(rs, &int_rs, next, nrs) {
		CK_LIST_REMOVE(rs, next);
		rs_number_alive--;
		rs->rs_flags |= RS_IS_DEAD;
		for (i = 0; i < rs->rs_rate_cnt; i++) {
			if (rs->rs_rlt[i].flags & HDWRPACE_TAGPRESENT) {
				in_pcbdetach_tag(rs->rs_rlt[i].tag);
				rs->rs_rlt[i].tag = NULL;
			}
			rs->rs_rlt[i].flags = HDWRPACE_IFPDEPARTED;
		}
		if (rs->rs_flows_using == 0)
			rs_defer_destroy(rs);
	}
	mtx_unlock(&rs_mtx);
	NET_EPOCH_EXIT(et);
}
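/*
 * A hedged sketch of how a pacing TCP stack typically drives the
 * three exported entry points below (the rate values and flag
 * combination are illustrative only; callers hold the inp write
 * lock):
 *
 *	const struct tcp_hwrate_limit_table *crte;
 *	uint64_t lower;
 *	int err;
 *
 *	crte = tcp_set_pacing_rate(tp, ifp, bytes_per_sec,
 *	    RS_PACING_GEQ | RS_PACING_SUB_OK, &err, &lower);
 *	...
 *	crte = tcp_chg_pacing_rate(crte, tp, ifp, new_bytes_per_sec,
 *	    RS_PACING_GEQ | RS_PACING_SUB_OK, &err, &lower);
 *	...
 *	tcp_rel_pacing_rate(crte, tp);
 */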
const struct tcp_hwrate_limit_table *
tcp_set_pacing_rate(struct tcpcb *tp, struct ifnet *ifp,
    uint64_t bytes_per_sec, int flags, int *error, uint64_t *lower_rate)
{
	struct inpcb *inp = tptoinpcb(tp);
	const struct tcp_hwrate_limit_table *rte;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif

	INP_WLOCK_ASSERT(inp);

	if (inp->inp_snd_tag == NULL) {
		/*
		 * We are setting up a rate for the first time.
		 */
		if ((ifp->if_capenable & IFCAP_TXRTLMT) == 0) {
			/* Not supported by the egress */
			if (error)
				*error = ENODEV;
			return (NULL);
		}
#ifdef KERN_TLS
		tls = NULL;
		if (tp->t_nic_ktls_xmit != 0) {
			tls = tptosocket(tp)->so_snd.sb_tls_info;

			if ((ifp->if_capenable & IFCAP_TXTLS_RTLMT) == 0 ||
			    tls->mode != TCP_TLS_MODE_IFNET) {
				if (error)
					*error = ENODEV;
				return (NULL);
			}
		}
#endif
		rte = rt_setup_rate(inp, ifp, bytes_per_sec, flags, error, lower_rate);
		if (rte)
			rl_increment_using(rte);
#ifdef KERN_TLS
		if (rte != NULL && tls != NULL && tls->snd_tag != NULL) {
			/*
			 * Fake a route change error to reset the TLS
			 * send tag. This will convert the existing
			 * tag to a TLS ratelimit tag.
			 */
			MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS);
			ktls_output_eagain(inp, tls);
		}
#endif
	} else {
		/*
		 * We are modifying a rate, wrong interface?
		 */
		if (error)
			*error = EINVAL;
		rte = NULL;
	}
	if (rte != NULL) {
		tp->t_pacing_rate = rte->rate;
		*error = 0;
	}
	return (rte);
}
const struct tcp_hwrate_limit_table *
tcp_chg_pacing_rate(const struct tcp_hwrate_limit_table *crte,
    struct tcpcb *tp, struct ifnet *ifp,
    uint64_t bytes_per_sec, int flags, int *error, uint64_t *lower_rate)
{
	struct inpcb *inp = tptoinpcb(tp);
	const struct tcp_hwrate_limit_table *nrte;
	const struct tcp_rate_set *rs;
#ifdef KERN_TLS
	struct ktls_session *tls = NULL;
#endif
	int err;

	INP_WLOCK_ASSERT(inp);

	if (crte == NULL) {
		/* Wrong interface */
		if (error)
			*error = EINVAL;
		return (NULL);
	}

#ifdef KERN_TLS
	if (tp->t_nic_ktls_xmit) {
		tls = tptosocket(tp)->so_snd.sb_tls_info;
		if (tls->mode != TCP_TLS_MODE_IFNET)
			tls = NULL;
		else if (tls->snd_tag != NULL &&
		    tls->snd_tag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT) {
			if (!tls->reset_pending) {
				/*
				 * NIC probably doesn't support
				 * ratelimit TLS tags if it didn't
				 * allocate one when an existing rate
				 * was present, so ignore.
				 */
				tcp_rel_pacing_rate(crte, tp);
				if (error)
					*error = EOPNOTSUPP;
				return (NULL);
			}

			/*
			 * The send tag is being converted, so set the
			 * rate limit on the inpcb tag. There is a
			 * race that the new NIC send tag might use
			 * the current rate instead of this one.
			 */
			tls = NULL;
		}
	}
#endif
	if (inp->inp_snd_tag == NULL) {
		/* Wrong interface */
		tcp_rel_pacing_rate(crte, tp);
		if (error)
			*error = EINVAL;
		return (NULL);
	}
	rs = crte->ptbl;
	if ((rs->rs_flags & RS_IS_DEAD) ||
	    (crte->flags & HDWRPACE_IFPDEPARTED)) {
		/* Release the rate, and try anew */

		tcp_rel_pacing_rate(crte, tp);
		nrte = tcp_set_pacing_rate(tp, ifp,
		    bytes_per_sec, flags, error, lower_rate);
		return (nrte);
	}
	nrte = tcp_find_suitable_rate(rs, bytes_per_sec, flags, lower_rate);
	if (nrte == crte) {
		/* No change */
		if (error)
			*error = 0;
		return (crte);
	}
	if (nrte == NULL) {
		/* Release the old rate */
		if (error)
			*error = ENOENT;
		tcp_rel_pacing_rate(crte, tp);
		return (NULL);
	}
	rl_decrement_using(crte);
	rl_increment_using(nrte);
	/* Change rates to our new entry */
#ifdef KERN_TLS
	if (tls != NULL)
		err = ktls_modify_txrtlmt(tls, nrte->rate);
	else
#endif
		err = in_pcbmodify_txrtlmt(inp, nrte->rate);
	if (err) {
		struct tcp_rate_set *lrs;
		uint64_t pre;

		rl_decrement_using(nrte);
		lrs = __DECONST(struct tcp_rate_set *, rs);
		pre = atomic_fetchadd_64(&lrs->rs_flows_using, -1);
		/* Do we still have a snd-tag attached? */
		if (inp->inp_snd_tag)
			in_pcbdetach_txrtlmt(inp);

		if (pre == 1) {
			struct epoch_tracker et;

			NET_EPOCH_ENTER(et);
			mtx_lock(&rs_mtx);
			/*
			 * Is it dead?
			 */
			if (lrs->rs_flags & RS_IS_DEAD)
				rs_defer_destroy(lrs);
			mtx_unlock(&rs_mtx);
			NET_EPOCH_EXIT(et);
		}
		if (error)
			*error = err;
		return (NULL);
	} else {
#ifdef INET
		counter_u64_add(rate_limit_chg, 1);
#endif
	}
	if (error)
		*error = 0;
	tp->t_pacing_rate = nrte->rate;
	return (nrte);
}

void
tcp_rel_pacing_rate(const struct tcp_hwrate_limit_table *crte, struct tcpcb *tp)
{
	struct inpcb *inp = tptoinpcb(tp);
	const struct tcp_rate_set *crs;
	struct tcp_rate_set *rs;
	uint64_t pre;

	INP_WLOCK_ASSERT(inp);

	tp->t_pacing_rate = -1;
	crs = crte->ptbl;
	/*
	 * Now we must break the const
	 * in order to release our refcount.
	 */
	rs = __DECONST(struct tcp_rate_set *, crs);
	rl_decrement_using(crte);
	pre = atomic_fetchadd_64(&rs->rs_flows_using, -1);
	if (pre == 1) {
		struct epoch_tracker et;

		NET_EPOCH_ENTER(et);
		mtx_lock(&rs_mtx);
		/*
		 * Is it dead?
		 */
		if (rs->rs_flags & RS_IS_DEAD)
			rs_defer_destroy(rs);
		mtx_unlock(&rs_mtx);
		NET_EPOCH_EXIT(et);
	}

	/*
	 * XXX: If this connection is using ifnet TLS, should we
	 * switch it to using an unlimited rate, or perhaps use
	 * ktls_output_eagain() to reset the send tag to a plain
	 * TLS tag?
	 */
	in_pcbdetach_txrtlmt(inp);
}

#define ONE_POINT_TWO_MEG 150000	/* 1.2 megabits per second in bytes */
#define ONE_HUNDRED_MBPS 12500000	/* 100Mbps in bytes per second */
#define FIVE_HUNDRED_MBPS 62500000	/* 500Mbps in bytes per second */
#define MAX_MSS_SENT 43			/* 43 mss = 43 x 1500 = 64,500 bytes */

static void
tcp_log_pacing_size(struct tcpcb *tp, uint64_t bw, uint32_t segsiz, uint32_t new_tso,
    uint64_t hw_rate, uint32_t time_between, uint32_t calc_time_between,
    uint32_t segs, uint32_t res_div, uint16_t mult, uint8_t mod)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log, 0, sizeof(log));
		log.u_bbr.flex1 = segsiz;
		log.u_bbr.flex2 = new_tso;
		log.u_bbr.flex3 = time_between;
		log.u_bbr.flex4 = calc_time_between;
		log.u_bbr.flex5 = segs;
		log.u_bbr.flex6 = res_div;
		log.u_bbr.flex7 = mult;
		log.u_bbr.flex8 = mod;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.cur_del_rate = bw;
		log.u_bbr.delRate = hw_rate;
		TCP_LOG_EVENTP(tp, NULL,
		    &tptosocket(tp)->so_rcv,
		    &tptosocket(tp)->so_snd,
		    TCP_HDWR_PACE_SIZE, 0,
		    0, &log, false, &tv);
	}
}

uint32_t
tcp_get_pacing_burst_size_w_divisor(struct tcpcb *tp, uint64_t bw, uint32_t segsiz, int can_use_1mss,
    const struct tcp_hwrate_limit_table *te, int *err, int divisor)
{
	/*
	 * We use the google formula to calculate the
	 * TSO size. I.E.
	 *	bw < 24Meg
	 *	   tso = 2mss
	 *	else
	 *	   tso = min(bw/(div=1000), 64k)
	 *
	 * Note for these calculations we ignore the
	 * packet overhead (enet hdr, ip hdr and tcp hdr).
	 * We only get the google formula when we have
	 * divisor = 1000, which is the default for now.
	 */
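	/*
	 * A worked instance of the formula (a sketch; the default
	 * divisor of 1000 and a 1460 byte segsiz are assumed):
	 *
	 *	bw      = 12,500,000 bytes/sec (100Mbps)
	 *	bytes   = 12,500,000 / 1000 = 12,500
	 *	new_tso = (12,500 + 1460 - 1) / 1460 = 9 segments
	 *
	 * 9 is odd and above even_threshold (4), so with even_num_segs
	 * set it is rounded up to 10 segments, giving a burst of
	 * 10 * 1460 = 14,600 bytes.
	 */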
	uint64_t lentim, res, bytes;
	uint32_t new_tso, min_tso_segs;

	/* The divisor can't be zero or below our minimum */
	if ((divisor == 0) ||
	    (divisor < RL_MIN_DIVISOR)) {
		if (mss_divisor)
			bytes = bw / mss_divisor;
		else
			bytes = bw / 1000;
	} else
		bytes = bw / divisor;
	/* We can't ever send more than 65k in a TSO */
	if (bytes > 0xffff) {
		bytes = 0xffff;
	}
	/* Round up */
	new_tso = (bytes + segsiz - 1) / segsiz;
	/* Are we enforcing even boundaries? */
	if (even_num_segs && (new_tso & 1) && (new_tso > even_threshold))
		new_tso++;
	if (can_use_1mss)
		min_tso_segs = 1;
	else
		min_tso_segs = 2;
	if (rs_floor_mss && (new_tso < rs_floor_mss))
		new_tso = rs_floor_mss;
	else if (new_tso < min_tso_segs)
		new_tso = min_tso_segs;
	if (new_tso > MAX_MSS_SENT)
		new_tso = MAX_MSS_SENT;
	new_tso *= segsiz;
	tcp_log_pacing_size(tp, bw, segsiz, new_tso,
	    0, 0, 0, 0, 0, 0, 1);
	/*
	 * If we are not doing hardware pacing
	 * then we are done.
	 */
	if (te == NULL) {
		if (err)
			*err = 0;
		return (new_tso);
	}
	/*
	 * For hardware pacing we look at the
	 * rate you are sending at and compare
	 * that to the rate you have in hardware.
	 *
	 * If the hardware rate is slower than your
	 * software rate then you are in error and
	 * we will build a queue in our hardware which
	 * is probably not desired, in such a case
	 * just return the non-hardware TSO size.
	 *
	 * If the rate in hardware is faster (which
	 * it should be) then look at how long it
	 * takes to send one ethernet segment size at
	 * your b/w and compare that to the time it
	 * takes to send at the rate you had selected.
	 *
	 * If your time is greater (which we hope it is)
	 * we get the delta between the two, and then
	 * divide that into your pacing time. This tells
	 * us how many MSS you can send down at once (rounded up).
	 *
	 * Note if the rate is 500Mbps or more we just
	 * send the maximum MSS at once, i.e. MAX_MSS_SENT
	 * (43 segments).
	 */
	if (te->rate > FIVE_HUNDRED_MBPS)
		goto max;
	if (te->rate == bw) {
		/* We are pacing at exactly the hdwr rate */
max:
		tcp_log_pacing_size(tp, bw, segsiz, new_tso,
		    te->rate, te->time_between, (uint32_t)0,
		    (segsiz * MAX_MSS_SENT), 0, 0, 3);
		return (segsiz * MAX_MSS_SENT);
	}
	lentim = ETHERNET_SEGMENT_SIZE * USECS_IN_SECOND;
	res = lentim / bw;
	if (res > te->time_between) {
		uint32_t delta, segs, res_div;

		res_div = ((res * num_of_waits_allowed) + wait_time_floor);
		delta = res - te->time_between;
		segs = (res_div + delta - 1) / delta;
		if (segs < min_tso_segs)
			segs = min_tso_segs;
		if (segs < rs_hw_floor_mss)
			segs = rs_hw_floor_mss;
		if (segs > MAX_MSS_SENT)
			segs = MAX_MSS_SENT;
		segs *= segsiz;
		tcp_log_pacing_size(tp, bw, segsiz, new_tso,
		    te->rate, te->time_between, (uint32_t)res,
		    segs, res_div, 1, 3);
		if (err)
			*err = 0;
		if (segs < new_tso) {
			/* unexpected ? */
			return (new_tso);
		} else {
			return (segs);
		}
	} else {
		/*
		 * Your time is smaller which means
		 * we will grow a queue on our
		 * hardware. Send back the non-hardware
		 * rate.
		 */
		tcp_log_pacing_size(tp, bw, segsiz, new_tso,
		    te->rate, te->time_between, (uint32_t)res,
		    0, 0, 0, 4);
		if (err)
			*err = -1;
		return (new_tso);
	}
}
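/*
 * The hardware burst math above, restated as one formula (a sketch
 * using the defaults wait_time_floor = 8000 usecs and
 * num_of_waits_allowed = 1):
 *
 *	res   = (1514 * 1000000) / bw      (usecs/frame at the sw rate)
 *	delta = res - te->time_between     (slack gained per frame)
 *	segs  = ceil((res * 1 + 8000) / delta)
 *
 * i.e. the burst is sized so the accumulated slack covers one
 * software "time block" plus the wait floor before the hardware
 * runs dry, then clamped between rs_hw_floor_mss and MAX_MSS_SENT.
 */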
uint64_t
tcp_hw_highest_rate_ifp(struct ifnet *ifp, struct inpcb *inp)
{
	struct epoch_tracker et;
	struct tcp_rate_set *rs;
	uint64_t rate_ret;

	NET_EPOCH_ENTER(et);
use_next_interface:
	rs = find_rs_for_ifp(ifp);
	if (rs == NULL) {
		/* This interface does not do ratelimiting */
		rate_ret = 0;
	} else if (rs->rs_flags & RS_IS_DEFF) {
		/* We need to find the real interface */
		struct ifnet *tifp;

		tifp = rt_find_real_interface(ifp, inp, NULL);
		if (tifp == NULL) {
			NET_EPOCH_EXIT(et);
			return (0);
		}
		ifp = tifp;
		goto use_next_interface;
	} else {
		/* Lets return the highest rate this guy has */
		rate_ret = rs->rs_rlt[rs->rs_highest_valid].rate;
	}
	NET_EPOCH_EXIT(et);
	return (rate_ret);
}

static eventhandler_tag rl_ifnet_departs;
static eventhandler_tag rl_ifnet_arrives;
static eventhandler_tag rl_shutdown_start;

static void
tcp_rs_init(void *st __unused)
{
	CK_LIST_INIT(&int_rs);
	rs_number_alive = 0;
	rs_number_dead = 0;
	mtx_init(&rs_mtx, "tcp_rs_mtx", "rsmtx", MTX_DEF);
	rl_ifnet_departs = EVENTHANDLER_REGISTER(ifnet_departure_event,
	    tcp_rl_ifnet_departure,
	    NULL, EVENTHANDLER_PRI_ANY);
	rl_ifnet_arrives = EVENTHANDLER_REGISTER(ifnet_link_event,
	    tcp_rl_ifnet_link,
	    NULL, EVENTHANDLER_PRI_ANY);
	rl_shutdown_start = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    tcp_rl_shutdown, NULL,
	    SHUTDOWN_PRI_FIRST);
	printf("TCP_ratelimit: Is now initialized\n");
}

SYSINIT(tcp_rl_init, SI_SUB_SMP + 1, SI_ORDER_ANY, tcp_rs_init, NULL);
#endif