/*-
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#ifdef RATELIMIT

static int mlx5e_rl_open_workers(struct mlx5e_priv *);
static void mlx5e_rl_close_workers(struct mlx5e_priv *);
static int mlx5e_rl_sysctl_show_rate_table(SYSCTL_HANDLER_ARGS);
static void mlx5e_rl_sysctl_add_u64_oid(struct mlx5e_rl_priv_data *, unsigned x,
    struct sysctl_oid *, const char *name, const char *desc);
static void mlx5e_rl_sysctl_add_stats_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x,
    struct sysctl_oid *node, const char *name, const char *desc);
static int mlx5e_rl_tx_limit_add(struct mlx5e_rl_priv_data *, uint64_t value);
static int mlx5e_rl_tx_limit_clr(struct mlx5e_rl_priv_data *, uint64_t value);

static void
mlx5e_rl_build_sq_param(struct mlx5e_rl_priv_data *rl,
    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	uint8_t log_sq_size = order_base_2(rl->param.tx_queue_size);

	MLX5_SET(wq, wq, log_wq_sz, log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, rl->priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}

static void
mlx5e_rl_build_cq_param(struct mlx5e_rl_priv_data *rl,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	uint8_t log_sq_size = order_base_2(rl->param.tx_queue_size);

	MLX5_SET(cqc, cqc, log_cq_size, log_sq_size);
	MLX5_SET(cqc, cqc, cq_period, rl->param.tx_coalesce_usecs);
	MLX5_SET(cqc, cqc, cq_max_count, rl->param.tx_coalesce_pkts);

	switch (rl->param.tx_coalesce_mode) {
	case 0:
		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	default:
		if (MLX5_CAP_GEN(rl->priv->mdev, cq_period_start_from_cqe))
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
		else
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	}
}

static void
mlx5e_rl_build_channel_param(struct mlx5e_rl_priv_data *rl,
    struct mlx5e_rl_channel_param *cparam)
{
	memset(cparam, 0, sizeof(*cparam));

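	/*
	 * Derive both the send queue and the completion queue
	 * parameters from the current ratelimit configuration:
	 */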
	mlx5e_rl_build_sq_param(rl, &cparam->sq);
	mlx5e_rl_build_cq_param(rl, &cparam->cq);
}

static int
mlx5e_rl_create_sq(struct mlx5e_priv *priv, struct mlx5e_sq *sq,
    struct mlx5e_sq_param *param, int ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	/* use shared UAR */
	sq->uar = priv->rl.sq_uar;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	/*
	 * The sq->bf_buf_size variable is intentionally left zero so
	 * that the doorbell writes will occur at the same memory
	 * location.
	 */

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = cpu_to_be32(priv->mr.key);
	sq->ifp = priv->ifp;
	sq->priv = priv;

	mlx5e_update_sq_inline(sq);

	return (0);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}

static void
mlx5e_rl_destroy_sq(struct mlx5e_sq *sq)
{

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static int
mlx5e_rl_open_sq(struct mlx5e_priv *priv, struct mlx5e_sq *sq,
    struct mlx5e_sq_param *param, int ix)
{
	int err;

	err = mlx5e_rl_create_sq(priv, sq, param, ix);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, priv->rl.tisn);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	WRITE_ONCE(sq->running, 1);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_rl_destroy_sq(sq);

	return (err);
}

static void
mlx5e_rl_chan_mtx_init(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
{
	mtx_init(&sq->lock, "mlx5tx-rl", NULL, MTX_DEF);
	mtx_init(&sq->comp_lock, "mlx5comp-rl", NULL, MTX_DEF);

	callout_init_mtx(&sq->cev_callout, &sq->lock, 0);

	sq->cev_factor = priv->rl.param.tx_completion_fact;

	/* ensure the TX completion event factor is not zero */
	if (sq->cev_factor == 0)
		sq->cev_factor = 1;
}

static int
mlx5e_rl_open_channel(struct mlx5e_rl_worker *rlw, int eq_ix,
    struct mlx5e_rl_channel_param *cparam,
    struct mlx5e_sq *volatile *ppsq)
{
	struct mlx5e_priv *priv = rlw->priv;
	struct mlx5e_sq *sq;
	int err;

	sq = malloc(sizeof(*sq), M_MLX5EN, M_WAITOK | M_ZERO);

	/* init mutexes */
	mlx5e_rl_chan_mtx_init(priv, sq);

	/* open TX completion queue */
	err = mlx5e_open_cq(priv, &cparam->cq, &sq->cq,
	    &mlx5e_tx_cq_comp, eq_ix);
	if (err)
		goto err_free;

	err = mlx5e_rl_open_sq(priv, sq, &cparam->sq, eq_ix);
	if (err)
		goto err_close_tx_cq;

	/* store TX channel pointer */
	*ppsq = sq;
	/* poll TX queue initially */
	sq->cq.mcq.comp(&sq->cq.mcq);

	return (0);

err_close_tx_cq:
	mlx5e_close_cq(&sq->cq);

err_free:
	/* destroy mutexes */
	mtx_destroy(&sq->lock);
	mtx_destroy(&sq->comp_lock);
	free(sq, M_MLX5EN);
	atomic_add_64(&priv->rl.stats.tx_allocate_resource_failure, 1ULL);
	return (err);
}

static void
mlx5e_rl_close_channel(struct mlx5e_sq *volatile *ppsq)
{
	struct mlx5e_sq *sq = *ppsq;

	/* check if channel is already closed */
	if (sq == NULL)
		return;
	/* ensure channel pointer is no longer used */
	*ppsq = NULL;

	/* teardown and destroy SQ */
	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_rl_destroy_sq(sq);

	/* close CQ */
	mlx5e_close_cq(&sq->cq);

	/* destroy mutexes */
	mtx_destroy(&sq->lock);
	mtx_destroy(&sq->comp_lock);

	free(sq, M_MLX5EN);
}

static void
mlx5e_rl_sync_tx_completion_fact(struct mlx5e_rl_priv_data *rl)
{
	/*
	 * Limit the maximum distance between completion events to
	 * half of the currently set TX queue size.
	 *
	 * The maximum number of queue entries a single IP packet can
	 * consume is given by MLX5_SEND_WQE_MAX_WQEBBS.
	 *
	 * The worst case max value is then given as below:
	 */
	uint64_t max = rl->param.tx_queue_size /
	    (2 * MLX5_SEND_WQE_MAX_WQEBBS);

	/*
	 * Update the maximum completion factor value in case the
	 * tx_queue_size field changed. Ensure we don't overflow
	 * 16-bits.
	 */
	if (max < 1)
		max = 1;
	else if (max > 65535)
		max = 65535;
	rl->param.tx_completion_fact_max = max;

	/*
	 * Verify that the current TX completion factor is within the
	 * given limits:
	 */
	if (rl->param.tx_completion_fact < 1)
		rl->param.tx_completion_fact = 1;
	else if (rl->param.tx_completion_fact > max)
		rl->param.tx_completion_fact = max;
}

static int
mlx5e_rl_modify_sq(struct mlx5e_sq *sq, uint16_t rl_index)
{
	struct mlx5e_priv *priv = sq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RDY);
	MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
	MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);

	err = mlx5_core_modify_sq(mdev, in, inlen);

	kvfree(in);

	return (err);
}

/*
 * This function searches the configured rate limit table for the
 * best match, to prevent a single socket based application from
 * allocating all the available hardware rates. If the requested
 * rate deviates too much from the closest rate available in the
 * rate limit table, the unlimited rate will be selected.
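 *
 * For example, with the default "tx_allowed_deviation" of 50, in
 * units of 1/1000th (5.0%), a request for 100 Mbit/s only matches
 * a table entry lying within 100e6 * 50 / 1000 = 5 Mbit/s of the
 * request; otherwise zero is returned, which means unlimited.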
 */
static uint64_t
mlx5e_rl_find_best_rate_locked(struct mlx5e_rl_priv_data *rl, uint64_t user_rate)
{
	uint64_t distance = -1ULL;
	uint64_t diff;
	uint64_t retval = 0;		/* unlimited */
	uint64_t x;

	/* search for closest rate */
	for (x = 0; x != rl->param.tx_rates_def; x++) {
		uint64_t rate = rl->rate_limit_table[x];
		if (rate == 0)
			continue;

		if (rate > user_rate)
			diff = rate - user_rate;
		else
			diff = user_rate - rate;

		/* check if distance is smaller than the previous best */
		if (diff < distance) {
			distance = diff;
			retval = rate;
		}
	}

	/* range check for multiplication below */
	if (user_rate > rl->param.tx_limit_max)
		user_rate = rl->param.tx_limit_max;

	/* fallback to unlimited, if rate deviates too much */
	if (distance > howmany(user_rate *
	    rl->param.tx_allowed_deviation, 1000ULL))
		retval = 0;

	return (retval);
}

/*
 * This function sets the requested rate for a rate limit channel, in
 * bits per second. The requested rate will be filtered through the
 * find best rate function above.
 */
static int
mlx5e_rlw_channel_set_rate_locked(struct mlx5e_rl_worker *rlw,
    struct mlx5e_rl_channel *channel, uint64_t rate)
{
	struct mlx5e_rl_priv_data *rl = &rlw->priv->rl;
	struct mlx5e_sq *sq;
	uint64_t temp;
	uint16_t index;
	uint16_t burst;
	int error;

	if (rate != 0) {
		MLX5E_RL_WORKER_UNLOCK(rlw);

		MLX5E_RL_RLOCK(rl);

		/* get current burst size in bytes */
		temp = rl->param.tx_burst_size *
		    MLX5E_SW2HW_MTU(rlw->priv->ifp->if_mtu);

		/* limit burst size to 64K currently */
		if (temp > 65535)
			temp = 65535;
		burst = temp;

		/* find best rate */
		rate = mlx5e_rl_find_best_rate_locked(rl, rate);

		MLX5E_RL_RUNLOCK(rl);

		if (rate == 0) {
			/* rate doesn't exist, fallback to unlimited */
			index = 0;
			rate = 0;
			atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL);
		} else {
			/* get a reference on the new rate */
			error = -mlx5_rl_add_rate(rlw->priv->mdev,
			    howmany(rate, 1000), burst, &index);

			if (error != 0) {
				/* adding rate failed, fallback to unlimited */
				index = 0;
				rate = 0;
				atomic_add_64(&rlw->priv->rl.stats.tx_add_new_rate_failure, 1ULL);
			}
		}
		MLX5E_RL_WORKER_LOCK(rlw);
	} else {
		index = 0;
		burst = 0;	/* default */
	}

	/* atomically swap rates */
	temp = channel->last_rate;
	channel->last_rate = rate;
	rate = temp;

	/* atomically swap burst size */
	temp = channel->last_burst;
	channel->last_burst = burst;
	burst = temp;

	MLX5E_RL_WORKER_UNLOCK(rlw);
	/* put reference on the old rate, if any */
	if (rate != 0) {
		mlx5_rl_remove_rate(rlw->priv->mdev,
		    howmany(rate, 1000), burst);
	}

	/* set new rate, if SQ is running */
	sq = channel->sq;
	if (sq != NULL && READ_ONCE(sq->running) != 0) {
		error = mlx5e_rl_modify_sq(sq, index);
		if (error != 0)
			atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL);
	} else
		error = 0;
	MLX5E_RL_WORKER_LOCK(rlw);

	return (-error);
}

static void
mlx5e_rl_worker(void *arg)
{
	struct thread *td;
	struct mlx5e_rl_worker *rlw = arg;
	struct mlx5e_rl_channel *channel;
	struct mlx5e_priv *priv;
	unsigned ix;
	uint64_t x;
	int error;

	/* set thread priority */
	td = curthread;

	thread_lock(td);
	sched_prio(td, PI_SWI(SWI_NET));
	thread_unlock(td);

	priv = rlw->priv;

	/* compute completion vector */
	ix = (rlw - priv->rl.workers) %
	    priv->mdev->priv.eq_table.num_comp_vectors;

	/* TODO bind to CPU */

	/* open all the SQs */
	MLX5E_RL_WORKER_LOCK(rlw);
	for (x = 0; x < priv->rl.param.tx_channels_per_worker_def; x++) {
		struct mlx5e_rl_channel *channel = rlw->channels + x;

#if !defined(HAVE_RL_PRE_ALLOCATE_CHANNELS)
		if (channel->state == MLX5E_RL_ST_FREE)
			continue;
#endif
		MLX5E_RL_WORKER_UNLOCK(rlw);

		MLX5E_RL_RLOCK(&priv->rl);
		error = mlx5e_rl_open_channel(rlw, ix,
		    &priv->rl.chan_param, &channel->sq);
		MLX5E_RL_RUNLOCK(&priv->rl);

		MLX5E_RL_WORKER_LOCK(rlw);
		if (error != 0) {
			if_printf(priv->ifp,
			    "mlx5e_rl_open_channel failed: %d\n", error);
			break;
		}
		mlx5e_rlw_channel_set_rate_locked(rlw, channel, channel->init_rate);
	}
	while (1) {
		if (STAILQ_FIRST(&rlw->process_head) == NULL) {
			/* check if we are tearing down */
			if (rlw->worker_done != 0)
				break;
			cv_wait(&rlw->cv, &rlw->mtx);
		}
		/* check if we are tearing down */
		if (rlw->worker_done != 0)
			break;
		channel = STAILQ_FIRST(&rlw->process_head);
		if (channel != NULL) {
			STAILQ_REMOVE_HEAD(&rlw->process_head, entry);

			switch (channel->state) {
			case MLX5E_RL_ST_MODIFY:
				channel->state = MLX5E_RL_ST_USED;
				MLX5E_RL_WORKER_UNLOCK(rlw);

				/* create channel on demand */
				if (channel->sq == NULL) {
					MLX5E_RL_RLOCK(&priv->rl);
					error = mlx5e_rl_open_channel(rlw, ix,
					    &priv->rl.chan_param, &channel->sq);
					MLX5E_RL_RUNLOCK(&priv->rl);

					if (error != 0) {
						if_printf(priv->ifp,
						    "mlx5e_rl_open_channel failed: %d\n", error);
					} else {
						atomic_add_64(&rlw->priv->rl.stats.tx_open_queues, 1ULL);
					}
				} else {
					mlx5e_resume_sq(channel->sq);
				}

				MLX5E_RL_WORKER_LOCK(rlw);
				/* convert from bytes/s to bits/s and set new rate */
				error = mlx5e_rlw_channel_set_rate_locked(rlw, channel,
				    channel->new_rate * 8ULL);
				if (error != 0) {
					if_printf(priv->ifp,
					    "mlx5e_rlw_channel_set_rate_locked failed: %d\n",
					    error);
				}
				break;

			case MLX5E_RL_ST_DESTROY:
				error = mlx5e_rlw_channel_set_rate_locked(rlw, channel, 0);
				if (error != 0) {
					if_printf(priv->ifp,
					    "mlx5e_rlw_channel_set_rate_locked failed: %d\n",
					    error);
				}
				if (channel->sq != NULL) {
					/*
					 * Make sure all packets are
					 * transmitted before SQ is
					 * returned to free list:
					 */
					MLX5E_RL_WORKER_UNLOCK(rlw);
					mlx5e_drain_sq(channel->sq);
					MLX5E_RL_WORKER_LOCK(rlw);
				}
				/* put the channel back into the free list */
				STAILQ_INSERT_HEAD(&rlw->index_list_head, channel, entry);
				channel->state = MLX5E_RL_ST_FREE;
				atomic_add_64(&priv->rl.stats.tx_active_connections, -1ULL);
				break;
			default:
				/* NOP */
				break;
			}
		}
	}

	/* close all the SQs */
	for (x = 0; x < priv->rl.param.tx_channels_per_worker_def; x++) {
		struct mlx5e_rl_channel *channel = rlw->channels + x;

		/* update the initial rate */
		channel->init_rate = channel->last_rate;

		/* make sure we free up the rate resource */
		mlx5e_rlw_channel_set_rate_locked(rlw, channel, 0);

		if (channel->sq != NULL) {
			MLX5E_RL_WORKER_UNLOCK(rlw);
			mlx5e_rl_close_channel(&channel->sq);
			atomic_add_64(&rlw->priv->rl.stats.tx_open_queues, -1ULL);
			MLX5E_RL_WORKER_LOCK(rlw);
		}
	}

	rlw->worker_done = 0;
	cv_broadcast(&rlw->cv);
	MLX5E_RL_WORKER_UNLOCK(rlw);

	kthread_exit();
}

static int
mlx5e_rl_open_tis(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, prio, 0);
	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->rl.tisn));
}

static void
mlx5e_rl_close_tis(struct mlx5e_priv *priv)
{
	mlx5_core_destroy_tis(priv->mdev, priv->rl.tisn);
}

static void
mlx5e_rl_set_default_params(struct mlx5e_rl_params *param,
    struct mlx5_core_dev *mdev)
{
	/* ratelimit workers */
	param->tx_worker_threads_def = mdev->priv.eq_table.num_comp_vectors;
	param->tx_worker_threads_max = MLX5E_RL_MAX_WORKERS;

	/* range check */
	if (param->tx_worker_threads_def == 0 ||
	    param->tx_worker_threads_def > param->tx_worker_threads_max)
		param->tx_worker_threads_def = param->tx_worker_threads_max;

	/* ratelimit channels */
	param->tx_channels_per_worker_def = MLX5E_RL_MAX_SQS /
	    param->tx_worker_threads_def;
	param->tx_channels_per_worker_max = MLX5E_RL_MAX_SQS;

	/* range check */
	if (param->tx_channels_per_worker_def > MLX5E_RL_DEF_SQ_PER_WORKER)
		param->tx_channels_per_worker_def = MLX5E_RL_DEF_SQ_PER_WORKER;

	/* set default burst size */
	param->tx_burst_size = 4;	/* MTUs */

	/*
	 * Set maximum burst size
	 *
	 * The burst size is multiplied by the MTU and clamped to the
	 * range 0 ... 65535 bytes inclusively before being fed to the
	 * firmware.
	 *
	 * NOTE: If the burst size or MTU is changed, only ratelimit
	 * connections made after the change will use the new burst
	 * size.
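 *
 * For example, at an MTU of 1500 bytes the default burst size of
 * 4 MTUs yields roughly 4 * 1500 = 6000 bytes, plus some L2
 * header overhead added by MLX5E_SW2HW_MTU(), which is well
 * below the 65535 byte clamp.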
683 */ 684 param->tx_burst_size_max = 255; 685 686 /* get firmware rate limits in 1000bit/s and convert them to bit/s */ 687 param->tx_limit_min = mdev->priv.rl_table.min_rate * 1000ULL; 688 param->tx_limit_max = mdev->priv.rl_table.max_rate * 1000ULL; 689 690 /* ratelimit table size */ 691 param->tx_rates_max = mdev->priv.rl_table.max_size; 692 693 /* range check */ 694 if (param->tx_rates_max > MLX5E_RL_MAX_TX_RATES) 695 param->tx_rates_max = MLX5E_RL_MAX_TX_RATES; 696 697 /* set default number of rates */ 698 param->tx_rates_def = param->tx_rates_max; 699 700 /* set maximum allowed rate deviation */ 701 if (param->tx_limit_max != 0) { 702 /* 703 * Make sure the deviation multiplication doesn't 704 * overflow unsigned 64-bit: 705 */ 706 param->tx_allowed_deviation_max = -1ULL / 707 param->tx_limit_max; 708 } 709 /* set default rate deviation */ 710 param->tx_allowed_deviation = 50; /* 5.0% */ 711 712 /* channel parameters */ 713 param->tx_queue_size = (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); 714 param->tx_coalesce_usecs = MLX5E_RL_TX_COAL_USEC_DEFAULT; 715 param->tx_coalesce_pkts = MLX5E_RL_TX_COAL_PKTS_DEFAULT; 716 param->tx_coalesce_mode = MLX5E_RL_TX_COAL_MODE_DEFAULT; 717 param->tx_completion_fact = MLX5E_RL_TX_COMP_FACT_DEFAULT; 718 } 719 720 static const char *mlx5e_rl_params_desc[] = { 721 MLX5E_RL_PARAMS(MLX5E_STATS_DESC) 722 }; 723 724 static const char *mlx5e_rl_table_params_desc[] = { 725 MLX5E_RL_TABLE_PARAMS(MLX5E_STATS_DESC) 726 }; 727 728 static const char *mlx5e_rl_stats_desc[] = { 729 MLX5E_RL_STATS(MLX5E_STATS_DESC) 730 }; 731 732 int 733 mlx5e_rl_init(struct mlx5e_priv *priv) 734 { 735 struct mlx5e_rl_priv_data *rl = &priv->rl; 736 struct sysctl_oid *node; 737 struct sysctl_oid *stats; 738 char buf[64]; 739 uint64_t i; 740 uint64_t j; 741 int error; 742 743 /* check if there is support for packet pacing */ 744 if (!MLX5_CAP_GEN(priv->mdev, qos) || !MLX5_CAP_QOS(priv->mdev, packet_pacing)) 745 return (0); 746 747 rl->priv = priv; 748 749 sysctl_ctx_init(&rl->ctx); 750 751 sx_init(&rl->rl_sxlock, "ratelimit-sxlock"); 752 753 /* allocate shared UAR for SQs */ 754 error = mlx5_alloc_map_uar(priv->mdev, &rl->sq_uar); 755 if (error) 756 goto done; 757 758 /* open own TIS domain for ratelimit SQs */ 759 error = mlx5e_rl_open_tis(priv); 760 if (error) 761 goto err_uar; 762 763 /* setup default value for parameters */ 764 mlx5e_rl_set_default_params(&rl->param, priv->mdev); 765 766 /* update the completion factor */ 767 mlx5e_rl_sync_tx_completion_fact(rl); 768 769 /* create root node */ 770 node = SYSCTL_ADD_NODE(&rl->ctx, 771 SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, 772 "rate_limit", CTLFLAG_RW, NULL, "Rate limiting support"); 773 774 if (node != NULL) { 775 /* create SYSCTLs */ 776 for (i = 0; i != MLX5E_RL_PARAMS_NUM; i++) { 777 mlx5e_rl_sysctl_add_u64_oid(rl, 778 MLX5E_RL_PARAMS_INDEX(arg[i]), 779 node, mlx5e_rl_params_desc[2 * i], 780 mlx5e_rl_params_desc[2 * i + 1]); 781 } 782 783 stats = SYSCTL_ADD_NODE(&rl->ctx, SYSCTL_CHILDREN(node), 784 OID_AUTO, "stats", CTLFLAG_RD, NULL, 785 "Rate limiting statistics"); 786 if (stats != NULL) { 787 /* create SYSCTLs */ 788 for (i = 0; i != MLX5E_RL_STATS_NUM; i++) { 789 mlx5e_rl_sysctl_add_stats_u64_oid(rl, i, 790 stats, mlx5e_rl_stats_desc[2 * i], 791 mlx5e_rl_stats_desc[2 * i + 1]); 792 } 793 } 794 } 795 796 /* allocate workers array */ 797 rl->workers = malloc(sizeof(rl->workers[0]) * 798 rl->param.tx_worker_threads_def, M_MLX5EN, M_WAITOK | M_ZERO); 799 800 /* allocate rate limit array */ 801 rl->rate_limit_table = 
	    malloc(sizeof(rl->rate_limit_table[0]) *
	    rl->param.tx_rates_def, M_MLX5EN, M_WAITOK | M_ZERO);

	if (node != NULL) {
		/* create more SYSCTLs */
		SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO,
		    "tx_rate_show", CTLTYPE_STRING | CTLFLAG_RD |
		    CTLFLAG_MPSAFE, rl, 0, &mlx5e_rl_sysctl_show_rate_table,
		    "A", "Show table of all configured TX rates");

		/* try to fetch rate table from kernel environment */
		for (i = 0; i != rl->param.tx_rates_def; i++) {
			/* compute path for tunable */
			snprintf(buf, sizeof(buf), "dev.mce.%d.rate_limit.tx_rate_add_%d",
			    device_get_unit(priv->mdev->pdev->dev.bsddev), (int)i);
			if (TUNABLE_QUAD_FETCH(buf, &j))
				mlx5e_rl_tx_limit_add(rl, j);
		}

		/* setup rate table sysctls */
		for (i = 0; i != MLX5E_RL_TABLE_PARAMS_NUM; i++) {
			mlx5e_rl_sysctl_add_u64_oid(rl,
			    MLX5E_RL_PARAMS_INDEX(table_arg[i]),
			    node, mlx5e_rl_table_params_desc[2 * i],
			    mlx5e_rl_table_params_desc[2 * i + 1]);
		}
	}

	for (j = 0; j < rl->param.tx_worker_threads_def; j++) {
		struct mlx5e_rl_worker *rlw = rl->workers + j;

		rlw->priv = priv;

		cv_init(&rlw->cv, "mlx5-worker-cv");
		mtx_init(&rlw->mtx, "mlx5-worker-mtx", NULL, MTX_DEF);
		STAILQ_INIT(&rlw->index_list_head);
		STAILQ_INIT(&rlw->process_head);

		rlw->channels = malloc(sizeof(rlw->channels[0]) *
		    rl->param.tx_channels_per_worker_def, M_MLX5EN, M_WAITOK | M_ZERO);

		MLX5E_RL_WORKER_LOCK(rlw);
		for (i = 0; i < rl->param.tx_channels_per_worker_def; i++) {
			struct mlx5e_rl_channel *channel = rlw->channels + i;
			channel->worker = rlw;
			channel->tag.m_snd_tag.ifp = priv->ifp;
			channel->tag.type = IF_SND_TAG_TYPE_RATE_LIMIT;
			STAILQ_INSERT_TAIL(&rlw->index_list_head, channel, entry);
		}
		MLX5E_RL_WORKER_UNLOCK(rlw);
	}

	PRIV_LOCK(priv);
	error = mlx5e_rl_open_workers(priv);
	PRIV_UNLOCK(priv);

	if (error != 0) {
		if_printf(priv->ifp,
		    "mlx5e_rl_open_workers failed: %d\n", error);
	}

	return (0);

err_uar:
	mlx5_unmap_free_uar(priv->mdev, &rl->sq_uar);
done:
	sysctl_ctx_free(&rl->ctx);
	sx_destroy(&rl->rl_sxlock);
	return (error);
}

static int
mlx5e_rl_open_workers(struct mlx5e_priv *priv)
{
	struct mlx5e_rl_priv_data *rl = &priv->rl;
	struct thread *rl_thread = NULL;
	struct proc *rl_proc = NULL;
	uint64_t j;
	int error;

	if (priv->gone || rl->opened)
		return (-EINVAL);

	MLX5E_RL_WLOCK(rl);
	/* compute channel parameters once */
	mlx5e_rl_build_channel_param(rl, &rl->chan_param);
	MLX5E_RL_WUNLOCK(rl);

	for (j = 0; j < rl->param.tx_worker_threads_def; j++) {
		struct mlx5e_rl_worker *rlw = rl->workers + j;

		/* start worker thread */
		error = kproc_kthread_add(mlx5e_rl_worker, rlw, &rl_proc, &rl_thread,
		    RFHIGHPID, 0, "mlx5-ratelimit", "mlx5-rl-worker-thread-%d", (int)j);
		if (error != 0) {
			if_printf(rl->priv->ifp,
			    "kproc_kthread_add failed: %d\n", error);
			rlw->worker_done = 1;
		}
	}

	rl->opened = 1;

	return (0);
}

static void
mlx5e_rl_close_workers(struct mlx5e_priv *priv)
{
	struct mlx5e_rl_priv_data *rl = &priv->rl;
	uint64_t y;

	if (rl->opened == 0)
		return;

	/* tear down worker threads simultaneously */
	for (y = 0; y < rl->param.tx_worker_threads_def; y++) {
		struct mlx5e_rl_worker *rlw = rl->workers + y;

		/* tear down worker before freeing SQs */
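		/*
		 * Setting "worker_done" and broadcasting the CV makes
		 * the worker drop out of its processing loop. The
		 * worker clears "worker_done" again right before it
		 * exits, which is what the wait loop below checks for.
		 */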
		MLX5E_RL_WORKER_LOCK(rlw);
		if (rlw->worker_done == 0) {
			rlw->worker_done = 1;
			cv_broadcast(&rlw->cv);
		} else {
			/* XXX thread not started */
			rlw->worker_done = 0;
		}
		MLX5E_RL_WORKER_UNLOCK(rlw);
	}

	/* wait for worker threads to exit */
	for (y = 0; y < rl->param.tx_worker_threads_def; y++) {
		struct mlx5e_rl_worker *rlw = rl->workers + y;

		/* wait for worker thread to signal completion */
		MLX5E_RL_WORKER_LOCK(rlw);
		while (rlw->worker_done != 0)
			cv_wait(&rlw->cv, &rlw->mtx);
		MLX5E_RL_WORKER_UNLOCK(rlw);
	}

	rl->opened = 0;
}

static void
mlx5e_rl_reset_rates(struct mlx5e_rl_priv_data *rl)
{
	unsigned x;

	MLX5E_RL_WLOCK(rl);
	for (x = 0; x != rl->param.tx_rates_def; x++)
		rl->rate_limit_table[x] = 0;
	MLX5E_RL_WUNLOCK(rl);
}

void
mlx5e_rl_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_rl_priv_data *rl = &priv->rl;
	uint64_t y;

	/* check if there is support for packet pacing */
	if (!MLX5_CAP_GEN(priv->mdev, qos) || !MLX5_CAP_QOS(priv->mdev, packet_pacing))
		return;

	sysctl_ctx_free(&rl->ctx);

	PRIV_LOCK(priv);
	mlx5e_rl_close_workers(priv);
	PRIV_UNLOCK(priv);

	mlx5e_rl_reset_rates(rl);

	/* free shared UAR for SQs */
	mlx5_unmap_free_uar(priv->mdev, &rl->sq_uar);

	/* close TIS domain */
	mlx5e_rl_close_tis(priv);

	for (y = 0; y < rl->param.tx_worker_threads_def; y++) {
		struct mlx5e_rl_worker *rlw = rl->workers + y;

		cv_destroy(&rlw->cv);
		mtx_destroy(&rlw->mtx);
		free(rlw->channels, M_MLX5EN);
	}
	free(rl->rate_limit_table, M_MLX5EN);
	free(rl->workers, M_MLX5EN);
	sx_destroy(&rl->rl_sxlock);
}

static void
mlx5e_rlw_queue_channel_locked(struct mlx5e_rl_worker *rlw,
    struct mlx5e_rl_channel *channel)
{
	STAILQ_INSERT_TAIL(&rlw->process_head, channel, entry);
	cv_broadcast(&rlw->cv);
}

static void
mlx5e_rl_free(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel)
{
	if (channel == NULL)
		return;

	MLX5E_RL_WORKER_LOCK(rlw);
	switch (channel->state) {
	case MLX5E_RL_ST_MODIFY:
		channel->state = MLX5E_RL_ST_DESTROY;
		break;
	case MLX5E_RL_ST_USED:
		channel->state = MLX5E_RL_ST_DESTROY;
		mlx5e_rlw_queue_channel_locked(rlw, channel);
		break;
	default:
		break;
	}
	MLX5E_RL_WORKER_UNLOCK(rlw);
}

static int
mlx5e_rl_modify(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel, uint64_t rate)
{

	MLX5E_RL_WORKER_LOCK(rlw);
	channel->new_rate = rate;
	switch (channel->state) {
	case MLX5E_RL_ST_USED:
		channel->state = MLX5E_RL_ST_MODIFY;
		mlx5e_rlw_queue_channel_locked(rlw, channel);
		break;
	default:
		break;
	}
	MLX5E_RL_WORKER_UNLOCK(rlw);

	return (0);
}

static int
mlx5e_rl_query(struct mlx5e_rl_worker *rlw, struct mlx5e_rl_channel *channel,
    union if_snd_tag_query_params *params)
{
	int retval;

	MLX5E_RL_WORKER_LOCK(rlw);
	switch (channel->state) {
	case MLX5E_RL_ST_USED:
		params->rate_limit.max_rate = channel->last_rate;
		params->rate_limit.queue_level = mlx5e_sq_queue_level(channel->sq);
		retval = 0;
		break;
	case MLX5E_RL_ST_MODIFY:
		params->rate_limit.max_rate = channel->last_rate;
		params->rate_limit.queue_level =
		    mlx5e_sq_queue_level(channel->sq);
		retval = EBUSY;
		break;
	default:
		retval = EINVAL;
		break;
	}
	MLX5E_RL_WORKER_UNLOCK(rlw);

	return (retval);
}

static int
mlx5e_find_available_tx_ring_index(struct mlx5e_rl_worker *rlw,
    struct mlx5e_rl_channel **pchannel)
{
	struct mlx5e_rl_channel *channel;
	int retval = ENOMEM;

	MLX5E_RL_WORKER_LOCK(rlw);
	/* Check for available channel in free list */
	if ((channel = STAILQ_FIRST(&rlw->index_list_head)) != NULL) {
		retval = 0;
		/* Remove head index from available list */
		STAILQ_REMOVE_HEAD(&rlw->index_list_head, entry);
		channel->state = MLX5E_RL_ST_USED;
		atomic_add_64(&rlw->priv->rl.stats.tx_active_connections, 1ULL);
	} else {
		atomic_add_64(&rlw->priv->rl.stats.tx_available_resource_failure, 1ULL);
	}
	MLX5E_RL_WORKER_UNLOCK(rlw);

	*pchannel = channel;
#ifdef RATELIMIT_DEBUG
	if_printf(rlw->priv->ifp, "Channel pointer for rate limit connection is %p\n", channel);
#endif
	return (retval);
}

int
mlx5e_rl_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct mlx5e_rl_channel *channel;
	struct mlx5e_rl_worker *rlw;
	struct mlx5e_priv *priv;
	int error;

	priv = ifp->if_softc;

	/* check if there is support for packet pacing or if device is going away */
	if (!MLX5_CAP_GEN(priv->mdev, qos) ||
	    !MLX5_CAP_QOS(priv->mdev, packet_pacing) || priv->gone ||
	    params->rate_limit.hdr.type != IF_SND_TAG_TYPE_RATE_LIMIT)
		return (EOPNOTSUPP);

	/* compute worker thread this TCP connection belongs to */
	rlw = priv->rl.workers + ((params->rate_limit.hdr.flowid % 128) %
	    priv->rl.param.tx_worker_threads_def);

	error = mlx5e_find_available_tx_ring_index(rlw, &channel);
	if (error != 0)
		goto done;

	error = mlx5e_rl_modify(rlw, channel, params->rate_limit.max_rate);
	if (error != 0) {
		mlx5e_rl_free(rlw, channel);
		goto done;
	}

	/* store pointer to mbuf tag */
	*ppmt = &channel->tag.m_snd_tag;
done:
	return (error);
}

int
mlx5e_rl_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	struct mlx5e_rl_channel *channel =
	    container_of(pmt, struct mlx5e_rl_channel, tag.m_snd_tag);

	return (mlx5e_rl_modify(channel->worker, channel, params->rate_limit.max_rate));
}

int
mlx5e_rl_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_rl_channel *channel =
	    container_of(pmt, struct mlx5e_rl_channel, tag.m_snd_tag);

	return (mlx5e_rl_query(channel->worker, channel, params));
}

void
mlx5e_rl_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_rl_channel *channel =
	    container_of(pmt, struct mlx5e_rl_channel, tag.m_snd_tag);

	mlx5e_rl_free(channel->worker, channel);
}

static int
mlx5e_rl_sysctl_show_rate_table(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_rl_priv_data *rl = arg1;
	struct mlx5e_priv *priv = rl->priv;
	struct sbuf sbuf;
	unsigned x;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);

	PRIV_LOCK(priv);

	sbuf_new_for_sysctl(&sbuf, NULL, 128 * rl->param.tx_rates_def, req);

	sbuf_printf(&sbuf,
	    "\n\n" "\t" "ENTRY" "\t" "BURST" "\t" "RATE [bit/s]\n"
	    "\t" "--------------------------------------------\n");

	MLX5E_RL_RLOCK(rl);
	for (x = 0; x != rl->param.tx_rates_def; x++) {
		if (rl->rate_limit_table[x] == 0)
			continue;

		sbuf_printf(&sbuf, "\t" "%3u" "\t" "%3u" "\t" "%lld\n",
		    x, (unsigned)rl->param.tx_burst_size,
		    (long long)rl->rate_limit_table[x]);
	}
	MLX5E_RL_RUNLOCK(rl);

	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);

	PRIV_UNLOCK(priv);

	return (error);
}

static int
mlx5e_rl_refresh_channel_params(struct mlx5e_rl_priv_data *rl)
{
	uint64_t x;
	uint64_t y;

	MLX5E_RL_WLOCK(rl);
	/* compute channel parameters once */
	mlx5e_rl_build_channel_param(rl, &rl->chan_param);
	MLX5E_RL_WUNLOCK(rl);

	for (y = 0; y != rl->param.tx_worker_threads_def; y++) {
		struct mlx5e_rl_worker *rlw = rl->workers + y;

		for (x = 0; x != rl->param.tx_channels_per_worker_def; x++) {
			struct mlx5e_rl_channel *channel;
			struct mlx5e_sq *sq;

			channel = rlw->channels + x;
			sq = channel->sq;

			if (sq == NULL)
				continue;

			if (MLX5_CAP_GEN(rl->priv->mdev, cq_period_mode_modify)) {
				mlx5_core_modify_cq_moderation_mode(rl->priv->mdev, &sq->cq.mcq,
				    rl->param.tx_coalesce_usecs,
				    rl->param.tx_coalesce_pkts,
				    rl->param.tx_coalesce_mode);
			} else {
				mlx5_core_modify_cq_moderation(rl->priv->mdev, &sq->cq.mcq,
				    rl->param.tx_coalesce_usecs,
				    rl->param.tx_coalesce_pkts);
			}
		}
	}
	return (0);
}

void
mlx5e_rl_refresh_sq_inline(struct mlx5e_rl_priv_data *rl)
{
	uint64_t x;
	uint64_t y;

	for (y = 0; y != rl->param.tx_worker_threads_def; y++) {
		struct mlx5e_rl_worker *rlw = rl->workers + y;

		for (x = 0; x != rl->param.tx_channels_per_worker_def; x++) {
			struct mlx5e_rl_channel *channel;
			struct mlx5e_sq *sq;

			channel = rlw->channels + x;
			sq = channel->sq;

			if (sq == NULL)
				continue;

			mtx_lock(&sq->lock);
			mlx5e_update_sq_inline(sq);
			mtx_unlock(&sq->lock);
		}
	}
}

static int
mlx5e_rl_tx_limit_add(struct mlx5e_rl_priv_data *rl, uint64_t value)
{
	unsigned x;
	int error;

	if (value < 1000 ||
	    mlx5_rl_is_in_range(rl->priv->mdev, howmany(value, 1000), 0) == 0)
		return (EINVAL);

	MLX5E_RL_WLOCK(rl);
	error = ENOMEM;

	/* check if rate already exists */
	for (x = 0; x != rl->param.tx_rates_def; x++) {
		if (rl->rate_limit_table[x] != value)
			continue;
		error = EEXIST;
		break;
	}

	/* check if there is a free rate entry */
	if (x == rl->param.tx_rates_def) {
		for (x = 0; x != rl->param.tx_rates_def; x++) {
			if (rl->rate_limit_table[x] != 0)
				continue;
			rl->rate_limit_table[x] = value;
			error = 0;
			break;
		}
	}
	MLX5E_RL_WUNLOCK(rl);

	return (error);
}

static int
mlx5e_rl_tx_limit_clr(struct mlx5e_rl_priv_data *rl, uint64_t value)
{
	unsigned x;
	int error;

	if (value == 0)
		return (EINVAL);

	MLX5E_RL_WLOCK(rl);

	/* find the matching rate entry */
	for (x = 0; x != rl->param.tx_rates_def; x++) {
		if (rl->rate_limit_table[x] != value)
			continue;
		/* free up rate */
		rl->rate_limit_table[x] = 0;
		break;
	}

	/* check if the rate was found */
	if (x == rl->param.tx_rates_def)
		error = ENOENT;
	else
		error = 0;
	MLX5E_RL_WUNLOCK(rl);

	return (error);
}

static int
mlx5e_rl_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct mlx5e_rl_priv_data *rl = arg1;
	struct mlx5e_priv *priv = rl->priv;
	unsigned mode_modify;
	unsigned was_opened;
	uint64_t value;
	uint64_t old;
	int error;

	PRIV_LOCK(priv);

	MLX5E_RL_RLOCK(rl);
	value = rl->param.arg[arg2];
	MLX5E_RL_RUNLOCK(rl);

	if (req != NULL) {
		old = value;
		error = sysctl_handle_64(oidp, &value, 0, req);
		if (error || req->newptr == NULL ||
		    value == rl->param.arg[arg2])
			goto done;
	} else {
		old = 0;
		error = 0;
	}

	/* check if device is gone */
	if (priv->gone) {
		error = ENXIO;
		goto done;
	}
	was_opened = rl->opened;
	mode_modify = MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify);

	switch (MLX5E_RL_PARAMS_INDEX(arg[arg2])) {
	case MLX5E_RL_PARAMS_INDEX(tx_worker_threads_def):
		if (value > rl->param.tx_worker_threads_max)
			value = rl->param.tx_worker_threads_max;
		else if (value < 1)
			value = 1;

		/* store new value */
		rl->param.arg[arg2] = value;
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_channels_per_worker_def):
		if (value > rl->param.tx_channels_per_worker_max)
			value = rl->param.tx_channels_per_worker_max;
		else if (value < 1)
			value = 1;

		/* store new value */
		rl->param.arg[arg2] = value;
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_rates_def):
		if (value > rl->param.tx_rates_max)
			value = rl->param.tx_rates_max;
		else if (value < 1)
			value = 1;

		/* store new value */
		rl->param.arg[arg2] = value;
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_coalesce_usecs):
		/* range check */
		if (value < 1)
			value = 0;
		else if (value > MLX5E_FLD_MAX(cqc, cq_period))
			value = MLX5E_FLD_MAX(cqc, cq_period);

		/* store new value */
		rl->param.arg[arg2] = value;

		/* refresh in place to avoid bringing the interface down and up */
		if (was_opened)
			error = mlx5e_rl_refresh_channel_params(rl);
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_coalesce_pkts):
		/* import TX coal pkts */
		if (value < 1)
			value = 0;
		else if (value > MLX5E_FLD_MAX(cqc, cq_max_count))
			value = MLX5E_FLD_MAX(cqc, cq_max_count);

		/* store new value */
		rl->param.arg[arg2] = value;

		/* refresh in place to avoid bringing the interface down and up */
		if (was_opened)
			error = mlx5e_rl_refresh_channel_params(rl);
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_coalesce_mode):
		/* network interface must be down */
		if (was_opened != 0 && mode_modify == 0)
			mlx5e_rl_close_workers(priv);

		/* import TX coalesce mode */
		if (value != 0)
			value = 1;

		/* store new value */
		rl->param.arg[arg2] = value;

		/* restart network interface, if any */
		if (was_opened != 0) {
			if (mode_modify == 0)
				mlx5e_rl_open_workers(priv);
			else
				error = mlx5e_rl_refresh_channel_params(rl);
		}
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_queue_size):
		/* network interface must be down */
		if (was_opened)
			mlx5e_rl_close_workers(priv);

		/* import TX queue size */
		if (value < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE))
			value = (1 <<
			    MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
		else if (value > priv->params_ethtool.tx_queue_size_max)
			value = priv->params_ethtool.tx_queue_size_max;

		/* store actual TX queue size */
		value = 1ULL << order_base_2(value);

		/* store new value */
		rl->param.arg[arg2] = value;

		/* verify TX completion factor */
		mlx5e_rl_sync_tx_completion_fact(rl);

		/* restart network interface, if any */
		if (was_opened)
			mlx5e_rl_open_workers(priv);
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_completion_fact):
		/* network interface must be down */
		if (was_opened)
			mlx5e_rl_close_workers(priv);

		/* store new value */
		rl->param.arg[arg2] = value;

		/* verify parameter */
		mlx5e_rl_sync_tx_completion_fact(rl);

		/* restart network interface, if any */
		if (was_opened)
			mlx5e_rl_open_workers(priv);
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_limit_add):
		error = mlx5e_rl_tx_limit_add(rl, value);
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_limit_clr):
		error = mlx5e_rl_tx_limit_clr(rl, value);
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_allowed_deviation):
		/* range check */
		if (value > rl->param.tx_allowed_deviation_max)
			value = rl->param.tx_allowed_deviation_max;
		else if (value < rl->param.tx_allowed_deviation_min)
			value = rl->param.tx_allowed_deviation_min;

		MLX5E_RL_WLOCK(rl);
		rl->param.arg[arg2] = value;
		MLX5E_RL_WUNLOCK(rl);
		break;

	case MLX5E_RL_PARAMS_INDEX(tx_burst_size):
		/* range check */
		if (value > rl->param.tx_burst_size_max)
			value = rl->param.tx_burst_size_max;
		else if (value < rl->param.tx_burst_size_min)
			value = rl->param.tx_burst_size_min;

		MLX5E_RL_WLOCK(rl);
		rl->param.arg[arg2] = value;
		MLX5E_RL_WUNLOCK(rl);
		break;

	default:
		break;
	}
done:
	PRIV_UNLOCK(priv);
	return (error);
}

static void
mlx5e_rl_sysctl_add_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x,
    struct sysctl_oid *node, const char *name, const char *desc)
{
	/*
	 * NOTE: In FreeBSD-11 and newer the CTLFLAG_RWTUN flag will
	 * take care of loading the default sysctl value from the
	 * kernel environment, if any:
	 */
	if (strstr(name, "_max") != 0 || strstr(name, "_min") != 0) {
		/* read-only SYSCTLs */
		SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO,
		    name, CTLTYPE_U64 | CTLFLAG_RD |
		    CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc);
	} else {
		if (strstr(name, "_def") != 0) {
#ifdef RATELIMIT_DEBUG
			/* tunable read-only advanced SYSCTLs */
			SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO,
			    name, CTLTYPE_U64 | CTLFLAG_RDTUN |
			    CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc);
#endif
		} else {
			/* read-write SYSCTLs */
			SYSCTL_ADD_PROC(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO,
			    name, CTLTYPE_U64 | CTLFLAG_RWTUN |
			    CTLFLAG_MPSAFE, rl, x, &mlx5e_rl_sysctl_handler, "QU", desc);
		}
	}
}

static void
mlx5e_rl_sysctl_add_stats_u64_oid(struct mlx5e_rl_priv_data *rl, unsigned x,
    struct sysctl_oid *node, const char *name, const char *desc)
{
	/* read-only SYSCTLs */
	SYSCTL_ADD_U64(&rl->ctx, SYSCTL_CHILDREN(node), OID_AUTO, name,
	    CTLFLAG_RD, &rl->stats.arg[x], 0, desc);
}

#endif
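
/*
 * Usage note: TX rates may be pre-loaded from the kernel environment
 * before the driver attaches, matching the tunable names probed by
 * mlx5e_rl_init() above, e.g. (hypothetical unit number and rate):
 *
 * dev.mce.0.rate_limit.tx_rate_add_0="100000000"
 */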