1 /*- 2 * Copyright (c) 2017 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 #include "opt_ratelimit.h" 34 35 #include <sys/types.h> 36 #include <sys/malloc.h> 37 #include <sys/queue.h> 38 #include <sys/sbuf.h> 39 #include <sys/taskqueue.h> 40 #include <sys/sysctl.h> 41 42 #include "common/common.h" 43 #include "common/t4_regs.h" 44 #include "common/t4_regs_values.h" 45 #include "common/t4_msg.h" 46 47 48 static int 49 in_range(int val, int lo, int hi) 50 { 51 52 return (val < 0 || (val <= hi && val >= lo)); 53 } 54 55 static int 56 set_sched_class_config(struct adapter *sc, int minmax) 57 { 58 int rc; 59 60 if (minmax < 0) 61 return (EINVAL); 62 63 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc"); 64 if (rc) 65 return (rc); 66 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1); 67 end_synchronized_op(sc, 0); 68 69 return (rc); 70 } 71 72 static int 73 set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p, 74 int sleep_ok) 75 { 76 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode; 77 struct port_info *pi; 78 struct tx_cl_rl_params *tc; 79 bool check_pktsize = false; 80 81 if (p->level == SCHED_CLASS_LEVEL_CL_RL) 82 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL; 83 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 84 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR; 85 else if (p->level == SCHED_CLASS_LEVEL_CH_RL) 86 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL; 87 else 88 return (EINVAL); 89 90 if (p->level == SCHED_CLASS_LEVEL_CL_RL) { 91 if (p->mode == SCHED_CLASS_MODE_CLASS) 92 fw_mode = FW_SCHED_PARAMS_MODE_CLASS; 93 else if (p->mode == SCHED_CLASS_MODE_FLOW) { 94 check_pktsize = true; 95 fw_mode = FW_SCHED_PARAMS_MODE_FLOW; 96 } else 97 return (EINVAL); 98 } else 99 fw_mode = 0; 100 101 /* Valid channel must always be provided. */ 102 if (p->channel < 0) 103 return (EINVAL); 104 if (!in_range(p->channel, 0, sc->chip_params->nchan - 1)) 105 return (ERANGE); 106 107 pi = sc->port[sc->chan_map[p->channel]]; 108 if (pi == NULL) 109 return (ENXIO); 110 MPASS(pi->tx_chan == p->channel); 111 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */ 112 113 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 114 p->level == SCHED_CLASS_LEVEL_CH_RL) { 115 /* 116 * Valid rate (mode, unit and values) must be provided. 117 */ 118 119 if (p->minrate < 0) 120 p->minrate = 0; 121 if (p->maxrate < 0) 122 return (EINVAL); 123 124 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS) { 125 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 126 /* ratemode could be relative (%) or absolute. */ 127 if (p->ratemode == SCHED_CLASS_RATEMODE_REL) { 128 fw_ratemode = FW_SCHED_PARAMS_RATE_REL; 129 /* maxrate is % of port bandwidth. */ 130 if (!in_range(p->minrate, 0, 100) || 131 !in_range(p->maxrate, 0, 100)) { 132 return (ERANGE); 133 } 134 } else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS) { 135 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS; 136 /* maxrate is absolute value in kbps. */ 137 if (!in_range(p->minrate, 0, top_speed) || 138 !in_range(p->maxrate, 0, top_speed)) { 139 return (ERANGE); 140 } 141 } else 142 return (EINVAL); 143 } else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS) { 144 /* maxrate is the absolute value in pps. */ 145 check_pktsize = true; 146 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE; 147 } else 148 return (EINVAL); 149 } else { 150 MPASS(p->level == SCHED_CLASS_LEVEL_CL_WRR); 151 152 /* 153 * Valid weight must be provided. 154 */ 155 if (p->weight < 0) 156 return (EINVAL); 157 if (!in_range(p->weight, 1, 99)) 158 return (ERANGE); 159 160 fw_rateunit = 0; 161 fw_ratemode = 0; 162 } 163 164 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 165 p->level == SCHED_CLASS_LEVEL_CL_WRR) { 166 /* 167 * Valid scheduling class must be provided. 168 */ 169 if (p->cl < 0) 170 return (EINVAL); 171 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) 172 return (ERANGE); 173 } 174 175 if (check_pktsize) { 176 if (p->pktsize < 0) 177 return (EINVAL); 178 if (!in_range(p->pktsize, 64, pi->vi[0].ifp->if_mtu)) 179 return (ERANGE); 180 } 181 182 rc = begin_synchronized_op(sc, NULL, 183 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp"); 184 if (rc) 185 return (rc); 186 if (p->level == SCHED_CLASS_LEVEL_CL_RL) { 187 tc = &pi->sched_params->cl_rl[p->cl]; 188 if (tc->refcount > 0) { 189 rc = EBUSY; 190 goto done; 191 } else { 192 tc->ratemode = fw_ratemode; 193 tc->rateunit = fw_rateunit; 194 tc->mode = fw_mode; 195 tc->maxrate = p->maxrate; 196 tc->pktsize = p->pktsize; 197 } 198 } 199 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode, 200 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate, 201 p->weight, p->pktsize, sleep_ok); 202 if (p->level == SCHED_CLASS_LEVEL_CL_RL && rc != 0) { 203 /* 204 * Unknown state at this point, see parameters in tc for what 205 * was attempted. 206 */ 207 tc->flags |= TX_CLRL_ERROR; 208 } 209 done: 210 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD); 211 212 return (rc); 213 } 214 215 static void 216 update_tx_sched(void *context, int pending) 217 { 218 int i, j, mode, rateunit, ratemode, maxrate, pktsize, rc; 219 struct port_info *pi; 220 struct tx_cl_rl_params *tc; 221 struct adapter *sc = context; 222 const int n = sc->chip_params->nsched_cls; 223 224 mtx_lock(&sc->tc_lock); 225 for_each_port(sc, i) { 226 pi = sc->port[i]; 227 tc = &pi->sched_params->cl_rl[0]; 228 for (j = 0; j < n; j++, tc++) { 229 MPASS(mtx_owned(&sc->tc_lock)); 230 if ((tc->flags & TX_CLRL_REFRESH) == 0) 231 continue; 232 233 mode = tc->mode; 234 rateunit = tc->rateunit; 235 ratemode = tc->ratemode; 236 maxrate = tc->maxrate; 237 pktsize = tc->pktsize; 238 mtx_unlock(&sc->tc_lock); 239 240 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 241 "t4utxs") != 0) { 242 mtx_lock(&sc->tc_lock); 243 continue; 244 } 245 rc = t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, 246 FW_SCHED_PARAMS_LEVEL_CL_RL, mode, rateunit, 247 ratemode, pi->tx_chan, j, 0, maxrate, 0, pktsize, 248 1); 249 end_synchronized_op(sc, 0); 250 251 mtx_lock(&sc->tc_lock); 252 if (rc != 0) { 253 tc->flags |= TX_CLRL_ERROR; 254 } else if (tc->mode == mode && 255 tc->rateunit == rateunit && 256 tc->maxrate == maxrate && 257 tc->pktsize == tc->pktsize) { 258 tc->flags &= ~(TX_CLRL_REFRESH | TX_CLRL_ERROR); 259 } 260 } 261 } 262 mtx_unlock(&sc->tc_lock); 263 } 264 265 int 266 t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p) 267 { 268 269 if (p->type != SCHED_CLASS_TYPE_PACKET) 270 return (EINVAL); 271 272 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG) 273 return (set_sched_class_config(sc, p->u.config.minmax)); 274 275 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS) 276 return (set_sched_class_params(sc, &p->u.params, 1)); 277 278 return (EINVAL); 279 } 280 281 int 282 t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p) 283 { 284 struct port_info *pi = NULL; 285 struct vi_info *vi; 286 struct sge_txq *txq; 287 uint32_t fw_mnem, fw_queue, fw_class; 288 int i, rc; 289 290 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq"); 291 if (rc) 292 return (rc); 293 294 if (p->port >= sc->params.nports) { 295 rc = EINVAL; 296 goto done; 297 } 298 299 /* XXX: Only supported for the main VI. */ 300 pi = sc->port[p->port]; 301 vi = &pi->vi[0]; 302 if (!(vi->flags & VI_INIT_DONE)) { 303 /* tx queues not set up yet */ 304 rc = EAGAIN; 305 goto done; 306 } 307 308 if (!in_range(p->queue, 0, vi->ntxq - 1) || 309 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) { 310 rc = EINVAL; 311 goto done; 312 } 313 314 /* 315 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX 316 * Scheduling Class in this case). 317 */ 318 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 319 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); 320 fw_class = p->cl < 0 ? 0xffffffff : p->cl; 321 322 /* 323 * If op.queue is non-negative, then we're only changing the scheduling 324 * on a single specified TX queue. 325 */ 326 if (p->queue >= 0) { 327 txq = &sc->sge.txq[vi->first_txq + p->queue]; 328 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 329 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 330 &fw_class); 331 goto done; 332 } 333 334 /* 335 * Change the scheduling on all the TX queues for the 336 * interface. 337 */ 338 for_each_txq(vi, i, txq) { 339 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 340 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 341 &fw_class); 342 if (rc) 343 goto done; 344 } 345 346 rc = 0; 347 done: 348 end_synchronized_op(sc, 0); 349 return (rc); 350 } 351 352 int 353 t4_init_tx_sched(struct adapter *sc) 354 { 355 int i, j; 356 const int n = sc->chip_params->nsched_cls; 357 struct port_info *pi; 358 struct tx_cl_rl_params *tc; 359 static const uint32_t init_kbps[] = { 360 100 * 1000, 361 200 * 1000, 362 400 * 1000, 363 500 * 1000, 364 800 * 1000, 365 1000 * 1000, 366 1200 * 1000, 367 1500 * 1000, 368 1800 * 1000, 369 2000 * 1000, 370 2500 * 1000, 371 3000 * 1000, 372 3500 * 1000, 373 4000 * 1000, 374 5000 * 1000, 375 10000 * 1000 376 }; 377 378 mtx_init(&sc->tc_lock, "tx_sched lock", NULL, MTX_DEF); 379 TASK_INIT(&sc->tc_task, 0, update_tx_sched, sc); 380 for_each_port(sc, i) { 381 pi = sc->port[i]; 382 pi->sched_params = malloc(sizeof(*pi->sched_params) + 383 n * sizeof(*tc), M_CXGBE, M_ZERO | M_WAITOK); 384 tc = &pi->sched_params->cl_rl[0]; 385 for (j = 0; j < n; j++, tc++) { 386 tc->refcount = 0; 387 tc->ratemode = FW_SCHED_PARAMS_RATE_ABS; 388 tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 389 tc->mode = FW_SCHED_PARAMS_MODE_FLOW; 390 tc->maxrate = init_kbps[min(j, nitems(init_kbps) - 1)]; 391 tc->pktsize = ETHERMTU; /* XXX */ 392 393 if (t4_sched_params_cl_rl_kbps(sc, pi->tx_chan, j, 394 tc->mode, tc->maxrate, tc->pktsize, 1) == 0) 395 tc->flags = 0; 396 else 397 tc->flags = TX_CLRL_ERROR; 398 } 399 } 400 401 return (0); 402 } 403 404 int 405 t4_free_tx_sched(struct adapter *sc) 406 { 407 int i; 408 409 taskqueue_drain(taskqueue_thread, &sc->tc_task); 410 411 for_each_port(sc, i) { 412 if (sc->port[i] != NULL) 413 free(sc->port[i]->sched_params, M_CXGBE); 414 } 415 416 if (mtx_initialized(&sc->tc_lock)) 417 mtx_destroy(&sc->tc_lock); 418 419 return (0); 420 } 421 422 void 423 t4_update_tx_sched(struct adapter *sc) 424 { 425 426 taskqueue_enqueue(taskqueue_thread, &sc->tc_task); 427 } 428 429 int 430 t4_reserve_cl_rl_kbps(struct adapter *sc, int port_id, u_int maxrate, 431 int *tc_idx) 432 { 433 int rc = 0, fa = -1, i; 434 struct tx_cl_rl_params *tc; 435 436 MPASS(port_id >= 0 && port_id < sc->params.nports); 437 438 tc = &sc->port[port_id]->sched_params->cl_rl[0]; 439 mtx_lock(&sc->tc_lock); 440 for (i = 0; i < sc->chip_params->nsched_cls; i++, tc++) { 441 if (fa < 0 && tc->refcount == 0) 442 fa = i; 443 444 if (tc->ratemode == FW_SCHED_PARAMS_RATE_ABS && 445 tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE && 446 tc->mode == FW_SCHED_PARAMS_MODE_FLOW && 447 tc->maxrate == maxrate) { 448 tc->refcount++; 449 *tc_idx = i; 450 goto done; 451 } 452 } 453 /* Not found */ 454 MPASS(i == sc->chip_params->nsched_cls); 455 if (fa != -1) { 456 tc = &sc->port[port_id]->sched_params->cl_rl[fa]; 457 tc->flags = TX_CLRL_REFRESH; 458 tc->refcount = 1; 459 tc->ratemode = FW_SCHED_PARAMS_RATE_ABS; 460 tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 461 tc->mode = FW_SCHED_PARAMS_MODE_FLOW; 462 tc->maxrate = maxrate; 463 tc->pktsize = ETHERMTU; /* XXX */ 464 *tc_idx = fa; 465 t4_update_tx_sched(sc); 466 } else { 467 *tc_idx = -1; 468 rc = ENOSPC; 469 } 470 done: 471 mtx_unlock(&sc->tc_lock); 472 return (rc); 473 } 474 475 void 476 t4_release_cl_rl_kbps(struct adapter *sc, int port_id, int tc_idx) 477 { 478 struct tx_cl_rl_params *tc; 479 480 MPASS(port_id >= 0 && port_id < sc->params.nports); 481 MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls); 482 483 mtx_lock(&sc->tc_lock); 484 tc = &sc->port[port_id]->sched_params->cl_rl[tc_idx]; 485 MPASS(tc->refcount > 0); 486 MPASS(tc->ratemode == FW_SCHED_PARAMS_RATE_ABS); 487 MPASS(tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE); 488 MPASS(tc->mode == FW_SCHED_PARAMS_MODE_FLOW); 489 tc->refcount--; 490 mtx_unlock(&sc->tc_lock); 491 } 492 493 #ifdef RATELIMIT 494 void 495 t4_init_etid_table(struct adapter *sc) 496 { 497 int i; 498 struct tid_info *t; 499 500 if (!is_ethoffload(sc)) 501 return; 502 503 t = &sc->tids; 504 MPASS(t->netids > 0); 505 506 mtx_init(&t->etid_lock, "etid lock", NULL, MTX_DEF); 507 t->etid_tab = malloc(sizeof(*t->etid_tab) * t->netids, M_CXGBE, 508 M_ZERO | M_WAITOK); 509 t->efree = t->etid_tab; 510 t->etids_in_use = 0; 511 for (i = 1; i < t->netids; i++) 512 t->etid_tab[i - 1].next = &t->etid_tab[i]; 513 t->etid_tab[t->netids - 1].next = NULL; 514 } 515 516 void 517 t4_free_etid_table(struct adapter *sc) 518 { 519 struct tid_info *t; 520 521 if (!is_ethoffload(sc)) 522 return; 523 524 t = &sc->tids; 525 MPASS(t->netids > 0); 526 527 free(t->etid_tab, M_CXGBE); 528 t->etid_tab = NULL; 529 530 if (mtx_initialized(&t->etid_lock)) 531 mtx_destroy(&t->etid_lock); 532 } 533 534 /* etid services */ 535 static int alloc_etid(struct adapter *, struct cxgbe_snd_tag *); 536 static void free_etid(struct adapter *, int); 537 538 static int 539 alloc_etid(struct adapter *sc, struct cxgbe_snd_tag *cst) 540 { 541 struct tid_info *t = &sc->tids; 542 int etid = -1; 543 544 mtx_lock(&t->etid_lock); 545 if (t->efree) { 546 union etid_entry *p = t->efree; 547 548 etid = p - t->etid_tab + t->etid_base; 549 t->efree = p->next; 550 p->cst = cst; 551 t->etids_in_use++; 552 } 553 mtx_unlock(&t->etid_lock); 554 return (etid); 555 } 556 557 struct cxgbe_snd_tag * 558 lookup_etid(struct adapter *sc, int etid) 559 { 560 struct tid_info *t = &sc->tids; 561 562 return (t->etid_tab[etid - t->etid_base].cst); 563 } 564 565 static void 566 free_etid(struct adapter *sc, int etid) 567 { 568 struct tid_info *t = &sc->tids; 569 union etid_entry *p = &t->etid_tab[etid - t->etid_base]; 570 571 mtx_lock(&t->etid_lock); 572 p->next = t->efree; 573 t->efree = p; 574 t->etids_in_use--; 575 mtx_unlock(&t->etid_lock); 576 } 577 578 int 579 cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params, 580 struct m_snd_tag **pt) 581 { 582 int rc, schedcl; 583 struct vi_info *vi = ifp->if_softc; 584 struct port_info *pi = vi->pi; 585 struct adapter *sc = pi->adapter; 586 struct cxgbe_snd_tag *cst; 587 588 if (params->hdr.type != IF_SND_TAG_TYPE_RATE_LIMIT) 589 return (ENOTSUP); 590 591 rc = t4_reserve_cl_rl_kbps(sc, pi->port_id, 592 (params->rate_limit.max_rate * 8ULL / 1000), &schedcl); 593 if (rc != 0) 594 return (rc); 595 MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls); 596 597 cst = malloc(sizeof(*cst), M_CXGBE, M_ZERO | M_NOWAIT); 598 if (cst == NULL) { 599 failed: 600 t4_release_cl_rl_kbps(sc, pi->port_id, schedcl); 601 return (ENOMEM); 602 } 603 604 cst->etid = alloc_etid(sc, cst); 605 if (cst->etid < 0) { 606 free(cst, M_CXGBE); 607 goto failed; 608 } 609 610 mtx_init(&cst->lock, "cst_lock", NULL, MTX_DEF); 611 mbufq_init(&cst->pending_tx, INT_MAX); 612 mbufq_init(&cst->pending_fwack, INT_MAX); 613 cst->com.ifp = ifp; 614 cst->flags |= EO_FLOWC_PENDING | EO_SND_TAG_REF; 615 cst->adapter = sc; 616 cst->port_id = pi->port_id; 617 cst->schedcl = schedcl; 618 cst->max_rate = params->rate_limit.max_rate; 619 cst->tx_credits = sc->params.ofldq_wr_cred; 620 cst->tx_total = cst->tx_credits; 621 cst->plen = 0; 622 cst->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 623 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) | 624 V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) | 625 V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid))); 626 627 /* 628 * Queues will be selected later when the connection flowid is available. 629 */ 630 631 *pt = &cst->com; 632 return (0); 633 } 634 635 /* 636 * Change in parameters, no change in ifp. 637 */ 638 int 639 cxgbe_snd_tag_modify(struct m_snd_tag *mst, 640 union if_snd_tag_modify_params *params) 641 { 642 int rc, schedcl; 643 struct cxgbe_snd_tag *cst = mst_to_cst(mst); 644 struct adapter *sc = cst->adapter; 645 646 /* XXX: is schedcl -1 ok here? */ 647 MPASS(cst->schedcl >= 0 && cst->schedcl < sc->chip_params->nsched_cls); 648 649 mtx_lock(&cst->lock); 650 MPASS(cst->flags & EO_SND_TAG_REF); 651 rc = t4_reserve_cl_rl_kbps(sc, cst->port_id, 652 (params->rate_limit.max_rate * 8ULL / 1000), &schedcl); 653 if (rc != 0) 654 return (rc); 655 MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls); 656 t4_release_cl_rl_kbps(sc, cst->port_id, cst->schedcl); 657 cst->schedcl = schedcl; 658 cst->max_rate = params->rate_limit.max_rate; 659 mtx_unlock(&cst->lock); 660 661 return (0); 662 } 663 664 int 665 cxgbe_snd_tag_query(struct m_snd_tag *mst, 666 union if_snd_tag_query_params *params) 667 { 668 struct cxgbe_snd_tag *cst = mst_to_cst(mst); 669 670 params->rate_limit.max_rate = cst->max_rate; 671 672 #define CST_TO_MST_QLEVEL_SCALE (IF_SND_QUEUE_LEVEL_MAX / cst->tx_total) 673 params->rate_limit.queue_level = 674 (cst->tx_total - cst->tx_credits) * CST_TO_MST_QLEVEL_SCALE; 675 676 return (0); 677 } 678 679 /* 680 * Unlocks cst and frees it. 681 */ 682 void 683 cxgbe_snd_tag_free_locked(struct cxgbe_snd_tag *cst) 684 { 685 struct adapter *sc = cst->adapter; 686 687 mtx_assert(&cst->lock, MA_OWNED); 688 MPASS((cst->flags & EO_SND_TAG_REF) == 0); 689 MPASS(cst->tx_credits == cst->tx_total); 690 MPASS(cst->plen == 0); 691 MPASS(mbufq_first(&cst->pending_tx) == NULL); 692 MPASS(mbufq_first(&cst->pending_fwack) == NULL); 693 694 if (cst->etid >= 0) 695 free_etid(sc, cst->etid); 696 if (cst->schedcl != -1) 697 t4_release_cl_rl_kbps(sc, cst->port_id, cst->schedcl); 698 mtx_unlock(&cst->lock); 699 mtx_destroy(&cst->lock); 700 free(cst, M_CXGBE); 701 } 702 703 void 704 cxgbe_snd_tag_free(struct m_snd_tag *mst) 705 { 706 struct cxgbe_snd_tag *cst = mst_to_cst(mst); 707 708 mtx_lock(&cst->lock); 709 710 /* The kernel is done with the snd_tag. Remove its reference. */ 711 MPASS(cst->flags & EO_SND_TAG_REF); 712 cst->flags &= ~EO_SND_TAG_REF; 713 714 if (cst->ncompl == 0) { 715 /* 716 * No fw4_ack in flight. Free the tag right away if there are 717 * no outstanding credits. Request the firmware to return all 718 * credits for the etid otherwise. 719 */ 720 if (cst->tx_credits == cst->tx_total) { 721 cxgbe_snd_tag_free_locked(cst); 722 return; /* cst is gone. */ 723 } 724 send_etid_flush_wr(cst); 725 } 726 mtx_unlock(&cst->lock); 727 } 728 #endif 729