xref: /freebsd/sys/dev/cxgbe/t4_sched.c (revision d0ba1baed3f6e4936a0c1b89c25f6c59168ef6de)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33 #include "opt_ratelimit.h"
34 
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/queue.h>
38 #include <sys/sbuf.h>
39 #include <sys/taskqueue.h>
40 #include <sys/sysctl.h>
41 
42 #include "common/common.h"
43 #include "common/t4_regs.h"
44 #include "common/t4_regs_values.h"
45 #include "common/t4_msg.h"
46 
47 
static int
in_range(int val, int lo, int hi)
{

	/* A negative value means "parameter not set" and always passes. */
	if (val < 0)
		return (1);

	return (val >= lo && val <= hi);
}
54 
55 static int
56 set_sched_class_config(struct adapter *sc, int minmax)
57 {
58 	int rc;
59 
60 	if (minmax < 0)
61 		return (EINVAL);
62 
63 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
64 	if (rc)
65 		return (rc);
66 	rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
67 	end_synchronized_op(sc, 0);
68 
69 	return (rc);
70 }
71 
72 static int
73 set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
74     int sleep_ok)
75 {
76 	int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode;
77 	struct port_info *pi;
78 	struct tx_cl_rl_params *tc;
79 
80 	if (p->level == SCHED_CLASS_LEVEL_CL_RL)
81 		fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
82 	else if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
83 		fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
84 	else if (p->level == SCHED_CLASS_LEVEL_CH_RL)
85 		fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
86 	else
87 		return (EINVAL);
88 
89 	if (p->mode == SCHED_CLASS_MODE_CLASS)
90 		fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
91 	else if (p->mode == SCHED_CLASS_MODE_FLOW)
92 		fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
93 	else
94 		return (EINVAL);
95 
96 	if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS)
97 		fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
98 	else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS)
99 		fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
100 	else
101 		return (EINVAL);
102 
103 	if (p->ratemode == SCHED_CLASS_RATEMODE_REL)
104 		fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
105 	else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS)
106 		fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
107 	else
108 		return (EINVAL);
109 
110 	/* Vet our parameters ... */
111 	if (!in_range(p->channel, 0, sc->chip_params->nchan - 1))
112 		return (ERANGE);
113 
114 	pi = sc->port[sc->chan_map[p->channel]];
115 	if (pi == NULL)
116 		return (ENXIO);
117 	MPASS(pi->tx_chan == p->channel);
118 	top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */
119 
120 	if (!in_range(p->cl, 0, sc->chip_params->nsched_cls) ||
121 	    !in_range(p->minrate, 0, top_speed) ||
122 	    !in_range(p->maxrate, 0, top_speed) ||
123 	    !in_range(p->weight, 0, 100))
124 		return (ERANGE);
125 
126 	/*
127 	 * Translate any unset parameters into the firmware's
128 	 * nomenclature and/or fail the call if the parameters
129 	 * are required ...
130 	 */
131 	if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0)
132 		return (EINVAL);
133 
134 	if (p->minrate < 0)
135 		p->minrate = 0;
136 	if (p->maxrate < 0) {
137 		if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
138 		    p->level == SCHED_CLASS_LEVEL_CH_RL)
139 			return (EINVAL);
140 		else
141 			p->maxrate = 0;
142 	}
143 	if (p->weight < 0) {
144 		if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
145 			return (EINVAL);
146 		else
147 			p->weight = 0;
148 	}
149 	if (p->pktsize < 0) {
150 		if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
151 		    p->level == SCHED_CLASS_LEVEL_CH_RL)
152 			return (EINVAL);
153 		else
154 			p->pktsize = 0;
155 	}
156 
157 	rc = begin_synchronized_op(sc, NULL,
158 	    sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp");
159 	if (rc)
160 		return (rc);
161 	if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
162 		tc = &pi->sched_params->cl_rl[p->cl];
163 		if (tc->refcount > 0) {
164 			rc = EBUSY;
165 			goto done;
166 		} else {
167 			tc->ratemode = fw_ratemode;
168 			tc->rateunit = fw_rateunit;
169 			tc->mode = fw_mode;
170 			tc->maxrate = p->maxrate;
171 			tc->pktsize = p->pktsize;
172 		}
173 	}
174 	rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
175 	    fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
176 	    p->weight, p->pktsize, sleep_ok);
177 	if (p->level == SCHED_CLASS_LEVEL_CL_RL && rc != 0) {
178 		/*
179 		 * Unknown state at this point, see parameters in tc for what
180 		 * was attempted.
181 		 */
182 		tc->flags |= TX_CLRL_ERROR;
183 	}
184 done:
185 	end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);
186 
187 	return (rc);
188 }
189 
190 static void
191 update_tx_sched(void *context, int pending)
192 {
193 	int i, j, mode, rateunit, ratemode, maxrate, pktsize, rc;
194 	struct port_info *pi;
195 	struct tx_cl_rl_params *tc;
196 	struct adapter *sc = context;
197 	const int n = sc->chip_params->nsched_cls;
198 
199 	mtx_lock(&sc->tc_lock);
200 	for_each_port(sc, i) {
201 		pi = sc->port[i];
202 		tc = &pi->sched_params->cl_rl[0];
203 		for (j = 0; j < n; j++, tc++) {
204 			MPASS(mtx_owned(&sc->tc_lock));
205 			if ((tc->flags & TX_CLRL_REFRESH) == 0)
206 				continue;
207 
208 			mode = tc->mode;
209 			rateunit = tc->rateunit;
210 			ratemode = tc->ratemode;
211 			maxrate = tc->maxrate;
212 			pktsize = tc->pktsize;
213 			mtx_unlock(&sc->tc_lock);
214 
215 			if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
216 			    "t4utxs") != 0) {
217 				mtx_lock(&sc->tc_lock);
218 				continue;
219 			}
220 			rc = t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
221 			    FW_SCHED_PARAMS_LEVEL_CL_RL, mode, rateunit,
222 			    ratemode, pi->tx_chan, j, 0, maxrate, 0, pktsize,
223 			    1);
224 			end_synchronized_op(sc, 0);
225 
226 			mtx_lock(&sc->tc_lock);
227 			if (rc != 0) {
228 				tc->flags |= TX_CLRL_ERROR;
229 			} else if (tc->mode == mode &&
230 			    tc->rateunit == rateunit &&
231 			    tc->maxrate == maxrate &&
232 			    tc->pktsize == tc->pktsize) {
233 				tc->flags &= ~(TX_CLRL_REFRESH | TX_CLRL_ERROR);
234 			}
235 		}
236 	}
237 	mtx_unlock(&sc->tc_lock);
238 }
239 
240 int
241 t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p)
242 {
243 
244 	if (p->type != SCHED_CLASS_TYPE_PACKET)
245 		return (EINVAL);
246 
247 	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
248 		return (set_sched_class_config(sc, p->u.config.minmax));
249 
250 	if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
251 		return (set_sched_class_params(sc, &p->u.params, 1));
252 
253 	return (EINVAL);
254 }
255 
256 int
257 t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
258 {
259 	struct port_info *pi = NULL;
260 	struct vi_info *vi;
261 	struct sge_txq *txq;
262 	uint32_t fw_mnem, fw_queue, fw_class;
263 	int i, rc;
264 
265 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
266 	if (rc)
267 		return (rc);
268 
269 	if (p->port >= sc->params.nports) {
270 		rc = EINVAL;
271 		goto done;
272 	}
273 
274 	/* XXX: Only supported for the main VI. */
275 	pi = sc->port[p->port];
276 	vi = &pi->vi[0];
277 	if (!(vi->flags & VI_INIT_DONE)) {
278 		/* tx queues not set up yet */
279 		rc = EAGAIN;
280 		goto done;
281 	}
282 
283 	if (!in_range(p->queue, 0, vi->ntxq - 1) ||
284 	    !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) {
285 		rc = EINVAL;
286 		goto done;
287 	}
288 
289 	/*
290 	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
291 	 * Scheduling Class in this case).
292 	 */
293 	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
294 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
295 	fw_class = p->cl < 0 ? 0xffffffff : p->cl;
296 
297 	/*
298 	 * If op.queue is non-negative, then we're only changing the scheduling
299 	 * on a single specified TX queue.
300 	 */
301 	if (p->queue >= 0) {
302 		txq = &sc->sge.txq[vi->first_txq + p->queue];
303 		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
304 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
305 		    &fw_class);
306 		goto done;
307 	}
308 
309 	/*
310 	 * Change the scheduling on all the TX queues for the
311 	 * interface.
312 	 */
313 	for_each_txq(vi, i, txq) {
314 		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
315 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
316 		    &fw_class);
317 		if (rc)
318 			goto done;
319 	}
320 
321 	rc = 0;
322 done:
323 	end_synchronized_op(sc, 0);
324 	return (rc);
325 }
326 
327 int
328 t4_init_tx_sched(struct adapter *sc)
329 {
330 	int i, j;
331 	const int n = sc->chip_params->nsched_cls;
332 	struct port_info *pi;
333 	struct tx_cl_rl_params *tc;
334 	static const uint32_t init_kbps[] = {
335 		100 * 1000,
336 		200 * 1000,
337 		400 * 1000,
338 		500 * 1000,
339 		800 * 1000,
340 		1000 * 1000,
341 		1200 * 1000,
342 		1500 * 1000,
343 		1800 * 1000,
344 		2000 * 1000,
345 		2500 * 1000,
346 		3000 * 1000,
347 		3500 * 1000,
348 		4000 * 1000,
349 		5000 * 1000,
350 		10000 * 1000
351 	};
352 
353 	mtx_init(&sc->tc_lock, "tx_sched lock", NULL, MTX_DEF);
354 	TASK_INIT(&sc->tc_task, 0, update_tx_sched, sc);
355 	for_each_port(sc, i) {
356 		pi = sc->port[i];
357 		pi->sched_params = malloc(sizeof(*pi->sched_params) +
358 		    n * sizeof(*tc), M_CXGBE, M_ZERO | M_WAITOK);
359 		tc = &pi->sched_params->cl_rl[0];
360 		for (j = 0; j < n; j++, tc++) {
361 			tc->refcount = 0;
362 			tc->ratemode = FW_SCHED_PARAMS_RATE_ABS;
363 			tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
364 			tc->mode = FW_SCHED_PARAMS_MODE_FLOW;
365 			tc->maxrate = init_kbps[min(j, nitems(init_kbps) - 1)];
366 			tc->pktsize = ETHERMTU;	/* XXX */
367 
368 			if (t4_sched_params_cl_rl_kbps(sc, pi->tx_chan, j,
369 			    tc->mode, tc->maxrate, tc->pktsize, 1) == 0)
370 				tc->flags = 0;
371 			else
372 				tc->flags = TX_CLRL_ERROR;
373 		}
374 	}
375 
376 	return (0);
377 }
378 
379 int
380 t4_free_tx_sched(struct adapter *sc)
381 {
382 	int i;
383 
384 	taskqueue_drain(taskqueue_thread, &sc->tc_task);
385 
386 	for_each_port(sc, i) {
387 		if (sc->port[i] != NULL)
388 			free(sc->port[i]->sched_params, M_CXGBE);
389 	}
390 
391 	if (mtx_initialized(&sc->tc_lock))
392 		mtx_destroy(&sc->tc_lock);
393 
394 	return (0);
395 }
396 
/*
 * Schedule the deferred task (update_tx_sched) that pushes any pending
 * TX_CLRL_REFRESH class parameter updates to the hardware.
 */
void
t4_update_tx_sched(struct adapter *sc)
{

	taskqueue_enqueue(taskqueue_thread, &sc->tc_task);
}
403 
404 int
405 t4_reserve_cl_rl_kbps(struct adapter *sc, int port_id, u_int maxrate,
406     int *tc_idx)
407 {
408 	int rc = 0, fa = -1, i;
409 	struct tx_cl_rl_params *tc;
410 
411 	MPASS(port_id >= 0 && port_id < sc->params.nports);
412 
413 	tc = &sc->port[port_id]->sched_params->cl_rl[0];
414 	mtx_lock(&sc->tc_lock);
415 	for (i = 0; i < sc->chip_params->nsched_cls; i++, tc++) {
416 		if (fa < 0 && tc->refcount == 0)
417 			fa = i;
418 
419 		if (tc->ratemode == FW_SCHED_PARAMS_RATE_ABS &&
420 		    tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE &&
421 		    tc->mode == FW_SCHED_PARAMS_MODE_FLOW &&
422 		    tc->maxrate == maxrate) {
423 			tc->refcount++;
424 			*tc_idx = i;
425 			goto done;
426 		}
427 	}
428 	/* Not found */
429 	MPASS(i == sc->chip_params->nsched_cls);
430 	if (fa != -1) {
431 		tc = &sc->port[port_id]->sched_params->cl_rl[fa];
432 		tc->flags = TX_CLRL_REFRESH;
433 		tc->refcount = 1;
434 		tc->ratemode = FW_SCHED_PARAMS_RATE_ABS;
435 		tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
436 		tc->mode = FW_SCHED_PARAMS_MODE_FLOW;
437 		tc->maxrate = maxrate;
438 		tc->pktsize = ETHERMTU;	/* XXX */
439 		*tc_idx = fa;
440 		t4_update_tx_sched(sc);
441 	} else {
442 		*tc_idx = -1;
443 		rc = ENOSPC;
444 	}
445 done:
446 	mtx_unlock(&sc->tc_lock);
447 	return (rc);
448 }
449 
/*
 * Drop a reference on the class rate limiter at tc_idx.  The class stays
 * programmed; a later t4_reserve_cl_rl_kbps() may reuse or repurpose it
 * once refcount reaches 0.
 */
void
t4_release_cl_rl_kbps(struct adapter *sc, int port_id, int tc_idx)
{
	struct tx_cl_rl_params *tc;

	MPASS(port_id >= 0 && port_id < sc->params.nports);
	MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);

	mtx_lock(&sc->tc_lock);
	tc = &sc->port[port_id]->sched_params->cl_rl[tc_idx];
	/* Must be held, and with the parameters this KPI hands out. */
	MPASS(tc->refcount > 0);
	MPASS(tc->ratemode == FW_SCHED_PARAMS_RATE_ABS);
	MPASS(tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE);
	MPASS(tc->mode == FW_SCHED_PARAMS_MODE_FLOW);
	tc->refcount--;
	mtx_unlock(&sc->tc_lock);
}
467 
468 #ifdef RATELIMIT
469 void
470 t4_init_etid_table(struct adapter *sc)
471 {
472 	int i;
473 	struct tid_info *t;
474 
475 	if (!is_ethoffload(sc))
476 		return;
477 
478 	t = &sc->tids;
479 	MPASS(t->netids > 0);
480 
481 	mtx_init(&t->etid_lock, "etid lock", NULL, MTX_DEF);
482 	t->etid_tab = malloc(sizeof(*t->etid_tab) * t->netids, M_CXGBE,
483 			M_ZERO | M_WAITOK);
484 	t->efree = t->etid_tab;
485 	t->etids_in_use = 0;
486 	for (i = 1; i < t->netids; i++)
487 		t->etid_tab[i - 1].next = &t->etid_tab[i];
488 	t->etid_tab[t->netids - 1].next = NULL;
489 }
490 
/*
 * Release the ethoffload tid table and its lock (both set up by
 * t4_init_etid_table()).
 */
void
t4_free_etid_table(struct adapter *sc)
{
	struct tid_info *t;

	if (!is_ethoffload(sc))
		return;

	t = &sc->tids;
	MPASS(t->netids > 0);

	free(t->etid_tab, M_CXGBE);
	t->etid_tab = NULL;

	if (mtx_initialized(&t->etid_lock))
		mtx_destroy(&t->etid_lock);
}
508 
509 /* etid services */
510 static int alloc_etid(struct adapter *, struct cxgbe_snd_tag *);
511 static void free_etid(struct adapter *, int);
512 
513 static int
514 alloc_etid(struct adapter *sc, struct cxgbe_snd_tag *cst)
515 {
516 	struct tid_info *t = &sc->tids;
517 	int etid = -1;
518 
519 	mtx_lock(&t->etid_lock);
520 	if (t->efree) {
521 		union etid_entry *p = t->efree;
522 
523 		etid = p - t->etid_tab + t->etid_base;
524 		t->efree = p->next;
525 		p->cst = cst;
526 		t->etids_in_use++;
527 	}
528 	mtx_unlock(&t->etid_lock);
529 	return (etid);
530 }
531 
532 #ifdef notyet
/*
 * Map an etid back to the send tag bound to it by alloc_etid().  Note
 * there is no bounds or validity check here -- presumably the caller
 * guarantees etid is a live, in-range etid.
 */
struct cxgbe_snd_tag *
lookup_etid(struct adapter *sc, int etid)
{
	struct tid_info *t = &sc->tids;

	return (t->etid_tab[etid - t->etid_base].cst);
}
540 #endif
541 
542 static void
543 free_etid(struct adapter *sc, int etid)
544 {
545 	struct tid_info *t = &sc->tids;
546 	union etid_entry *p = &t->etid_tab[etid - t->etid_base];
547 
548 	mtx_lock(&t->etid_lock);
549 	p->next = t->efree;
550 	t->efree = p;
551 	t->etids_in_use--;
552 	mtx_unlock(&t->etid_lock);
553 }
554 
555 int
556 cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
557     struct m_snd_tag **pt)
558 {
559 	int rc, schedcl;
560 	struct vi_info *vi = ifp->if_softc;
561 	struct port_info *pi = vi->pi;
562 	struct adapter *sc = pi->adapter;
563 	struct cxgbe_snd_tag *cst;
564 
565 	if (params->hdr.type != IF_SND_TAG_TYPE_RATE_LIMIT)
566 		return (ENOTSUP);
567 
568 	rc = t4_reserve_cl_rl_kbps(sc, pi->port_id,
569 	    (params->rate_limit.max_rate * 8ULL / 1000), &schedcl);
570 	if (rc != 0)
571 		return (rc);
572 	MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls);
573 
574 	cst = malloc(sizeof(*cst), M_CXGBE, M_ZERO | M_NOWAIT);
575 	if (cst == NULL) {
576 failed:
577 		t4_release_cl_rl_kbps(sc, pi->port_id, schedcl);
578 		return (ENOMEM);
579 	}
580 
581 	cst->etid = alloc_etid(sc, cst);
582 	if (cst->etid < 0) {
583 		free(cst, M_CXGBE);
584 		goto failed;
585 	}
586 
587 	mtx_init(&cst->lock, "cst_lock", NULL, MTX_DEF);
588 	cst->com.ifp = ifp;
589 	cst->adapter = sc;
590 	cst->port_id = pi->port_id;
591 	cst->schedcl = schedcl;
592 	cst->max_rate = params->rate_limit.max_rate;
593 	cst->next_credits = -1;
594 	cst->tx_credits = sc->params.ofldq_wr_cred;
595 	cst->tx_total = cst->tx_credits;
596 
597 	/*
598 	 * Queues will be selected later when the connection flowid is available.
599 	 */
600 
601 	*pt = &cst->com;
602 	return (0);
603 }
604 
605 /*
606  * Change in parameters, no change in ifp.
607  */
/*
 * Change in parameters, no change in ifp.  The new scheduling class is
 * reserved before the old one is released so a failure leaves the tag's
 * existing class untouched.
 */
int
cxgbe_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
	int rc, schedcl;
	struct cxgbe_snd_tag *cst = mst_to_cst(mst);
	struct adapter *sc = cst->adapter;

	/* XXX: is schedcl -1 ok here? */
	MPASS(cst->schedcl >= 0 && cst->schedcl < sc->chip_params->nsched_cls);

	/* max_rate * 8 / 1000 converts the requested rate to Kbits/s. */
	rc = t4_reserve_cl_rl_kbps(sc, cst->port_id,
	    (params->rate_limit.max_rate * 8ULL / 1000), &schedcl);
	if (rc != 0)
		return (rc);
	MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls);
	t4_release_cl_rl_kbps(sc, cst->port_id, cst->schedcl);
	cst->schedcl = schedcl;
	cst->max_rate = params->rate_limit.max_rate;

	return (0);
}
630 
/*
 * Report the tag's configured max rate and how full its tx credit pool is,
 * scaled to the 0..IF_SND_QUEUE_LEVEL_MAX range expected by the stack.
 */
int
cxgbe_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
	struct cxgbe_snd_tag *cst = mst_to_cst(mst);

	params->rate_limit.max_rate = cst->max_rate;

/* Integer scale factor from consumed credits to queue level. */
#define CST_TO_MST_QLEVEL_SCALE (IF_SND_QUEUE_LEVEL_MAX / cst->tx_total)
	params->rate_limit.queue_level =
		(cst->tx_total - cst->tx_credits) * CST_TO_MST_QLEVEL_SCALE;

	return (0);
}
645 
/*
 * Undo everything cxgbe_snd_tag_alloc() set up: the etid, the class
 * rate limiter reference, the lock, and the tag itself.  Each teardown
 * step is guarded so a partially constructed tag is handled safely.
 */
void
cxgbe_snd_tag_free(struct m_snd_tag *mst)
{
	struct cxgbe_snd_tag *cst = mst_to_cst(mst);
	struct adapter *sc = cst->adapter;

	if (cst->etid >= 0)
		free_etid(sc, cst->etid);
	if (cst->schedcl != -1)
		t4_release_cl_rl_kbps(sc, cst->port_id, cst->schedcl);
	if (mtx_initialized(&cst->lock))
		mtx_destroy(&cst->lock);
	free(cst, M_CXGBE);
}
660 #endif
661