/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

/* Send a scheduling class configuration command to firmware. Only
 * class provisioning (SCHED_FW_OP_ADD) is supported here.
 */
static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		pf = adap->pf;
		vf = 0;

		err = t4_set_params(adap, adap->mbox, pf, vf, 1,
				    &fw_param, &fw_class);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		fe = (struct sched_flowc_entry *)arg;

		fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
		err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
					       fe->param.tid, fw_class);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}
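
/* For reference, the DMAQ parameter word built above packs as follows
 * (a sketch based on the FW_PARAMS_* field shifts; see t4fw_api.h for
 * the authoritative layout):
 *
 *	bits 31:24  FW_PARAMS_MNEM     = FW_PARAMS_MNEM_DMAQ
 *	bits 23:16  FW_PARAMS_PARAM_X  = FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH
 *	bits 15:0   FW_PARAMS_PARAM_YZ = egress queue context id
 *
 * The value written for that parameter is the scheduling class index,
 * or FW_SCHED_CLS_NONE when unbinding.
 */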

static void *t4_sched_entry_lookup(struct port_info *pi,
				   enum sched_bind_type type,
				   const u32 val)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	void *found = NULL;

	/* Look for an entry with matching @val */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		if (e->state == SCHED_STATE_UNUSED ||
		    e->bind_type != type)
			continue;

		switch (type) {
		case SCHED_QUEUE: {
			struct sched_queue_entry *qe;

			list_for_each_entry(qe, &e->entry_list, list) {
				if (qe->cntxt_id == val) {
					found = qe;
					break;
				}
			}
			break;
		}
		case SCHED_FLOWC: {
			struct sched_flowc_entry *fe;

			list_for_each_entry(fe, &e->entry_list, list) {
				if (fe->param.tid == val) {
					found = fe;
					break;
				}
			}
			break;
		}
		default:
			return NULL;
		}

		if (found)
			break;
	}

	return found;
}

static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];

	/* Find the existing entry that the queue is bound to */
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	if (qe) {
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[qe->param.class];
		list_del(&qe->list);
		kvfree(qe);
		if (atomic_dec_and_test(&e->refcnt)) {
			e->state = SCHED_STATE_UNUSED;
			memset(&e->info, 0, sizeof(e->info));
		}
	}
	return err;
}

static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind queue to specified class */
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err)
		goto out_err;

	list_add_tail(&qe->list, &e->entry_list);
	e->bind_type = SCHED_QUEUE;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(qe);
	return err;
}

static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	/* Find the existing entry that the flowc is bound to */
	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
	if (fe) {
		err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[fe->param.class];
		list_del(&fe->list);
		kvfree(fe);
		if (atomic_dec_and_test(&e->refcnt)) {
			e->state = SCHED_STATE_UNUSED;
			memset(&e->info, 0, sizeof(e->info));
		}
	}
	return err;
}

static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
	if (!fe)
		return -ENOMEM;

	/* Unbind flowc from any existing class */
	err = t4_sched_flowc_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind flowc to specified class */
	memcpy(&fe->param, p, sizeof(fe->param));

	e = &s->tab[fe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
	if (err)
		goto out_err;

	list_add_tail(&fe->list, &e->entry_list);
	e->bind_type = SCHED_FLOWC;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(fe);
	return err;
}

static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		list_for_each_entry(qe, &e->entry_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		list_for_each_entry(fe, &e->entry_list, list)
			t4_sched_flowc_unbind(pi, &fe->param);
		break;
	}
	default:
		break;
	}
}

static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		if (bind)
			err = t4_sched_flowc_bind(pi, fe);
		else
			err = t4_sched_flowc_unbind(pi, fe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Binds an entity (queue or flowc) to a scheduling class.  If the
 * entity is already bound to another class, it is unbound from that
 * class and bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	return t4_sched_class_bind_unbind_op(pi, arg, type, true);
}
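
/* Example usage (an illustrative sketch, not code from this driver):
 * bind the port's first Tx queue (.queue is the queue index within the
 * port) to scheduling class 2, assuming the class was provisioned
 * earlier via cxgb4_sched_class_alloc():
 *
 *	struct ch_sched_queue qe = { .queue = 0, .class = 2 };
 *	int ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 */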

/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Unbinds an entity (queue or flowc) from the scheduling class it is
 * currently bound to.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}
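
/* Example usage (illustrative sketch): detach the same queue again.
 * The class the queue is bound to is looked up internally from the
 * queue's context id, so only .queue and a valid .class need to be set:
 *
 *	struct ch_sched_queue qe = { .queue = 0, .class = 2 };
 *	int ret = cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
 */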

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *found = NULL;
	struct sched_class *e, *end;

	/* Only allow tc to be shared among SCHED_FLOWC types. For
	 * other types, always allocate a new tc.
	 */
	if (!p || p->u.params.mode != SCHED_CLASS_MODE_FLOW) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_class *e;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	/* See if there's an existing class with matching
	 * requested sched params
	 */
	e = t4_sched_class_lookup(pi, p);
	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			return NULL;

		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err)
			return NULL;
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
	}

	return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns a pointer to the allocated scheduling class.  @p must be
 * non-NULL and request SCHED_CLS_NONE as the class index.  If a class
 * with matching scheduling parameters already exists (possible for
 * flow-mode classes, which may be shared), that class is returned;
 * otherwise an unused class is provisioned with the parameters in @p.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}
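
/* Example usage (an illustrative sketch; values are for demonstration
 * only): request a class-level rate limiter and let the driver pick
 * the class index by passing SCHED_CLS_NONE.  maxrate is in Kb/s, so
 * 100000 caps the class at 100 Mb/s:
 *
 *	struct ch_sched_params p = {
 *		.type = SCHED_CLASS_TYPE_PACKET,
 *		.u.params.level    = SCHED_CLASS_LEVEL_CL_RL,
 *		.u.params.mode     = SCHED_CLASS_MODE_CLASS,
 *		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
 *		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
 *		.u.params.channel  = pi->tx_chan,
 *		.u.params.class    = SCHED_CLS_NONE,
 *		.u.params.maxrate  = 100000,
 *	};
 *	struct sched_class *e = cxgb4_sched_class_alloc(dev, &p);
 */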

/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id to free
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;

	e = &s->tab[classid];
	if (!atomic_read(&e->refcnt)) {
		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}
}
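
/* Example usage (illustrative): once every queue or flow has been
 * unbound from the class allocated above, release it by index:
 *
 *	cxgb4_sched_class_free(dev, e->idx);
 */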

static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
	struct port_info *pi = netdev2pinfo(dev);

	t4_sched_class_unbind_all(pi, e, e->bind_type);
	cxgb4_sched_class_free(dev, e->idx);
}

struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
	if (!s)
		return NULL;

	s->sched_size = sched_size;

	for (i = 0; i < s->sched_size; i++) {
		memset(&s->tab[i], 0, sizeof(struct sched_class));
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].entry_list);
		atomic_set(&s->tab[i].refcnt, 0);
	}
	return s;
}
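
/* Example usage (a sketch of the per-port setup done in the driver's
 * init path; the firmware-supplied class count shown here is an
 * assumption about that path):
 *
 *	pi->sched_tbl = t4_init_sched(adap->params.nsched_cls);
 *	if (!pi->sched_tbl)
 *		dev_warn(adap->pdev_dev,
 *			 "Tx scheduling disabled on port %d\n", pi->port_id);
 */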

void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int j, i;

	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(adap->port[j], e);
		}
		kvfree(s);
	}
}