xref: /linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c (revision 48dea9a700c8728cc31a1dd44588b97578de86ee)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

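/* Offload tc-matchall classifiers to hardware. Ingress rules are
 * backed by wildcard LE-TCAM filters (plus an optional mirror VI for
 * mirred actions); egress rules are backed by a channel rate-limit
 * (CH_RL) scheduling class that polices all Tx traffic on the port.
 *
 * Illustrative tc invocations (not part of this file) that would
 * reach these handlers, assuming a clsact qdisc is installed:
 *
 *   tc qdisc add dev ethX clsact
 *   tc filter add dev ethX ingress matchall skip_sw \
 *	action mirred egress mirror dev ethY
 *   tc filter add dev ethX egress matchall skip_sw \
 *	action police rate 1gbit burst 100k
 */

/* Validate an egress matchall rule before offload: it must carry
 * exactly one police action, the block must not be shared, the
 * requested rate must fit within the link speed, and no queue may
 * already be bound to a non-CH_RL scheduling class.
 */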
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	struct ch_sched_queue qe;
	struct sched_class *e;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	for (i = 0; i < pi->nqsets; i++) {
		memset(&qe, 0, sizeof(qe));
		qe.queue = i;

		e = cxgb4_sched_queue_lookup(dev, &qe);
		if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Some queues are already bound to a different class");
			return -EBUSY;
		}
	}

	return 0;
}

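/* Bind every queue set of the port to scheduling class @tc. On
 * failure, unwind the bindings made so far.
 */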
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}

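/* Unbind every queue set of the port from its scheduling class. */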
static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}

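/* Allocate a CH_RL scheduling class sized to the police rate and bind
 * all of the port's queue sets to it. The rule has already passed
 * cxgb4_matchall_egress_validate(), so a police action is guaranteed
 * to be found by the loop below.
 */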
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not bind queues to traffic class");
		goto out_free;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	cxgb4_sched_class_free(dev, e->idx);
	return ret;
}

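/* Tear down the egress offload: unbind the queues, free the hardware
 * scheduling class, and mark the egress side disabled.
 */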
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_matchall_tc_unbind_queues(dev);
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}

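/* If the rule contains a mirred action, allocate a mirror VI on the
 * port so that matched ingress traffic can be replicated to it.
 */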
static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *act;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	flow_action_for_each(i, act, &cls->rule->action) {
		if (act->id == FLOW_ACTION_MIRRED) {
			ret = cxgb4_port_mirror_alloc(dev);
			if (ret) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Couldn't allocate mirror");
				return ret;
			}

			tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
			break;
		}
	}

	return 0;
}

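/* Release the mirror VI, if one was allocated for this port. */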
static void cxgb4_matchall_mirror_free(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (!tc_port_matchall->ingress.viid_mirror)
		return;

	cxgb4_port_mirror_free(dev);
	tc_port_matchall->ingress.viid_mirror = 0;
}

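/* Remove the hardware filter for one filter type (IPv4 or IPv6) and
 * clear its cached TID.
 */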
static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
			       &tc_port_matchall->ingress.fs[filter_type]);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = 0;
	return 0;
}

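/* Install one wildcard LE-TCAM filter matching all traffic received
 * for this PF/VF, placed according to the rule's tc priority.
 */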
static int cxgb4_matchall_add_filter(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     u8 filter_type)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
				   false, cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs[filter_type];
	memset(fs, 0, sizeof(*fs));

	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->type = filter_type;
	fs->hitcnts = 1;

	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = fidx;
	return 0;
}

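/* Bring up the ingress offload: allocate a mirror VI if the rule
 * needs one, then install one filter per filter type.
 */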
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_matchall_mirror_alloc(dev, cls);
	if (ret)
		return ret;

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_add_filter(dev, cls, i);
		if (ret)
			goto out_free;
	}

	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	while (i-- > 0)
		cxgb4_matchall_del_filter(dev, i);

	cxgb4_matchall_mirror_free(dev);
	return ret;
}

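/* Tear down the ingress offload: delete all filters, release the
 * mirror VI, and reset the cached statistics.
 */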
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_del_filter(dev, i);
		if (ret)
			return ret;
	}

	cxgb4_matchall_mirror_free(dev);

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}

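/* Offload a matchall rule in the given direction. Only one rule per
 * direction can be offloaded on a port at a time.
 */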
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action,
						  extack, 1);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}

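/* Remove an offloaded matchall rule, identified by its cookie. */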
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		/* All the filter types of this matchall rule save the
		 * same cookie. So, checking for the first one is
		 * enough.
		 */
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs[0].tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}

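/* Report hit counters for an offloaded ingress rule: sum the hardware
 * counters of all filter types and push the delta since the last
 * query up to the tc layer.
 */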
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_matchall_ingress_entry *ingress;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ingress = &tc_port_matchall->ingress;
	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
						&tmp_packets, &tmp_bytes,
						ingress->fs[i].hash);
		if (ret)
			return ret;

		packets += tmp_packets;
		bytes += tmp_bytes;
	}

	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  0, tc_port_matchall->ingress.last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}

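/* Force-disable any active offloads on the port; called on cleanup. */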
static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}

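/* Allocate the per-adapter matchall state, one entry per port. */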
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}

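/* Disable active offloads on all ports and free the per-adapter
 * state.
 */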
void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}