/* /linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
 * (revision 8be98d2f2a0a262f8bf8a0bc1fdf522b3c7aab17)
 */
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"

/* Fill ch_filter_specification with parsed match value/mask pair. */
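/* u32 keys that carry an offmask describe the variable-offset (next
 * header) portion of the rule; keys without an offmask match at fixed
 * offsets.  @next_header selects which of the two groups is parsed
 * into @fs.
 */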
static int fill_match_fields(struct adapter *adap,
			     struct ch_filter_specification *fs,
			     struct tc_cls_u32_offload *cls,
			     const struct cxgb4_match_field *entry,
			     bool next_header)
{
	unsigned int i, j;
	__be32 val, mask;
	int off, err;
	bool found;

	for (i = 0; i < cls->knode.sel->nkeys; i++) {
		off = cls->knode.sel->keys[i].off;
		val = cls->knode.sel->keys[i].val;
		mask = cls->knode.sel->keys[i].mask;

		if (next_header) {
			/* For next headers, parse only keys with offmask */
			if (!cls->knode.sel->keys[i].offmask)
				continue;
		} else {
			/* For the remaining, parse only keys without offmask */
			if (cls->knode.sel->keys[i].offmask)
				continue;
		}

		found = false;

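		/* Walk the NULL-terminated match-field table for a
		 * parser registered at this offset.
		 */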
		for (j = 0; entry[j].val; j++) {
			if (off == entry[j].off) {
				found = true;
				err = entry[j].val(fs, val, mask);
				if (err)
					return err;
				break;
			}
		}

		if (!found)
			return -EINVAL;
	}

	return 0;
}

/* Fill ch_filter_specification with parsed action. */
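/* Only a single action per rule is supported: a gact drop or a mirred
 * egress redirect, e.g. (illustrative) "action drop" or
 * "action mirred egress redirect dev <port>".
 */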
static int fill_action_fields(struct adapter *adap,
			      struct ch_filter_specification *fs,
			      struct tc_cls_u32_offload *cls)
{
	unsigned int num_actions = 0;
	const struct tc_action *a;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	tcf_exts_for_each_action(i, a, exts) {
		/* Don't allow more than one action per rule. */
		if (num_actions)
			return -EINVAL;

		/* Drop in hardware. */
		if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
			num_actions++;
			continue;
		}

		/* Redirect to the specified port in hardware. */
		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *n_dev, *target_dev;
			bool found = false;
			unsigned int i;

			target_dev = tcf_mirred_dev(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					fs->action = FILTER_SWITCH;
					fs->eport = i;
					found = true;
					break;
				}
			}

			/* Interface doesn't belong to any port of
			 * the underlying hardware.
			 */
			if (!found)
				return -EINVAL;

			num_actions++;
			continue;
		}

		/* Unsupported action. */
		return -EINVAL;
	}

	return 0;
}

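/* Offload a tc-u32 classifier knode as an LE-TCAM filter.  An
 * illustrative rule that lands here (device name hypothetical):
 *   tc filter add dev ethX parent ffff: protocol ip prio 1 u32 \
 *      match ip dst 192.168.1.1/32 action drop
 */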
int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
	const struct cxgb4_match_field *start, *link_start = NULL;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adapter = netdev2adap(dev);
	__be16 protocol = cls->common.protocol;
	struct ch_filter_specification fs;
	struct cxgb4_tc_u32_table *t;
	struct cxgb4_link *link;
	u32 uhtid, link_uhtid;
	bool is_ipv6 = false;
	u8 inet_family;
	int filter_id;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
		return -EOPNOTSUPP;

	inet_family = (protocol == htons(ETH_P_IPV6)) ? PF_INET6 : PF_INET;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert the rule if its prio doesn't conflict
	 * with existing rules.
	 */
	filter_id = cxgb4_get_free_ftid(dev, inet_family, false,
					TC_U32_NODE(cls->knode.handle));
	if (filter_id < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	t = adapter->tc_u32;
	uhtid = TC_U32_USERHTID(cls->knode.handle);
	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);

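	/* A u32 handle encodes htid:hash:nodeid; TC_U32_USERHTID()
	 * extracts the user-visible hash table id.  The root table is
	 * always 0x800, anything else refers to a linked bucket.
	 */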
	/* Ensure that uhtid is either root u32 (i.e. 0x800)
	 * or a valid linked bucket.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* Ensure link handle uhtid is sane, if specified. */
	if (link_uhtid >= t->size)
		return -EINVAL;

	memset(&fs, 0, sizeof(fs));

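	/* Filter IDs below nhpftids index the high-priority filter
	 * region, so mark such filters as high priority.
	 */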
	if (filter_id < adapter->tids.nhpftids)
		fs.prio = 1;
	fs.tc_prio = cls->common.prio;
	fs.tc_cookie = cls->knode.handle;

	if (protocol == htons(ETH_P_IPV6)) {
		start = cxgb4_ipv6_fields;
		is_ipv6 = true;
	} else {
		start = cxgb4_ipv4_fields;
		is_ipv6 = false;
	}

	if (uhtid != 0x800) {
		/* Link must exist from root node before insertion. */
		if (!t->table[uhtid - 1].link_handle)
			return -EINVAL;

		/* Link must have a valid supported next header. */
		link_start = t->table[uhtid - 1].match_field;
		if (!link_start)
			return -EINVAL;
	}

	/* Parse links and record them for subsequent jumps to valid
	 * next headers.
	 */
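	/* An illustrative pair of rules that creates such a link
	 * (device name hypothetical):
	 *   tc filter add dev ethX parent ffff: prio 1 handle 1: \
	 *      protocol ip u32 divisor 1
	 *   tc filter add dev ethX parent ffff: prio 1 protocol ip u32 \
	 *      match ip protocol 6 ff link 1: \
	 *      offset at 0 mask 0f00 shift 6
	 */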
	if (link_uhtid) {
		const struct cxgb4_next_header *next;
		bool found = false;
		unsigned int i, j;
		__be32 val, mask;
		int off;

		if (t->table[link_uhtid - 1].link_handle) {
			dev_err(adapter->pdev_dev,
				"Link handle exists for: 0x%x\n",
				link_uhtid);
			return -EINVAL;
		}

		next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;

		/* Try to find matches that allow jumps to next header. */
		for (i = 0; next[i].jump; i++) {
			if (next[i].sel.offoff != cls->knode.sel->offoff ||
			    next[i].sel.offshift != cls->knode.sel->offshift ||
			    next[i].sel.offmask != cls->knode.sel->offmask ||
			    next[i].sel.off != cls->knode.sel->off)
				continue;

			/* Found a possible candidate.  Find a key that
			 * matches the corresponding offset, value, and
			 * mask to jump to next header.
			 */
			for (j = 0; j < cls->knode.sel->nkeys; j++) {
				off = cls->knode.sel->keys[j].off;
				val = cls->knode.sel->keys[j].val;
				mask = cls->knode.sel->keys[j].mask;

				if (next[i].key.off == off &&
				    next[i].key.val == val &&
				    next[i].key.mask == mask) {
					found = true;
					break;
				}
			}

			if (!found)
				continue; /* Try next candidate. */

			/* Candidate to jump to next header found.
			 * Translate all keys to internal specification
			 * and store them in jump table. This spec is copied
			 * later to set the actual filters.
			 */
			ret = fill_match_fields(adapter, &fs, cls,
						start, false);
			if (ret)
				goto out;

			link = &t->table[link_uhtid - 1];
			link->match_field = next[i].jump;
			link->link_handle = cls->knode.handle;
			memcpy(&link->fs, &fs, sizeof(fs));
			break;
		}

		/* No candidate found to jump to next header. */
		if (!found)
			return -EINVAL;

		return 0;
	}

	/* Fill ch_filter_specification match fields to be shipped to
	 * hardware.  Copy the linked spec (if any) first, then update
	 * the spec as needed.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
		/* Copy linked ch_filter_specification */
		memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
		ret = fill_match_fields(adapter, &fs, cls,
					link_start, true);
		if (ret)
			goto out;
	}

	ret = fill_match_fields(adapter, &fs, cls, start, false);
	if (ret)
		goto out;

	/* Fill ch_filter_specification action fields to be shipped to
	 * hardware.
	 */
	ret = fill_action_fields(adapter, &fs, cls);
	if (ret)
		goto out;

	/* The filter spec has been completely built from the info
	 * provided from u32.  We now set some default fields in the
	 * spec for sanity.
	 */

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs.val.iport = netdev2pinfo(dev)->port_id;
	fs.mask.iport = ~0;

	/* Enable filter hit counts. */
	fs.hitcnts = 1;

	/* Set type of filter - IPv6 or IPv4 */
	fs.type = is_ipv6 ? 1 : 0;

	/* Set the filter */
	ret = cxgb4_set_filter(dev, filter_id, &fs);
	if (ret)
		goto out;

	/* If this is a linked bucket, then set the corresponding
	 * entry in the bitmap to mark it as belonging to this linked
	 * bucket.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
		set_bit(filter_id, t->table[uhtid - 1].tid_map);

out:
	return ret;
}

int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int filter_id, max_tids, i, j;
	struct cxgb4_link *link = NULL;
	struct cxgb4_tc_u32_table *t;
	struct filter_entry *f;
	bool found = false;
	u32 handle, uhtid;
	u8 nslots;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	/* Fetch the location to delete the filter. */
	max_tids = adapter->tids.nhpftids + adapter->tids.nftids;

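	/* Scan both the high-priority and normal filter regions for the
	 * entry whose tc cookie matches this knode handle.
	 */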
	spin_lock_bh(&adapter->tids.ftid_lock);
	filter_id = 0;
	while (filter_id < max_tids) {
		if (filter_id < adapter->tids.nhpftids) {
			i = filter_id;
			f = &adapter->tids.hpftid_tab[i];
			if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
				found = true;
				break;
			}

			i = find_next_bit(adapter->tids.hpftid_bmap,
					  adapter->tids.nhpftids, i + 1);
			if (i >= adapter->tids.nhpftids) {
				filter_id = adapter->tids.nhpftids;
				continue;
			}

			filter_id = i;
		} else {
			i = filter_id - adapter->tids.nhpftids;
			f = &adapter->tids.ftid_tab[i];
			if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
				found = true;
				break;
			}

			i = find_next_bit(adapter->tids.ftid_bmap,
					  adapter->tids.nftids, i + 1);
			if (i >= adapter->tids.nftids)
				break;

			filter_id = i + adapter->tids.nhpftids;
		}

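		/* An IPv6 filter occupies multiple LE-TCAM slots (2 on
		 * T6, 4 on earlier chips), so skip the extra slots
		 * consumed by the current entry.
		 */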
		nslots = 0;
		if (f->fs.type) {
			nslots++;
			if (CHELSIO_CHIP_VERSION(adapter->params.chip) <
			    CHELSIO_T6)
				nslots += 2;
		}

		filter_id += nslots;
	}
	spin_unlock_bh(&adapter->tids.ftid_lock);

	if (!found)
		return -ERANGE;

	t = adapter->tc_u32;
	handle = cls->knode.handle;
	uhtid = TC_U32_USERHTID(cls->knode.handle);

	/* Ensure that uhtid is either root u32 (i.e. 0x800)
	 * or a valid linked bucket.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* Delete the specified filter */
	if (uhtid != 0x800) {
		link = &t->table[uhtid - 1];
		if (!link->link_handle)
			return -EINVAL;

		if (!test_bit(filter_id, link->tid_map))
			return -EINVAL;
	}

	ret = cxgb4_del_filter(dev, filter_id, NULL);
	if (ret)
		goto out;

	if (link)
		clear_bit(filter_id, link->tid_map);

	/* If a link is being deleted, then delete all filters
	 * associated with the link.
	 */
	for (i = 0; i < t->size; i++) {
		link = &t->table[i];

		if (link->link_handle == handle) {
			for (j = 0; j < max_tids; j++) {
				if (!test_bit(j, link->tid_map))
					continue;

				ret = __cxgb4_del_filter(dev, j, NULL, NULL);
				if (ret)
					goto out;

				clear_bit(j, link->tid_map);
			}

			/* Clear the link state */
			link->match_field = NULL;
			link->link_handle = 0;
			memset(&link->fs, 0, sizeof(link->fs));
			break;
		}
	}

out:
	return ret;
}

void cxgb4_cleanup_tc_u32(struct adapter *adap)
{
	struct cxgb4_tc_u32_table *t;
	unsigned int i;

	if (!adap->tc_u32)
		return;

	/* Free up all allocated memory. */
	t = adap->tc_u32;
	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];

		kvfree(link->tid_map);
	}
	kvfree(adap->tc_u32);
}

struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
	unsigned int max_tids = adap->tids.nftids + adap->tids.nhpftids;
	struct cxgb4_tc_u32_table *t;
	unsigned int i;

	if (!max_tids)
		return NULL;

	t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL);
	if (!t)
		return NULL;

	t->size = max_tids;

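	/* Allocate a per-bucket bitmap that tracks which filter TIDs
	 * were inserted through each link.
	 */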
	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];
		unsigned int bmap_size;

		bmap_size = BITS_TO_LONGS(max_tids);
		link->tid_map = kvcalloc(bmap_size, sizeof(unsigned long),
					 GFP_KERNEL);
		if (!link->tid_map)
			goto out_no_mem;
		bitmap_zero(link->tid_map, max_tids);
	}

	return t;

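	/* kvfree(NULL) is a no-op, so it is safe to walk every entry
	 * even though the allocation may have failed partway through.
	 */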
out_no_mem:
	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];

		kvfree(link->tid_map);
	}
	kvfree(t);

	return NULL;
}