// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 *
 */

#include "dpaa2-switch.h"

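/* Translate the flower match (key + mask) into the dpsw_acl_key layout
 * consumed by the MC firmware. Only the dissector keys listed below can
 * be expressed in hardware; anything else is rejected with -EOPNOTSUPP.
 */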
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
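		/* The ACL key packs PCP and DEI into a single field:
		 * PCP in bits 3:1, DEI in bit 0.
		 */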
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

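		/* The TOS field carries DSCP in its upper six bits and ECN
		 * in the lower two; only the DSCP part goes into the key.
		 */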
		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}

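/* The MC firmware reads the prepared key through key_iova, so the
 * command buffer must stay DMA-mapped for the duration of the
 * dpsw_acl_add_entry()/dpsw_acl_remove_entry() command.
 */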
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = filter_block->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 filter_block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
			 DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = block->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    block->acl_id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		kfree(cmd_buff);
		return err;
	}

	kfree(cmd_buff);

	return 0;
}

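/* Keep the ACL entries sorted in ascending order of tc prio (a lower
 * value meaning a higher priority); a new entry is inserted after any
 * existing entries with the same prio and its resulting position in
 * the list is returned.
 */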
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&block->acl_entries)) {
		list_add(&entry->list, &block->acl_entries);
		return index;
	}

	list_for_each_safe(pos, n, &block->acl_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

static struct dpaa2_switch_acl_entry*
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (i == index)
			return tmp;
		++i;
	}

	return NULL;
}

static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(block, entry);
}

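/* The in-use precedence values are kept packed at the high end of the
 * table: with n filters installed they occupy
 * [DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - n, DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - 1].
 * Since an entry's precedence can only be changed by removing it and
 * adding it back, making room for a new filter means re-adding every
 * entry that precedes it with a precedence lowered by one.
 */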
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move up in priority the ACL entries to make space
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	block->num_acl_rules++;

	return err;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp, *n;
	int index = 0;

	list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					 unsigned long cookie)
{
	struct dpaa2_switch_mirror_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove the ACL entry from hardware */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Also remove it from the list */
	list_del(&entry->list);

	/* Move down in priority the entries above the deleted one */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

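/* Map a tc action onto the ACL result understood by the firmware:
 * trap becomes a redirect to the control interface (the CPU path),
 * redirect targets another switch port by interface id, and drop is
 * passed through as-is.
 */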
static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
					    struct flow_action_entry *cls_act,
					    struct dpsw_acl_result *dpsw_act,
					    struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

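/* The switch exposes a single reflection (mirror) destination shared
 * by all ports: the first mirror rule picks it. Any failure while
 * applying the configuration on the ports of the block is unwound so
 * the hardware and the driver state stay in sync.
 */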
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Setup the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Setup the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}

static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_mirror_entry *entry)
{
	struct dpsw_reflection_cfg *cfg = &entry->cfg;
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	int port;

	/* Remove this mirroring configuration from all the ports belonging to
	 * the filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, cfg);

	/* Also remove it from the list of mirror filters */
	list_del(&entry->list);
	kfree(entry);

	/* If this was the last mirror filter, then unset the mirror port */
	if (list_empty(&block->mirror_entries))
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return 0;
}

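/* An ACL filter of this kind would typically be installed with
 * something along the lines of (swp0 being a DPAA2 switch port, shown
 * purely as an illustration):
 *   tc filter add dev swp0 ingress protocol ip flower ip_proto tcp \
 *      dst_port 80 skip_sw action drop
 */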
static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

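/* Flower-based mirroring only supports selecting the mirrored traffic
 * by an exact VLAN ID: no PCP/DEI matching, no masked VLAN IDs, and a
 * rule without a VLAN key at all is rejected as well.
 */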
static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
						u16 *vlan)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	int ret = -EOPNOTSUPP;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Mirroring is supported only per VLAN");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_priority != 0 ||
		    match.mask->vlan_dei != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on VLAN ID supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id != 0xFFF) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching not supported");
			return -EOPNOTSUPP;
		}

		*vlan = (u16)match.key->vlan_id;
		ret = 0;
	}

	return ret;
}

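/* A per-VLAN mirror rule corresponds to a command along the lines of
 * (swp0/swp1 being DPAA2 switch ports, shown purely as an
 * illustration):
 *   tc filter add dev swp0 ingress protocol 802.1q flower vlan_id 100 \
 *      skip_sw action mirred egress mirror dev swp1
 */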
static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
				       struct flow_cls_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id, vlan;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Parse the key */
	err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
	if (err)
		return err;

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    tmp->cfg.vlan_id == vlan) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
	mirror_entry->cfg.vlan_id = vlan;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_flower_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_flower_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;
	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}

static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

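/* A matchall mirror rule corresponds to a command along the lines of
 * (swp0/swp1 being DPAA2 switch ports, shown purely as an
 * illustration):
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *      action mirred egress mirror dev swp1
 */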
static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
					 struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	struct list_head *pos, *n;
	bool mirror_port_enabled;
	u16 if_id;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration. One matchall rule per block is the maximum.
	 */
	list_for_each_safe(pos, n, &block->mirror_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);

		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matchall mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
	mirror_entry->cookie = cls->cookie;

	return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					     extack);
}

int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &cls->rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_matchall_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

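/* Replay all the mirror rules of a filter block on a single port,
 * presumably when that port is bound to an already populated block;
 * unwind on the first error so the port does not keep a partial
 * configuration. The unoffload counterpart below does the reverse.
 */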
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	list_for_each_entry(tmp, &block->mirror_entries, list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block,
							 acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}