// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP TC
 *
 * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/flow_offload.h>
#include <net/ipv6.h>
#include <net/tcp.h>

#include "vcap_api_client.h"
#include "vcap_tc.h"

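/* Opcode values used for the IS2 VCAP_KF_ARP_OPCODE key */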
enum vcap_is2_arp_opcode {
	VCAP_IS2_ARP_REQUEST,
	VCAP_IS2_ARP_REPLY,
	VCAP_IS2_RARP_REQUEST,
	VCAP_IS2_RARP_REPLY,
};

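/* ARP operation values as delivered in the flower ARP match */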
enum vcap_arp_opcode {
	VCAP_ARP_OP_RESERVED,
	VCAP_ARP_OP_REQUEST,
	VCAP_ARP_OP_REPLY,
};

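/* Parse a flower eth_addrs match into L2 SMAC/DMAC keys and mark the
 * dissector key as used.
 */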
int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st)
{
	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
	struct flow_match_eth_addrs match;
	struct vcap_u48_key smac, dmac;
	int err = 0;

	flow_rule_match_eth_addrs(st->frule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
		if (err)
			goto out;
	}

	if (!is_zero_ether_addr(match.mask->dst)) {
		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ethaddr_usage);

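/* Parse a flower ipv4_addrs match into L3 IPv4 SIP/DIP keys when the
 * rule matches the IPv4 protocol.
 */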
int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IP) {
		struct flow_match_ipv4_addrs mt;

		flow_rule_match_ipv4_addrs(st->frule, &mt);
		if (mt.mask->src) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_SIP,
						    be32_to_cpu(mt.key->src),
						    be32_to_cpu(mt.mask->src));
			if (err)
				goto out;
		}
		if (mt.mask->dst) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_DIP,
						    be32_to_cpu(mt.key->dst),
						    be32_to_cpu(mt.mask->dst));
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv4_usage);

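/* Parse a flower ipv6_addrs match into L3 IPv6 SIP/DIP keys when the
 * rule matches the IPv6 protocol.
 */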
int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IPV6) {
		struct flow_match_ipv6_addrs mt;
		struct vcap_u128_key sip;
		struct vcap_u128_key dip;

		flow_rule_match_ipv6_addrs(st->frule, &mt);
		/* Check if address masks are non-zero */
		if (!ipv6_addr_any(&mt.mask->src)) {
			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_SIP, &sip);
			if (err)
				goto out;
		}
		if (!ipv6_addr_any(&mt.mask->dst)) {
			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_DIP, &dip);
			if (err)
				goto out;
		}
	}
	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
	return err;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv6_usage);

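/* Parse a flower ports match into L4 source and destination port keys */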
int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_ports mt;
	u16 value, mask;
	int err = 0;

	flow_rule_match_ports(st->frule, &mt);

	if (mt.mask->src) {
		value = be16_to_cpu(mt.key->src);
		mask = be16_to_cpu(mt.mask->src);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
					    mask);
		if (err)
			goto out;
	}

	if (mt.mask->dst) {
		value = be16_to_cpu(mt.key->dst);
		mask = be16_to_cpu(mt.mask->dst);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
					    mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_PORTS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_portnum_usage);

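/* Parse a flower cvlan (inner tag) match into VLAN ID and PCP keys; the
 * matched TPID selects between the VID0/PCP0 and VID1/PCP1 key pairs.
 */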
int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
	struct flow_match_vlan mt;
	u16 tpid;
	int err;

	flow_rule_match_cvlan(st->frule, &mt);

	tpid = be16_to_cpu(mt.key->vlan_tpid);

	if (tpid == ETH_P_8021Q) {
		vid_key = VCAP_KF_8021Q_VID1;
		pcp_key = VCAP_KF_8021Q_PCP1;
	}

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN);

	return 0;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_cvlan_usage);

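/* Parse a flower vlan (outer tag) match into the caller-selected VID and
 * PCP key fields and remember the matched TPID for later use.
 */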
int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
				      enum vcap_key_field vid_key,
				      enum vcap_key_field pcp_key)
{
	struct flow_match_vlan mt;
	int err;

	flow_rule_match_vlan(st->frule, &mt);

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_tpid)
		st->tpid = be16_to_cpu(mt.key->vlan_tpid);

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN);

	return 0;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_vlan_usage);

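/* Translate matched TCP flags (FIN, SYN, RST, PSH, ACK, URG) into
 * individual VCAP L4 bit keys.
 */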
int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_tcp mt;
	u16 tcp_flags_mask;
	u16 tcp_flags_key;
	enum vcap_bit val;
	int err = 0;

	flow_rule_match_tcp(st->frule, &mt);
	tcp_flags_key = be16_to_cpu(mt.key->flags);
	tcp_flags_mask = be16_to_cpu(mt.mask->flags);

	if (tcp_flags_mask & TCPHDR_FIN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_FIN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_SYN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_SYN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_RST) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_RST)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_PSH) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_PSH)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_ACK) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_ACK)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_URG) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_URG)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_TCP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_tcp_usage);

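/* Parse a flower arp match: encode the opcode into the ARP_OPCODE key and
 * add the sender/target IPv4 address keys. Hardware address matches are
 * rejected because the IS2 ARP keyset cannot match on them.
 */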
int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		mask = 0x3;
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == VCAP_ARP_OP_REQUEST ?
					VCAP_IS2_ARP_REQUEST :
					VCAP_IS2_ARP_REPLY;
		} else { /* RARP */
			value = mt.key->op == VCAP_ARP_OP_REQUEST ?
					VCAP_IS2_RARP_REQUEST :
					VCAP_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	if (mt.mask->sip) {
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_arp_usage);

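/* Parse a flower ip match into the L3 TOS key */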
int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_ip mt;
	int err = 0;

	flow_rule_match_ip(st->frule, &mt);

	if (mt.mask->tos) {
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
					    mt.key->tos,
					    mt.mask->tos);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ip_usage);