// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "rvu_npc_fs.h"
#include "rvu_npc_hash.h"

static const char * const npc_flow_names[] = {
	[NPC_DMAC] = "dmac",
	[NPC_SMAC] = "smac",
	[NPC_ETYPE] = "ether type",
	[NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
	[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
	[NPC_OUTER_VID] = "outer vlan id",
	[NPC_INNER_VID] = "inner vlan id",
	[NPC_TOS] = "tos",
	[NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
	[NPC_SIP_IPV4] = "ipv4 source ip",
	[NPC_DIP_IPV4] = "ipv4 destination ip",
	[NPC_IPFRAG_IPV6] = "fragmented IPv6 header ",
	[NPC_SIP_IPV6] = "ipv6 source ip",
	[NPC_DIP_IPV6] = "ipv6 destination ip",
	[NPC_IPPROTO_TCP] = "ip proto tcp",
	[NPC_IPPROTO_UDP] = "ip proto udp",
	[NPC_IPPROTO_SCTP] = "ip proto sctp",
	[NPC_IPPROTO_ICMP] = "ip proto icmp",
	[NPC_IPPROTO_ICMP6] = "ip proto icmp6",
	[NPC_IPPROTO_AH] = "ip proto AH",
	[NPC_IPPROTO_ESP] = "ip proto ESP",
	[NPC_SPORT_TCP] = "tcp source port",
	[NPC_DPORT_TCP] = "tcp destination port",
	[NPC_SPORT_UDP] = "udp source port",
	[NPC_DPORT_UDP] = "udp destination port",
	[NPC_SPORT_SCTP] = "sctp source port",
	[NPC_DPORT_SCTP] = "sctp destination port",
	[NPC_LXMB] = "Mcast/Bcast header ",
	[NPC_IPSEC_SPI] = "SPI ",
	[NPC_MPLS1_LBTCBOS] = "lse depth 1 label tc bos",
	[NPC_MPLS1_TTL] = "lse depth 1 ttl",
	[NPC_MPLS2_LBTCBOS] = "lse depth 2 label tc bos",
	[NPC_MPLS2_TTL] = "lse depth 2 ttl",
	[NPC_MPLS3_LBTCBOS] = "lse depth 3 label tc bos",
	[NPC_MPLS3_TTL] = "lse depth 3 ttl",
	[NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
	[NPC_MPLS4_TTL] = "lse depth 4 ttl",
	[NPC_TYPE_ICMP] = "icmp type",
	[NPC_CODE_ICMP] = "icmp code",
	[NPC_TCP_FLAGS] = "tcp flags",
	[NPC_UNKNOWN] = "unknown",
};

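/* Illustrative note: the check below flags any requested feature bit that
 * the extraction profile does not provide. For example, if the profile
 * extracts only DMAC and ETYPE (mcam_features = BIT_ULL(NPC_DMAC) |
 * BIT_ULL(NPC_ETYPE)) and the caller asks for DMAC + OUTER_VID, then
 * (mcam_features ^ features) & ~mcam_features == BIT_ULL(NPC_OUTER_VID),
 * so the function returns false.
 */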
bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 mcam_features;
	u64 unsupported;

	mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features;
	unsupported = (mcam_features ^ features) & ~mcam_features;

	/* Return false if at least one of the input flows is not extracted */
	return !unsupported;
}

const char *npc_get_field_name(u8 hdr)
{
	if (hdr >= ARRAY_SIZE(npc_flow_names))
		return npc_flow_names[NPC_UNKNOWN];

	return npc_flow_names[hdr];
}

/* Compute keyword masks and figure out the number of keywords a field
 * spans in the key.
 */
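/* Illustrative example: a 16-bit field whose key byte offset is 7 starts at
 * bit 56 of KW0, so it spans two keywords: kw_mask[start_kwi] gets
 * GENMASK_ULL(7, 0) << 56 and kw_mask[start_kwi + 1] gets GENMASK_ULL(7, 0)
 * for the remaining 8 bits, with nr_kws = 2.
 */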
static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
			     u8 nr_bits, int start_kwi, int offset, u8 intf)
{
	struct npc_key_field *field = &mcam->rx_key_fields[type];
	u8 bits_in_kw;
	int max_kwi;

	if (mcam->banks_per_entry == 1)
		max_kwi = 1; /* NPC_MCAM_KEY_X1 */
	else if (mcam->banks_per_entry == 2)
		max_kwi = 3; /* NPC_MCAM_KEY_X2 */
	else
		max_kwi = 6; /* NPC_MCAM_KEY_X4 */

	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (offset + nr_bits <= 64) {
		/* one KW only */
		if (start_kwi > max_kwi)
			return;
		field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
					     << offset;
		field->nr_kws = 1;
	} else if (offset + nr_bits > 64 &&
		   offset + nr_bits <= 128) {
		/* two KWs */
		if (start_kwi + 1 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 64;
		field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 2;
	} else {
		/* three KWs */
		if (start_kwi + 2 > max_kwi)
			return;
		/* first KW mask */
		bits_in_kw = 64 - offset;
		field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
					     << offset;
		/* second KW mask */
		field->kw_mask[start_kwi + 1] = ~0ULL;
		/* third KW mask i.e. mask for rest of bits */
		bits_in_kw = nr_bits + offset - 128;
		field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
		field->nr_kws = 3;
	}
}

/* Helper function to figure out whether field exists in the key */
static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *input;

	input = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	return input->nr_kws > 0;
}

static bool npc_is_same(struct npc_key_field *input,
			struct npc_key_field *field)
{
	return memcmp(&input->layer_mdata, &field->layer_mdata,
		      sizeof(struct npc_layer_mdata)) == 0;
}

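/* Illustrative note: each NPC_AF_INTFX_LIDX_LTX_LDX_CFG register describes
 * one extractor - NPC_HDR_OFFSET is the byte offset of the extracted data
 * within the layer's header, NPC_KEY_OFFSET is the byte offset at which it
 * lands in the MCAM key, and NPC_BYTESM is the number of extracted bytes
 * minus one.
 */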
static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
				u64 cfg, u8 lid, u8 lt, u8 intf)
{
	struct npc_key_field *input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf))
		input = &mcam->tx_key_fields[type];

	input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
	input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
	input->layer_mdata.ltype = lt;
	input->layer_mdata.lid = lid;
}

static bool npc_check_overlap_fields(struct npc_key_field *input1,
				     struct npc_key_field *input2)
{
	int kwi;

	/* Fields with the same layer id but different ltypes are mutually
	 * exclusive, hence they are allowed to overlap
	 */
	if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
	    input1->layer_mdata.ltype != input2->layer_mdata.ltype)
		return false;

	for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
		if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
			return true;
	}

	return false;
}

/* Helper function to check whether a given field overlaps with any other
 * field in the key. Due to limitations on key size and the key extraction
 * profile in use, higher layers can overwrite a lower layer's header fields.
 * Hence overlap needs to be checked.
 */
static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
			      enum key_fields type, u8 start_lid, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *dummy, *input;
	int start_kwi, offset;
	u8 nr_bits, lid, lt, ld;
	u64 cfg;

	dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
	input = &mcam->rx_key_fields[type];

	if (is_npc_intf_tx(intf)) {
		dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
		input = &mcam->tx_key_fields[type];
	}

	for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				memset(dummy, 0, sizeof(struct npc_key_field));
				npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
						    lid, lt, intf);
				/* exclude input */
				if (npc_is_same(input, dummy))
					continue;
				start_kwi = dummy->layer_mdata.key / 8;
				offset = (dummy->layer_mdata.key * 8) % 64;
				nr_bits = dummy->layer_mdata.len * 8;
				/* form KW masks */
				npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
						 start_kwi, offset, intf);
				/* check if any input field bits fall in any
				 * other field bits.
				 */
				if (npc_check_overlap_fields(dummy, input))
					return true;
			}
		}
	}

	return false;
}

static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
			    u8 intf)
{
	if (!npc_is_field_present(rvu, type, intf) ||
	    npc_check_overlap(rvu, blkaddr, type, 0, intf))
		return false;
	return true;
}

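/* Illustrative note: KEX nibbles 40-43 carry the exact-match result, so only
 * those bit numbers are recorded below; every other nibble is ignored here.
 */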
static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number,
				  u8 key_nibble, u8 intf)
{
	u8 offset = (key_nibble * 4) % 64; /* offset within key word */
	u8 kwi = (key_nibble * 4) / 64; /* which word in key */
	u8 nr_bits = 4; /* bits in a nibble */
	u8 type;

	switch (bit_number) {
	case 40 ... 43:
		type = NPC_EXACT_RESULT;
		break;

	default:
		return;
	}
	npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}

static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
				  u8 key_nibble, u8 intf)
{
	u8 offset = (key_nibble * 4) % 64; /* offset within key word */
	u8 kwi = (key_nibble * 4) / 64; /* which word in key */
	u8 nr_bits = 4; /* bits in a nibble */
	u8 type;

	switch (bit_number) {
	case 0 ... 2:
		type = NPC_CHAN;
		break;
	case 3:
		type = NPC_ERRLEV;
		break;
	case 4 ... 5:
		type = NPC_ERRCODE;
		break;
	case 6:
		type = NPC_LXMB;
		break;
	/* check for LTYPE only as of now */
	case 9:
		type = NPC_LA;
		break;
	case 12:
		type = NPC_LB;
		break;
	case 15:
		type = NPC_LC;
		break;
	case 18:
		type = NPC_LD;
		break;
	case 21:
		type = NPC_LE;
		break;
	case 24:
		type = NPC_LF;
		break;
	case 27:
		type = NPC_LG;
		break;
	case 30:
		type = NPC_LH;
		break;
	default:
		return;
	}

	npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}

static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct npc_key_field *key_fields;
	/* Ether type can come from three layers
	 * (ethernet, single tagged, double tagged)
	 */
	struct npc_key_field *etype_ether;
	struct npc_key_field *etype_tag1;
	struct npc_key_field *etype_tag2;
	/* Outer VLAN TCI can come from two layers
	 * (single tagged, double tagged)
	 */
	struct npc_key_field *vlan_tag1;
	struct npc_key_field *vlan_tag2;
	/* Inner VLAN TCI for double tagged frames */
	struct npc_key_field *vlan_tag3;
	u64 *features;
	u8 start_lid;
	int i;

	key_fields = mcam->rx_key_fields;
	features = &mcam->rx_features;

	if (is_npc_intf_tx(intf)) {
		key_fields = mcam->tx_key_fields;
		features = &mcam->tx_features;
	}

	/* Handle header fields which can come from multiple layers like
	 * etype, outer vlan tci. These fields should have the same position in
	 * the key; otherwise installing an mcam rule would need more than one
	 * entry, which complicates mcam space management.
	 */
	etype_ether = &key_fields[NPC_ETYPE_ETHER];
	etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
	etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
	vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
	vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
	vlan_tag3 = &key_fields[NPC_VLAN_TAG3];

	/* if key profile programmed does not extract Ethertype at all */
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
		dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n");
		goto vlan_tci;
	}

	/* if key profile programmed extracts Ethertype from one layer */
	if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_ether;
	if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag1;
	if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
		key_fields[NPC_ETYPE] = *etype_tag2;

	/* if key profile programmed extracts Ethertype from multiple layers */
	if (etype_ether->nr_kws && etype_tag1->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
				dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n");
				goto vlan_tci;
			}
		}
		key_fields[NPC_ETYPE] = *etype_tag1;
	}
	if (etype_ether->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
				dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n");
				goto vlan_tci;
			}
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}
	if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) {
				dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n");
				goto vlan_tci;
			}
		}
		key_fields[NPC_ETYPE] = *etype_tag2;
	}

	/* check none of higher layers overwrite Ethertype */
	start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
		dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n");
		goto vlan_tci;
	}
	*features |= BIT_ULL(NPC_ETYPE);
vlan_tci:
	/* if key profile does not extract outer vlan tci at all */
	if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
		dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n");
		goto done;
	}

	/* if key profile extracts outer vlan tci from one layer */
	if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag1;
	if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
		key_fields[NPC_OUTER_VID] = *vlan_tag2;

	/* if key profile extracts outer vlan tci from multiple layers */
	if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
		for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
			if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
				dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n");
				goto done;
			}
		}
		key_fields[NPC_OUTER_VID] = *vlan_tag2;
	}
	/* check none of higher layers overwrite outer vlan tci */
	start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
	if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
		dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n");
		goto done;
	}
	*features |= BIT_ULL(NPC_OUTER_VID);

	/* If key profile extracts inner vlan tci */
	if (vlan_tag3->nr_kws) {
		key_fields[NPC_INNER_VID] = *vlan_tag3;
		*features |= BIT_ULL(NPC_INNER_VID);
	}
done:
	return;
}

static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
			   u8 lt, u64 cfg, u8 intf)
{
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 hdr, key, nr_bytes, bit_offset;
	u8 la_ltype, la_start;
	/* starting KW index and starting bit position */
	int start_kwi, offset;

	nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);

	/* For Tx, Layer A has NIX_INST_HDR_S (8 bytes) preceding the
	 * ethernet header.
	 */
	if (is_npc_intf_tx(intf)) {
		la_ltype = NPC_LT_LA_IH_NIX_ETHER;
		la_start = 8;
	} else {
		la_ltype = NPC_LT_LA_ETHER;
		la_start = 0;
	}

#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen)			       \
do {									       \
	start_kwi = key / 8;						       \
	offset = (key * 8) % 64;					       \
	if (lid == (hlid) && lt == (hlt)) {				       \
		if ((hstart) >= hdr &&					       \
		    ((hstart) + (hlen)) <= (hdr + nr_bytes)) {		       \
			bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
			npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
			offset += bit_offset;				       \
			start_kwi += offset / 64;			       \
			offset %= 64;					       \
			npc_set_kw_masks(mcam, (name), (hlen) * 8,	       \
					 start_kwi, offset, intf);	       \
		}							       \
	}								       \
} while (0)
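
	/* Illustrative example: for NPC_SIP_IPV4 (hstart 12, hlen 4), an
	 * extractor with hdr = 12 and nr_bytes = 4 gives bit_offset = 0, so
	 * the source IP lands exactly at the key offset programmed for this
	 * LD and its KW masks are recorded from there.
	 */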

	/* List LID, LTYPE, start offset from layer and length(in bytes) of
	 * packet header fields below.
	 * Example: Source IP is 4 bytes and starts at 12th byte of IP header
	 */
	NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
	NPC_SCAN_HDR(NPC_IPFRAG_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 6, 1);
	NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
	NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
	NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
	if (rvu->hw->cap.npc_hash_extract) {
		if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
			NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
		else
			NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);

		if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
			NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
		else
			NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
	} else {
		NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
		NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
	}

	NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
	NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
	NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
	NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
	NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
	NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2);
	NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
	NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
	NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2);
	NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);

	NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
	NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
	NPC_SCAN_HDR(NPC_MPLS1_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 0, 3);
	NPC_SCAN_HDR(NPC_MPLS1_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 3, 1);
	NPC_SCAN_HDR(NPC_MPLS2_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 4, 3);
	NPC_SCAN_HDR(NPC_MPLS2_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 7, 1);
	NPC_SCAN_HDR(NPC_MPLS3_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 8, 3);
	NPC_SCAN_HDR(NPC_MPLS3_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 11, 1);
	NPC_SCAN_HDR(NPC_MPLS4_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 12, 3);
	NPC_SCAN_HDR(NPC_MPLS4_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 15, 1);

	/* SMAC follows the DMAC(which is 6 bytes) */
	NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
	/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
	NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}

static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *features = &mcam->rx_features;
	u64 proto_flags;
	int hdr;

	if (is_npc_intf_tx(intf))
		features = &mcam->tx_features;

	for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
		if (npc_check_field(rvu, blkaddr, hdr, intf))
			*features |= BIT_ULL(hdr);
	}

	proto_flags = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
		      BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
		      BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
		      BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) |
		      BIT_ULL(NPC_TCP_FLAGS);

	/* for tcp/udp/sctp corresponding layer type should be in the key */
	if (*features & proto_flags) {
		if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
			*features &= ~proto_flags;
		else
			*features |= BIT_ULL(NPC_IPPROTO_TCP) |
				     BIT_ULL(NPC_IPPROTO_UDP) |
				     BIT_ULL(NPC_IPPROTO_SCTP) |
				     BIT_ULL(NPC_IPPROTO_ICMP);
	}

	/* for AH/ICMP/ICMPv6, check if corresponding layer type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) {
		*features |= BIT_ULL(NPC_IPPROTO_AH);
		*features |= BIT_ULL(NPC_IPPROTO_ICMP);
		*features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	/* for ESP, check if corresponding layer type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
		*features |= BIT_ULL(NPC_IPPROTO_ESP);

	/* for vlan corresponding layer type should be in the key */
	if (*features & BIT_ULL(NPC_OUTER_VID))
		if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
			*features &= ~BIT_ULL(NPC_OUTER_VID);

	/* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
	if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
	    (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
		*features |= BIT_ULL(NPC_IPSEC_SPI);

	/* for vlan ethertypes corresponding layer type should be in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
		*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
			     BIT_ULL(NPC_VLAN_ETYPE_STAG);

	/* for L2M/L2B/L3M/L3B, check if the type is present in the key */
	if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
		*features |= BIT_ULL(NPC_LXMB);

	for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) {
		if (npc_check_field(rvu, blkaddr, hdr, intf))
			*features |= BIT_ULL(hdr);
	}
}

/* Scan the key extraction profile and record how the fields of our interest
 * fill the key structure. Also verify that Channel and DMAC exist in the
 * key and are not overwritten by other header fields.
 */
static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u8 lid, lt, ld, bitnr;
	u64 cfg, masked_cfg;
	u8 key_nibble = 0;

	/* Scan and note how parse result is going to be in key.
	 * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
	 * parse result in the key. The enabled nibbles from parse result
	 * will be concatenated in key.
	 */
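	/* Illustrative example: if parse-result nibbles 0-2 (channel) and 9
	 * (LA ltype) are the only ones enabled, they occupy key nibbles 0-2
	 * and 3 respectively, since enabled nibbles are packed back to back.
	 */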
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
	masked_cfg = cfg & NPC_PARSE_NIBBLE;
	for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
		npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Ignore exact match bits for mcam entries except the first rule
	 * which is drop on hit. This first rule is configured explicitly by
	 * the exact match code.
	 */
	masked_cfg = cfg & NPC_EXACT_NIBBLE;
	bitnr = NPC_EXACT_NIBBLE_START;
	for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
		npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
		key_nibble++;
	}

	/* Scan and note how layer data is going to be in key */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				cfg = rvu_read64(rvu, blkaddr,
						 NPC_AF_INTFX_LIDX_LTX_LDX_CFG
						 (intf, lid, lt, ld));
				if (!FIELD_GET(NPC_LDATA_EN, cfg))
					continue;
				npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
					       intf);
			}
		}
	}

	return 0;
}

static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
{
	int err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
	if (err)
		return err;

	err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
	if (err)
		return err;

	/* Channel is mandatory */
	if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel not present in Key\n");
		return -EINVAL;
	}
	/* check that none of the fields overwrite channel */
	if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
		dev_err(rvu->dev, "Channel cannot be overwritten\n");
		return -EINVAL;
	}

	npc_set_features(rvu, blkaddr, NIX_INTF_TX);
	npc_set_features(rvu, blkaddr, NIX_INTF_RX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
	npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);

	return 0;
}

int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;

	INIT_LIST_HEAD(&mcam->mcam_rules);

	return npc_scan_verify_kex(rvu, blkaddr);
}

static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	u64 *mcam_features = &mcam->rx_features;
	u64 unsupported;
	u8 bit;

	if (is_npc_intf_tx(intf))
		mcam_features = &mcam->tx_features;

	unsupported = (*mcam_features ^ features) & ~(*mcam_features);
	if (unsupported) {
		dev_warn(rvu->dev, "Unsupported flow(s):\n");
		for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
			dev_warn(rvu->dev, "%s ", npc_get_field_name(bit));
		return -EOPNOTSUPP;
	}

	return 0;
}

/* npc_update_entry - Based on the masks generated during
 * the key scanning, updates the given entry with value and
 * masks for the field of interest. Maximum 16 bytes of a packet
 * header can be extracted by HW hence lo and hi are sufficient.
 * When field bytes are less than or equal to 8 then hi should be
 * 0 for value and mask.
 *
 * If exact match of value is required then mask should be all 1's.
 * If any bits in mask are 0 then corresponding bits in value are
 * don't care.
 */
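/* Illustrative example: matching ether type 0x0800 exactly would use
 * val_lo = 0x0800, mask_lo = 0xffff and val_hi = mask_hi = 0, since the
 * field is only two bytes.
 */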
void npc_update_entry(struct rvu *rvu, enum key_fields type,
		      struct mcam_entry *entry, u64 val_lo,
		      u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct mcam_entry dummy = { {0} };
	struct npc_key_field *field;
	u64 kw1, kw2, kw3;
	u8 shift;
	int i;

	field = &mcam->rx_key_fields[type];
	if (is_npc_intf_tx(intf))
		field = &mcam->tx_key_fields[type];

	if (!field->nr_kws)
		return;

	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		/* place key value in kw[x] */
		shift = __ffs64(field->kw_mask[i]);
		/* update entry value */
		kw1 = (val_lo << shift) & field->kw_mask[i];
		dummy.kw[i] = kw1;
		/* update entry mask */
		kw1 = (mask_lo << shift) & field->kw_mask[i];
		dummy.kw_mask[i] = kw1;

		if (field->nr_kws == 1)
			break;
		/* place remaining bits of key value in kw[x + 1] */
		if (field->nr_kws == 2) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw[i + 1] = kw2;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			dummy.kw_mask[i + 1] = kw2;
			break;
		}
		/* place remaining bits of key value in kw[x + 1], kw[x + 2] */
		if (field->nr_kws == 3) {
			/* update entry value */
			kw2 = shift ? val_lo >> (64 - shift) : 0;
			kw2 |= (val_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? val_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw[i + 1] = kw2;
			dummy.kw[i + 2] = kw3;
			/* update entry mask */
			kw2 = shift ? mask_lo >> (64 - shift) : 0;
			kw2 |= (mask_hi << shift);
			kw2 &= field->kw_mask[i + 1];
			kw3 = shift ? mask_hi >> (64 - shift) : 0;
			kw3 &= field->kw_mask[i + 2];
			dummy.kw_mask[i + 1] = kw2;
			dummy.kw_mask[i + 2] = kw3;
			break;
		}
	}
	/* dummy is ready with values and masks for given key
	 * field now clear and update input entry with those
	 */
	for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
		if (!field->kw_mask[i])
			continue;
		entry->kw[i] &= ~field->kw_mask[i];
		entry->kw_mask[i] &= ~field->kw_mask[i];

		entry->kw[i] |= dummy.kw[i];
		entry->kw_mask[i] |= dummy.kw_mask[i];
	}
}

static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
				 u64 features, struct flow_msg *pkt,
				 struct flow_msg *mask,
				 struct rvu_npc_mcam_rule *output, u8 intf)
{
	u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
	u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;
	u64 mask_lo, mask_hi;
	u64 val_lo, val_hi;

	/* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
	 * values to be programmed in MCAM should be as below:
	 * val_high: 0xfe80000000000000
	 * val_low: 0x2c6863fffe5e2d0a
	 */
	if (features & BIT_ULL(NPC_SIP_IPV6)) {
		be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
		be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);

		mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
		mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
		val_hi = (u64)src_ip[0] << 32 | src_ip[1];
		val_lo = (u64)src_ip[2] << 32 | src_ip[3];

		npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
		memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
	}
	if (features & BIT_ULL(NPC_DIP_IPV6)) {
		be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
		be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);

		mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
		mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
		val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
		val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];

		npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
				 mask_lo, mask_hi, intf);
		memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
		memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
	}
}

static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
				     u64 features, u8 intf)
{
	bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG));
	bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG));
	bool vid = !!(features & BIT_ULL(NPC_OUTER_VID));

	/* If only VLAN id is given then always match outer VLAN id */
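	/* Illustrative note: below, the value is the OR and the mask the AND
	 * of the two LB ltypes, so the entry matches whichever of
	 * NPC_LT_LB_CTAG / NPC_LT_LB_STAG_QINQ the parser reports.
	 */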
	if (vid && !ctag && !stag) {
		npc_update_entry(rvu, NPC_LB, entry,
				 NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
				 NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
		return;
	}
	if (ctag)
		npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0,
				 ~0ULL, 0, intf);
	if (stag)
		npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0,
				 ~0ULL, 0, intf);
}

static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
			    u64 features, struct flow_msg *pkt,
			    struct flow_msg *mask,
			    struct rvu_npc_mcam_rule *output, u8 intf,
			    int blkaddr)
{
	u64 dmac_mask = ether_addr_to_u64(mask->dmac);
	u64 smac_mask = ether_addr_to_u64(mask->smac);
	u64 dmac_val = ether_addr_to_u64(pkt->dmac);
	u64 smac_val = ether_addr_to_u64(pkt->smac);
	struct flow_msg *opkt = &output->packet;
	struct flow_msg *omask = &output->mask;

	if (!features)
		return;

	/* For tcp/udp/sctp LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_TCP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_UDP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_SCTP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_ICMP))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP,
				 0, ~0ULL, 0, intf);
	if (features & BIT_ULL(NPC_IPPROTO_ICMP6))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6,
				 0, ~0ULL, 0, intf);

	/* For AH, LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_AH))
		npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
				 0, ~0ULL, 0, intf);
	/* For ESP, LTYPE should be present in entry */
	if (features & BIT_ULL(NPC_IPPROTO_ESP))
		npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
				 0, ~0ULL, 0, intf);

	if (features & BIT_ULL(NPC_LXMB)) {
		output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1;
		npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0,
				 output->lxmb, 0, intf);
	}
#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi)      \
do {									      \
	if (features & BIT_ULL((field))) {				      \
		npc_update_entry(rvu, (field), entry, (val_lo), (val_hi),    \
				 (mask_lo), (mask_hi), intf);		      \
		memcpy(&opkt->member, &pkt->member, sizeof(pkt->member));     \
		memcpy(&omask->member, &mask->member, sizeof(mask->member));  \
	}								      \
} while (0)

	NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);

	NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
	NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
		       ntohs(mask->etype), 0);
	NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
	NPC_WRITE_FLOW(NPC_IPFRAG_IPV4, ip_flag, pkt->ip_flag, 0,
		       mask->ip_flag, 0);
	NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
		       ntohl(mask->ip4src), 0);
	NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
		       ntohl(mask->ip4dst), 0);
	NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
		       ntohs(mask->sport), 0);
	NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
		       ntohs(mask->dport), 0);
	NPC_WRITE_FLOW(NPC_TYPE_ICMP, icmp_type, pkt->icmp_type, 0,
		       mask->icmp_type, 0);
	NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
		       mask->icmp_code, 0);
	NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0,
		       ntohs(mask->tcp_flags), 0);
	NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
		       ntohl(mask->spi), 0);

	NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
		       ntohs(mask->vlan_tci), 0);
	NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
		       ntohs(mask->vlan_itci), 0);

	NPC_WRITE_FLOW(NPC_MPLS1_LBTCBOS, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 pkt->mpls_lse[0]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 mask->mpls_lse[0]), 0);
	NPC_WRITE_FLOW(NPC_MPLS1_TTL, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 pkt->mpls_lse[0]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 mask->mpls_lse[0]), 0);
	NPC_WRITE_FLOW(NPC_MPLS2_LBTCBOS, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 pkt->mpls_lse[1]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 mask->mpls_lse[1]), 0);
	NPC_WRITE_FLOW(NPC_MPLS2_TTL, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 pkt->mpls_lse[1]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 mask->mpls_lse[1]), 0);
	NPC_WRITE_FLOW(NPC_MPLS3_LBTCBOS, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 pkt->mpls_lse[2]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 mask->mpls_lse[2]), 0);
	NPC_WRITE_FLOW(NPC_MPLS3_TTL, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 pkt->mpls_lse[2]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 mask->mpls_lse[2]), 0);
	NPC_WRITE_FLOW(NPC_MPLS4_LBTCBOS, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 pkt->mpls_lse[3]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
				 mask->mpls_lse[3]), 0);
	NPC_WRITE_FLOW(NPC_MPLS4_TTL, mpls_lse,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 pkt->mpls_lse[3]), 0,
		       FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
				 mask->mpls_lse[3]), 0);

	NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
		       mask->next_header, 0);
	npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
	npc_update_vlan_features(rvu, entry, features, intf);

	npc_update_field_hash(rvu, intf, entry, blkaddr, features,
			      pkt, mask, opkt, omask);
}

static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry)
{
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry == entry) {
			mutex_unlock(&mcam->lock);
			return iter;
		}
	}
	mutex_unlock(&mcam->lock);

	return NULL;
}

static void rvu_mcam_add_rule(struct npc_mcam *mcam,
			      struct rvu_npc_mcam_rule *rule)
{
	struct list_head *head = &mcam->mcam_rules;
	struct rvu_npc_mcam_rule *iter;

	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		if (iter->entry > rule->entry)
			break;
		head = &iter->list;
	}

	list_add(&rule->list, head);
	mutex_unlock(&mcam->lock);
}

static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
					      struct rvu_npc_mcam_rule *rule)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);

	__rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);

	mutex_unlock(&mcam->lock);
}

static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
					 struct rvu_npc_mcam_rule *rule,
					 struct npc_install_flow_rsp *rsp)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;

	mutex_lock(&mcam->lock);

	__rvu_mcam_add_counter_to_rule(rvu, pcifunc, rule, rsp);

	mutex_unlock(&mcam->lock);
}

static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
					 u64 op, void *action)
{
	int mce_index;

	/* If a PF/VF is installing a multicast rule then it is expected
	 * that the PF/VF should have created a group for the multicast/mirror
	 * list. Otherwise reject the configuration.
	 * During this scenario, req->index is set as multicast/mirror
	 * group index.
	 */
	if (req->hdr.pcifunc &&
	    (op == NIX_RX_ACTIONOP_MCAST || op == NIX_TX_ACTIONOP_MCAST)) {
		mce_index = rvu_nix_mcast_get_mce_index(rvu, req->hdr.pcifunc, req->index);
		if (mce_index < 0)
			return mce_index;

		if (op == NIX_RX_ACTIONOP_MCAST)
			((struct nix_rx_action *)action)->index = mce_index;
		else
			((struct nix_tx_action *)action)->index = mce_index;
	}

	return 0;
}

static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       struct mcam_entry *entry,
			       struct npc_install_flow_req *req,
			       u16 target, bool pf_set_vfs_mac)
{
	struct rvu_switch *rswitch = &rvu->rswitch;
	struct nix_rx_action action;
	int ret;

	if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
		req->chan_mask = 0x0; /* Do not care channel */

	npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
			 0, NIX_INTF_RX);

	*(u64 *)&action = 0x00;
	action.pf_func = target;
	action.op = req->op;
	action.index = req->index;

	ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
	if (ret)
		return ret;

	action.match_id = req->match_id;
	action.flow_key_alg = req->flow_key_alg;

	if (req->op == NIX_RX_ACTION_DEFAULT) {
		if (pfvf->def_ucast_rule) {
			action = pfvf->def_ucast_rule->rx_action;
		} else {
			/* For profiles which do not extract DMAC, the default
			 * unicast entry is unused. Hence modify action for the
			 * requests which use same action as default unicast
			 * entry
			 */
			*(u64 *)&action = 0;
			action.pf_func = target;
			action.op = NIX_RX_ACTIONOP_UCAST;
		}
		if (req->match_id)
			action.match_id = req->match_id;
	}

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at 0th byte of LID_B.
	 * VTAG1 starts at 4th byte of LID_B.
	 */
	entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
			     FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
			     FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
			     FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
			     FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
			     FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
			     FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);

	return 0;
}

static int npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       struct mcam_entry *entry,
			       struct npc_install_flow_req *req, u16 target)
{
	struct nix_tx_action action;
	u64 mask = ~0ULL;
	int ret;

	/* If AF is installing then do not care about
	 * PF_FUNC in Send Descriptor
	 */
	if (is_pffunc_af(req->hdr.pcifunc))
		mask = 0;

	npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
			 0, mask, 0, NIX_INTF_TX);

	*(u64 *)&action = 0x00;
	action.op = req->op;
	action.index = req->index;

	ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
	if (ret)
		return ret;

	action.match_id = req->match_id;

	entry->action = *(u64 *)&action;

	/* VTAG0 starts at 0th byte of LID_B.
	 * VTAG1 starts at 4th byte of LID_B.
	 */
	entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
			     FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
			     FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
			     FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
			     FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
			     FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
			     FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);

	return 0;
}

static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
			    int nixlf, struct rvu_pfvf *pfvf,
			    struct npc_install_flow_req *req,
			    struct npc_install_flow_rsp *rsp, bool enable,
			    bool pf_set_vfs_mac)
{
	struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
	u64 features, installed_features, missing_features = 0;
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule dummy = { 0 };
	struct rvu_npc_mcam_rule *rule;
	u16 owner = req->hdr.pcifunc;
	struct msg_rsp write_rsp;
	struct mcam_entry *entry;
	bool new = false;
	u16 entry_index;
	int err;

	installed_features = req->features;
	features = req->features;
	entry = &write_req.entry_data;
	entry_index = req->entry;

	npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
			req->intf, blkaddr);

	if (is_npc_intf_rx(req->intf)) {
		err = npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
		if (err)
			return err;
	} else {
		err = npc_update_tx_entry(rvu, pfvf, entry, req, target);
		if (err)
			return err;
	}

	/* Default unicast rules do not exist for TX */
	if (is_npc_intf_tx(req->intf))
		goto find_rule;

	if (req->default_rule) {
		entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf,
						       NIXLF_UCAST_ENTRY);
		enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index);
	}

	/* update mcam entry with default unicast rule attributes */
	if (def_ucast_rule && (req->default_rule && req->append)) {
		missing_features = (def_ucast_rule->features ^ features) &
				   def_ucast_rule->features;
		if (missing_features)
			npc_update_flow(rvu, entry, missing_features,
					&def_ucast_rule->packet,
					&def_ucast_rule->mask,
					&dummy, req->intf,
					blkaddr);
		installed_features = req->features | missing_features;
	}

find_rule:
	rule = rvu_mcam_find_rule(mcam, entry_index);
	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;
		new = true;
	}

	/* allocate new counter if rule has no counter */
	if (!req->default_rule && req->set_cntr && !rule->has_cntr)
		rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);

	/* if user wants to delete an existing counter for a rule then
	 * free the counter
	 */
	if (!req->set_cntr && rule->has_cntr)
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);

	write_req.hdr.pcifunc = owner;

	/* AF owns the default rules so change the owner just to relax
	 * the checks in rvu_mbox_handler_npc_mcam_write_entry
	 */
	if (req->default_rule)
		write_req.hdr.pcifunc = 0;

	write_req.entry = entry_index;
	write_req.intf = req->intf;
	write_req.enable_entry = (u8)enable;
	/* if counter is available then clear and use it */
	if (req->set_cntr && rule->has_cntr) {
		rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
		write_req.set_cntr = 1;
		write_req.cntr = rule->cntr;
	}

	/* update rule */
	memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
	memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
	rule->entry = entry_index;
	memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
	if (is_npc_intf_tx(req->intf))
		memcpy(&rule->tx_action, &entry->action,
		       sizeof(struct nix_tx_action));
	rule->vtag_action = entry->vtag_action;
	rule->features = installed_features;
	rule->default_rule = req->default_rule;
	rule->owner = owner;
	rule->enable = enable;
	rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
	rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
	rule->chan &= rule->chan_mask;
	rule->lxmb = dummy.lxmb;
	if (is_npc_intf_tx(req->intf))
		rule->intf = pfvf->nix_tx_intf;
	else
		rule->intf = pfvf->nix_rx_intf;

	if (new)
		rvu_mcam_add_rule(mcam, rule);
	if (req->default_rule)
		pfvf->def_ucast_rule = rule;

	/* write to mcam entry registers */
	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
						    &write_rsp);
	if (err) {
		rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
		if (new) {
			list_del(&rule->list);
			kfree(rule);
		}
		return err;
	}

	/* VF's MAC address is being changed via PF */
	if (pf_set_vfs_mac) {
		ether_addr_copy(pfvf->default_mac, req->packet.dmac);
		ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
		set_bit(PF_SET_VF_MAC, &pfvf->flags);
	}

	if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
	    req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		rule->vfvlan_cfg = true;

	if (is_npc_intf_rx(req->intf) && req->match_id &&
	    (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
		return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
						    req->index, req->match_id);

	if (owner && req->op == NIX_RX_ACTIONOP_MCAST)
		return rvu_nix_mcast_update_mcam_entry(rvu, req->hdr.pcifunc,
						       req->index, entry_index);

	return 0;
}

int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
				      struct npc_install_flow_req *req,
				      struct npc_install_flow_rsp *rsp)
{
	bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
	bool from_rep_dev = !!is_rep_dev(rvu, req->hdr.pcifunc);
	struct rvu_switch *rswitch = &rvu->rswitch;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;
	bool pf_set_vfs_mac = false;
	bool enable = true;
	u16 target;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return NPC_MCAM_INVALID_REQ;
	}

	if (!is_npc_interface_valid(rvu, req->intf))
		return NPC_FLOW_INTF_INVALID;

	/* If DMAC is not extracted in MKEX, rules installed by AF
	 * can rely on L2MB bit set by hardware protocol checker for
	 * broadcast and multicast addresses.
	 */
	if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf))
		goto process_flow;

	if (is_pffunc_af(req->hdr.pcifunc) &&
	    req->features & BIT_ULL(NPC_DMAC)) {
		if (is_unicast_ether_addr(req->packet.dmac)) {
			dev_warn(rvu->dev,
				 "%s: mkex profile does not support ucast flow\n",
				 __func__);
			return NPC_FLOW_NOT_SUPPORTED;
		}

		if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
			dev_warn(rvu->dev,
				 "%s: mkex profile does not support bcast/mcast flow",
				 __func__);
			return NPC_FLOW_NOT_SUPPORTED;
		}

		/* Modify feature to use LXMB instead of DMAC */
		req->features &= ~BIT_ULL(NPC_DMAC);
		req->features |= BIT_ULL(NPC_LXMB);
	}

process_flow:
	if (from_vf && req->default_rule)
		return NPC_FLOW_VF_PERM_DENIED;

	/* Each PF/VF info is maintained in struct rvu_pfvf.
	 * rvu_pfvf for the target PF/VF needs to be retrieved
	 * hence modify pcifunc accordingly.
	 */
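	/* Illustrative examples: AF (pcifunc 0) installing on behalf of a
	 * PF/VF passes the full target pcifunc in req->vf, while a PF
	 * installing for one of its VFs passes only the function number
	 * there, which is combined with the PF part of its own pcifunc.
	 */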
1454
1455 if (!req->hdr.pcifunc) {
1456 /* AF installing for a PF/VF */
1457 target = req->vf;
1458 } else if (!from_vf && req->vf && !from_rep_dev) {
1459 /* PF installing for its VF */
1460 target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
1461 pf_set_vfs_mac = req->default_rule &&
1462 (req->features & BIT_ULL(NPC_DMAC));
1463 } else if (from_rep_dev && req->vf) {
1464 /* Representor device installing for a representee */
1465 target = req->vf;
1466 } else {
1467 /* msg received from PF/VF */
1468 target = req->hdr.pcifunc;
1469 }
1470
1471 /* ignore chan_mask in case pf func is not AF, revisit later */
1472 if (!is_pffunc_af(req->hdr.pcifunc))
1473 req->chan_mask = 0xFFF;
1474
1475 err = npc_check_unsupported_flows(rvu, req->features, req->intf);
1476 if (err)
1477 return NPC_FLOW_NOT_SUPPORTED;
1478
1479 pfvf = rvu_get_pfvf(rvu, target);
1480
1481 if (from_rep_dev)
1482 req->channel = pfvf->rx_chan_base;
1483 /* PF installing for its VF */
1484 if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev)
1485 set_bit(PF_SET_VF_CFG, &pfvf->flags);
1486
1487 /* update req destination mac addr */
1488 if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
1489 is_zero_ether_addr(req->packet.dmac)) {
1490 ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
1491 eth_broadcast_addr((u8 *)&req->mask.dmac);
1492 }
1493
1494 /* Proceed if NIXLF is attached or not for TX rules */
1495 err = nix_get_nixlf(rvu, target, &nixlf, NULL);
1496 if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
1497 return NPC_FLOW_NO_NIXLF;
1498
1499 /* don't enable rule when nixlf not attached or initialized */
1500 if (!(is_nixlf_attached(rvu, target) &&
1501 test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
1502 enable = false;
1503
1504 /* Packets reaching NPC in Tx path implies that a
1505 * NIXLF is properly setup and transmitting.
1506 * Hence rules can be enabled for Tx.
1507 */
1508 if (is_npc_intf_tx(req->intf))
1509 enable = true;
1510
1511 /* Do not allow requests from uninitialized VFs */
1512 if (from_vf && !enable)
1513 return NPC_FLOW_VF_NOT_INIT;
1514
1515 /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */
1516 if (pf_set_vfs_mac && !enable) {
1517 ether_addr_copy(pfvf->default_mac, req->packet.dmac);
1518 ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
1519 set_bit(PF_SET_VF_MAC, &pfvf->flags);
1520 return 0;
1521 }
1522
1523 mutex_lock(&rswitch->switch_lock);
1524 err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
1525 req, rsp, enable, pf_set_vfs_mac);
1526 mutex_unlock(&rswitch->switch_lock);
1527
1528 return err;
1529 }
1530
npc_delete_flow(struct rvu * rvu,struct rvu_npc_mcam_rule * rule,u16 pcifunc)1531 static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
1532 u16 pcifunc)
1533 {
1534 struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
1535 struct msg_rsp dis_rsp;
1536
1537 if (rule->default_rule)
1538 return 0;
1539
1540 if (rule->has_cntr)
1541 rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
1542
1543 dis_req.hdr.pcifunc = pcifunc;
1544 dis_req.entry = rule->entry;
1545
1546 list_del(&rule->list);
1547 kfree(rule);
1548
1549 return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
1550 }
1551
rvu_mbox_handler_npc_delete_flow(struct rvu * rvu,struct npc_delete_flow_req * req,struct npc_delete_flow_rsp * rsp)1552 int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
1553 struct npc_delete_flow_req *req,
1554 struct npc_delete_flow_rsp *rsp)
1555 {
1556 struct npc_mcam *mcam = &rvu->hw->mcam;
1557 struct rvu_npc_mcam_rule *iter, *tmp;
1558 u16 pcifunc = req->hdr.pcifunc;
1559 struct list_head del_list;
1560 int blkaddr;
1561
1562 INIT_LIST_HEAD(&del_list);
1563
1564 mutex_lock(&mcam->lock);
1565 list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
1566 if (iter->owner == pcifunc) {
1567 /* All rules */
1568 if (req->all) {
1569 list_move_tail(&iter->list, &del_list);
1570 /* Range of rules */
1571 } else if (req->end && iter->entry >= req->start &&
1572 iter->entry <= req->end) {
1573 list_move_tail(&iter->list, &del_list);
1574 /* single rule */
1575 } else if (req->entry == iter->entry) {
1576 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1577 if (blkaddr)
1578 rsp->cntr_val = rvu_read64(rvu, blkaddr,
1579 NPC_AF_MATCH_STATX(iter->cntr));
1580 list_move_tail(&iter->list, &del_list);
1581 break;
1582 }
1583 }
1584 }
1585 mutex_unlock(&mcam->lock);
1586
1587 list_for_each_entry_safe(iter, tmp, &del_list, list) {
1588 u16 entry = iter->entry;
1589
1590 /* clear the mcam entry target pcifunc */
1591 mcam->entry2target_pffunc[entry] = 0x0;
1592 if (npc_delete_flow(rvu, iter, pcifunc))
1593 dev_err(rvu->dev, "rule deletion failed for entry:%u",
1594 entry);
1595 }
1596
1597 return 0;
1598 }
1599
static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
				 struct rvu_npc_mcam_rule *rule,
				 struct rvu_pfvf *pfvf)
{
	struct npc_mcam_write_entry_req write_req = { 0 };
	struct mcam_entry *entry = &write_req.entry_data;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct msg_rsp rsp;
	u8 intf, enable;
	int err;

	ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);

	npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
			    entry, &intf, &enable);

	npc_update_entry(rvu, NPC_DMAC, entry,
			 ether_addr_to_u64(pfvf->mac_addr), 0,
			 0xffffffffffffull, 0, intf);

	write_req.hdr.pcifunc = rule->owner;
	write_req.entry = rule->entry;
	write_req.intf = pfvf->nix_rx_intf;

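	/* The caller holds mcam->lock and the write-entry mbox handler
	 * takes it as well, so drop the lock around the nested call.
	 */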
	mutex_unlock(&mcam->lock);
	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
	mutex_lock(&mcam->lock);

	return err;
}

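/* Re-enable Rx MCAM rules targeting @target, refreshing stale DMAC
 * matches and default unicast actions that may have changed while the
 * NIXLF was down.
 */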
void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
	struct rvu_npc_mcam_rule *def_ucast_rule;
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule;
	int blkaddr, bank, index;
	u64 def_action;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	def_ucast_rule = pfvf->def_ucast_rule;

	mutex_lock(&mcam->lock);
	list_for_each_entry(rule, &mcam->mcam_rules, list) {
		if (is_npc_intf_rx(rule->intf) &&
		    rule->rx_action.pf_func == target && !rule->enable) {
			if (rule->default_rule) {
				npc_enable_mcam_entry(rvu, mcam, blkaddr,
						      rule->entry, true);
				rule->enable = true;
				continue;
			}

			if (rule->vfvlan_cfg)
				npc_update_dmac_value(rvu, blkaddr, rule, pfvf);

			if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
				if (!def_ucast_rule)
					continue;
				/* Use default unicast entry action */
				rule->rx_action = def_ucast_rule->rx_action;
				def_action = *(u64 *)&def_ucast_rule->rx_action;
				bank = npc_get_bank(mcam, rule->entry);
				rvu_write64(rvu, blkaddr,
					    NPC_AF_MCAMEX_BANKX_ACTION
					    (rule->entry, bank), def_action);
			}

			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      rule->entry, true);
			rule->enable = true;
		}
	}

	/* Enable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, true);
	}
	mutex_unlock(&mcam->lock);
}

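/* Counterpart of npc_mcam_enable_flows(): disable the MCAM entries a
 * PF installed on behalf of @target, typically when the VF goes down.
 */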
void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int blkaddr, index;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return;

	mutex_lock(&mcam->lock);
	/* Disable MCAM entries installed by PF with target as VF pcifunc */
	for (index = 0; index < mcam->bmap_entries; index++) {
		if (mcam->entry2target_pffunc[index] == target)
			npc_enable_mcam_entry(rvu, mcam, blkaddr,
					      index, false);
	}
	mutex_unlock(&mcam->lock);
}

/* Install a single drop-on-non-hit rule starting at the 0th index.
 * This is an extension of the RPM MAC filter to support more rules.
 */
int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
			       u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
			       u64 bcast_mcast_val, u64 bcast_mcast_mask)
{
	struct npc_mcam_alloc_counter_req cntr_req = { 0 };
	struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
	struct npc_mcam_write_entry_req req = { 0 };
	struct npc_mcam *mcam = &rvu->hw->mcam;
	struct rvu_npc_mcam_rule *rule;
	struct msg_rsp rsp;
	bool enabled;
	int blkaddr;
	int err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -ENODEV;
	}

	/* Bail out if there is no exact match support */
	if (!rvu_npc_exact_has_match_table(rvu)) {
		dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
		return -EINVAL;
	}

	/* If the 0th entry is already in use, return an error */
	enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
	if (enabled) {
		dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n",
			__func__, mcam_idx);
		return -EINVAL;
	}

	/* Add this entry to the mcam rules list */
	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	/* Disable the rule by default. It is enabled when the first dmac
	 * filter is installed.
	 */
	rule->enable = false;
	rule->chan = chan_val;
	rule->chan_mask = chan_mask;
	rule->entry = mcam_idx;
	rvu_mcam_add_rule(mcam, rule);

	/* Reserve slot 0 */
	npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx);

	/* Allocate a counter for this single drop on non hit rule */
	cntr_req.hdr.pcifunc = 0; /* AF request */
	cntr_req.contig = true;
	cntr_req.count = 1;
	err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
	if (err) {
		dev_err(rvu->dev, "%s: failed to allocate counter for drop rule (err=%d)\n",
			__func__, err);
		/* Unlink and free the rule queued above so it is not leaked */
		list_del(&rule->list);
		kfree(rule);
		return -EFAULT;
	}
	*counter_idx = cntr_rsp.cntr;

	/* Fill in fields for this mcam entry */
	npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
			 exact_mask, 0, NIX_INTF_RX);
	npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
			 chan_mask, 0, NIX_INTF_RX);
	npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
			 bcast_mcast_mask, 0, NIX_INTF_RX);

	req.intf = NIX_INTF_RX;
	req.set_cntr = true;
	req.cntr = cntr_rsp.cntr;
	req.entry = mcam_idx;

	err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
	if (err) {
		dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
			__func__, mcam_idx);
		return err;
	}

	dev_info(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
		 __func__, mcam_idx, req.cntr);

	/* disable entry at Bank 0, index 0 */
	npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);

	return 0;
}

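/* Report whether @req->field is extracted by the MKEX profile on the
 * given interface, i.e. whether flow rules can match on it.
 */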
int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu,
					  struct npc_get_field_status_req *req,
					  struct npc_get_field_status_rsp *rsp)
{
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return NPC_MCAM_INVALID_REQ;

	if (!is_npc_interface_valid(rvu, req->intf))
		return NPC_FLOW_INTF_INVALID;

	if (npc_check_field(rvu, blkaddr, req->field, req->intf))
		rsp->enable = 1;

	return 0;
}
