// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u32 entry;
	bool is_vf;
	u8 rss_ctx_id;
#define DMAC_FILTER_RULE	BIT(0)
#define PFC_FLOWCTRL_RULE	BIT(1)
	u16 rule_type;
	int vf;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf,
					struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

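/* otx2_alloc_mcam_entries() below sorts the allocated entry indices with
 * mcam_entry_cmp(), which is assumed to be provided elsewhere in the
 * driver. A minimal ascending comparator over the u16 indices would look
 * like this (sketch, for illustration only):
 *
 *	static int mcam_entry_cmp(const void *a, const void *b)
 *	{
 *		return *(u16 *)a - *(u16 *)b;
 *	}
 */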
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			   __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries will be on top of the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise user installed ntuple filter index and MCAM entry index
	 * will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);
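/* Usage sketch (illustrative): a caller growing the ntuple rule budget
 * treats a short allocation as best effort, since the function returns
 * the number of entries actually granted:
 *
 *	n = otx2_alloc_mcam_entries(pfvf, 64);
 *	if (n < 64)
 *		; // proceed with the n entries that were granted
 */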

int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_get_field_status_req *freq;
	struct npc_get_field_status_rsp *frsp;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = flow_cfg->ucast_flt_cnt +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					flow_cfg->ucast_flt_cnt;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;

	/* Check if NPC_DMAC field is supported
	 * by the mkex profile before setting VLAN support flag.
	 */
	freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
	if (!freq) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	freq->field = NPC_DMAC;
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &freq->hdr);
	if (IS_ERR(frsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(frsp);
	}

	if (frsp->enable) {
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
	}

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	refcount_set(&flow_cfg->mark_flows, 1);
	return 0;
}
EXPORT_SYMBOL(otx2_mcam_entry_init);
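/* Worked example of the def_ent[] layout computed above (values are
 * illustrative): with total_vfs = 4, OTX2_PER_VF_VLAN_FLOWS = 2 and
 * ucast_flt_cnt = 8, the default entries are laid out as
 *
 *	def_ent[0..7]   - VF VLAN flows   (vf_vlan_offset = 0)
 *	def_ent[8..15]  - unicast filters (unicast_offset = 8)
 *	def_ent[16..]   - RX VLAN flows   (rx_vlan_offset = 16)
 */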

/* TODO : revisit on size */
#define OTX2_DMAC_FLTR_BITMAP_SZ	(4 * 2048 + 32)

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
						    BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						    sizeof(long), GFP_KERNEL);
	if (!pfvf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
						  BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						  sizeof(long), GFP_KERNEL);
	if (!pf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);

	pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
	pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries are allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* pf->flow_cfg->ucast_flt_cnt, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u32) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/*  On success adds mcam entry
 *  On failure enable promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free mcam entries or uc list is greater than allotted */
	if (netdev_uc_count(pf->netdev) > pf->flow_cfg->ucast_flt_cnt)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast entries start after the ntuple ones, i.e. entries
	 * 0..31 are reserved for ntuple filters
	 */
	for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check if an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    !bitmap_empty(flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);
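/* Worked example for otx2_get_maxflows(): with max_flows = 16 and
 * dmacflt_max_flows = 8, the reported maximum grows to 24 once all 16
 * ntuple slots are in use or any CGX/RPM DMAC filter is installed; the
 * extra locations 16..23 address the DMAC filter space.
 */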

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}
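/* Example (illustrative): the TCP_V4_FLOW path above is exercised by an
 * ethtool ntuple rule such as
 *
 *	ethtool -U eth0 flow-type tcp4 src-ip 10.1.1.1 dst-port 80 action 2
 *
 * which sets NPC_SIP_IPV4, NPC_DPORT_TCP and NPC_IPPROTO_TCP in
 * req->features and steers matching packets to RX queue 2.
 */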

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) {
			pkt->next_header = ipv6_usr_hdr->l4_proto;
			pmask->next_header = ipv6_usr_mask->l4_proto;
			req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}
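/* Example (illustrative): the IPv6 equivalent,
 *
 *	ethtool -U eth0 flow-type tcp6 dst-ip 2001:db8::1 action -1
 *
 * takes the TCP_V6_FLOW path, setting NPC_DIP_IPV6 and NPC_IPPROTO_TCP;
 * "action -1" makes otx2_add_flow_msg() install a drop action.
 */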

static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
				     struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		u16 vlan_etype;

		if (fsp->m_ext.vlan_etype) {
			/* Partial masks not supported */
			if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
				return -EINVAL;

			vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);

			/* Drop rule with vlan_etype == 802.1Q
			 * and vlan_id == 0 is not supported
			 */
			if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
			    fsp->ring_cookie == RX_CLS_FLOW_DISC)
				return -EINVAL;

			/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
			if (vlan_etype != ETH_P_8021Q &&
			    vlan_etype != ETH_P_8021AD)
				return -EINVAL;

			memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
			       sizeof(pkt->vlan_etype));
			memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
			       sizeof(pmask->vlan_etype));

			if (vlan_etype == ETH_P_8021Q)
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
			else
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
		}

		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		if (fsp->m_ext.data[1]) {
			if (flow_type == IP_USER_FLOW) {
				if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE)
					return -EINVAL;

				pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]);
				pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]);
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (fsp->h_ext.data[1] ==
					cpu_to_be32(OTX2_DEFAULT_ACTION)) {
				/* Not Drop/Direct to queue but use action
				 * in default entry
				 */
				req->op = NIX_RX_ACTION_DEFAULT;
			}
		}
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}
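/* Example (illustrative): a FLOW_EXT rule matching a C-TAG VLAN,
 *
 *	ethtool -U eth0 flow-type ether vlan-etype 0x8100 vlan 5 action 1
 *
 * lands in the FLOW_EXT block above and sets NPC_VLAN_ETYPE_CTAG and
 * NPC_OUTER_VID in req->features.
 */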

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block dmac filtering is configured for white-listing,
	 * so check for an action other than DROP.
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}
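/* Example (illustrative): a rule that qualifies as a DMAC filter,
 *
 *	ethtool -U eth0 flow-type ether dst 02:00:00:00:00:01 action 0
 *
 * matches only on a valid destination MAC with no mask and a non-drop,
 * non-VF action, so it is installed via the CGX/RPM DMAC filter path
 * instead of an NPC MCAM ntuple entry.
 */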

static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
#ifdef CONFIG_DCB
	int vlan_prio, qidx, pfc_rule = 0;
#endif
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
			req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}

#ifdef CONFIG_DCB
		/* Identify PFC rule if PFC enabled and ntuple rule is vlan */
		if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
		    pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
			vlan_prio = ntohs(req->packet.vlan_tci) &
				    ntohs(req->mask.vlan_tci);

			/* Get the priority */
			vlan_prio >>= 13;
			flow->rule_type |= PFC_FLOWCTRL_RULE;
			/* Check if PFC enabled for this priority */
			if (pfvf->pfc_en & BIT(vlan_prio)) {
				pfc_rule = true;
				qidx = req->index;
			}
		}
#endif
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);

#ifdef CONFIG_DCB
	if (!err && pfc_rule)
		otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
#endif

	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
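/* Worked example for the PFC check above: a VLAN TCI of 0xa005 carries
 * the PCP bits in [15:13], so vlan_prio = 0xa005 >> 13 = 5; the rule is
 * treated as a PFC flow-control rule only if pfvf->pfc_en has BIT(5) set.
 */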

static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->rule_type |= DMAC_FILTER_RULE;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u64 vf_num;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	/* Number of queues on a VF can be greater or less than the PF's
	 * queue count, so there is no need to check the queue count when
	 * the PF is installing a rule for one of its VFs. Below is the
	 * expected vf_num value based on the ethtool command:
	 *
	 * e.g.
	 * 1. ethtool -U <netdev> ... action -1  ==> vf_num:255
	 * 2. ethtool -U <netdev> ... action <queue_num>  ==> vf_num:0
	 * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num>  ==>
	 *    vf_num:vf_idx+1
	 */
	vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
	if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
	    ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->flow_ent[flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->rule_type & DMAC_FILTER_RULE)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->rule_type |= DMAC_FILTER_RULE;
		flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	if (flow->is_vf)
		netdev_info(pfvf->netdev,
			    "Make sure that VF's queue number is within its queue limit\n");
	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);

				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->rule_type & DMAC_FILTER_RULE) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed delete macfilter with
		 * interface mac address and configure CGX/RPM block in
		 * promiscuous mode
		 */
		if (bitmap_weight(flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
#ifdef CONFIG_DCB
		if (flow->rule_type & PFC_FLOWCTRL_RULE)
			otx2_update_bpid_in_rqctx(pfvf, 0,
						  flow->flow_spec.ring_cookie,
						  false);
#endif

		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}
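/* Example (illustrative): rules installed with "ethtool -U" are removed
 * by location, e.g.
 *
 *	ethtool -U eth0 delete 5
 *
 * which reaches otx2_remove_flow() with location 5 and frees either the
 * NPC MCAM entry or the CGX/RPM DMAC filter backing that rule.
 */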

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	flow_cfg->max_flows = 0;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}
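/* Example (illustrative, assuming the usual netdev features path):
 * otx2_enable_rxvlan() is typically reached when the user toggles
 * hardware VLAN stripping, e.g.
 *
 *	ethtool -K eth0 rxvlan on    # enable NETIF_F_HW_VLAN_CTAG_RX
 *	ethtool -K eth0 rxvlan off
 */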

void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->rule_type & DMAC_FILTER_RULE) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}