// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU representor driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
#include <linux/sort.h>

#include "otx2_common.h"
#include "cn10k.h"
#include "otx2_reg.h"
#include "rep.h"

#define DRV_NAME	"rvu_rep"
#define DRV_STRING	"Marvell RVU Representor Driver"

static const struct pci_device_id rvu_rep_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) },
	{ }
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);

static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
			       struct rep_event *data);

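/* Allocate NPC MCAM entries for this representor and set up its flow
 * configuration so ntuple and TC flower filters can be installed.
 */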
static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
{
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct otx2_nic *priv = rep->mdev;
	int ent, allocated = 0;
	int count;

	rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL);

	if (!rep->flow_cfg)
		return -ENOMEM;

	count = OTX2_DEFAULT_FLOWCOUNT;

	rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL);
	if (!rep->flow_cfg->flow_ent)
		return -ENOMEM;

	mutex_lock(&priv->mbox.lock);
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox);
		if (!req)
			goto exit;

		req->hdr.pcifunc = rep->pcifunc;
		req->contig = false;
		req->ref_entry = 0;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		if (otx2_sync_mbox_msg(&priv->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&priv->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		if (rsp->count != req->count)
			break;
	}
exit:
	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in an ascending
	 * order, otherwise user installed ntuple filter index and MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&rep->flow_cfg->flow_ent[0], allocated,
		     sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

	mutex_unlock(&priv->mbox.lock);

	rep->flow_cfg->max_flows = allocated;

	if (allocated) {
		rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	}

	INIT_LIST_HEAD(&rep->flow_cfg->flow_list);
	INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc);
	return 0;
}

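/* flow_block callback: lazily initialize MCAM flow state for the
 * representor and dispatch TC flower classifier requests.
 */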
static int rvu_rep_setup_tc_cb(enum tc_setup_type type,
			       void *type_data, void *cb_priv)
{
	struct rep_dev *rep = cb_priv;
	struct otx2_nic *priv = rep->mdev;

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return -EINVAL;

	if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		rvu_rep_mcam_flow_init(rep);

	priv->netdev = rep->netdev;
	priv->flags = rep->flags;
	priv->pcifunc = rep->pcifunc;
	priv->flow_cfg = rep->flow_cfg;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(rvu_rep_block_cb_list);

static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			    void *type_data)
{
	struct rep_dev *rep = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &rvu_rep_block_cb_list,
						  rvu_rep_setup_tc_cb,
						  rep, rep, true);
	default:
		return -EOPNOTSUPP;
	}
}

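/* Fill CPU-hit offload stats for a representor from its dedicated
 * RQ/SQ pair.
 */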
static int
rvu_rep_sp_stats64(const struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct otx2_rcv_queue *rq;
	struct otx2_snd_queue *sq;
	u16 qidx = rep->rep_id;

	otx2_update_rq_stats(priv, qidx);
	rq = &priv->qset.rq[qidx];

	otx2_update_sq_stats(priv, qidx);
	sq = &priv->qset.sq[qidx];

	stats->tx_bytes = sq->stats.bytes;
	stats->tx_packets = sq->stats.pkts;
	stats->rx_bytes = rq->stats.bytes;
	stats->rx_packets = rq->stats.pkts;
	return 0;
}

static bool
rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int
rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev,
			  void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);

	ether_addr_copy(hw_addr, rep->mac);
	*hw_addr_len = ETH_ALEN;
	return 0;
}

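/* devlink port function hw_addr set: update the representor's MAC and
 * notify the represented PF/VF about the change.
 */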
static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	eth_hw_addr_set(rep->netdev, hw_addr);
	ether_addr_copy(rep->mac, hw_addr);

	ether_addr_copy(evt.evt_data.mac, hw_addr);
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt);
	return 0;
}

static const struct devlink_port_ops rvu_rep_dl_port_ops = {
	.port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get,
	.port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set,
};

static void
rvu_rep_devlink_set_switch_id(struct otx2_nic *priv,
			      struct netdev_phys_item_id *ppid)
{
	struct pci_dev *pdev = priv->pdev;
	u64 id;

	id = pci_get_dsn(pdev);

	ppid->id_len = sizeof(id);
	put_unaligned_be64(id, &ppid->id);
}

static void rvu_rep_devlink_port_unregister(struct rep_dev *rep)
{
	devlink_port_unregister(&rep->dl_port);
}

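/* Register a devlink port for the representor: physical flavour for PFs,
 * PCI VF flavour otherwise, with the switch id derived from the PCI DSN.
 */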
static int rvu_rep_devlink_port_register(struct rep_dev *rep)
{
	struct devlink_port_attrs attrs = {};
	struct otx2_nic *priv = rep->mdev;
	struct devlink *dl = priv->dl->dl;
	int err;

	if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		attrs.phys.port_number = rvu_get_pf(rep->pcifunc);
	} else {
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
		attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc);
		attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
	}

	rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id);
	devlink_port_attrs_set(&rep->dl_port, &attrs);

	err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id,
					  &rvu_rep_dl_port_ops);
	if (err) {
		dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n",
			err);
		return err;
	}
	return 0;
}

static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc)
{
	int rep_id;

	for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++)
		if (priv->rep_pf_map[rep_id] == pcifunc)
			return rep_id;
	return -EINVAL;
}

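/* Send a representor event (MTU/MAC/port state change) for the given
 * pcifunc over the mailbox.
 */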
static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
			       struct rep_event *data)
{
	struct rep_event *req;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	req->event = event;
	req->pcifunc = data->pcifunc;

	memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data));
	otx2_sync_mbox_msg(&priv->mbox);
	mutex_unlock(&priv->mbox.lock);
	return 0;
}

static void rvu_rep_state_evt_handler(struct otx2_nic *priv,
				      struct rep_event *info)
{
	struct rep_dev *rep;
	int rep_id;

	rep_id = rvu_rep_get_repid(priv, info->pcifunc);
	if (rep_id < 0)
		return;

	rep = priv->reps[rep_id];
	if (info->evt_data.vf_state)
		rep->flags |= RVU_REP_VF_INITIALIZED;
	else
		rep->flags &= ~RVU_REP_VF_INITIALIZED;
}

int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info)
{
	if (info->event & RVU_EVENT_PFVF_STATE)
		rvu_rep_state_evt_handler(pf, info);
	return 0;
}

static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	netdev_info(dev, "Changing MTU from %d to %d\n",
		    dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	evt.evt_data.mtu = new_mtu;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt);
	return 0;
}

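/* Delayed work: fetch NIX LF hardware stats for the represented function
 * over the mailbox and cache them in rep->stats.
 */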
static void rvu_rep_get_stats(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct nix_stats_req *req;
	struct nix_stats_rsp *rsp;
	struct rep_stats *stats;
	struct otx2_nic *priv;
	struct rep_dev *rep;
	int err;

	rep = container_of(del_work, struct rep_dev, stats_wrk);
	priv = rep->mdev;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return;
	}
	req->pcifunc = rep->pcifunc;
	err = otx2_sync_mbox_msg_busy_poll(&priv->mbox);
	if (err)
		goto exit;

	rsp = (struct nix_stats_rsp *)
	       otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto exit;
	}

	stats = &rep->stats;
	stats->rx_bytes = rsp->rx.octs;
	stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast +
			   rsp->rx.mcast;
	stats->rx_drops = rsp->rx.drop;
	stats->rx_mcast_frames = rsp->rx.mcast;
	stats->tx_bytes = rsp->tx.octs;
	stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
	stats->tx_drops = rsp->tx.drop;
exit:
	mutex_unlock(&priv->mbox.lock);
}

static void rvu_rep_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct rep_dev *rep = netdev_priv(dev);

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return;

	stats->rx_packets = rep->stats.rx_frames;
	stats->rx_bytes = rep->stats.rx_bytes;
	stats->rx_dropped = rep->stats.rx_drops;
	stats->multicast = rep->stats.rx_mcast_frames;

	stats->tx_packets = rep->stats.tx_frames;
	stats->tx_bytes = rep->stats.tx_bytes;
	stats->tx_dropped = rep->stats.tx_drops;

	schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100));
}

static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
{
	struct esw_cfg_req *req;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	req->ena = ena;
	otx2_sync_mbox_msg(&priv->mbox);
	mutex_unlock(&priv->mbox.lock);
	return 0;
}

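/* Transmit on the SQ dedicated to this representor; stop the queue when
 * no SQE space is left and re-check for free space after the barrier.
 */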
static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *pf = rep->mdev;
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &pf->qset.sq[rep->rep_id];
	txq = netdev_get_tx_queue(dev, 0);

	if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
		    > sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}
	return NETDEV_TX_OK;
}

static int rvu_rep_open(struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return 0;

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	evt.event = RVU_EVENT_PORT_STATE;
	evt.evt_data.port_state = 1;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
	return 0;
}

static int rvu_rep_stop(struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return 0;

	netif_carrier_off(dev);
	netif_tx_disable(dev);

	evt.event = RVU_EVENT_PORT_STATE;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
	return 0;
}

static const struct net_device_ops rvu_rep_netdev_ops = {
	.ndo_open = rvu_rep_open,
	.ndo_stop = rvu_rep_stop,
	.ndo_start_xmit = rvu_rep_xmit,
	.ndo_get_stats64 = rvu_rep_get_stats64,
	.ndo_change_mtu = rvu_rep_change_mtu,
	.ndo_has_offload_stats = rvu_rep_has_offload_stats,
	.ndo_get_offload_stats = rvu_rep_get_offload_stats,
	.ndo_setup_tc = rvu_rep_setup_tc,
};

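/* Add NAPI contexts for all completion interrupts, request the CQ IRQs
 * and enable them so the representor datapath can start.
 */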
static int rvu_rep_napi_init(struct otx2_nic *priv,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qset *qset = &priv->qset;
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_hw *hw = &priv->hw;
	int err = 0, qidx, vec;
	char *irq_name;

	qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
	if (!qset->napi)
		return -ENOMEM;

	/* Register NAPI handler */
	for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cq_poll->cint_idx = qidx;
		cq_poll->cq_ids[CQ_RX] =
			(qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ?
					  qidx + hw->rx_queues :
					  CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ;

		cq_poll->dev = (void *)priv;
		netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi,
			       otx2_napi_handler);
		napi_enable(&cq_poll->napi);
	}
	/* Register CQ IRQ handlers */
	vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
		irq_name = &hw->irq_name[vec * NAME_SIZE];

		snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx);

		err = request_irq(pci_irq_vector(priv->pdev, vec),
				  otx2_cq_intr_handler, 0, irq_name,
				  &qset->napi[qidx]);
		if (err) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "RVU REP IRQ registration failed for CQ%d",
					       qidx);
			goto err_free_cints;
		}
		vec++;

		/* Enable CQ IRQ */
		otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
		otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
	}
	priv->flags &= ~OTX2_FLAG_INTF_DOWN;
	return 0;

err_free_cints:
	otx2_free_cints(priv, qidx);
	otx2_disable_napi(priv);
	return err;
}

static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	struct otx2_cq_poll *cq_poll = NULL;
	int qidx, vec;

	/* Cleanup CQ NAPI and IRQ */
	vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		synchronize_irq(pci_irq_vector(priv->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);
		vec++;
	}
	otx2_free_cints(priv, priv->hw.cint_cnt);
	otx2_disable_napi(priv);
}

static void rvu_rep_rsrc_free(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	struct delayed_work *work;
	int wrk;

	for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) {
		work = &priv->refill_wrk[wrk].pool_refill_work;
		cancel_delayed_work_sync(work);
	}
	devm_kfree(priv->dev, priv->refill_wrk);

	otx2_free_hw_resources(priv);
	otx2_free_queue_mem(qset);
}

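/* Allocate queue memory, size receive buffers from the HW max MTU and
 * bring up the hardware resources backing the representor queues.
 */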
static int rvu_rep_rsrc_init(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	int err;

	err = otx2_alloc_queue_mem(priv);
	if (err)
		return err;

	priv->hw.max_mtu = otx2_get_max_mtu(priv);
	priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN;
	priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;

	err = otx2_init_hw_resources(priv);
	if (err)
		goto err_free_rsrc;

	/* Set maximum frame size allowed in HW */
	err = otx2_hw_set_mtu(priv, priv->hw.max_mtu);
	if (err) {
		dev_err(priv->dev, "Failed to set HW MTU\n");
		goto err_free_rsrc;
	}
	return 0;

err_free_rsrc:
	otx2_free_hw_resources(priv);
	otx2_free_queue_mem(qset);
	return err;
}

void rvu_rep_destroy(struct otx2_nic *priv)
{
	struct rep_dev *rep;
	int rep_id;

	rvu_eswitch_config(priv, false);
	priv->flags |= OTX2_FLAG_INTF_DOWN;
	rvu_rep_free_cq_rsrc(priv);
	for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) {
		rep = priv->reps[rep_id];
		unregister_netdev(rep->netdev);
		rvu_rep_devlink_port_unregister(rep);
		free_netdev(rep->netdev);
		kfree(rep->flow_cfg);
	}
	kfree(priv->reps);
	rvu_rep_rsrc_free(priv);
}

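/* Create one netdev and devlink port per represented PF/VF, then enable
 * NAPI/IRQs and switch the AF into eswitch (representor) mode.
 */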
int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
{
	int rep_cnt = priv->rep_cnt;
	struct net_device *ndev;
	struct rep_dev *rep;
	int rep_id, err;
	u16 pcifunc;

	err = rvu_rep_rsrc_init(priv);
	if (err)
		return err;

	priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL);
	if (!priv->reps)
		return -ENOMEM;

	for (rep_id = 0; rep_id < rep_cnt; rep_id++) {
		ndev = alloc_etherdev(sizeof(*rep));
		if (!ndev) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "PFVF representor:%d creation failed",
					       rep_id);
			err = -ENOMEM;
			goto exit;
		}

		rep = netdev_priv(ndev);
		priv->reps[rep_id] = rep;
		rep->mdev = priv;
		rep->netdev = ndev;
		rep->rep_id = rep_id;

		ndev->min_mtu = OTX2_MIN_MTU;
		ndev->max_mtu = priv->hw.max_mtu;
		ndev->netdev_ops = &rvu_rep_netdev_ops;
		pcifunc = priv->rep_pf_map[rep_id];
		rep->pcifunc = pcifunc;

		snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
			 rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK));

		ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
				     NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
				     NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);

		ndev->hw_features |= NETIF_F_HW_TC;
		ndev->features |= ndev->hw_features;
		eth_hw_addr_random(ndev);
		err = rvu_rep_devlink_port_register(rep);
		if (err) {
			free_netdev(ndev);
			goto exit;
		}

		SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
		err = register_netdev(ndev);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "PFVF representor registration failed");
			rvu_rep_devlink_port_unregister(rep);
			free_netdev(ndev);
			goto exit;
		}

		INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats);
	}
	err = rvu_rep_napi_init(priv, extack);
	if (err)
		goto exit;

	rvu_eswitch_config(priv, true);
	return 0;
exit:
	while (--rep_id >= 0) {
		rep = priv->reps[rep_id];
		unregister_netdev(rep->netdev);
		rvu_rep_devlink_port_unregister(rep);
		free_netdev(rep->netdev);
	}
	kfree(priv->reps);
	rvu_rep_rsrc_free(priv);
	return err;
}

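/* Query the AF for the number of representors to create and the
 * PF/VF (pcifunc) each representor index maps to.
 */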
static int rvu_get_rep_cnt(struct otx2_nic *priv)
{
	struct get_rep_cnt_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err, rep;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&priv->mbox);
	if (err)
		goto exit;

	msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		err = PTR_ERR(msghdr);
		goto exit;
	}

	rsp = (struct get_rep_cnt_rsp *)msghdr;
	priv->hw.tx_queues = rsp->rep_cnt;
	priv->hw.rx_queues = rsp->rep_cnt;
	priv->rep_cnt = rsp->rep_cnt;
	for (rep = 0; rep < priv->rep_cnt; rep++)
		priv->rep_pf_map[rep] = rsp->rep_pf_map[rep];

exit:
	mutex_unlock(&priv->mbox.lock);
	return err;
}

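/* PCI probe: enable the device, set a 48-bit DMA mask, initialize the AF
 * mailbox and LF resources, discover the representor count and register
 * with devlink.
 */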
static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct otx2_nic *priv;
	struct otx2_hw *hw;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->dev = dev;
	priv->flags |= OTX2_FLAG_INTF_DOWN;
	priv->flags |= OTX2_FLAG_REP_MODE_ENABLED;

	hw = &priv->hw;
	hw->pdev = pdev;
	hw->max_queues = OTX2_MAX_CQ_CNT;
	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
	hw->xqe_size = 128;

	err = otx2_init_rsrc(pdev, priv);
	if (err)
		goto err_release_regions;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	err = rvu_get_rep_cnt(priv);
	if (err)
		goto err_detach_rsrc;

	err = otx2_register_dl(priv);
	if (err)
		goto err_detach_rsrc;

	return 0;

err_detach_rsrc:
	if (priv->hw.lmt_info)
		free_percpu(priv->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
		qmem_free(priv->dev, priv->dync_lmt);
	otx2_detach_resources(&priv->mbox);
	otx2_disable_mbox_intr(priv);
	otx2_pfaf_mbox_destroy(priv);
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	return err;
}

static void rvu_rep_remove(struct pci_dev *pdev)
{
	struct otx2_nic *priv = pci_get_drvdata(pdev);

	otx2_unregister_dl(priv);
	if (!(priv->flags & OTX2_FLAG_INTF_DOWN))
		rvu_rep_destroy(priv);
	otx2_detach_resources(&priv->mbox);
	if (priv->hw.lmt_info)
		free_percpu(priv->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
		qmem_free(priv->dev, priv->dync_lmt);
	otx2_disable_mbox_intr(priv);
	otx2_pfaf_mbox_destroy(priv);
	pci_free_irq_vectors(priv->pdev);
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
}

static struct pci_driver rvu_rep_driver = {
	.name = DRV_NAME,
	.id_table = rvu_rep_id_table,
	.probe = rvu_rep_probe,
	.remove = rvu_rep_remove,
	.shutdown = rvu_rep_remove,
};

static int __init rvu_rep_init_module(void)
{
	return pci_register_driver(&rvu_rep_driver);
}

static void __exit rvu_rep_cleanup_module(void)
{
	pci_unregister_driver(&rvu_rep_driver);
}

module_init(rvu_rep_init_module);
module_exit(rvu_rep_cleanup_module);