// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU representor driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
#include <linux/sort.h>

#include "otx2_common.h"
#include "cn10k.h"
#include "otx2_reg.h"
#include "rep.h"

#define DRV_NAME	"rvu_rep"
#define DRV_STRING	"Marvell RVU Representor Driver"

static const struct pci_device_id rvu_rep_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) },
	{ }
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);

static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
			       struct rep_event *data);

static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
{
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct otx2_nic *priv = rep->mdev;
	int ent, allocated = 0;
	int count;

	rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL);

	if (!rep->flow_cfg)
		return -ENOMEM;

	count = OTX2_DEFAULT_FLOWCOUNT;

	rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL);
	if (!rep->flow_cfg->flow_ent)
		return -ENOMEM;

	/* MCAM allocation requests go over the AF mailbox; the matching
	 * unlock is at the exit label below.
	 */
	mutex_lock(&priv->mbox.lock);
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox);
		if (!req)
			goto exit;

		req->hdr.pcifunc = rep->pcifunc;
		req->contig = false;
		req->ref_entry = 0;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		if (otx2_sync_mbox_msg(&priv->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&priv->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		if (rsp->count != req->count)
			break;
	}
exit:
	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in an ascending
	 * order, otherwise user installed ntuple filter index and MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&rep->flow_cfg->flow_ent[0], allocated,
		     sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

	mutex_unlock(&priv->mbox.lock);

	rep->flow_cfg->max_flows = allocated;

	if (allocated) {
		rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	}

	INIT_LIST_HEAD(&rep->flow_cfg->flow_list);
	INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc);
	return 0;
}

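/* TC offload callback: representors share the parent PF's otx2_nic
 * context, so point the shared context at this representor's netdev,
 * flags, pcifunc and flow config before calling the common flower code.
 */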
static int rvu_rep_setup_tc_cb(enum tc_setup_type type,
			       void *type_data, void *cb_priv)
{
	struct rep_dev *rep = cb_priv;
	struct otx2_nic *priv = rep->mdev;

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return -EINVAL;

	if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		rvu_rep_mcam_flow_init(rep);

	priv->netdev = rep->netdev;
	priv->flags = rep->flags;
	priv->pcifunc = rep->pcifunc;
	priv->flow_cfg = rep->flow_cfg;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(rvu_rep_block_cb_list);

static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			    void *type_data)
{
	struct rep_dev *rep = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &rvu_rep_block_cb_list,
						  rvu_rep_setup_tc_cb,
						  rep, rep, true);
	default:
		return -EOPNOTSUPP;
	}
}

static int
rvu_rep_sp_stats64(const struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct otx2_rcv_queue *rq;
	struct otx2_snd_queue *sq;
	u16 qidx = rep->rep_id;

	otx2_update_rq_stats(priv, qidx);
	rq = &priv->qset.rq[qidx];

	otx2_update_sq_stats(priv, qidx);
	sq = &priv->qset.sq[qidx];

	stats->tx_bytes = sq->stats.bytes;
	stats->tx_packets = sq->stats.pkts;
	stats->rx_bytes = rq->stats.bytes;
	stats->rx_packets = rq->stats.pkts;
	return 0;
}

static bool
rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int
rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev,
			  void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port,
					  u8 *hw_addr, int *hw_addr_len,
					  struct netlink_ext_ack *extack)
{
	struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);

	ether_addr_copy(hw_addr, rep->mac);
	*hw_addr_len = ETH_ALEN;
	return 0;
}

static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port,
					  const u8 *hw_addr, int hw_addr_len,
					  struct netlink_ext_ack *extack)
{
	struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	eth_hw_addr_set(rep->netdev, hw_addr);
	ether_addr_copy(rep->mac, hw_addr);

	ether_addr_copy(evt.evt_data.mac, hw_addr);
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt);
	return 0;
}

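/* devlink "port function" ops: report the representor's MAC address and,
 * on a set, program it locally and forward it to the represented PF/VF
 * via a RVU_EVENT_MAC_ADDR_CHANGE mailbox notification.
 */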
static const struct devlink_port_ops rvu_rep_dl_port_ops = {
	.port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get,
	.port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set,
};

static void
rvu_rep_devlink_set_switch_id(struct otx2_nic *priv,
			      struct netdev_phys_item_id *ppid)
{
	struct pci_dev *pdev = priv->pdev;
	u64 id;

	id = pci_get_dsn(pdev);

	ppid->id_len = sizeof(id);
	put_unaligned_be64(id, &ppid->id);
}

static void rvu_rep_devlink_port_unregister(struct rep_dev *rep)
{
	devlink_port_unregister(&rep->dl_port);
}

static int rvu_rep_devlink_port_register(struct rep_dev *rep)
{
	struct devlink_port_attrs attrs = {};
	struct otx2_nic *priv = rep->mdev;
	struct devlink *dl = priv->dl->dl;
	int err;

	if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc);
	} else {
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
		attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc);
		attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
	}

	rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id);
	devlink_port_attrs_set(&rep->dl_port, &attrs);

	err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id,
					  &rvu_rep_dl_port_ops);
	if (err) {
		dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n",
			err);
		return err;
	}
	return 0;
}

static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc)
{
	int rep_id;

	for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++)
		if (priv->rep_pf_map[rep_id] == pcifunc)
			return rep_id;
	return -EINVAL;
}

static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
			       struct rep_event *data)
{
	struct rep_event *req;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	req->event = event;
	req->pcifunc = data->pcifunc;

	memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data));
	otx2_sync_mbox_msg(&priv->mbox);
	mutex_unlock(&priv->mbox.lock);
	return 0;
}

static void rvu_rep_state_evt_handler(struct otx2_nic *priv,
				      struct rep_event *info)
{
	struct rep_dev *rep;
	int rep_id;

	rep_id = rvu_rep_get_repid(priv, info->pcifunc);
	rep = priv->reps[rep_id];
	if (info->evt_data.vf_state)
		rep->flags |= RVU_REP_VF_INITIALIZED;
	else
		rep->flags &= ~RVU_REP_VF_INITIALIZED;
}

int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info)
{
	if (info->event & RVU_EVENT_PFVF_STATE)
		rvu_rep_state_evt_handler(pf, info);
	return 0;
}

static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	netdev_info(dev, "Changing MTU from %d to %d\n",
		    dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	evt.evt_data.mtu = new_mtu;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt);
	return 0;
}

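/* Reading NIX LF stats requires a (sleeping) AF mailbox exchange, so the
 * refresh runs from delayed work kicked off by .ndo_get_stats64, which
 * then reports the cached counters.
 */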
static void rvu_rep_get_stats(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct nix_stats_req *req;
	struct nix_stats_rsp *rsp;
	struct rep_stats *stats;
	struct otx2_nic *priv;
	struct rep_dev *rep;
	int err;

	rep = container_of(del_work, struct rep_dev, stats_wrk);
	priv = rep->mdev;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return;
	}
	req->pcifunc = rep->pcifunc;
	err = otx2_sync_mbox_msg_busy_poll(&priv->mbox);
	if (err)
		goto exit;

	rsp = (struct nix_stats_rsp *)
	      otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto exit;
	}

	stats = &rep->stats;
	stats->rx_bytes = rsp->rx.octs;
	stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast +
			   rsp->rx.mcast;
	stats->rx_drops = rsp->rx.drop;
	stats->rx_mcast_frames = rsp->rx.mcast;
	stats->tx_bytes = rsp->tx.octs;
	stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
	stats->tx_drops = rsp->tx.drop +
			  (unsigned long)atomic_long_read(&stats->tx_discards);
exit:
	mutex_unlock(&priv->mbox.lock);
}

static void rvu_rep_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct rep_dev *rep = netdev_priv(dev);

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return;

	stats->rx_packets = rep->stats.rx_frames;
	stats->rx_bytes = rep->stats.rx_bytes;
	stats->rx_dropped = rep->stats.rx_drops;
	stats->multicast = rep->stats.rx_mcast_frames;

	stats->tx_packets = rep->stats.tx_frames;
	stats->tx_bytes = rep->stats.tx_bytes;
	stats->tx_dropped = rep->stats.tx_drops;

	schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100));
}

static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
{
	struct esw_cfg_req *req;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	req->ena = ena;
	otx2_sync_mbox_msg(&priv->mbox);
	mutex_unlock(&priv->mbox.lock);
	return 0;
}

static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *pf = rep->mdev;
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;
	struct rep_stats *stats;

	/* Check for minimum and maximum packet length */
	if (skb->len <= ETH_HLEN ||
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
		stats = &rep->stats;
		atomic_long_inc(&stats->tx_discards);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sq = &pf->qset.sq[rep->rep_id];
	txq = netdev_get_tx_queue(dev, 0);

	if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}
	return NETDEV_TX_OK;
}

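/* .ndo_open/.ndo_stop do nothing until the represented PF/VF has come up
 * (RVU_REP_VF_INITIALIZED); carrier and queue state changes are mirrored
 * to it via RVU_EVENT_PORT_STATE notifications.
 */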
static int rvu_rep_open(struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return 0;

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	evt.event = RVU_EVENT_PORT_STATE;
	evt.evt_data.port_state = 1;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
	return 0;
}

static int rvu_rep_stop(struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return 0;

	netif_carrier_off(dev);
	netif_tx_disable(dev);

	evt.event = RVU_EVENT_PORT_STATE;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
	return 0;
}

static const struct net_device_ops rvu_rep_netdev_ops = {
	.ndo_open		= rvu_rep_open,
	.ndo_stop		= rvu_rep_stop,
	.ndo_start_xmit		= rvu_rep_xmit,
	.ndo_get_stats64	= rvu_rep_get_stats64,
	.ndo_change_mtu		= rvu_rep_change_mtu,
	.ndo_has_offload_stats	= rvu_rep_has_offload_stats,
	.ndo_get_offload_stats	= rvu_rep_get_offload_stats,
	.ndo_setup_tc		= rvu_rep_setup_tc,
};

static int rvu_rep_napi_init(struct otx2_nic *priv,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qset *qset = &priv->qset;
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_hw *hw = &priv->hw;
	int err = 0, qidx, vec;
	char *irq_name;

	qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
	if (!qset->napi)
		return -ENOMEM;

	/* Register NAPI handler */
	for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cq_poll->cint_idx = qidx;
		cq_poll->cq_ids[CQ_RX] =
			(qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ?
					  qidx + hw->rx_queues :
					  CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ;

		cq_poll->dev = (void *)priv;
		netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi,
			       otx2_napi_handler);
		napi_enable(&cq_poll->napi);
	}
	/* Register CQ IRQ handlers */
	vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
		irq_name = &hw->irq_name[vec * NAME_SIZE];

		snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx);

		err = request_irq(pci_irq_vector(priv->pdev, vec),
				  otx2_cq_intr_handler, 0, irq_name,
				  &qset->napi[qidx]);
		if (err) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "RVU REP IRQ registration failed for CQ%d",
					       qidx);
			goto err_free_cints;
		}
		vec++;

		/* Enable CQ IRQ */
		otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
		otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
	}
	priv->flags &= ~OTX2_FLAG_INTF_DOWN;
	return 0;

err_free_cints:
	otx2_free_cints(priv, qidx);
	otx2_disable_napi(priv);
	return err;
}

static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	struct otx2_cq_poll *cq_poll = NULL;
	int qidx, vec;

	/* Cleanup CQ NAPI and IRQ */
	vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		synchronize_irq(pci_irq_vector(priv->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);
		vec++;
	}
	otx2_free_cints(priv, priv->hw.cint_cnt);
	otx2_disable_napi(priv);
}

static void rvu_rep_rsrc_free(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	struct delayed_work *work;
	int wrk;

	for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) {
		work = &priv->refill_wrk[wrk].pool_refill_work;
		cancel_delayed_work_sync(work);
	}
	devm_kfree(priv->dev, priv->refill_wrk);

	otx2_free_hw_resources(priv);
	otx2_free_queue_mem(qset);
}

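/* Allocate queue memory and bring up NIX/NPA resources for the representor
 * queues (one RQ/SQ pair per representor, see rvu_get_rep_cnt()), then set
 * the maximum frame size allowed in HW.
 */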
static int rvu_rep_rsrc_init(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	int err;

	err = otx2_alloc_queue_mem(priv);
	if (err)
		return err;

	priv->hw.max_mtu = otx2_get_max_mtu(priv);
	priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN;
	priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;

	err = otx2_init_hw_resources(priv);
	if (err)
		goto err_free_rsrc;

	/* Set maximum frame size allowed in HW */
	err = otx2_hw_set_mtu(priv, priv->hw.max_mtu);
	if (err) {
		dev_err(priv->dev, "Failed to set HW MTU\n");
		goto err_free_rsrc;
	}
	return 0;

err_free_rsrc:
	otx2_free_hw_resources(priv);
	otx2_free_queue_mem(qset);
	return err;
}

void rvu_rep_destroy(struct otx2_nic *priv)
{
	struct rep_dev *rep;
	int rep_id;

	rvu_eswitch_config(priv, false);
	priv->flags |= OTX2_FLAG_INTF_DOWN;
	rvu_rep_free_cq_rsrc(priv);
	for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) {
		rep = priv->reps[rep_id];
		unregister_netdev(rep->netdev);
		rvu_rep_devlink_port_unregister(rep);
		free_netdev(rep->netdev);
		kfree(rep->flow_cfg);
	}
	kfree(priv->reps);
	rvu_rep_rsrc_free(priv);
}

int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
{
	int rep_cnt = priv->rep_cnt;
	struct net_device *ndev;
	struct rep_dev *rep;
	int rep_id, err;
	u16 pcifunc;

	err = rvu_rep_rsrc_init(priv);
	if (err)
		return -ENOMEM;

	priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL);
	if (!priv->reps)
		return -ENOMEM;

	for (rep_id = 0; rep_id < rep_cnt; rep_id++) {
		ndev = alloc_etherdev(sizeof(*rep));
		if (!ndev) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "PFVF representor:%d creation failed",
					       rep_id);
			err = -ENOMEM;
			goto exit;
		}

		rep = netdev_priv(ndev);
		priv->reps[rep_id] = rep;
		rep->mdev = priv;
		rep->netdev = ndev;
		rep->rep_id = rep_id;

		ndev->min_mtu = OTX2_MIN_MTU;
		ndev->max_mtu = priv->hw.max_mtu;
		ndev->netdev_ops = &rvu_rep_netdev_ops;
		pcifunc = priv->rep_pf_map[rep_id];
		rep->pcifunc = pcifunc;

		snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
			 rvu_get_pf(priv->pdev, pcifunc),
			 (pcifunc & RVU_PFVF_FUNC_MASK));

		ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
				     NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
				     NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);

		ndev->hw_features |= NETIF_F_HW_TC;
		ndev->features |= ndev->hw_features;
		eth_hw_addr_random(ndev);
		err = rvu_rep_devlink_port_register(rep);
		if (err) {
			free_netdev(ndev);
			goto exit;
		}

		SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
		err = register_netdev(ndev);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "PFVF representor registration failed");
			rvu_rep_devlink_port_unregister(rep);
			free_netdev(ndev);
			goto exit;
		}

		INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats);
	}
	err = rvu_rep_napi_init(priv, extack);
	if (err)
		goto exit;

	rvu_eswitch_config(priv, true);
	return 0;
exit:
	while (--rep_id >= 0) {
		rep = priv->reps[rep_id];
		unregister_netdev(rep->netdev);
		rvu_rep_devlink_port_unregister(rep);
		free_netdev(rep->netdev);
	}
	kfree(priv->reps);
	rvu_rep_rsrc_free(priv);
	return err;
}

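/* Query the AF for the number of representors and the PF/VF (pcifunc) that
 * each rep_id maps to; the RX/TX queue counts follow the representor count
 * since each representor is backed by one queue pair.
 */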
static int rvu_get_rep_cnt(struct otx2_nic *priv)
{
	struct get_rep_cnt_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err, rep;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&priv->mbox);
	if (err)
		goto exit;

	msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		err = PTR_ERR(msghdr);
		goto exit;
	}

	rsp = (struct get_rep_cnt_rsp *)msghdr;
	priv->hw.tx_queues = rsp->rep_cnt;
	priv->hw.rx_queues = rsp->rep_cnt;
	priv->rep_cnt = rsp->rep_cnt;
	for (rep = 0; rep < priv->rep_cnt; rep++)
		priv->rep_pf_map[rep] = rsp->rep_pf_map[rep];

exit:
	mutex_unlock(&priv->mbox.lock);
	return err;
}

static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct otx2_nic *priv;
	struct otx2_hw *hw;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pcim_request_all_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_set_drv_data;
	}

	pci_set_master(pdev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		goto err_set_drv_data;
	}

	pci_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->dev = dev;
	priv->flags |= OTX2_FLAG_INTF_DOWN;
	priv->flags |= OTX2_FLAG_REP_MODE_ENABLED;

	hw = &priv->hw;
	hw->pdev = pdev;
	hw->max_queues = OTX2_MAX_CQ_CNT;
	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
	hw->xqe_size = 128;

	err = otx2_init_rsrc(pdev, priv);
	if (err)
		goto err_set_drv_data;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	err = rvu_get_rep_cnt(priv);
	if (err)
		goto err_detach_rsrc;

	err = otx2_register_dl(priv);
	if (err)
		goto err_detach_rsrc;

	return 0;

err_detach_rsrc:
	if (priv->hw.lmt_info)
		free_percpu(priv->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
		qmem_free(priv->dev, priv->dync_lmt);
	otx2_detach_resources(&priv->mbox);
	otx2_disable_mbox_intr(priv);
	otx2_pfaf_mbox_destroy(priv);
	pci_free_irq_vectors(pdev);
err_set_drv_data:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void rvu_rep_remove(struct pci_dev *pdev)
{
	struct otx2_nic *priv = pci_get_drvdata(pdev);

	otx2_unregister_dl(priv);
	if (!(priv->flags & OTX2_FLAG_INTF_DOWN))
		rvu_rep_destroy(priv);
	otx2_detach_resources(&priv->mbox);
	if (priv->hw.lmt_info)
		free_percpu(priv->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
		qmem_free(priv->dev, priv->dync_lmt);
	otx2_disable_mbox_intr(priv);
	otx2_pfaf_mbox_destroy(priv);
	pci_free_irq_vectors(priv->pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver rvu_rep_driver = {
	.name = DRV_NAME,
	.id_table = rvu_rep_id_table,
	.probe = rvu_rep_probe,
	.remove = rvu_rep_remove,
	.shutdown = rvu_rep_remove,
};

static int __init rvu_rep_init_module(void)
{
	return pci_register_driver(&rvu_rep_driver);
}

static void __exit rvu_rep_cleanup_module(void)
{
	pci_unregister_driver(&rvu_rep_driver);
}

module_init(rvu_rep_init_module);
module_exit(rvu_rep_cleanup_module);