1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Physical Function ethernet driver 3 * 4 * Copyright (C) 2020 Marvell. 5 * 6 */ 7 8 #include <linux/module.h> 9 #include <linux/interrupt.h> 10 #include <linux/pci.h> 11 #include <linux/etherdevice.h> 12 #include <linux/of.h> 13 #include <linux/if_vlan.h> 14 #include <linux/iommu.h> 15 #include <net/ip.h> 16 #include <linux/bpf.h> 17 #include <linux/bpf_trace.h> 18 #include <linux/bitfield.h> 19 20 #include "otx2_reg.h" 21 #include "otx2_common.h" 22 #include "otx2_txrx.h" 23 #include "otx2_struct.h" 24 #include "otx2_ptp.h" 25 #include "cn10k.h" 26 #include "qos.h" 27 #include <rvu_trace.h> 28 29 #define DRV_NAME "rvu_nicpf" 30 #define DRV_STRING "Marvell RVU NIC Physical Function Driver" 31 32 /* Supported devices */ 33 static const struct pci_device_id otx2_pf_id_table[] = { 34 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) }, 35 { 0, } /* end of table */ 36 }; 37 38 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>"); 39 MODULE_DESCRIPTION(DRV_STRING); 40 MODULE_LICENSE("GPL v2"); 41 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table); 42 43 static void otx2_vf_link_event_task(struct work_struct *work); 44 45 enum { 46 TYPE_PFAF, 47 TYPE_PFVF, 48 }; 49 50 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable); 51 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable); 52 53 static int otx2_change_mtu(struct net_device *netdev, int new_mtu) 54 { 55 struct otx2_nic *pf = netdev_priv(netdev); 56 bool if_up = netif_running(netdev); 57 int err = 0; 58 59 if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) { 60 netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", 61 netdev->mtu); 62 return -EINVAL; 63 } 64 if (if_up) 65 otx2_stop(netdev); 66 67 netdev_info(netdev, "Changing MTU from %d to %d\n", 68 netdev->mtu, new_mtu); 69 netdev->mtu = new_mtu; 70 71 if (if_up) 72 err = otx2_open(netdev); 73 74 return err; 75 } 76 77 static void otx2_disable_flr_me_intr(struct otx2_nic *pf) 78 { 79 int irq, vfs = pf->total_vfs; 80 81 /* Disable VFs ME interrupts */ 82 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); 83 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0); 84 free_irq(irq, pf); 85 86 /* Disable VFs FLR interrupts */ 87 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); 88 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0); 89 free_irq(irq, pf); 90 91 if (vfs <= 64) 92 return; 93 94 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); 95 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1); 96 free_irq(irq, pf); 97 98 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); 99 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1); 100 free_irq(irq, pf); 101 } 102 103 static void otx2_flr_wq_destroy(struct otx2_nic *pf) 104 { 105 if (!pf->flr_wq) 106 return; 107 destroy_workqueue(pf->flr_wq); 108 pf->flr_wq = NULL; 109 devm_kfree(pf->dev, pf->flr_wrk); 110 } 111 112 static void otx2_flr_handler(struct work_struct *work) 113 { 114 struct flr_work *flrwork = container_of(work, struct flr_work, work); 115 struct otx2_nic *pf = flrwork->pf; 116 struct mbox *mbox = &pf->mbox; 117 struct msg_req *req; 118 int vf, reg = 0; 119 120 vf = flrwork - pf->flr_wrk; 121 122 mutex_lock(&mbox->lock); 123 req = otx2_mbox_alloc_msg_vf_flr(mbox); 124 if (!req) { 125 mutex_unlock(&mbox->lock); 126 return; 127 } 128 req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK; 129 req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK; 130 131 if (!otx2_sync_mbox_msg(&pf->mbox)) { 132 if (vf >= 64) { 133 reg = 1; 134 vf = vf - 64; 135 } 136 /* clear transcation pending bit */ 137 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); 138 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); 139 } 140 141 mutex_unlock(&mbox->lock); 142 } 143 144 static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq) 145 { 146 struct otx2_nic *pf = (struct otx2_nic *)pf_irq; 147 int reg, dev, vf, start_vf, num_reg = 1; 148 u64 intr; 149 150 if (pf->total_vfs > 64) 151 num_reg = 2; 152 153 for (reg = 0; reg < num_reg; reg++) { 154 intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg)); 155 if (!intr) 156 continue; 157 start_vf = 64 * reg; 158 for (vf = 0; vf < 64; vf++) { 159 if (!(intr & BIT_ULL(vf))) 160 continue; 161 dev = vf + start_vf; 162 queue_work(pf->flr_wq, &pf->flr_wrk[dev].work); 163 /* Clear interrupt */ 164 otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); 165 /* Disable the interrupt */ 166 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg), 167 BIT_ULL(vf)); 168 } 169 } 170 return IRQ_HANDLED; 171 } 172 173 static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq) 174 { 175 struct otx2_nic *pf = (struct otx2_nic *)pf_irq; 176 int vf, reg, num_reg = 1; 177 u64 intr; 178 179 if (pf->total_vfs > 64) 180 num_reg = 2; 181 182 for (reg = 0; reg < num_reg; reg++) { 183 intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg)); 184 if (!intr) 185 continue; 186 for (vf = 0; vf < 64; vf++) { 187 if (!(intr & BIT_ULL(vf))) 188 continue; 189 /* clear trpend bit */ 190 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); 191 /* clear interrupt */ 192 otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf)); 193 } 194 } 195 return IRQ_HANDLED; 196 } 197 198 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) 199 { 200 struct otx2_hw *hw = &pf->hw; 201 char *irq_name; 202 int ret; 203 204 /* Register ME interrupt handler*/ 205 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE]; 206 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); 207 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), 208 otx2_pf_me_intr_handler, 0, irq_name, pf); 209 if (ret) { 210 dev_err(pf->dev, 211 "RVUPF: IRQ registration failed for ME0\n"); 212 } 213 214 /* Register FLR interrupt handler */ 215 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE]; 216 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); 217 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), 218 otx2_pf_flr_intr_handler, 0, irq_name, pf); 219 if (ret) { 220 dev_err(pf->dev, 221 "RVUPF: IRQ registration failed for FLR0\n"); 222 return ret; 223 } 224 225 if (numvfs > 64) { 226 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE]; 227 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1", 228 rvu_get_pf(pf->pcifunc)); 229 ret = request_irq(pci_irq_vector 230 (pf->pdev, RVU_PF_INT_VEC_VFME1), 231 otx2_pf_me_intr_handler, 0, irq_name, pf); 232 if (ret) { 233 dev_err(pf->dev, 234 "RVUPF: IRQ registration failed for ME1\n"); 235 } 236 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE]; 237 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1", 238 rvu_get_pf(pf->pcifunc)); 239 ret = request_irq(pci_irq_vector 240 (pf->pdev, RVU_PF_INT_VEC_VFFLR1), 241 otx2_pf_flr_intr_handler, 0, irq_name, pf); 242 if (ret) { 243 dev_err(pf->dev, 244 "RVUPF: IRQ registration failed for FLR1\n"); 245 return ret; 246 } 247 } 248 249 /* Enable ME interrupt for all VFs*/ 250 otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs)); 251 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs)); 252 253 /* Enable FLR interrupt for all VFs*/ 254 otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs)); 255 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs)); 256 257 if (numvfs > 64) { 258 numvfs -= 64; 259 260 otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs)); 261 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1), 262 INTR_MASK(numvfs)); 263 264 otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs)); 265 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1), 266 INTR_MASK(numvfs)); 267 } 268 return 0; 269 } 270 271 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) 272 { 273 int vf; 274 275 pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI); 276 if (!pf->flr_wq) 277 return -ENOMEM; 278 279 pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs, 280 sizeof(struct flr_work), GFP_KERNEL); 281 if (!pf->flr_wrk) { 282 destroy_workqueue(pf->flr_wq); 283 return -ENOMEM; 284 } 285 286 for (vf = 0; vf < num_vfs; vf++) { 287 pf->flr_wrk[vf].pf = pf; 288 INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler); 289 } 290 291 return 0; 292 } 293 294 static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, 295 int first, int mdevs, u64 intr, int type) 296 { 297 struct otx2_mbox_dev *mdev; 298 struct otx2_mbox *mbox; 299 struct mbox_hdr *hdr; 300 int i; 301 302 for (i = first; i < mdevs; i++) { 303 /* start from 0 */ 304 if (!(intr & BIT_ULL(i - first))) 305 continue; 306 307 mbox = &mw->mbox; 308 mdev = &mbox->dev[i]; 309 if (type == TYPE_PFAF) 310 otx2_sync_mbox_bbuf(mbox, i); 311 hdr = mdev->mbase + mbox->rx_start; 312 /* The hdr->num_msgs is set to zero immediately in the interrupt 313 * handler to ensure that it holds a correct value next time 314 * when the interrupt handler is called. 315 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler 316 * pf>mbox.up_num_msgs holds the data for use in 317 * pfaf_mbox_up_handler. 318 */ 319 if (hdr->num_msgs) { 320 mw[i].num_msgs = hdr->num_msgs; 321 hdr->num_msgs = 0; 322 if (type == TYPE_PFAF) 323 memset(mbox->hwbase + mbox->rx_start, 0, 324 ALIGN(sizeof(struct mbox_hdr), 325 sizeof(u64))); 326 327 queue_work(mbox_wq, &mw[i].mbox_wrk); 328 } 329 330 mbox = &mw->mbox_up; 331 mdev = &mbox->dev[i]; 332 if (type == TYPE_PFAF) 333 otx2_sync_mbox_bbuf(mbox, i); 334 hdr = mdev->mbase + mbox->rx_start; 335 if (hdr->num_msgs) { 336 mw[i].up_num_msgs = hdr->num_msgs; 337 hdr->num_msgs = 0; 338 if (type == TYPE_PFAF) 339 memset(mbox->hwbase + mbox->rx_start, 0, 340 ALIGN(sizeof(struct mbox_hdr), 341 sizeof(u64))); 342 343 queue_work(mbox_wq, &mw[i].mbox_up_wrk); 344 } 345 } 346 } 347 348 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev, 349 struct otx2_mbox *pfvf_mbox, void *bbuf_base, 350 int devid) 351 { 352 struct otx2_mbox_dev *src_mdev = mdev; 353 int offset; 354 355 /* Msgs are already copied, trigger VF's mbox irq */ 356 smp_wmb(); 357 358 offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift); 359 writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset); 360 361 /* Restore VF's mbox bounce buffer region address */ 362 src_mdev->mbase = bbuf_base; 363 } 364 365 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, 366 struct otx2_mbox *src_mbox, 367 int dir, int vf, int num_msgs) 368 { 369 struct otx2_mbox_dev *src_mdev, *dst_mdev; 370 struct mbox_hdr *mbox_hdr; 371 struct mbox_hdr *req_hdr; 372 struct mbox *dst_mbox; 373 int dst_size, err; 374 375 if (dir == MBOX_DIR_PFAF) { 376 /* Set VF's mailbox memory as PF's bounce buffer memory, so 377 * that explicit copying of VF's msgs to PF=>AF mbox region 378 * and AF=>PF responses to VF's mbox region can be avoided. 379 */ 380 src_mdev = &src_mbox->dev[vf]; 381 mbox_hdr = src_mbox->hwbase + 382 src_mbox->rx_start + (vf * MBOX_SIZE); 383 384 dst_mbox = &pf->mbox; 385 dst_size = dst_mbox->mbox.tx_size - 386 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); 387 /* Check if msgs fit into destination area and has valid size */ 388 if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size) 389 return -EINVAL; 390 391 dst_mdev = &dst_mbox->mbox.dev[0]; 392 393 mutex_lock(&pf->mbox.lock); 394 dst_mdev->mbase = src_mdev->mbase; 395 dst_mdev->msg_size = mbox_hdr->msg_size; 396 dst_mdev->num_msgs = num_msgs; 397 err = otx2_sync_mbox_msg(dst_mbox); 398 /* Error code -EIO indicate there is a communication failure 399 * to the AF. Rest of the error codes indicate that AF processed 400 * VF messages and set the error codes in response messages 401 * (if any) so simply forward responses to VF. 402 */ 403 if (err == -EIO) { 404 dev_warn(pf->dev, 405 "AF not responding to VF%d messages\n", vf); 406 /* restore PF mbase and exit */ 407 dst_mdev->mbase = pf->mbox.bbuf_base; 408 mutex_unlock(&pf->mbox.lock); 409 return err; 410 } 411 /* At this point, all the VF messages sent to AF are acked 412 * with proper responses and responses are copied to VF 413 * mailbox hence raise interrupt to VF. 414 */ 415 req_hdr = (struct mbox_hdr *)(dst_mdev->mbase + 416 dst_mbox->mbox.rx_start); 417 req_hdr->num_msgs = num_msgs; 418 419 otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox, 420 pf->mbox.bbuf_base, vf); 421 mutex_unlock(&pf->mbox.lock); 422 } else if (dir == MBOX_DIR_PFVF_UP) { 423 src_mdev = &src_mbox->dev[0]; 424 mbox_hdr = src_mbox->hwbase + src_mbox->rx_start; 425 req_hdr = (struct mbox_hdr *)(src_mdev->mbase + 426 src_mbox->rx_start); 427 req_hdr->num_msgs = num_msgs; 428 429 dst_mbox = &pf->mbox_pfvf[0]; 430 dst_size = dst_mbox->mbox_up.tx_size - 431 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); 432 /* Check if msgs fit into destination area */ 433 if (mbox_hdr->msg_size > dst_size) 434 return -EINVAL; 435 436 dst_mdev = &dst_mbox->mbox_up.dev[vf]; 437 dst_mdev->mbase = src_mdev->mbase; 438 dst_mdev->msg_size = mbox_hdr->msg_size; 439 dst_mdev->num_msgs = mbox_hdr->num_msgs; 440 err = otx2_sync_mbox_up_msg(dst_mbox, vf); 441 if (err) { 442 dev_warn(pf->dev, 443 "VF%d is not responding to mailbox\n", vf); 444 return err; 445 } 446 } else if (dir == MBOX_DIR_VFPF_UP) { 447 req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase + 448 src_mbox->rx_start); 449 req_hdr->num_msgs = num_msgs; 450 otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf], 451 &pf->mbox.mbox_up, 452 pf->mbox_pfvf[vf].bbuf_base, 453 0); 454 } 455 456 return 0; 457 } 458 459 static void otx2_pfvf_mbox_handler(struct work_struct *work) 460 { 461 struct mbox_msghdr *msg = NULL; 462 int offset, vf_idx, id, err; 463 struct otx2_mbox_dev *mdev; 464 struct mbox_hdr *req_hdr; 465 struct otx2_mbox *mbox; 466 struct mbox *vf_mbox; 467 struct otx2_nic *pf; 468 469 vf_mbox = container_of(work, struct mbox, mbox_wrk); 470 pf = vf_mbox->pfvf; 471 vf_idx = vf_mbox - pf->mbox_pfvf; 472 473 mbox = &pf->mbox_pfvf[0].mbox; 474 mdev = &mbox->dev[vf_idx]; 475 req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 476 477 offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); 478 479 for (id = 0; id < vf_mbox->num_msgs; id++) { 480 msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + 481 offset); 482 483 if (msg->sig != OTX2_MBOX_REQ_SIG) 484 goto inval_msg; 485 486 /* Set VF's number in each of the msg */ 487 msg->pcifunc &= RVU_PFVF_FUNC_MASK; 488 msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK; 489 offset = msg->next_msgoff; 490 } 491 err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx, 492 vf_mbox->num_msgs); 493 if (err) 494 goto inval_msg; 495 return; 496 497 inval_msg: 498 otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id); 499 otx2_mbox_msg_send(mbox, vf_idx); 500 } 501 502 static void otx2_pfvf_mbox_up_handler(struct work_struct *work) 503 { 504 struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk); 505 struct otx2_nic *pf = vf_mbox->pfvf; 506 struct otx2_mbox_dev *mdev; 507 int offset, id, vf_idx = 0; 508 struct mbox_hdr *rsp_hdr; 509 struct mbox_msghdr *msg; 510 struct otx2_mbox *mbox; 511 512 vf_idx = vf_mbox - pf->mbox_pfvf; 513 mbox = &pf->mbox_pfvf[0].mbox_up; 514 mdev = &mbox->dev[vf_idx]; 515 516 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 517 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 518 519 for (id = 0; id < vf_mbox->up_num_msgs; id++) { 520 msg = mdev->mbase + offset; 521 522 if (msg->id >= MBOX_MSG_MAX) { 523 dev_err(pf->dev, 524 "Mbox msg with unknown ID 0x%x\n", msg->id); 525 goto end; 526 } 527 528 if (msg->sig != OTX2_MBOX_RSP_SIG) { 529 dev_err(pf->dev, 530 "Mbox msg with wrong signature %x, ID 0x%x\n", 531 msg->sig, msg->id); 532 goto end; 533 } 534 535 switch (msg->id) { 536 case MBOX_MSG_CGX_LINK_EVENT: 537 break; 538 default: 539 if (msg->rc) 540 dev_err(pf->dev, 541 "Mbox msg response has err %d, ID 0x%x\n", 542 msg->rc, msg->id); 543 break; 544 } 545 546 end: 547 offset = mbox->rx_start + msg->next_msgoff; 548 if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1)) 549 __otx2_mbox_reset(mbox, 0); 550 mdev->msgs_acked++; 551 } 552 } 553 554 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) 555 { 556 struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); 557 int vfs = pf->total_vfs; 558 struct mbox *mbox; 559 u64 intr; 560 561 mbox = pf->mbox_pfvf; 562 /* Handle VF interrupts */ 563 if (vfs > 64) { 564 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1)); 565 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); 566 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, 567 TYPE_PFVF); 568 vfs -= 64; 569 } 570 571 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); 572 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr); 573 574 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); 575 576 trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); 577 578 return IRQ_HANDLED; 579 } 580 581 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) 582 { 583 void __iomem *hwbase; 584 struct mbox *mbox; 585 int err, vf; 586 u64 base; 587 588 if (!numvfs) 589 return -EINVAL; 590 591 pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs, 592 sizeof(struct mbox), GFP_KERNEL); 593 if (!pf->mbox_pfvf) 594 return -ENOMEM; 595 596 pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox", 597 WQ_HIGHPRI | WQ_MEM_RECLAIM); 598 if (!pf->mbox_pfvf_wq) 599 return -ENOMEM; 600 601 /* On CN10K platform, PF <-> VF mailbox region follows after 602 * PF <-> AF mailbox region. 603 */ 604 if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) 605 base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + 606 MBOX_SIZE; 607 else 608 base = readq((void __iomem *)((u64)pf->reg_base + 609 RVU_PF_VF_BAR4_ADDR)); 610 611 hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); 612 if (!hwbase) { 613 err = -ENOMEM; 614 goto free_wq; 615 } 616 617 mbox = &pf->mbox_pfvf[0]; 618 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, 619 MBOX_DIR_PFVF, numvfs); 620 if (err) 621 goto free_iomem; 622 623 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, 624 MBOX_DIR_PFVF_UP, numvfs); 625 if (err) 626 goto free_iomem; 627 628 for (vf = 0; vf < numvfs; vf++) { 629 mbox->pfvf = pf; 630 INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler); 631 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler); 632 mbox++; 633 } 634 635 return 0; 636 637 free_iomem: 638 if (hwbase) 639 iounmap(hwbase); 640 free_wq: 641 destroy_workqueue(pf->mbox_pfvf_wq); 642 return err; 643 } 644 645 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) 646 { 647 struct mbox *mbox = &pf->mbox_pfvf[0]; 648 649 if (!mbox) 650 return; 651 652 if (pf->mbox_pfvf_wq) { 653 destroy_workqueue(pf->mbox_pfvf_wq); 654 pf->mbox_pfvf_wq = NULL; 655 } 656 657 if (mbox->mbox.hwbase) 658 iounmap(mbox->mbox.hwbase); 659 660 otx2_mbox_destroy(&mbox->mbox); 661 } 662 663 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) 664 { 665 /* Clear PF <=> VF mailbox IRQ */ 666 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); 667 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); 668 669 /* Enable PF <=> VF mailbox IRQ */ 670 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs)); 671 if (numvfs > 64) { 672 numvfs -= 64; 673 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), 674 INTR_MASK(numvfs)); 675 } 676 } 677 678 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) 679 { 680 int vector; 681 682 /* Disable PF <=> VF mailbox IRQ */ 683 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); 684 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); 685 686 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); 687 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0); 688 free_irq(vector, pf); 689 690 if (numvfs > 64) { 691 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); 692 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1); 693 free_irq(vector, pf); 694 } 695 } 696 697 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) 698 { 699 struct otx2_hw *hw = &pf->hw; 700 char *irq_name; 701 int err; 702 703 /* Register MBOX0 interrupt handler */ 704 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; 705 if (pf->pcifunc) 706 snprintf(irq_name, NAME_SIZE, 707 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); 708 else 709 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); 710 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), 711 otx2_pfvf_mbox_intr_handler, 0, irq_name, pf); 712 if (err) { 713 dev_err(pf->dev, 714 "RVUPF: IRQ registration failed for PFVF mbox0 irq\n"); 715 return err; 716 } 717 718 if (numvfs > 64) { 719 /* Register MBOX1 interrupt handler */ 720 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; 721 if (pf->pcifunc) 722 snprintf(irq_name, NAME_SIZE, 723 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); 724 else 725 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); 726 err = request_irq(pci_irq_vector(pf->pdev, 727 RVU_PF_INT_VEC_VFPF_MBOX1), 728 otx2_pfvf_mbox_intr_handler, 729 0, irq_name, pf); 730 if (err) { 731 dev_err(pf->dev, 732 "RVUPF: IRQ registration failed for PFVF mbox1 irq\n"); 733 return err; 734 } 735 } 736 737 otx2_enable_pfvf_mbox_intr(pf, numvfs); 738 739 return 0; 740 } 741 742 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf, 743 struct mbox_msghdr *msg) 744 { 745 int devid; 746 747 if (msg->id >= MBOX_MSG_MAX) { 748 dev_err(pf->dev, 749 "Mbox msg with unknown ID 0x%x\n", msg->id); 750 return; 751 } 752 753 if (msg->sig != OTX2_MBOX_RSP_SIG) { 754 dev_err(pf->dev, 755 "Mbox msg with wrong signature %x, ID 0x%x\n", 756 msg->sig, msg->id); 757 return; 758 } 759 760 /* message response heading VF */ 761 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; 762 if (devid) { 763 struct otx2_vf_config *config = &pf->vf_configs[devid - 1]; 764 struct delayed_work *dwork; 765 766 switch (msg->id) { 767 case MBOX_MSG_NIX_LF_START_RX: 768 config->intf_down = false; 769 dwork = &config->link_event_work; 770 schedule_delayed_work(dwork, msecs_to_jiffies(100)); 771 break; 772 case MBOX_MSG_NIX_LF_STOP_RX: 773 config->intf_down = true; 774 break; 775 } 776 777 return; 778 } 779 780 switch (msg->id) { 781 case MBOX_MSG_READY: 782 pf->pcifunc = msg->pcifunc; 783 break; 784 case MBOX_MSG_MSIX_OFFSET: 785 mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg); 786 break; 787 case MBOX_MSG_NPA_LF_ALLOC: 788 mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg); 789 break; 790 case MBOX_MSG_NIX_LF_ALLOC: 791 mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg); 792 break; 793 case MBOX_MSG_NIX_BP_ENABLE: 794 mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg); 795 break; 796 case MBOX_MSG_CGX_STATS: 797 mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg); 798 break; 799 case MBOX_MSG_CGX_FEC_STATS: 800 mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg); 801 break; 802 default: 803 if (msg->rc) 804 dev_err(pf->dev, 805 "Mbox msg response has err %d, ID 0x%x\n", 806 msg->rc, msg->id); 807 break; 808 } 809 } 810 811 static void otx2_pfaf_mbox_handler(struct work_struct *work) 812 { 813 struct otx2_mbox_dev *mdev; 814 struct mbox_hdr *rsp_hdr; 815 struct mbox_msghdr *msg; 816 struct otx2_mbox *mbox; 817 struct mbox *af_mbox; 818 struct otx2_nic *pf; 819 int offset, id; 820 821 af_mbox = container_of(work, struct mbox, mbox_wrk); 822 mbox = &af_mbox->mbox; 823 mdev = &mbox->dev[0]; 824 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 825 826 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 827 pf = af_mbox->pfvf; 828 829 for (id = 0; id < af_mbox->num_msgs; id++) { 830 msg = (struct mbox_msghdr *)(mdev->mbase + offset); 831 otx2_process_pfaf_mbox_msg(pf, msg); 832 offset = mbox->rx_start + msg->next_msgoff; 833 if (mdev->msgs_acked == (af_mbox->num_msgs - 1)) 834 __otx2_mbox_reset(mbox, 0); 835 mdev->msgs_acked++; 836 } 837 838 } 839 840 static void otx2_handle_link_event(struct otx2_nic *pf) 841 { 842 struct cgx_link_user_info *linfo = &pf->linfo; 843 struct net_device *netdev = pf->netdev; 844 845 pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name, 846 linfo->link_up ? "UP" : "DOWN", linfo->speed, 847 linfo->full_duplex ? "Full" : "Half"); 848 if (linfo->link_up) { 849 netif_carrier_on(netdev); 850 netif_tx_start_all_queues(netdev); 851 } else { 852 netif_tx_stop_all_queues(netdev); 853 netif_carrier_off(netdev); 854 } 855 } 856 857 int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf, 858 struct mcs_intr_info *event, 859 struct msg_rsp *rsp) 860 { 861 cn10k_handle_mcs_event(pf, event); 862 863 return 0; 864 } 865 866 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, 867 struct cgx_link_info_msg *msg, 868 struct msg_rsp *rsp) 869 { 870 int i; 871 872 /* Copy the link info sent by AF */ 873 pf->linfo = msg->link_info; 874 875 /* notify VFs about link event */ 876 for (i = 0; i < pci_num_vf(pf->pdev); i++) { 877 struct otx2_vf_config *config = &pf->vf_configs[i]; 878 struct delayed_work *dwork = &config->link_event_work; 879 880 if (config->intf_down) 881 continue; 882 883 schedule_delayed_work(dwork, msecs_to_jiffies(100)); 884 } 885 886 /* interface has not been fully configured yet */ 887 if (pf->flags & OTX2_FLAG_INTF_DOWN) 888 return 0; 889 890 otx2_handle_link_event(pf); 891 return 0; 892 } 893 894 static int otx2_process_mbox_msg_up(struct otx2_nic *pf, 895 struct mbox_msghdr *req) 896 { 897 /* Check if valid, if not reply with a invalid msg */ 898 if (req->sig != OTX2_MBOX_REQ_SIG) { 899 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); 900 return -ENODEV; 901 } 902 903 switch (req->id) { 904 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ 905 case _id: { \ 906 struct _rsp_type *rsp; \ 907 int err; \ 908 \ 909 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ 910 &pf->mbox.mbox_up, 0, \ 911 sizeof(struct _rsp_type)); \ 912 if (!rsp) \ 913 return -ENOMEM; \ 914 \ 915 rsp->hdr.id = _id; \ 916 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ 917 rsp->hdr.pcifunc = 0; \ 918 rsp->hdr.rc = 0; \ 919 \ 920 err = otx2_mbox_up_handler_ ## _fn_name( \ 921 pf, (struct _req_type *)req, rsp); \ 922 return err; \ 923 } 924 MBOX_UP_CGX_MESSAGES 925 MBOX_UP_MCS_MESSAGES 926 #undef M 927 break; 928 default: 929 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); 930 return -ENODEV; 931 } 932 return 0; 933 } 934 935 static void otx2_pfaf_mbox_up_handler(struct work_struct *work) 936 { 937 struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk); 938 struct otx2_mbox *mbox = &af_mbox->mbox_up; 939 struct otx2_mbox_dev *mdev = &mbox->dev[0]; 940 struct otx2_nic *pf = af_mbox->pfvf; 941 int offset, id, devid = 0; 942 struct mbox_hdr *rsp_hdr; 943 struct mbox_msghdr *msg; 944 945 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 946 947 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 948 949 for (id = 0; id < af_mbox->up_num_msgs; id++) { 950 msg = (struct mbox_msghdr *)(mdev->mbase + offset); 951 952 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; 953 /* Skip processing VF's messages */ 954 if (!devid) 955 otx2_process_mbox_msg_up(pf, msg); 956 offset = mbox->rx_start + msg->next_msgoff; 957 } 958 if (devid) { 959 otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up, 960 MBOX_DIR_PFVF_UP, devid - 1, 961 af_mbox->up_num_msgs); 962 return; 963 } 964 965 otx2_mbox_msg_send(mbox, 0); 966 } 967 968 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) 969 { 970 struct otx2_nic *pf = (struct otx2_nic *)pf_irq; 971 struct mbox *mbox; 972 973 /* Clear the IRQ */ 974 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); 975 976 mbox = &pf->mbox; 977 978 trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0)); 979 980 otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF); 981 982 return IRQ_HANDLED; 983 } 984 985 static void otx2_disable_mbox_intr(struct otx2_nic *pf) 986 { 987 int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); 988 989 /* Disable AF => PF mailbox IRQ */ 990 otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); 991 free_irq(vector, pf); 992 } 993 994 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) 995 { 996 struct otx2_hw *hw = &pf->hw; 997 struct msg_req *req; 998 char *irq_name; 999 int err; 1000 1001 /* Register mailbox interrupt handler */ 1002 irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; 1003 snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox"); 1004 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), 1005 otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); 1006 if (err) { 1007 dev_err(pf->dev, 1008 "RVUPF: IRQ registration failed for PFAF mbox irq\n"); 1009 return err; 1010 } 1011 1012 /* Enable mailbox interrupt for msgs coming from AF. 1013 * First clear to avoid spurious interrupts, if any. 1014 */ 1015 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); 1016 otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); 1017 1018 if (!probe_af) 1019 return 0; 1020 1021 /* Check mailbox communication with AF */ 1022 req = otx2_mbox_alloc_msg_ready(&pf->mbox); 1023 if (!req) { 1024 otx2_disable_mbox_intr(pf); 1025 return -ENOMEM; 1026 } 1027 err = otx2_sync_mbox_msg(&pf->mbox); 1028 if (err) { 1029 dev_warn(pf->dev, 1030 "AF not responding to mailbox, deferring probe\n"); 1031 otx2_disable_mbox_intr(pf); 1032 return -EPROBE_DEFER; 1033 } 1034 1035 return 0; 1036 } 1037 1038 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) 1039 { 1040 struct mbox *mbox = &pf->mbox; 1041 1042 if (pf->mbox_wq) { 1043 destroy_workqueue(pf->mbox_wq); 1044 pf->mbox_wq = NULL; 1045 } 1046 1047 if (mbox->mbox.hwbase) 1048 iounmap((void __iomem *)mbox->mbox.hwbase); 1049 1050 otx2_mbox_destroy(&mbox->mbox); 1051 otx2_mbox_destroy(&mbox->mbox_up); 1052 } 1053 1054 static int otx2_pfaf_mbox_init(struct otx2_nic *pf) 1055 { 1056 struct mbox *mbox = &pf->mbox; 1057 void __iomem *hwbase; 1058 int err; 1059 1060 mbox->pfvf = pf; 1061 pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox", 1062 WQ_HIGHPRI | WQ_MEM_RECLAIM); 1063 if (!pf->mbox_wq) 1064 return -ENOMEM; 1065 1066 /* Mailbox is a reserved memory (in RAM) region shared between 1067 * admin function (i.e AF) and this PF, shouldn't be mapped as 1068 * device memory to allow unaligned accesses. 1069 */ 1070 hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), 1071 MBOX_SIZE); 1072 if (!hwbase) { 1073 dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); 1074 err = -ENOMEM; 1075 goto exit; 1076 } 1077 1078 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, 1079 MBOX_DIR_PFAF, 1); 1080 if (err) 1081 goto exit; 1082 1083 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, 1084 MBOX_DIR_PFAF_UP, 1); 1085 if (err) 1086 goto exit; 1087 1088 err = otx2_mbox_bbuf_init(mbox, pf->pdev); 1089 if (err) 1090 goto exit; 1091 1092 INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler); 1093 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler); 1094 mutex_init(&mbox->lock); 1095 1096 return 0; 1097 exit: 1098 otx2_pfaf_mbox_destroy(pf); 1099 return err; 1100 } 1101 1102 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) 1103 { 1104 struct msg_req *msg; 1105 int err; 1106 1107 mutex_lock(&pf->mbox.lock); 1108 if (enable) 1109 msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox); 1110 else 1111 msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox); 1112 1113 if (!msg) { 1114 mutex_unlock(&pf->mbox.lock); 1115 return -ENOMEM; 1116 } 1117 1118 err = otx2_sync_mbox_msg(&pf->mbox); 1119 mutex_unlock(&pf->mbox.lock); 1120 return err; 1121 } 1122 1123 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) 1124 { 1125 struct msg_req *msg; 1126 int err; 1127 1128 if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap, 1129 pf->flow_cfg->dmacflt_max_flows)) 1130 netdev_warn(pf->netdev, 1131 "CGX/RPM internal loopback might not work as DMAC filters are active\n"); 1132 1133 mutex_lock(&pf->mbox.lock); 1134 if (enable) 1135 msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); 1136 else 1137 msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox); 1138 1139 if (!msg) { 1140 mutex_unlock(&pf->mbox.lock); 1141 return -ENOMEM; 1142 } 1143 1144 err = otx2_sync_mbox_msg(&pf->mbox); 1145 mutex_unlock(&pf->mbox.lock); 1146 return err; 1147 } 1148 1149 int otx2_set_real_num_queues(struct net_device *netdev, 1150 int tx_queues, int rx_queues) 1151 { 1152 int err; 1153 1154 err = netif_set_real_num_tx_queues(netdev, tx_queues); 1155 if (err) { 1156 netdev_err(netdev, 1157 "Failed to set no of Tx queues: %d\n", tx_queues); 1158 return err; 1159 } 1160 1161 err = netif_set_real_num_rx_queues(netdev, rx_queues); 1162 if (err) 1163 netdev_err(netdev, 1164 "Failed to set no of Rx queues: %d\n", rx_queues); 1165 return err; 1166 } 1167 EXPORT_SYMBOL(otx2_set_real_num_queues); 1168 1169 static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = { 1170 "NIX_SQOPERR_OOR", 1171 "NIX_SQOPERR_CTX_FAULT", 1172 "NIX_SQOPERR_CTX_POISON", 1173 "NIX_SQOPERR_DISABLED", 1174 "NIX_SQOPERR_SIZE_ERR", 1175 "NIX_SQOPERR_OFLOW", 1176 "NIX_SQOPERR_SQB_NULL", 1177 "NIX_SQOPERR_SQB_FAULT", 1178 "NIX_SQOPERR_SQE_SZ_ZERO", 1179 }; 1180 1181 static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = { 1182 "NIX_MNQERR_SQ_CTX_FAULT", 1183 "NIX_MNQERR_SQ_CTX_POISON", 1184 "NIX_MNQERR_SQB_FAULT", 1185 "NIX_MNQERR_SQB_POISON", 1186 "NIX_MNQERR_TOTAL_ERR", 1187 "NIX_MNQERR_LSO_ERR", 1188 "NIX_MNQERR_CQ_QUERY_ERR", 1189 "NIX_MNQERR_MAX_SQE_SIZE_ERR", 1190 "NIX_MNQERR_MAXLEN_ERR", 1191 "NIX_MNQERR_SQE_SIZEM1_ZERO", 1192 }; 1193 1194 static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = { 1195 "NIX_SND_STATUS_GOOD", 1196 "NIX_SND_STATUS_SQ_CTX_FAULT", 1197 "NIX_SND_STATUS_SQ_CTX_POISON", 1198 "NIX_SND_STATUS_SQB_FAULT", 1199 "NIX_SND_STATUS_SQB_POISON", 1200 "NIX_SND_STATUS_HDR_ERR", 1201 "NIX_SND_STATUS_EXT_ERR", 1202 "NIX_SND_STATUS_JUMP_FAULT", 1203 "NIX_SND_STATUS_JUMP_POISON", 1204 "NIX_SND_STATUS_CRC_ERR", 1205 "NIX_SND_STATUS_IMM_ERR", 1206 "NIX_SND_STATUS_SG_ERR", 1207 "NIX_SND_STATUS_MEM_ERR", 1208 "NIX_SND_STATUS_INVALID_SUBDC", 1209 "NIX_SND_STATUS_SUBDC_ORDER_ERR", 1210 "NIX_SND_STATUS_DATA_FAULT", 1211 "NIX_SND_STATUS_DATA_POISON", 1212 "NIX_SND_STATUS_NPC_DROP_ACTION", 1213 "NIX_SND_STATUS_LOCK_VIOL", 1214 "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR", 1215 "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR", 1216 "NIX_SND_STATUS_NPC_MCAST_ABORT", 1217 "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", 1218 "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", 1219 "NIX_SND_STATUS_SEND_STATS_ERR", 1220 }; 1221 1222 static irqreturn_t otx2_q_intr_handler(int irq, void *data) 1223 { 1224 struct otx2_nic *pf = data; 1225 struct otx2_snd_queue *sq; 1226 u64 val, *ptr; 1227 u64 qidx = 0; 1228 1229 /* CQ */ 1230 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { 1231 ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT); 1232 val = otx2_atomic64_add((qidx << 44), ptr); 1233 1234 otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) | 1235 (val & NIX_CQERRINT_BITS)); 1236 if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42)))) 1237 continue; 1238 1239 if (val & BIT_ULL(42)) { 1240 netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", 1241 qidx, otx2_read64(pf, NIX_LF_ERR_INT)); 1242 } else { 1243 if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR)) 1244 netdev_err(pf->netdev, "CQ%lld: Doorbell error", 1245 qidx); 1246 if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT)) 1247 netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM", 1248 qidx); 1249 } 1250 1251 schedule_work(&pf->reset_task); 1252 } 1253 1254 /* SQ */ 1255 for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) { 1256 u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg; 1257 u8 sq_op_err_code, mnq_err_code, snd_err_code; 1258 1259 sq = &pf->qset.sq[qidx]; 1260 if (!sq->sqb_ptrs) 1261 continue; 1262 1263 /* Below debug registers captures first errors corresponding to 1264 * those registers. We don't have to check against SQ qid as 1265 * these are fatal errors. 1266 */ 1267 1268 ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); 1269 val = otx2_atomic64_add((qidx << 44), ptr); 1270 otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | 1271 (val & NIX_SQINT_BITS)); 1272 1273 if (val & BIT_ULL(42)) { 1274 netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", 1275 qidx, otx2_read64(pf, NIX_LF_ERR_INT)); 1276 goto done; 1277 } 1278 1279 sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG); 1280 if (!(sq_op_err_dbg & BIT(44))) 1281 goto chk_mnq_err_dbg; 1282 1283 sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg); 1284 netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n", 1285 qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]); 1286 1287 otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44)); 1288 1289 if (sq_op_err_code == NIX_SQOPERR_SQB_NULL) 1290 goto chk_mnq_err_dbg; 1291 1292 /* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure. 1293 * TODO: But we are in irq context. How to call mbox functions which does sleep 1294 */ 1295 1296 chk_mnq_err_dbg: 1297 mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG); 1298 if (!(mnq_err_dbg & BIT(44))) 1299 goto chk_snd_err_dbg; 1300 1301 mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); 1302 netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n", 1303 qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]); 1304 otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); 1305 1306 chk_snd_err_dbg: 1307 snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); 1308 if (snd_err_dbg & BIT(44)) { 1309 snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); 1310 netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n", 1311 qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]); 1312 otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); 1313 } 1314 1315 done: 1316 /* Print values and reset */ 1317 if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) 1318 netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", 1319 qidx); 1320 1321 schedule_work(&pf->reset_task); 1322 } 1323 1324 return IRQ_HANDLED; 1325 } 1326 1327 static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) 1328 { 1329 struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq; 1330 struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev; 1331 int qidx = cq_poll->cint_idx; 1332 1333 /* Disable interrupts. 1334 * 1335 * Completion interrupts behave in a level-triggered interrupt 1336 * fashion, and hence have to be cleared only after it is serviced. 1337 */ 1338 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); 1339 1340 /* Schedule NAPI */ 1341 pf->napi_events++; 1342 napi_schedule_irqoff(&cq_poll->napi); 1343 1344 return IRQ_HANDLED; 1345 } 1346 1347 static void otx2_disable_napi(struct otx2_nic *pf) 1348 { 1349 struct otx2_qset *qset = &pf->qset; 1350 struct otx2_cq_poll *cq_poll; 1351 int qidx; 1352 1353 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { 1354 cq_poll = &qset->napi[qidx]; 1355 cancel_work_sync(&cq_poll->dim.work); 1356 napi_disable(&cq_poll->napi); 1357 netif_napi_del(&cq_poll->napi); 1358 } 1359 } 1360 1361 static void otx2_free_cq_res(struct otx2_nic *pf) 1362 { 1363 struct otx2_qset *qset = &pf->qset; 1364 struct otx2_cq_queue *cq; 1365 int qidx; 1366 1367 /* Disable CQs */ 1368 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false); 1369 for (qidx = 0; qidx < qset->cq_cnt; qidx++) { 1370 cq = &qset->cq[qidx]; 1371 qmem_free(pf->dev, cq->cqe); 1372 } 1373 } 1374 1375 static void otx2_free_sq_res(struct otx2_nic *pf) 1376 { 1377 struct otx2_qset *qset = &pf->qset; 1378 struct otx2_snd_queue *sq; 1379 int qidx; 1380 1381 /* Disable SQs */ 1382 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false); 1383 /* Free SQB pointers */ 1384 otx2_sq_free_sqbs(pf); 1385 for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) { 1386 sq = &qset->sq[qidx]; 1387 /* Skip freeing Qos queues if they are not initialized */ 1388 if (!sq->sqe) 1389 continue; 1390 qmem_free(pf->dev, sq->sqe); 1391 qmem_free(pf->dev, sq->tso_hdrs); 1392 kfree(sq->sg); 1393 kfree(sq->sqb_ptrs); 1394 } 1395 } 1396 1397 static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu) 1398 { 1399 int frame_size; 1400 int total_size; 1401 int rbuf_size; 1402 1403 if (pf->hw.rbuf_len) 1404 return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM; 1405 1406 /* The data transferred by NIX to memory consists of actual packet 1407 * plus additional data which has timestamp and/or EDSA/HIGIG2 1408 * headers if interface is configured in corresponding modes. 1409 * NIX transfers entire data using 6 segments/buffers and writes 1410 * a CQE_RX descriptor with those segment addresses. First segment 1411 * has additional data prepended to packet. Also software omits a 1412 * headroom of 128 bytes in each segment. Hence the total size of 1413 * memory needed to receive a packet with 'mtu' is: 1414 * frame size = mtu + additional data; 1415 * memory = frame_size + headroom * 6; 1416 * each receive buffer size = memory / 6; 1417 */ 1418 frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; 1419 total_size = frame_size + OTX2_HEAD_ROOM * 6; 1420 rbuf_size = total_size / 6; 1421 1422 return ALIGN(rbuf_size, 2048); 1423 } 1424 1425 static int otx2_init_hw_resources(struct otx2_nic *pf) 1426 { 1427 struct nix_lf_free_req *free_req; 1428 struct mbox *mbox = &pf->mbox; 1429 struct otx2_hw *hw = &pf->hw; 1430 struct msg_req *req; 1431 int err = 0, lvl; 1432 1433 /* Set required NPA LF's pool counts 1434 * Auras and Pools are used in a 1:1 mapping, 1435 * so, aura count = pool count. 1436 */ 1437 hw->rqpool_cnt = hw->rx_queues; 1438 hw->sqpool_cnt = otx2_get_total_tx_queues(pf); 1439 hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; 1440 1441 /* Maximum hardware supported transmit length */ 1442 pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; 1443 1444 pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); 1445 1446 mutex_lock(&mbox->lock); 1447 /* NPA init */ 1448 err = otx2_config_npa(pf); 1449 if (err) 1450 goto exit; 1451 1452 /* NIX init */ 1453 err = otx2_config_nix(pf); 1454 if (err) 1455 goto err_free_npa_lf; 1456 1457 /* Enable backpressure */ 1458 otx2_nix_config_bp(pf, true); 1459 1460 /* Init Auras and pools used by NIX RQ, for free buffer ptrs */ 1461 err = otx2_rq_aura_pool_init(pf); 1462 if (err) { 1463 mutex_unlock(&mbox->lock); 1464 goto err_free_nix_lf; 1465 } 1466 /* Init Auras and pools used by NIX SQ, for queueing SQEs */ 1467 err = otx2_sq_aura_pool_init(pf); 1468 if (err) { 1469 mutex_unlock(&mbox->lock); 1470 goto err_free_rq_ptrs; 1471 } 1472 1473 err = otx2_txsch_alloc(pf); 1474 if (err) { 1475 mutex_unlock(&mbox->lock); 1476 goto err_free_sq_ptrs; 1477 } 1478 1479 #ifdef CONFIG_DCB 1480 if (pf->pfc_en) { 1481 err = otx2_pfc_txschq_alloc(pf); 1482 if (err) { 1483 mutex_unlock(&mbox->lock); 1484 goto err_free_sq_ptrs; 1485 } 1486 } 1487 #endif 1488 1489 err = otx2_config_nix_queues(pf); 1490 if (err) { 1491 mutex_unlock(&mbox->lock); 1492 goto err_free_txsch; 1493 } 1494 1495 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1496 err = otx2_txschq_config(pf, lvl, 0, false); 1497 if (err) { 1498 mutex_unlock(&mbox->lock); 1499 goto err_free_nix_queues; 1500 } 1501 } 1502 1503 #ifdef CONFIG_DCB 1504 if (pf->pfc_en) { 1505 err = otx2_pfc_txschq_config(pf); 1506 if (err) { 1507 mutex_unlock(&mbox->lock); 1508 goto err_free_nix_queues; 1509 } 1510 } 1511 #endif 1512 1513 mutex_unlock(&mbox->lock); 1514 return err; 1515 1516 err_free_nix_queues: 1517 otx2_free_sq_res(pf); 1518 otx2_free_cq_res(pf); 1519 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); 1520 err_free_txsch: 1521 otx2_txschq_stop(pf); 1522 err_free_sq_ptrs: 1523 otx2_sq_free_sqbs(pf); 1524 err_free_rq_ptrs: 1525 otx2_free_aura_ptr(pf, AURA_NIX_RQ); 1526 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); 1527 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); 1528 otx2_aura_pool_free(pf); 1529 err_free_nix_lf: 1530 mutex_lock(&mbox->lock); 1531 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); 1532 if (free_req) { 1533 free_req->flags = NIX_LF_DISABLE_FLOWS; 1534 if (otx2_sync_mbox_msg(mbox)) 1535 dev_err(pf->dev, "%s failed to free nixlf\n", __func__); 1536 } 1537 err_free_npa_lf: 1538 /* Reset NPA LF */ 1539 req = otx2_mbox_alloc_msg_npa_lf_free(mbox); 1540 if (req) { 1541 if (otx2_sync_mbox_msg(mbox)) 1542 dev_err(pf->dev, "%s failed to free npalf\n", __func__); 1543 } 1544 exit: 1545 mutex_unlock(&mbox->lock); 1546 return err; 1547 } 1548 1549 static void otx2_free_hw_resources(struct otx2_nic *pf) 1550 { 1551 struct otx2_qset *qset = &pf->qset; 1552 struct nix_lf_free_req *free_req; 1553 struct mbox *mbox = &pf->mbox; 1554 struct otx2_cq_queue *cq; 1555 struct otx2_pool *pool; 1556 struct msg_req *req; 1557 int pool_id; 1558 int qidx; 1559 1560 /* Ensure all SQE are processed */ 1561 otx2_sqb_flush(pf); 1562 1563 /* Stop transmission */ 1564 otx2_txschq_stop(pf); 1565 1566 #ifdef CONFIG_DCB 1567 if (pf->pfc_en) 1568 otx2_pfc_txschq_stop(pf); 1569 #endif 1570 1571 otx2_clean_qos_queues(pf); 1572 1573 mutex_lock(&mbox->lock); 1574 /* Disable backpressure */ 1575 if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) 1576 otx2_nix_config_bp(pf, false); 1577 mutex_unlock(&mbox->lock); 1578 1579 /* Disable RQs */ 1580 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); 1581 1582 /*Dequeue all CQEs */ 1583 for (qidx = 0; qidx < qset->cq_cnt; qidx++) { 1584 cq = &qset->cq[qidx]; 1585 if (cq->cq_type == CQ_RX) 1586 otx2_cleanup_rx_cqes(pf, cq, qidx); 1587 else 1588 otx2_cleanup_tx_cqes(pf, cq); 1589 } 1590 1591 otx2_free_sq_res(pf); 1592 1593 /* Free RQ buffer pointers*/ 1594 otx2_free_aura_ptr(pf, AURA_NIX_RQ); 1595 1596 for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) { 1597 pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx); 1598 pool = &pf->qset.pool[pool_id]; 1599 page_pool_destroy(pool->page_pool); 1600 pool->page_pool = NULL; 1601 } 1602 1603 otx2_free_cq_res(pf); 1604 1605 /* Free all ingress bandwidth profiles allocated */ 1606 cn10k_free_all_ipolicers(pf); 1607 1608 mutex_lock(&mbox->lock); 1609 /* Reset NIX LF */ 1610 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); 1611 if (free_req) { 1612 free_req->flags = NIX_LF_DISABLE_FLOWS; 1613 if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN)) 1614 free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG; 1615 if (otx2_sync_mbox_msg(mbox)) 1616 dev_err(pf->dev, "%s failed to free nixlf\n", __func__); 1617 } 1618 mutex_unlock(&mbox->lock); 1619 1620 /* Disable NPA Pool and Aura hw context */ 1621 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); 1622 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); 1623 otx2_aura_pool_free(pf); 1624 1625 mutex_lock(&mbox->lock); 1626 /* Reset NPA LF */ 1627 req = otx2_mbox_alloc_msg_npa_lf_free(mbox); 1628 if (req) { 1629 if (otx2_sync_mbox_msg(mbox)) 1630 dev_err(pf->dev, "%s failed to free npalf\n", __func__); 1631 } 1632 mutex_unlock(&mbox->lock); 1633 } 1634 1635 static void otx2_do_set_rx_mode(struct otx2_nic *pf) 1636 { 1637 struct net_device *netdev = pf->netdev; 1638 struct nix_rx_mode *req; 1639 bool promisc = false; 1640 1641 if (!(netdev->flags & IFF_UP)) 1642 return; 1643 1644 if ((netdev->flags & IFF_PROMISC) || 1645 (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { 1646 promisc = true; 1647 } 1648 1649 /* Write unicast address to mcam entries or del from mcam */ 1650 if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) 1651 __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); 1652 1653 mutex_lock(&pf->mbox.lock); 1654 req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); 1655 if (!req) { 1656 mutex_unlock(&pf->mbox.lock); 1657 return; 1658 } 1659 1660 req->mode = NIX_RX_MODE_UCAST; 1661 1662 if (promisc) 1663 req->mode |= NIX_RX_MODE_PROMISC; 1664 if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) 1665 req->mode |= NIX_RX_MODE_ALLMULTI; 1666 1667 req->mode |= NIX_RX_MODE_USE_MCE; 1668 1669 otx2_sync_mbox_msg(&pf->mbox); 1670 mutex_unlock(&pf->mbox.lock); 1671 } 1672 1673 static void otx2_dim_work(struct work_struct *w) 1674 { 1675 struct dim_cq_moder cur_moder; 1676 struct otx2_cq_poll *cq_poll; 1677 struct otx2_nic *pfvf; 1678 struct dim *dim; 1679 1680 dim = container_of(w, struct dim, work); 1681 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 1682 cq_poll = container_of(dim, struct otx2_cq_poll, dim); 1683 pfvf = (struct otx2_nic *)cq_poll->dev; 1684 pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ? 1685 CQ_TIMER_THRESH_MAX : cur_moder.usec; 1686 pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ? 1687 NAPI_POLL_WEIGHT : cur_moder.pkts; 1688 dim->state = DIM_START_MEASURE; 1689 } 1690 1691 int otx2_open(struct net_device *netdev) 1692 { 1693 struct otx2_nic *pf = netdev_priv(netdev); 1694 struct otx2_cq_poll *cq_poll = NULL; 1695 struct otx2_qset *qset = &pf->qset; 1696 int err = 0, qidx, vec; 1697 char *irq_name; 1698 1699 netif_carrier_off(netdev); 1700 1701 /* RQ and SQs are mapped to different CQs, 1702 * so find out max CQ IRQs (i.e CINTs) needed. 1703 */ 1704 pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues, 1705 pf->hw.tc_tx_queues); 1706 1707 pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf); 1708 1709 qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL); 1710 if (!qset->napi) 1711 return -ENOMEM; 1712 1713 /* CQ size of RQ */ 1714 qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256); 1715 /* CQ size of SQ */ 1716 qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K); 1717 1718 err = -ENOMEM; 1719 qset->cq = kcalloc(pf->qset.cq_cnt, 1720 sizeof(struct otx2_cq_queue), GFP_KERNEL); 1721 if (!qset->cq) 1722 goto err_free_mem; 1723 1724 qset->sq = kcalloc(otx2_get_total_tx_queues(pf), 1725 sizeof(struct otx2_snd_queue), GFP_KERNEL); 1726 if (!qset->sq) 1727 goto err_free_mem; 1728 1729 qset->rq = kcalloc(pf->hw.rx_queues, 1730 sizeof(struct otx2_rcv_queue), GFP_KERNEL); 1731 if (!qset->rq) 1732 goto err_free_mem; 1733 1734 err = otx2_init_hw_resources(pf); 1735 if (err) 1736 goto err_free_mem; 1737 1738 /* Register NAPI handler */ 1739 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { 1740 cq_poll = &qset->napi[qidx]; 1741 cq_poll->cint_idx = qidx; 1742 /* RQ0 & SQ0 are mapped to CINT0 and so on.. 1743 * 'cq_ids[0]' points to RQ's CQ and 1744 * 'cq_ids[1]' points to SQ's CQ and 1745 * 'cq_ids[2]' points to XDP's CQ and 1746 */ 1747 cq_poll->cq_ids[CQ_RX] = 1748 (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; 1749 cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ? 1750 qidx + pf->hw.rx_queues : CINT_INVALID_CQ; 1751 if (pf->xdp_prog) 1752 cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ? 1753 (qidx + pf->hw.rx_queues + 1754 pf->hw.tx_queues) : 1755 CINT_INVALID_CQ; 1756 else 1757 cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; 1758 1759 cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ? 1760 (qidx + pf->hw.rx_queues + 1761 pf->hw.non_qos_queues) : 1762 CINT_INVALID_CQ; 1763 1764 cq_poll->dev = (void *)pf; 1765 cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; 1766 INIT_WORK(&cq_poll->dim.work, otx2_dim_work); 1767 netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler); 1768 napi_enable(&cq_poll->napi); 1769 } 1770 1771 /* Set maximum frame size allowed in HW */ 1772 err = otx2_hw_set_mtu(pf, netdev->mtu); 1773 if (err) 1774 goto err_disable_napi; 1775 1776 /* Setup segmentation algorithms, if failed, clear offload capability */ 1777 otx2_setup_segmentation(pf); 1778 1779 /* Initialize RSS */ 1780 err = otx2_rss_init(pf); 1781 if (err) 1782 goto err_disable_napi; 1783 1784 /* Register Queue IRQ handlers */ 1785 vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START; 1786 irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; 1787 1788 snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name); 1789 1790 err = request_irq(pci_irq_vector(pf->pdev, vec), 1791 otx2_q_intr_handler, 0, irq_name, pf); 1792 if (err) { 1793 dev_err(pf->dev, 1794 "RVUPF%d: IRQ registration failed for QERR\n", 1795 rvu_get_pf(pf->pcifunc)); 1796 goto err_disable_napi; 1797 } 1798 1799 /* Enable QINT IRQ */ 1800 otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0)); 1801 1802 /* Register CQ IRQ handlers */ 1803 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; 1804 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { 1805 irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; 1806 1807 snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name, 1808 qidx); 1809 1810 err = request_irq(pci_irq_vector(pf->pdev, vec), 1811 otx2_cq_intr_handler, 0, irq_name, 1812 &qset->napi[qidx]); 1813 if (err) { 1814 dev_err(pf->dev, 1815 "RVUPF%d: IRQ registration failed for CQ%d\n", 1816 rvu_get_pf(pf->pcifunc), qidx); 1817 goto err_free_cints; 1818 } 1819 vec++; 1820 1821 otx2_config_irq_coalescing(pf, qidx); 1822 1823 /* Enable CQ IRQ */ 1824 otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); 1825 otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); 1826 } 1827 1828 otx2_set_cints_affinity(pf); 1829 1830 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) 1831 otx2_enable_rxvlan(pf, true); 1832 1833 /* When reinitializing enable time stamping if it is enabled before */ 1834 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) { 1835 pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; 1836 otx2_config_hw_tx_tstamp(pf, true); 1837 } 1838 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) { 1839 pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; 1840 otx2_config_hw_rx_tstamp(pf, true); 1841 } 1842 1843 pf->flags &= ~OTX2_FLAG_INTF_DOWN; 1844 /* 'intf_down' may be checked on any cpu */ 1845 smp_wmb(); 1846 1847 /* Enable QoS configuration before starting tx queues */ 1848 otx2_qos_config_txschq(pf); 1849 1850 /* we have already received link status notification */ 1851 if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK)) 1852 otx2_handle_link_event(pf); 1853 1854 /* Install DMAC Filters */ 1855 if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) 1856 otx2_dmacflt_reinstall_flows(pf); 1857 1858 err = otx2_rxtx_enable(pf, true); 1859 /* If a mbox communication error happens at this point then interface 1860 * will end up in a state such that it is in down state but hardware 1861 * mcam entries are enabled to receive the packets. Hence disable the 1862 * packet I/O. 1863 */ 1864 if (err == EIO) 1865 goto err_disable_rxtx; 1866 else if (err) 1867 goto err_tx_stop_queues; 1868 1869 otx2_do_set_rx_mode(pf); 1870 1871 return 0; 1872 1873 err_disable_rxtx: 1874 otx2_rxtx_enable(pf, false); 1875 err_tx_stop_queues: 1876 netif_tx_stop_all_queues(netdev); 1877 netif_carrier_off(netdev); 1878 pf->flags |= OTX2_FLAG_INTF_DOWN; 1879 err_free_cints: 1880 otx2_free_cints(pf, qidx); 1881 vec = pci_irq_vector(pf->pdev, 1882 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); 1883 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); 1884 free_irq(vec, pf); 1885 err_disable_napi: 1886 otx2_disable_napi(pf); 1887 otx2_free_hw_resources(pf); 1888 err_free_mem: 1889 kfree(qset->sq); 1890 kfree(qset->cq); 1891 kfree(qset->rq); 1892 kfree(qset->napi); 1893 return err; 1894 } 1895 EXPORT_SYMBOL(otx2_open); 1896 1897 int otx2_stop(struct net_device *netdev) 1898 { 1899 struct otx2_nic *pf = netdev_priv(netdev); 1900 struct otx2_cq_poll *cq_poll = NULL; 1901 struct otx2_qset *qset = &pf->qset; 1902 struct otx2_rss_info *rss; 1903 int qidx, vec, wrk; 1904 1905 /* If the DOWN flag is set resources are already freed */ 1906 if (pf->flags & OTX2_FLAG_INTF_DOWN) 1907 return 0; 1908 1909 netif_carrier_off(netdev); 1910 netif_tx_stop_all_queues(netdev); 1911 1912 pf->flags |= OTX2_FLAG_INTF_DOWN; 1913 /* 'intf_down' may be checked on any cpu */ 1914 smp_wmb(); 1915 1916 /* First stop packet Rx/Tx */ 1917 otx2_rxtx_enable(pf, false); 1918 1919 /* Clear RSS enable flag */ 1920 rss = &pf->hw.rss_info; 1921 rss->enable = false; 1922 1923 /* Cleanup Queue IRQ */ 1924 vec = pci_irq_vector(pf->pdev, 1925 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); 1926 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); 1927 free_irq(vec, pf); 1928 1929 /* Cleanup CQ NAPI and IRQ */ 1930 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; 1931 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { 1932 /* Disable interrupt */ 1933 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); 1934 1935 synchronize_irq(pci_irq_vector(pf->pdev, vec)); 1936 1937 cq_poll = &qset->napi[qidx]; 1938 napi_synchronize(&cq_poll->napi); 1939 vec++; 1940 } 1941 1942 netif_tx_disable(netdev); 1943 1944 otx2_free_hw_resources(pf); 1945 otx2_free_cints(pf, pf->hw.cint_cnt); 1946 otx2_disable_napi(pf); 1947 1948 for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) 1949 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); 1950 1951 for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) 1952 cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); 1953 devm_kfree(pf->dev, pf->refill_wrk); 1954 1955 kfree(qset->sq); 1956 kfree(qset->cq); 1957 kfree(qset->rq); 1958 kfree(qset->napi); 1959 /* Do not clear RQ/SQ ringsize settings */ 1960 memset_startat(qset, 0, sqe_cnt); 1961 return 0; 1962 } 1963 EXPORT_SYMBOL(otx2_stop); 1964 1965 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) 1966 { 1967 struct otx2_nic *pf = netdev_priv(netdev); 1968 int qidx = skb_get_queue_mapping(skb); 1969 struct otx2_snd_queue *sq; 1970 struct netdev_queue *txq; 1971 int sq_idx; 1972 1973 /* XDP SQs are not mapped with TXQs 1974 * advance qid to derive correct sq mapped with QOS 1975 */ 1976 sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx; 1977 1978 /* Check for minimum and maximum packet length */ 1979 if (skb->len <= ETH_HLEN || 1980 (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) { 1981 dev_kfree_skb(skb); 1982 return NETDEV_TX_OK; 1983 } 1984 1985 sq = &pf->qset.sq[sq_idx]; 1986 txq = netdev_get_tx_queue(netdev, qidx); 1987 1988 if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { 1989 netif_tx_stop_queue(txq); 1990 1991 /* Check again, incase SQBs got freed up */ 1992 smp_mb(); 1993 if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) 1994 > sq->sqe_thresh) 1995 netif_tx_wake_queue(txq); 1996 1997 return NETDEV_TX_BUSY; 1998 } 1999 2000 return NETDEV_TX_OK; 2001 } 2002 2003 static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb, 2004 u16 htb_maj_id) 2005 { 2006 u16 classid; 2007 2008 if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id) 2009 classid = TC_H_MIN(skb->priority); 2010 else 2011 classid = READ_ONCE(pf->qos.defcls); 2012 2013 if (!classid) 2014 return 0; 2015 2016 return otx2_get_txq_by_classid(pf, classid); 2017 } 2018 2019 u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb, 2020 struct net_device *sb_dev) 2021 { 2022 struct otx2_nic *pf = netdev_priv(netdev); 2023 bool qos_enabled; 2024 #ifdef CONFIG_DCB 2025 u8 vlan_prio; 2026 #endif 2027 int txq; 2028 2029 qos_enabled = (netdev->real_num_tx_queues > pf->hw.tx_queues) ? true : false; 2030 if (unlikely(qos_enabled)) { 2031 /* This smp_load_acquire() pairs with smp_store_release() in 2032 * otx2_qos_root_add() called from htb offload root creation 2033 */ 2034 u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id); 2035 2036 if (unlikely(htb_maj_id)) { 2037 txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id); 2038 if (txq > 0) 2039 return txq; 2040 goto process_pfc; 2041 } 2042 } 2043 2044 process_pfc: 2045 #ifdef CONFIG_DCB 2046 if (!skb_vlan_tag_present(skb)) 2047 goto pick_tx; 2048 2049 vlan_prio = skb->vlan_tci >> 13; 2050 if ((vlan_prio > pf->hw.tx_queues - 1) || 2051 !pf->pfc_alloc_status[vlan_prio]) 2052 goto pick_tx; 2053 2054 return vlan_prio; 2055 2056 pick_tx: 2057 #endif 2058 txq = netdev_pick_tx(netdev, skb, NULL); 2059 if (unlikely(qos_enabled)) 2060 return txq % pf->hw.tx_queues; 2061 2062 return txq; 2063 } 2064 EXPORT_SYMBOL(otx2_select_queue); 2065 2066 static netdev_features_t otx2_fix_features(struct net_device *dev, 2067 netdev_features_t features) 2068 { 2069 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2070 features |= NETIF_F_HW_VLAN_STAG_RX; 2071 else 2072 features &= ~NETIF_F_HW_VLAN_STAG_RX; 2073 2074 return features; 2075 } 2076 2077 static void otx2_set_rx_mode(struct net_device *netdev) 2078 { 2079 struct otx2_nic *pf = netdev_priv(netdev); 2080 2081 queue_work(pf->otx2_wq, &pf->rx_mode_work); 2082 } 2083 2084 static void otx2_rx_mode_wrk_handler(struct work_struct *work) 2085 { 2086 struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); 2087 2088 otx2_do_set_rx_mode(pf); 2089 } 2090 2091 static int otx2_set_features(struct net_device *netdev, 2092 netdev_features_t features) 2093 { 2094 netdev_features_t changed = features ^ netdev->features; 2095 struct otx2_nic *pf = netdev_priv(netdev); 2096 2097 if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) 2098 return otx2_cgx_config_loopback(pf, 2099 features & NETIF_F_LOOPBACK); 2100 2101 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev)) 2102 return otx2_enable_rxvlan(pf, 2103 features & NETIF_F_HW_VLAN_CTAG_RX); 2104 2105 return otx2_handle_ntuple_tc_features(netdev, features); 2106 } 2107 2108 static void otx2_reset_task(struct work_struct *work) 2109 { 2110 struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task); 2111 2112 if (!netif_running(pf->netdev)) 2113 return; 2114 2115 rtnl_lock(); 2116 otx2_stop(pf->netdev); 2117 pf->reset_count++; 2118 otx2_open(pf->netdev); 2119 netif_trans_update(pf->netdev); 2120 rtnl_unlock(); 2121 } 2122 2123 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable) 2124 { 2125 struct msg_req *req; 2126 int err; 2127 2128 if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable) 2129 return 0; 2130 2131 mutex_lock(&pfvf->mbox.lock); 2132 if (enable) 2133 req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox); 2134 else 2135 req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox); 2136 if (!req) { 2137 mutex_unlock(&pfvf->mbox.lock); 2138 return -ENOMEM; 2139 } 2140 2141 err = otx2_sync_mbox_msg(&pfvf->mbox); 2142 if (err) { 2143 mutex_unlock(&pfvf->mbox.lock); 2144 return err; 2145 } 2146 2147 mutex_unlock(&pfvf->mbox.lock); 2148 if (enable) 2149 pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED; 2150 else 2151 pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; 2152 return 0; 2153 } 2154 2155 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) 2156 { 2157 struct msg_req *req; 2158 int err; 2159 2160 if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable) 2161 return 0; 2162 2163 mutex_lock(&pfvf->mbox.lock); 2164 if (enable) 2165 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox); 2166 else 2167 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox); 2168 if (!req) { 2169 mutex_unlock(&pfvf->mbox.lock); 2170 return -ENOMEM; 2171 } 2172 2173 err = otx2_sync_mbox_msg(&pfvf->mbox); 2174 if (err) { 2175 mutex_unlock(&pfvf->mbox.lock); 2176 return err; 2177 } 2178 2179 mutex_unlock(&pfvf->mbox.lock); 2180 if (enable) 2181 pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED; 2182 else 2183 pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; 2184 return 0; 2185 } 2186 2187 int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) 2188 { 2189 struct otx2_nic *pfvf = netdev_priv(netdev); 2190 struct hwtstamp_config config; 2191 2192 if (!pfvf->ptp) 2193 return -ENODEV; 2194 2195 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2196 return -EFAULT; 2197 2198 switch (config.tx_type) { 2199 case HWTSTAMP_TX_OFF: 2200 if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC) 2201 pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC; 2202 2203 cancel_delayed_work(&pfvf->ptp->synctstamp_work); 2204 otx2_config_hw_tx_tstamp(pfvf, false); 2205 break; 2206 case HWTSTAMP_TX_ONESTEP_SYNC: 2207 if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) 2208 return -ERANGE; 2209 pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC; 2210 schedule_delayed_work(&pfvf->ptp->synctstamp_work, 2211 msecs_to_jiffies(500)); 2212 fallthrough; 2213 case HWTSTAMP_TX_ON: 2214 otx2_config_hw_tx_tstamp(pfvf, true); 2215 break; 2216 default: 2217 return -ERANGE; 2218 } 2219 2220 switch (config.rx_filter) { 2221 case HWTSTAMP_FILTER_NONE: 2222 otx2_config_hw_rx_tstamp(pfvf, false); 2223 break; 2224 case HWTSTAMP_FILTER_ALL: 2225 case HWTSTAMP_FILTER_SOME: 2226 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2227 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2228 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2229 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2230 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2231 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2232 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2233 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2234 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2235 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2236 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2237 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2238 otx2_config_hw_rx_tstamp(pfvf, true); 2239 config.rx_filter = HWTSTAMP_FILTER_ALL; 2240 break; 2241 default: 2242 return -ERANGE; 2243 } 2244 2245 memcpy(&pfvf->tstamp, &config, sizeof(config)); 2246 2247 return copy_to_user(ifr->ifr_data, &config, 2248 sizeof(config)) ? -EFAULT : 0; 2249 } 2250 EXPORT_SYMBOL(otx2_config_hwtstamp); 2251 2252 int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) 2253 { 2254 struct otx2_nic *pfvf = netdev_priv(netdev); 2255 struct hwtstamp_config *cfg = &pfvf->tstamp; 2256 2257 switch (cmd) { 2258 case SIOCSHWTSTAMP: 2259 return otx2_config_hwtstamp(netdev, req); 2260 case SIOCGHWTSTAMP: 2261 return copy_to_user(req->ifr_data, cfg, 2262 sizeof(*cfg)) ? -EFAULT : 0; 2263 default: 2264 return -EOPNOTSUPP; 2265 } 2266 } 2267 EXPORT_SYMBOL(otx2_ioctl); 2268 2269 static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) 2270 { 2271 struct npc_install_flow_req *req; 2272 int err; 2273 2274 mutex_lock(&pf->mbox.lock); 2275 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); 2276 if (!req) { 2277 err = -ENOMEM; 2278 goto out; 2279 } 2280 2281 ether_addr_copy(req->packet.dmac, mac); 2282 eth_broadcast_addr((u8 *)&req->mask.dmac); 2283 req->features = BIT_ULL(NPC_DMAC); 2284 req->channel = pf->hw.rx_chan_base; 2285 req->intf = NIX_INTF_RX; 2286 req->default_rule = 1; 2287 req->append = 1; 2288 req->vf = vf + 1; 2289 req->op = NIX_RX_ACTION_DEFAULT; 2290 2291 err = otx2_sync_mbox_msg(&pf->mbox); 2292 out: 2293 mutex_unlock(&pf->mbox.lock); 2294 return err; 2295 } 2296 2297 static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2298 { 2299 struct otx2_nic *pf = netdev_priv(netdev); 2300 struct pci_dev *pdev = pf->pdev; 2301 struct otx2_vf_config *config; 2302 int ret; 2303 2304 if (!netif_running(netdev)) 2305 return -EAGAIN; 2306 2307 if (vf >= pf->total_vfs) 2308 return -EINVAL; 2309 2310 if (!is_valid_ether_addr(mac)) 2311 return -EINVAL; 2312 2313 config = &pf->vf_configs[vf]; 2314 ether_addr_copy(config->mac, mac); 2315 2316 ret = otx2_do_set_vf_mac(pf, vf, mac); 2317 if (ret == 0) 2318 dev_info(&pdev->dev, 2319 "Load/Reload VF driver\n"); 2320 2321 return ret; 2322 } 2323 2324 static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos, 2325 __be16 proto) 2326 { 2327 struct otx2_flow_config *flow_cfg = pf->flow_cfg; 2328 struct nix_vtag_config_rsp *vtag_rsp; 2329 struct npc_delete_flow_req *del_req; 2330 struct nix_vtag_config *vtag_req; 2331 struct npc_install_flow_req *req; 2332 struct otx2_vf_config *config; 2333 int err = 0; 2334 u32 idx; 2335 2336 config = &pf->vf_configs[vf]; 2337 2338 if (!vlan && !config->vlan) 2339 goto out; 2340 2341 mutex_lock(&pf->mbox.lock); 2342 2343 /* free old tx vtag entry */ 2344 if (config->vlan) { 2345 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); 2346 if (!vtag_req) { 2347 err = -ENOMEM; 2348 goto out; 2349 } 2350 vtag_req->cfg_type = 0; 2351 vtag_req->tx.free_vtag0 = 1; 2352 vtag_req->tx.vtag0_idx = config->tx_vtag_idx; 2353 2354 err = otx2_sync_mbox_msg(&pf->mbox); 2355 if (err) 2356 goto out; 2357 } 2358 2359 if (!vlan && config->vlan) { 2360 /* rx */ 2361 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); 2362 if (!del_req) { 2363 err = -ENOMEM; 2364 goto out; 2365 } 2366 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); 2367 del_req->entry = 2368 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; 2369 err = otx2_sync_mbox_msg(&pf->mbox); 2370 if (err) 2371 goto out; 2372 2373 /* tx */ 2374 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); 2375 if (!del_req) { 2376 err = -ENOMEM; 2377 goto out; 2378 } 2379 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); 2380 del_req->entry = 2381 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; 2382 err = otx2_sync_mbox_msg(&pf->mbox); 2383 2384 goto out; 2385 } 2386 2387 /* rx */ 2388 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); 2389 if (!req) { 2390 err = -ENOMEM; 2391 goto out; 2392 } 2393 2394 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); 2395 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; 2396 req->packet.vlan_tci = htons(vlan); 2397 req->mask.vlan_tci = htons(VLAN_VID_MASK); 2398 /* af fills the destination mac addr */ 2399 eth_broadcast_addr((u8 *)&req->mask.dmac); 2400 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC); 2401 req->channel = pf->hw.rx_chan_base; 2402 req->intf = NIX_INTF_RX; 2403 req->vf = vf + 1; 2404 req->op = NIX_RX_ACTION_DEFAULT; 2405 req->vtag0_valid = true; 2406 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 2407 req->set_cntr = 1; 2408 2409 err = otx2_sync_mbox_msg(&pf->mbox); 2410 if (err) 2411 goto out; 2412 2413 /* tx */ 2414 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); 2415 if (!vtag_req) { 2416 err = -ENOMEM; 2417 goto out; 2418 } 2419 2420 /* configure tx vtag params */ 2421 vtag_req->vtag_size = VTAGSIZE_T4; 2422 vtag_req->cfg_type = 0; /* tx vlan cfg */ 2423 vtag_req->tx.cfg_vtag0 = 1; 2424 vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan; 2425 2426 err = otx2_sync_mbox_msg(&pf->mbox); 2427 if (err) 2428 goto out; 2429 2430 vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp 2431 (&pf->mbox.mbox, 0, &vtag_req->hdr); 2432 if (IS_ERR(vtag_rsp)) { 2433 err = PTR_ERR(vtag_rsp); 2434 goto out; 2435 } 2436 config->tx_vtag_idx = vtag_rsp->vtag0_idx; 2437 2438 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); 2439 if (!req) { 2440 err = -ENOMEM; 2441 goto out; 2442 } 2443 2444 eth_zero_addr((u8 *)&req->mask.dmac); 2445 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); 2446 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; 2447 req->features = BIT_ULL(NPC_DMAC); 2448 req->channel = pf->hw.tx_chan_base; 2449 req->intf = NIX_INTF_TX; 2450 req->vf = vf + 1; 2451 req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT; 2452 req->vtag0_def = vtag_rsp->vtag0_idx; 2453 req->vtag0_op = VTAG_INSERT; 2454 req->set_cntr = 1; 2455 2456 err = otx2_sync_mbox_msg(&pf->mbox); 2457 out: 2458 config->vlan = vlan; 2459 mutex_unlock(&pf->mbox.lock); 2460 return err; 2461 } 2462 2463 static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, 2464 __be16 proto) 2465 { 2466 struct otx2_nic *pf = netdev_priv(netdev); 2467 struct pci_dev *pdev = pf->pdev; 2468 2469 if (!netif_running(netdev)) 2470 return -EAGAIN; 2471 2472 if (vf >= pci_num_vf(pdev)) 2473 return -EINVAL; 2474 2475 /* qos is currently unsupported */ 2476 if (vlan >= VLAN_N_VID || qos) 2477 return -EINVAL; 2478 2479 if (proto != htons(ETH_P_8021Q)) 2480 return -EPROTONOSUPPORT; 2481 2482 if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT)) 2483 return -EOPNOTSUPP; 2484 2485 return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto); 2486 } 2487 2488 static int otx2_get_vf_config(struct net_device *netdev, int vf, 2489 struct ifla_vf_info *ivi) 2490 { 2491 struct otx2_nic *pf = netdev_priv(netdev); 2492 struct pci_dev *pdev = pf->pdev; 2493 struct otx2_vf_config *config; 2494 2495 if (!netif_running(netdev)) 2496 return -EAGAIN; 2497 2498 if (vf >= pci_num_vf(pdev)) 2499 return -EINVAL; 2500 2501 config = &pf->vf_configs[vf]; 2502 ivi->vf = vf; 2503 ether_addr_copy(ivi->mac, config->mac); 2504 ivi->vlan = config->vlan; 2505 ivi->trusted = config->trusted; 2506 2507 return 0; 2508 } 2509 2510 static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, 2511 int qidx) 2512 { 2513 struct page *page; 2514 u64 dma_addr; 2515 int err = 0; 2516 2517 dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data), 2518 offset_in_page(xdpf->data), xdpf->len, 2519 DMA_TO_DEVICE); 2520 if (dma_mapping_error(pf->dev, dma_addr)) 2521 return -ENOMEM; 2522 2523 err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx); 2524 if (!err) { 2525 otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE); 2526 page = virt_to_page(xdpf->data); 2527 put_page(page); 2528 return -ENOMEM; 2529 } 2530 return 0; 2531 } 2532 2533 static int otx2_xdp_xmit(struct net_device *netdev, int n, 2534 struct xdp_frame **frames, u32 flags) 2535 { 2536 struct otx2_nic *pf = netdev_priv(netdev); 2537 int qidx = smp_processor_id(); 2538 struct otx2_snd_queue *sq; 2539 int drops = 0, i; 2540 2541 if (!netif_running(netdev)) 2542 return -ENETDOWN; 2543 2544 qidx += pf->hw.tx_queues; 2545 sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL; 2546 2547 /* Abort xmit if xdp queue is not */ 2548 if (unlikely(!sq)) 2549 return -ENXIO; 2550 2551 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2552 return -EINVAL; 2553 2554 for (i = 0; i < n; i++) { 2555 struct xdp_frame *xdpf = frames[i]; 2556 int err; 2557 2558 err = otx2_xdp_xmit_tx(pf, xdpf, qidx); 2559 if (err) 2560 drops++; 2561 } 2562 return n - drops; 2563 } 2564 2565 static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog) 2566 { 2567 struct net_device *dev = pf->netdev; 2568 bool if_up = netif_running(pf->netdev); 2569 struct bpf_prog *old_prog; 2570 2571 if (prog && dev->mtu > MAX_XDP_MTU) { 2572 netdev_warn(dev, "Jumbo frames not yet supported with XDP\n"); 2573 return -EOPNOTSUPP; 2574 } 2575 2576 if (if_up) 2577 otx2_stop(pf->netdev); 2578 2579 old_prog = xchg(&pf->xdp_prog, prog); 2580 2581 if (old_prog) 2582 bpf_prog_put(old_prog); 2583 2584 if (pf->xdp_prog) 2585 bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1); 2586 2587 /* Network stack and XDP shared same rx queues. 2588 * Use separate tx queues for XDP and network stack. 2589 */ 2590 if (pf->xdp_prog) { 2591 pf->hw.xdp_queues = pf->hw.rx_queues; 2592 xdp_features_set_redirect_target(dev, false); 2593 } else { 2594 pf->hw.xdp_queues = 0; 2595 xdp_features_clear_redirect_target(dev); 2596 } 2597 2598 pf->hw.non_qos_queues += pf->hw.xdp_queues; 2599 2600 if (if_up) 2601 otx2_open(pf->netdev); 2602 2603 return 0; 2604 } 2605 2606 static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp) 2607 { 2608 struct otx2_nic *pf = netdev_priv(netdev); 2609 2610 switch (xdp->command) { 2611 case XDP_SETUP_PROG: 2612 return otx2_xdp_setup(pf, xdp->prog); 2613 default: 2614 return -EINVAL; 2615 } 2616 } 2617 2618 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf, 2619 int req_perm) 2620 { 2621 struct set_vf_perm *req; 2622 int rc; 2623 2624 mutex_lock(&pf->mbox.lock); 2625 req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox); 2626 if (!req) { 2627 rc = -ENOMEM; 2628 goto out; 2629 } 2630 2631 /* Let AF reset VF permissions as sriov is disabled */ 2632 if (req_perm == OTX2_RESET_VF_PERM) { 2633 req->flags |= RESET_VF_PERM; 2634 } else if (req_perm == OTX2_TRUSTED_VF) { 2635 if (pf->vf_configs[vf].trusted) 2636 req->flags |= VF_TRUSTED; 2637 } 2638 2639 req->vf = vf; 2640 rc = otx2_sync_mbox_msg(&pf->mbox); 2641 out: 2642 mutex_unlock(&pf->mbox.lock); 2643 return rc; 2644 } 2645 2646 static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf, 2647 bool enable) 2648 { 2649 struct otx2_nic *pf = netdev_priv(netdev); 2650 struct pci_dev *pdev = pf->pdev; 2651 int rc; 2652 2653 if (vf >= pci_num_vf(pdev)) 2654 return -EINVAL; 2655 2656 if (pf->vf_configs[vf].trusted == enable) 2657 return 0; 2658 2659 pf->vf_configs[vf].trusted = enable; 2660 rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF); 2661 2662 if (rc) 2663 pf->vf_configs[vf].trusted = !enable; 2664 else 2665 netdev_info(pf->netdev, "VF %d is %strusted\n", 2666 vf, enable ? "" : "not "); 2667 return rc; 2668 } 2669 2670 static const struct net_device_ops otx2_netdev_ops = { 2671 .ndo_open = otx2_open, 2672 .ndo_stop = otx2_stop, 2673 .ndo_start_xmit = otx2_xmit, 2674 .ndo_select_queue = otx2_select_queue, 2675 .ndo_fix_features = otx2_fix_features, 2676 .ndo_set_mac_address = otx2_set_mac_address, 2677 .ndo_change_mtu = otx2_change_mtu, 2678 .ndo_set_rx_mode = otx2_set_rx_mode, 2679 .ndo_set_features = otx2_set_features, 2680 .ndo_tx_timeout = otx2_tx_timeout, 2681 .ndo_get_stats64 = otx2_get_stats64, 2682 .ndo_eth_ioctl = otx2_ioctl, 2683 .ndo_set_vf_mac = otx2_set_vf_mac, 2684 .ndo_set_vf_vlan = otx2_set_vf_vlan, 2685 .ndo_get_vf_config = otx2_get_vf_config, 2686 .ndo_bpf = otx2_xdp, 2687 .ndo_xdp_xmit = otx2_xdp_xmit, 2688 .ndo_setup_tc = otx2_setup_tc, 2689 .ndo_set_vf_trust = otx2_ndo_set_vf_trust, 2690 }; 2691 2692 static int otx2_wq_init(struct otx2_nic *pf) 2693 { 2694 pf->otx2_wq = create_singlethread_workqueue("otx2_wq"); 2695 if (!pf->otx2_wq) 2696 return -ENOMEM; 2697 2698 INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler); 2699 INIT_WORK(&pf->reset_task, otx2_reset_task); 2700 return 0; 2701 } 2702 2703 static int otx2_check_pf_usable(struct otx2_nic *nic) 2704 { 2705 u64 rev; 2706 2707 rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM)); 2708 rev = (rev >> 12) & 0xFF; 2709 /* Check if AF has setup revision for RVUM block, 2710 * otherwise this driver probe should be deferred 2711 * until AF driver comes up. 2712 */ 2713 if (!rev) { 2714 dev_warn(nic->dev, 2715 "AF is not initialized, deferring probe\n"); 2716 return -EPROBE_DEFER; 2717 } 2718 return 0; 2719 } 2720 2721 static int otx2_realloc_msix_vectors(struct otx2_nic *pf) 2722 { 2723 struct otx2_hw *hw = &pf->hw; 2724 int num_vec, err; 2725 2726 /* NPA interrupts are inot registered, so alloc only 2727 * upto NIX vector offset. 2728 */ 2729 num_vec = hw->nix_msixoff; 2730 num_vec += NIX_LF_CINT_VEC_START + hw->max_queues; 2731 2732 otx2_disable_mbox_intr(pf); 2733 pci_free_irq_vectors(hw->pdev); 2734 err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); 2735 if (err < 0) { 2736 dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n", 2737 __func__, num_vec); 2738 return err; 2739 } 2740 2741 return otx2_register_mbox_intr(pf, false); 2742 } 2743 2744 static int otx2_sriov_vfcfg_init(struct otx2_nic *pf) 2745 { 2746 int i; 2747 2748 pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs, 2749 sizeof(struct otx2_vf_config), 2750 GFP_KERNEL); 2751 if (!pf->vf_configs) 2752 return -ENOMEM; 2753 2754 for (i = 0; i < pf->total_vfs; i++) { 2755 pf->vf_configs[i].pf = pf; 2756 pf->vf_configs[i].intf_down = true; 2757 pf->vf_configs[i].trusted = false; 2758 INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work, 2759 otx2_vf_link_event_task); 2760 } 2761 2762 return 0; 2763 } 2764 2765 static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf) 2766 { 2767 int i; 2768 2769 if (!pf->vf_configs) 2770 return; 2771 2772 for (i = 0; i < pf->total_vfs; i++) { 2773 cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work); 2774 otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM); 2775 } 2776 } 2777 2778 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2779 { 2780 struct device *dev = &pdev->dev; 2781 int err, qcount, qos_txqs; 2782 struct net_device *netdev; 2783 struct otx2_nic *pf; 2784 struct otx2_hw *hw; 2785 int num_vec; 2786 2787 err = pcim_enable_device(pdev); 2788 if (err) { 2789 dev_err(dev, "Failed to enable PCI device\n"); 2790 return err; 2791 } 2792 2793 err = pci_request_regions(pdev, DRV_NAME); 2794 if (err) { 2795 dev_err(dev, "PCI request regions failed 0x%x\n", err); 2796 return err; 2797 } 2798 2799 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); 2800 if (err) { 2801 dev_err(dev, "DMA mask config failed, abort\n"); 2802 goto err_release_regions; 2803 } 2804 2805 pci_set_master(pdev); 2806 2807 /* Set number of queues */ 2808 qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT); 2809 qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); 2810 2811 netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount); 2812 if (!netdev) { 2813 err = -ENOMEM; 2814 goto err_release_regions; 2815 } 2816 2817 pci_set_drvdata(pdev, netdev); 2818 SET_NETDEV_DEV(netdev, &pdev->dev); 2819 pf = netdev_priv(netdev); 2820 pf->netdev = netdev; 2821 pf->pdev = pdev; 2822 pf->dev = dev; 2823 pf->total_vfs = pci_sriov_get_totalvfs(pdev); 2824 pf->flags |= OTX2_FLAG_INTF_DOWN; 2825 2826 hw = &pf->hw; 2827 hw->pdev = pdev; 2828 hw->rx_queues = qcount; 2829 hw->tx_queues = qcount; 2830 hw->non_qos_queues = qcount; 2831 hw->max_queues = qcount; 2832 hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; 2833 /* Use CQE of 128 byte descriptor size by default */ 2834 hw->xqe_size = 128; 2835 2836 num_vec = pci_msix_vec_count(pdev); 2837 hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, 2838 GFP_KERNEL); 2839 if (!hw->irq_name) { 2840 err = -ENOMEM; 2841 goto err_free_netdev; 2842 } 2843 2844 hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, 2845 sizeof(cpumask_var_t), GFP_KERNEL); 2846 if (!hw->affinity_mask) { 2847 err = -ENOMEM; 2848 goto err_free_netdev; 2849 } 2850 2851 /* Map CSRs */ 2852 pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); 2853 if (!pf->reg_base) { 2854 dev_err(dev, "Unable to map physical function CSRs, aborting\n"); 2855 err = -ENOMEM; 2856 goto err_free_netdev; 2857 } 2858 2859 err = otx2_check_pf_usable(pf); 2860 if (err) 2861 goto err_free_netdev; 2862 2863 err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, 2864 RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); 2865 if (err < 0) { 2866 dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", 2867 __func__, num_vec); 2868 goto err_free_netdev; 2869 } 2870 2871 otx2_setup_dev_hw_settings(pf); 2872 2873 /* Init PF <=> AF mailbox stuff */ 2874 err = otx2_pfaf_mbox_init(pf); 2875 if (err) 2876 goto err_free_irq_vectors; 2877 2878 /* Register mailbox interrupt */ 2879 err = otx2_register_mbox_intr(pf, true); 2880 if (err) 2881 goto err_mbox_destroy; 2882 2883 /* Request AF to attach NPA and NIX LFs to this PF. 2884 * NIX and NPA LFs are needed for this PF to function as a NIC. 2885 */ 2886 err = otx2_attach_npa_nix(pf); 2887 if (err) 2888 goto err_disable_mbox_intr; 2889 2890 err = otx2_realloc_msix_vectors(pf); 2891 if (err) 2892 goto err_detach_rsrc; 2893 2894 err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues); 2895 if (err) 2896 goto err_detach_rsrc; 2897 2898 err = cn10k_lmtst_init(pf); 2899 if (err) 2900 goto err_detach_rsrc; 2901 2902 /* Assign default mac address */ 2903 otx2_get_mac_from_af(netdev); 2904 2905 /* Don't check for error. Proceed without ptp */ 2906 otx2_ptp_init(pf); 2907 2908 /* NPA's pool is a stack to which SW frees buffer pointers via Aura. 2909 * HW allocates buffer pointer from stack and uses it for DMA'ing 2910 * ingress packet. In some scenarios HW can free back allocated buffer 2911 * pointers to pool. This makes it impossible for SW to maintain a 2912 * parallel list where physical addresses of buffer pointers (IOVAs) 2913 * given to HW can be saved for later reference. 2914 * 2915 * So the only way to convert Rx packet's buffer address is to use 2916 * IOMMU's iova_to_phys() handler which translates the address by 2917 * walking through the translation tables. 2918 */ 2919 pf->iommu_domain = iommu_get_domain_for_dev(dev); 2920 2921 netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | 2922 NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | 2923 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 2924 NETIF_F_GSO_UDP_L4); 2925 netdev->features |= netdev->hw_features; 2926 2927 err = otx2_mcam_flow_init(pf); 2928 if (err) 2929 goto err_ptp_destroy; 2930 2931 err = cn10k_mcs_init(pf); 2932 if (err) 2933 goto err_del_mcam_entries; 2934 2935 if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT) 2936 netdev->hw_features |= NETIF_F_NTUPLE; 2937 2938 if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT) 2939 netdev->priv_flags |= IFF_UNICAST_FLT; 2940 2941 /* Support TSO on tag interface */ 2942 netdev->vlan_features |= netdev->features; 2943 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 2944 NETIF_F_HW_VLAN_STAG_TX; 2945 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) 2946 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | 2947 NETIF_F_HW_VLAN_STAG_RX; 2948 netdev->features |= netdev->hw_features; 2949 2950 /* HW supports tc offload but mutually exclusive with n-tuple filters */ 2951 if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT) 2952 netdev->hw_features |= NETIF_F_HW_TC; 2953 2954 netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; 2955 2956 netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS); 2957 netdev->watchdog_timeo = OTX2_TX_TIMEOUT; 2958 2959 netdev->netdev_ops = &otx2_netdev_ops; 2960 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; 2961 2962 netdev->min_mtu = OTX2_MIN_MTU; 2963 netdev->max_mtu = otx2_get_max_mtu(pf); 2964 2965 err = register_netdev(netdev); 2966 if (err) { 2967 dev_err(dev, "Failed to register netdevice\n"); 2968 goto err_mcs_free; 2969 } 2970 2971 err = otx2_wq_init(pf); 2972 if (err) 2973 goto err_unreg_netdev; 2974 2975 otx2_set_ethtool_ops(netdev); 2976 2977 err = otx2_init_tc(pf); 2978 if (err) 2979 goto err_mcam_flow_del; 2980 2981 err = otx2_register_dl(pf); 2982 if (err) 2983 goto err_mcam_flow_del; 2984 2985 /* Initialize SR-IOV resources */ 2986 err = otx2_sriov_vfcfg_init(pf); 2987 if (err) 2988 goto err_pf_sriov_init; 2989 2990 /* Enable link notifications */ 2991 otx2_cgx_config_linkevents(pf, true); 2992 2993 #ifdef CONFIG_DCB 2994 err = otx2_dcbnl_set_ops(netdev); 2995 if (err) 2996 goto err_pf_sriov_init; 2997 #endif 2998 2999 otx2_qos_init(pf, qos_txqs); 3000 3001 return 0; 3002 3003 err_pf_sriov_init: 3004 otx2_shutdown_tc(pf); 3005 err_mcam_flow_del: 3006 otx2_mcam_flow_del(pf); 3007 err_unreg_netdev: 3008 unregister_netdev(netdev); 3009 err_mcs_free: 3010 cn10k_mcs_free(pf); 3011 err_del_mcam_entries: 3012 otx2_mcam_flow_del(pf); 3013 err_ptp_destroy: 3014 otx2_ptp_destroy(pf); 3015 err_detach_rsrc: 3016 if (pf->hw.lmt_info) 3017 free_percpu(pf->hw.lmt_info); 3018 if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) 3019 qmem_free(pf->dev, pf->dync_lmt); 3020 otx2_detach_resources(&pf->mbox); 3021 err_disable_mbox_intr: 3022 otx2_disable_mbox_intr(pf); 3023 err_mbox_destroy: 3024 otx2_pfaf_mbox_destroy(pf); 3025 err_free_irq_vectors: 3026 pci_free_irq_vectors(hw->pdev); 3027 err_free_netdev: 3028 pci_set_drvdata(pdev, NULL); 3029 free_netdev(netdev); 3030 err_release_regions: 3031 pci_release_regions(pdev); 3032 return err; 3033 } 3034 3035 static void otx2_vf_link_event_task(struct work_struct *work) 3036 { 3037 struct otx2_vf_config *config; 3038 struct cgx_link_info_msg *req; 3039 struct mbox_msghdr *msghdr; 3040 struct otx2_nic *pf; 3041 int vf_idx; 3042 3043 config = container_of(work, struct otx2_vf_config, 3044 link_event_work.work); 3045 vf_idx = config - config->pf->vf_configs; 3046 pf = config->pf; 3047 3048 msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx, 3049 sizeof(*req), sizeof(struct msg_rsp)); 3050 if (!msghdr) { 3051 dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx); 3052 return; 3053 } 3054 3055 req = (struct cgx_link_info_msg *)msghdr; 3056 req->hdr.id = MBOX_MSG_CGX_LINK_EVENT; 3057 req->hdr.sig = OTX2_MBOX_REQ_SIG; 3058 memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); 3059 3060 otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx); 3061 } 3062 3063 static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs) 3064 { 3065 struct net_device *netdev = pci_get_drvdata(pdev); 3066 struct otx2_nic *pf = netdev_priv(netdev); 3067 int ret; 3068 3069 /* Init PF <=> VF mailbox stuff */ 3070 ret = otx2_pfvf_mbox_init(pf, numvfs); 3071 if (ret) 3072 return ret; 3073 3074 ret = otx2_register_pfvf_mbox_intr(pf, numvfs); 3075 if (ret) 3076 goto free_mbox; 3077 3078 ret = otx2_pf_flr_init(pf, numvfs); 3079 if (ret) 3080 goto free_intr; 3081 3082 ret = otx2_register_flr_me_intr(pf, numvfs); 3083 if (ret) 3084 goto free_flr; 3085 3086 ret = pci_enable_sriov(pdev, numvfs); 3087 if (ret) 3088 goto free_flr_intr; 3089 3090 return numvfs; 3091 free_flr_intr: 3092 otx2_disable_flr_me_intr(pf); 3093 free_flr: 3094 otx2_flr_wq_destroy(pf); 3095 free_intr: 3096 otx2_disable_pfvf_mbox_intr(pf, numvfs); 3097 free_mbox: 3098 otx2_pfvf_mbox_destroy(pf); 3099 return ret; 3100 } 3101 3102 static int otx2_sriov_disable(struct pci_dev *pdev) 3103 { 3104 struct net_device *netdev = pci_get_drvdata(pdev); 3105 struct otx2_nic *pf = netdev_priv(netdev); 3106 int numvfs = pci_num_vf(pdev); 3107 3108 if (!numvfs) 3109 return 0; 3110 3111 pci_disable_sriov(pdev); 3112 3113 otx2_disable_flr_me_intr(pf); 3114 otx2_flr_wq_destroy(pf); 3115 otx2_disable_pfvf_mbox_intr(pf, numvfs); 3116 otx2_pfvf_mbox_destroy(pf); 3117 3118 return 0; 3119 } 3120 3121 static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs) 3122 { 3123 if (numvfs == 0) 3124 return otx2_sriov_disable(pdev); 3125 else 3126 return otx2_sriov_enable(pdev, numvfs); 3127 } 3128 3129 static void otx2_remove(struct pci_dev *pdev) 3130 { 3131 struct net_device *netdev = pci_get_drvdata(pdev); 3132 struct otx2_nic *pf; 3133 3134 if (!netdev) 3135 return; 3136 3137 pf = netdev_priv(netdev); 3138 3139 pf->flags |= OTX2_FLAG_PF_SHUTDOWN; 3140 3141 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) 3142 otx2_config_hw_tx_tstamp(pf, false); 3143 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) 3144 otx2_config_hw_rx_tstamp(pf, false); 3145 3146 /* Disable 802.3x pause frames */ 3147 if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED || 3148 (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) { 3149 pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED; 3150 pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED; 3151 otx2_config_pause_frm(pf); 3152 } 3153 3154 #ifdef CONFIG_DCB 3155 /* Disable PFC config */ 3156 if (pf->pfc_en) { 3157 pf->pfc_en = 0; 3158 otx2_config_priority_flow_ctrl(pf); 3159 } 3160 #endif 3161 cancel_work_sync(&pf->reset_task); 3162 /* Disable link notifications */ 3163 otx2_cgx_config_linkevents(pf, false); 3164 3165 otx2_unregister_dl(pf); 3166 unregister_netdev(netdev); 3167 cn10k_mcs_free(pf); 3168 otx2_sriov_disable(pf->pdev); 3169 otx2_sriov_vfcfg_cleanup(pf); 3170 if (pf->otx2_wq) 3171 destroy_workqueue(pf->otx2_wq); 3172 3173 otx2_ptp_destroy(pf); 3174 otx2_mcam_flow_del(pf); 3175 otx2_shutdown_tc(pf); 3176 otx2_shutdown_qos(pf); 3177 otx2_detach_resources(&pf->mbox); 3178 if (pf->hw.lmt_info) 3179 free_percpu(pf->hw.lmt_info); 3180 if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) 3181 qmem_free(pf->dev, pf->dync_lmt); 3182 otx2_disable_mbox_intr(pf); 3183 otx2_pfaf_mbox_destroy(pf); 3184 pci_free_irq_vectors(pf->pdev); 3185 pci_set_drvdata(pdev, NULL); 3186 free_netdev(netdev); 3187 3188 pci_release_regions(pdev); 3189 } 3190 3191 static struct pci_driver otx2_pf_driver = { 3192 .name = DRV_NAME, 3193 .id_table = otx2_pf_id_table, 3194 .probe = otx2_probe, 3195 .shutdown = otx2_remove, 3196 .remove = otx2_remove, 3197 .sriov_configure = otx2_sriov_configure 3198 }; 3199 3200 static int __init otx2_rvupf_init_module(void) 3201 { 3202 pr_info("%s: %s\n", DRV_NAME, DRV_STRING); 3203 3204 return pci_register_driver(&otx2_pf_driver); 3205 } 3206 3207 static void __exit otx2_rvupf_cleanup_module(void) 3208 { 3209 pci_unregister_driver(&otx2_pf_driver); 3210 } 3211 3212 module_init(otx2_rvupf_init_module); 3213 module_exit(otx2_rvupf_cleanup_module); 3214