1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Physical Function ethernet driver 3 * 4 * Copyright (C) 2020 Marvell. 5 * 6 */ 7 8 #include <linux/module.h> 9 #include <linux/interrupt.h> 10 #include <linux/pci.h> 11 #include <linux/etherdevice.h> 12 #include <linux/of.h> 13 #include <linux/if_vlan.h> 14 #include <linux/iommu.h> 15 #include <net/ip.h> 16 #include <linux/bpf.h> 17 #include <linux/bpf_trace.h> 18 #include <linux/bitfield.h> 19 #include <net/page_pool/types.h> 20 21 #include "otx2_reg.h" 22 #include "otx2_common.h" 23 #include "otx2_txrx.h" 24 #include "otx2_struct.h" 25 #include "otx2_ptp.h" 26 #include "cn10k.h" 27 #include "qos.h" 28 #include <rvu_trace.h> 29 #include "cn10k_ipsec.h" 30 #include "otx2_xsk.h" 31 32 #define DRV_NAME "rvu_nicpf" 33 #define DRV_STRING "Marvell RVU NIC Physical Function Driver" 34 35 /* Supported devices */ 36 static const struct pci_device_id otx2_pf_id_table[] = { 37 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) }, 38 { 0, } /* end of table */ 39 }; 40 41 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>"); 42 MODULE_DESCRIPTION(DRV_STRING); 43 MODULE_LICENSE("GPL v2"); 44 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table); 45 46 static void otx2_vf_link_event_task(struct work_struct *work); 47 48 enum { 49 TYPE_PFAF, 50 TYPE_PFVF, 51 }; 52 53 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable); 54 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable); 55 56 static int otx2_change_mtu(struct net_device *netdev, int new_mtu) 57 { 58 struct otx2_nic *pf = netdev_priv(netdev); 59 bool if_up = netif_running(netdev); 60 int err = 0; 61 62 if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) { 63 netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", 64 netdev->mtu); 65 return -EINVAL; 66 } 67 if (if_up) 68 otx2_stop(netdev); 69 70 netdev_info(netdev, "Changing MTU from %d to %d\n", 71 netdev->mtu, new_mtu); 72 WRITE_ONCE(netdev->mtu, new_mtu); 73 74 if (if_up) 75 err = otx2_open(netdev); 76 77 return err; 78 } 79 80 static void otx2_disable_flr_me_intr(struct otx2_nic *pf) 81 { 82 int irq, vfs = pf->total_vfs; 83 84 /* Disable VFs ME interrupts */ 85 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); 86 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0); 87 free_irq(irq, pf); 88 89 /* Disable VFs FLR interrupts */ 90 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); 91 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0); 92 free_irq(irq, pf); 93 94 if (vfs <= 64) 95 return; 96 97 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); 98 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1); 99 free_irq(irq, pf); 100 101 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); 102 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1); 103 free_irq(irq, pf); 104 } 105 106 static void otx2_flr_wq_destroy(struct otx2_nic *pf) 107 { 108 if (!pf->flr_wq) 109 return; 110 destroy_workqueue(pf->flr_wq); 111 pf->flr_wq = NULL; 112 devm_kfree(pf->dev, pf->flr_wrk); 113 } 114 115 static void otx2_flr_handler(struct work_struct *work) 116 { 117 struct flr_work *flrwork = container_of(work, struct flr_work, work); 118 struct otx2_nic *pf = flrwork->pf; 119 struct mbox *mbox = &pf->mbox; 120 struct msg_req *req; 121 int vf, reg = 0; 122 123 vf = flrwork - pf->flr_wrk; 124 125 mutex_lock(&mbox->lock); 126 req = otx2_mbox_alloc_msg_vf_flr(mbox); 127 if (!req) { 128 mutex_unlock(&mbox->lock); 129 return; 130 } 131 req->hdr.pcifunc &= 
RVU_PFVF_FUNC_MASK;
    req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

    if (!otx2_sync_mbox_msg(&pf->mbox)) {
        if (vf >= 64) {
            reg = 1;
            vf = vf - 64;
        }
        /* clear transaction pending bit */
        otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
        otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
    }

    mutex_unlock(&mbox->lock);
}

static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
{
    struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
    int reg, dev, vf, start_vf, num_reg = 1;
    u64 intr;

    if (pf->total_vfs > 64)
        num_reg = 2;

    for (reg = 0; reg < num_reg; reg++) {
        intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
        if (!intr)
            continue;
        start_vf = 64 * reg;
        for (vf = 0; vf < 64; vf++) {
            if (!(intr & BIT_ULL(vf)))
                continue;
            dev = vf + start_vf;
            queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
            /* Clear interrupt */
            otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
            /* Disable the interrupt */
            otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
                         BIT_ULL(vf));
        }
    }
    return IRQ_HANDLED;
}

static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
{
    struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
    int vf, reg, num_reg = 1;
    u64 intr;

    if (pf->total_vfs > 64)
        num_reg = 2;

    for (reg = 0; reg < num_reg; reg++) {
        intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
        if (!intr)
            continue;
        for (vf = 0; vf < 64; vf++) {
            if (!(intr & BIT_ULL(vf)))
                continue;
            /* clear trpend bit */
            otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
            /* clear interrupt */
            otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
        }
    }
    return IRQ_HANDLED;
}

static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
{
    struct otx2_hw *hw = &pf->hw;
    char *irq_name;
    int ret;

    /* Register ME interrupt handler */
    irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
    snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
    ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
                      otx2_pf_me_intr_handler, 0, irq_name, pf);
    if (ret) {
        dev_err(pf->dev,
                "RVUPF: IRQ registration failed for ME0\n");
    }

    /* Register FLR interrupt handler */
    irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
    snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
    ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
                      otx2_pf_flr_intr_handler, 0, irq_name, pf);
    if (ret) {
        dev_err(pf->dev,
                "RVUPF: IRQ registration failed for FLR0\n");
        return ret;
    }

    if (numvfs > 64) {
        irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
        snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
                 rvu_get_pf(pf->pcifunc));
        ret = request_irq(pci_irq_vector
                          (pf->pdev, RVU_PF_INT_VEC_VFME1),
                          otx2_pf_me_intr_handler, 0, irq_name, pf);
        if (ret) {
            dev_err(pf->dev,
                    "RVUPF: IRQ registration failed for ME1\n");
        }
        irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
        snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
                 rvu_get_pf(pf->pcifunc));
        ret = request_irq(pci_irq_vector
                          (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
                          otx2_pf_flr_intr_handler, 0, irq_name, pf);
        if (ret) {
            dev_err(pf->dev,
                    "RVUPF: IRQ registration failed for FLR1\n");
            return ret;
        }
    }

    /* Enable ME
interrupt for all VFs*/ 253 otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs)); 254 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs)); 255 256 /* Enable FLR interrupt for all VFs*/ 257 otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs)); 258 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs)); 259 260 if (numvfs > 64) { 261 numvfs -= 64; 262 263 otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs)); 264 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1), 265 INTR_MASK(numvfs)); 266 267 otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs)); 268 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1), 269 INTR_MASK(numvfs)); 270 } 271 return 0; 272 } 273 274 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) 275 { 276 int vf; 277 278 pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI); 279 if (!pf->flr_wq) 280 return -ENOMEM; 281 282 pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs, 283 sizeof(struct flr_work), GFP_KERNEL); 284 if (!pf->flr_wrk) { 285 destroy_workqueue(pf->flr_wq); 286 return -ENOMEM; 287 } 288 289 for (vf = 0; vf < num_vfs; vf++) { 290 pf->flr_wrk[vf].pf = pf; 291 INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler); 292 } 293 294 return 0; 295 } 296 297 static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, 298 int first, int mdevs, u64 intr) 299 { 300 struct otx2_mbox_dev *mdev; 301 struct otx2_mbox *mbox; 302 struct mbox_hdr *hdr; 303 int i; 304 305 for (i = first; i < mdevs; i++) { 306 /* start from 0 */ 307 if (!(intr & BIT_ULL(i - first))) 308 continue; 309 310 mbox = &mw->mbox; 311 mdev = &mbox->dev[i]; 312 hdr = mdev->mbase + mbox->rx_start; 313 /* The hdr->num_msgs is set to zero immediately in the interrupt 314 * handler to ensure that it holds a correct value next time 315 * when the interrupt handler is called. pf->mw[i].num_msgs 316 * holds the data for use in otx2_pfvf_mbox_handler and 317 * pf->mw[i].up_num_msgs holds the data for use in 318 * otx2_pfvf_mbox_up_handler. 319 */ 320 if (hdr->num_msgs) { 321 mw[i].num_msgs = hdr->num_msgs; 322 hdr->num_msgs = 0; 323 queue_work(mbox_wq, &mw[i].mbox_wrk); 324 } 325 326 mbox = &mw->mbox_up; 327 mdev = &mbox->dev[i]; 328 hdr = mdev->mbase + mbox->rx_start; 329 if (hdr->num_msgs) { 330 mw[i].up_num_msgs = hdr->num_msgs; 331 hdr->num_msgs = 0; 332 queue_work(mbox_wq, &mw[i].mbox_up_wrk); 333 } 334 } 335 } 336 337 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev, 338 struct otx2_mbox *pfvf_mbox, void *bbuf_base, 339 int devid) 340 { 341 struct otx2_mbox_dev *src_mdev = mdev; 342 int offset; 343 344 /* Msgs are already copied, trigger VF's mbox irq */ 345 smp_wmb(); 346 347 otx2_mbox_wait_for_zero(pfvf_mbox, devid); 348 349 offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift); 350 writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset); 351 352 /* Restore VF's mbox bounce buffer region address */ 353 src_mdev->mbase = bbuf_base; 354 } 355 356 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, 357 struct otx2_mbox *src_mbox, 358 int dir, int vf, int num_msgs) 359 { 360 struct otx2_mbox_dev *src_mdev, *dst_mdev; 361 struct mbox_hdr *mbox_hdr; 362 struct mbox_hdr *req_hdr; 363 struct mbox *dst_mbox; 364 int dst_size, err; 365 366 if (dir == MBOX_DIR_PFAF) { 367 /* Set VF's mailbox memory as PF's bounce buffer memory, so 368 * that explicit copying of VF's msgs to PF=>AF mbox region 369 * and AF=>PF responses to VF's mbox region can be avoided. 
         */
        src_mdev = &src_mbox->dev[vf];
        mbox_hdr = src_mbox->hwbase +
                   src_mbox->rx_start + (vf * MBOX_SIZE);

        dst_mbox = &pf->mbox;
        dst_size = dst_mbox->mbox.tx_size -
                   ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
        /* Check if msgs fit into the destination area and have a valid size */
        if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
            return -EINVAL;

        dst_mdev = &dst_mbox->mbox.dev[0];

        mutex_lock(&pf->mbox.lock);
        dst_mdev->mbase = src_mdev->mbase;
        dst_mdev->msg_size = mbox_hdr->msg_size;
        dst_mdev->num_msgs = num_msgs;
        err = otx2_sync_mbox_msg(dst_mbox);
        /* Error code -EIO indicates a communication failure with the AF.
         * All other error codes mean that the AF processed the VF messages
         * and set the error codes in the response messages (if any), so
         * simply forward the responses to the VF.
         */
        if (err == -EIO) {
            dev_warn(pf->dev,
                     "AF not responding to VF%d messages\n", vf);
            /* restore PF mbase and exit */
            dst_mdev->mbase = pf->mbox.bbuf_base;
            mutex_unlock(&pf->mbox.lock);
            return err;
        }
        /* At this point, all the VF messages sent to the AF are acked
         * with proper responses and the responses are copied to the VF
         * mailbox, hence raise an interrupt to the VF.
         */
        req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
                                      dst_mbox->mbox.rx_start);
        req_hdr->num_msgs = num_msgs;

        otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
                              pf->mbox.bbuf_base, vf);
        mutex_unlock(&pf->mbox.lock);
    } else if (dir == MBOX_DIR_PFVF_UP) {
        src_mdev = &src_mbox->dev[0];
        mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
        req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
                                      src_mbox->rx_start);
        req_hdr->num_msgs = num_msgs;

        dst_mbox = &pf->mbox_pfvf[0];
        dst_size = dst_mbox->mbox_up.tx_size -
                   ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
        /* Check if msgs fit into the destination area */
        if (mbox_hdr->msg_size > dst_size)
            return -EINVAL;

        dst_mdev = &dst_mbox->mbox_up.dev[vf];
        dst_mdev->mbase = src_mdev->mbase;
        dst_mdev->msg_size = mbox_hdr->msg_size;
        dst_mdev->num_msgs = mbox_hdr->num_msgs;
        err = otx2_sync_mbox_up_msg(dst_mbox, vf);
        if (err) {
            dev_warn(pf->dev,
                     "VF%d is not responding to mailbox\n", vf);
            return err;
        }
    } else if (dir == MBOX_DIR_VFPF_UP) {
        req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
                                      src_mbox->rx_start);
        req_hdr->num_msgs = num_msgs;
        otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
                              &pf->mbox.mbox_up,
                              pf->mbox_pfvf[vf].bbuf_base,
                              0);
    }

    return 0;
}

static void otx2_pfvf_mbox_handler(struct work_struct *work)
{
    struct mbox_msghdr *msg = NULL;
    int offset, vf_idx, id, err;
    struct otx2_mbox_dev *mdev;
    struct otx2_mbox *mbox;
    struct mbox *vf_mbox;
    struct otx2_nic *pf;

    vf_mbox = container_of(work, struct mbox, mbox_wrk);
    pf = vf_mbox->pfvf;
    vf_idx = vf_mbox - pf->mbox_pfvf;

    mbox = &pf->mbox_pfvf[0].mbox;
    mdev = &mbox->dev[vf_idx];

    offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);

    trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)",
                          vf_mbox->num_msgs);

    for (id = 0; id < vf_mbox->num_msgs; id++) {
        msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
                                     offset);

        if (msg->sig != OTX2_MBOX_REQ_SIG)
            goto inval_msg;

        /* Set the VF's number in each msg */
        msg->pcifunc &= ~RVU_PFVF_FUNC_MASK;
msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK; 481 offset = msg->next_msgoff; 482 } 483 err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx, 484 vf_mbox->num_msgs); 485 if (err) 486 goto inval_msg; 487 return; 488 489 inval_msg: 490 otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id); 491 otx2_mbox_msg_send(mbox, vf_idx); 492 } 493 494 static void otx2_pfvf_mbox_up_handler(struct work_struct *work) 495 { 496 struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk); 497 struct otx2_nic *pf = vf_mbox->pfvf; 498 struct otx2_mbox_dev *mdev; 499 int offset, id, vf_idx = 0; 500 struct mbox_msghdr *msg; 501 struct otx2_mbox *mbox; 502 503 vf_idx = vf_mbox - pf->mbox_pfvf; 504 mbox = &pf->mbox_pfvf[0].mbox_up; 505 mdev = &mbox->dev[vf_idx]; 506 507 offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); 508 509 trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)", 510 vf_mbox->up_num_msgs); 511 512 for (id = 0; id < vf_mbox->up_num_msgs; id++) { 513 msg = mdev->mbase + offset; 514 515 if (msg->id >= MBOX_MSG_MAX) { 516 dev_err(pf->dev, 517 "Mbox msg with unknown ID 0x%x\n", msg->id); 518 goto end; 519 } 520 521 if (msg->sig != OTX2_MBOX_RSP_SIG) { 522 dev_err(pf->dev, 523 "Mbox msg with wrong signature %x, ID 0x%x\n", 524 msg->sig, msg->id); 525 goto end; 526 } 527 528 switch (msg->id) { 529 case MBOX_MSG_CGX_LINK_EVENT: 530 case MBOX_MSG_REP_EVENT_UP_NOTIFY: 531 break; 532 default: 533 if (msg->rc) 534 dev_err(pf->dev, 535 "Mbox msg response has err %d, ID 0x%x\n", 536 msg->rc, msg->id); 537 break; 538 } 539 540 end: 541 offset = mbox->rx_start + msg->next_msgoff; 542 if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1)) 543 __otx2_mbox_reset(mbox, vf_idx); 544 mdev->msgs_acked++; 545 } 546 } 547 548 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) 549 { 550 struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); 551 int vfs = pf->total_vfs; 552 struct mbox *mbox; 553 u64 intr; 554 555 mbox = pf->mbox_pfvf; 556 /* Handle VF interrupts */ 557 if (vfs > 64) { 558 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1)); 559 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); 560 otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr); 561 if (intr) 562 trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); 563 vfs = 64; 564 } 565 566 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); 567 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr); 568 569 otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr); 570 571 if (intr) 572 trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); 573 574 return IRQ_HANDLED; 575 } 576 577 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) 578 { 579 void __iomem *hwbase; 580 struct mbox *mbox; 581 int err, vf; 582 u64 base; 583 584 if (!numvfs) 585 return -EINVAL; 586 587 pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs, 588 sizeof(struct mbox), GFP_KERNEL); 589 if (!pf->mbox_pfvf) 590 return -ENOMEM; 591 592 pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox", 593 WQ_UNBOUND | WQ_HIGHPRI | 594 WQ_MEM_RECLAIM, 0); 595 if (!pf->mbox_pfvf_wq) 596 return -ENOMEM; 597 598 /* On CN10K platform, PF <-> VF mailbox region follows after 599 * PF <-> AF mailbox region. 
600 */ 601 if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) 602 base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + 603 MBOX_SIZE; 604 else 605 base = readq((void __iomem *)((u64)pf->reg_base + 606 RVU_PF_VF_BAR4_ADDR)); 607 608 hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); 609 if (!hwbase) { 610 err = -ENOMEM; 611 goto free_wq; 612 } 613 614 mbox = &pf->mbox_pfvf[0]; 615 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, 616 MBOX_DIR_PFVF, numvfs); 617 if (err) 618 goto free_iomem; 619 620 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, 621 MBOX_DIR_PFVF_UP, numvfs); 622 if (err) 623 goto free_iomem; 624 625 for (vf = 0; vf < numvfs; vf++) { 626 mbox->pfvf = pf; 627 INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler); 628 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler); 629 mbox++; 630 } 631 632 return 0; 633 634 free_iomem: 635 if (hwbase) 636 iounmap(hwbase); 637 free_wq: 638 destroy_workqueue(pf->mbox_pfvf_wq); 639 return err; 640 } 641 642 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) 643 { 644 struct mbox *mbox = &pf->mbox_pfvf[0]; 645 646 if (!mbox) 647 return; 648 649 if (pf->mbox_pfvf_wq) { 650 destroy_workqueue(pf->mbox_pfvf_wq); 651 pf->mbox_pfvf_wq = NULL; 652 } 653 654 if (mbox->mbox.hwbase) 655 iounmap(mbox->mbox.hwbase); 656 657 otx2_mbox_destroy(&mbox->mbox); 658 } 659 660 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) 661 { 662 /* Clear PF <=> VF mailbox IRQ */ 663 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); 664 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); 665 666 /* Enable PF <=> VF mailbox IRQ */ 667 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs)); 668 if (numvfs > 64) { 669 numvfs -= 64; 670 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), 671 INTR_MASK(numvfs)); 672 } 673 } 674 675 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) 676 { 677 int vector; 678 679 /* Disable PF <=> VF mailbox IRQ */ 680 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); 681 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); 682 683 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); 684 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0); 685 free_irq(vector, pf); 686 687 if (numvfs > 64) { 688 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); 689 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1); 690 free_irq(vector, pf); 691 } 692 } 693 694 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) 695 { 696 struct otx2_hw *hw = &pf->hw; 697 char *irq_name; 698 int err; 699 700 /* Register MBOX0 interrupt handler */ 701 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; 702 if (pf->pcifunc) 703 snprintf(irq_name, NAME_SIZE, 704 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); 705 else 706 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); 707 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), 708 otx2_pfvf_mbox_intr_handler, 0, irq_name, pf); 709 if (err) { 710 dev_err(pf->dev, 711 "RVUPF: IRQ registration failed for PFVF mbox0 irq\n"); 712 return err; 713 } 714 715 if (numvfs > 64) { 716 /* Register MBOX1 interrupt handler */ 717 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; 718 if (pf->pcifunc) 719 snprintf(irq_name, NAME_SIZE, 720 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); 721 else 722 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); 723 err = request_irq(pci_irq_vector(pf->pdev, 724 RVU_PF_INT_VEC_VFPF_MBOX1), 725 
otx2_pfvf_mbox_intr_handler, 726 0, irq_name, pf); 727 if (err) { 728 dev_err(pf->dev, 729 "RVUPF: IRQ registration failed for PFVF mbox1 irq\n"); 730 return err; 731 } 732 } 733 734 otx2_enable_pfvf_mbox_intr(pf, numvfs); 735 736 return 0; 737 } 738 739 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf, 740 struct mbox_msghdr *msg) 741 { 742 int devid; 743 744 if (msg->id >= MBOX_MSG_MAX) { 745 dev_err(pf->dev, 746 "Mbox msg with unknown ID 0x%x\n", msg->id); 747 return; 748 } 749 750 if (msg->sig != OTX2_MBOX_RSP_SIG) { 751 dev_err(pf->dev, 752 "Mbox msg with wrong signature %x, ID 0x%x\n", 753 msg->sig, msg->id); 754 return; 755 } 756 757 /* message response heading VF */ 758 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; 759 if (devid) { 760 struct otx2_vf_config *config = &pf->vf_configs[devid - 1]; 761 struct delayed_work *dwork; 762 763 switch (msg->id) { 764 case MBOX_MSG_NIX_LF_START_RX: 765 config->intf_down = false; 766 dwork = &config->link_event_work; 767 schedule_delayed_work(dwork, msecs_to_jiffies(100)); 768 break; 769 case MBOX_MSG_NIX_LF_STOP_RX: 770 config->intf_down = true; 771 break; 772 } 773 774 return; 775 } 776 777 switch (msg->id) { 778 case MBOX_MSG_READY: 779 pf->pcifunc = msg->pcifunc; 780 break; 781 case MBOX_MSG_MSIX_OFFSET: 782 mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg); 783 break; 784 case MBOX_MSG_NPA_LF_ALLOC: 785 mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg); 786 break; 787 case MBOX_MSG_NIX_LF_ALLOC: 788 mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg); 789 break; 790 case MBOX_MSG_NIX_BP_ENABLE: 791 mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg); 792 break; 793 case MBOX_MSG_CGX_STATS: 794 mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg); 795 break; 796 case MBOX_MSG_CGX_FEC_STATS: 797 mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg); 798 break; 799 default: 800 if (msg->rc) 801 dev_err(pf->dev, 802 "Mbox msg response has err %d, ID 0x%x\n", 803 msg->rc, msg->id); 804 break; 805 } 806 } 807 808 static void otx2_pfaf_mbox_handler(struct work_struct *work) 809 { 810 struct otx2_mbox_dev *mdev; 811 struct mbox_hdr *rsp_hdr; 812 struct mbox_msghdr *msg; 813 struct otx2_mbox *mbox; 814 struct mbox *af_mbox; 815 struct otx2_nic *pf; 816 int offset, id; 817 u16 num_msgs; 818 819 af_mbox = container_of(work, struct mbox, mbox_wrk); 820 mbox = &af_mbox->mbox; 821 mdev = &mbox->dev[0]; 822 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 823 num_msgs = rsp_hdr->num_msgs; 824 825 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 826 pf = af_mbox->pfvf; 827 828 trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)", 829 num_msgs); 830 831 for (id = 0; id < num_msgs; id++) { 832 msg = (struct mbox_msghdr *)(mdev->mbase + offset); 833 otx2_process_pfaf_mbox_msg(pf, msg); 834 offset = mbox->rx_start + msg->next_msgoff; 835 if (mdev->msgs_acked == (num_msgs - 1)) 836 __otx2_mbox_reset(mbox, 0); 837 mdev->msgs_acked++; 838 } 839 840 } 841 842 static void otx2_handle_link_event(struct otx2_nic *pf) 843 { 844 struct cgx_link_user_info *linfo = &pf->linfo; 845 struct net_device *netdev = pf->netdev; 846 847 if (pf->flags & OTX2_FLAG_PORT_UP) 848 return; 849 850 pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name, 851 linfo->link_up ? "UP" : "DOWN", linfo->speed, 852 linfo->full_duplex ? 
"Full" : "Half"); 853 if (linfo->link_up) { 854 netif_carrier_on(netdev); 855 netif_tx_start_all_queues(netdev); 856 } else { 857 netif_tx_stop_all_queues(netdev); 858 netif_carrier_off(netdev); 859 } 860 } 861 862 static int otx2_mbox_up_handler_rep_event_up_notify(struct otx2_nic *pf, 863 struct rep_event *info, 864 struct msg_rsp *rsp) 865 { 866 struct net_device *netdev = pf->netdev; 867 868 if (info->event == RVU_EVENT_MTU_CHANGE) { 869 netdev->mtu = info->evt_data.mtu; 870 return 0; 871 } 872 873 if (info->event == RVU_EVENT_PORT_STATE) { 874 if (info->evt_data.port_state) { 875 pf->flags |= OTX2_FLAG_PORT_UP; 876 netif_carrier_on(netdev); 877 netif_tx_start_all_queues(netdev); 878 } else { 879 pf->flags &= ~OTX2_FLAG_PORT_UP; 880 netif_tx_stop_all_queues(netdev); 881 netif_carrier_off(netdev); 882 } 883 return 0; 884 } 885 #ifdef CONFIG_RVU_ESWITCH 886 rvu_event_up_notify(pf, info); 887 #endif 888 return 0; 889 } 890 891 int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf, 892 struct mcs_intr_info *event, 893 struct msg_rsp *rsp) 894 { 895 cn10k_handle_mcs_event(pf, event); 896 897 return 0; 898 } 899 900 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, 901 struct cgx_link_info_msg *msg, 902 struct msg_rsp *rsp) 903 { 904 int i; 905 906 /* Copy the link info sent by AF */ 907 pf->linfo = msg->link_info; 908 909 /* notify VFs about link event */ 910 for (i = 0; i < pci_num_vf(pf->pdev); i++) { 911 struct otx2_vf_config *config = &pf->vf_configs[i]; 912 struct delayed_work *dwork = &config->link_event_work; 913 914 if (config->intf_down) 915 continue; 916 917 schedule_delayed_work(dwork, msecs_to_jiffies(100)); 918 } 919 920 /* interface has not been fully configured yet */ 921 if (pf->flags & OTX2_FLAG_INTF_DOWN) 922 return 0; 923 924 otx2_handle_link_event(pf); 925 return 0; 926 } 927 928 static int otx2_process_mbox_msg_up(struct otx2_nic *pf, 929 struct mbox_msghdr *req) 930 { 931 /* Check if valid, if not reply with a invalid msg */ 932 if (req->sig != OTX2_MBOX_REQ_SIG) { 933 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); 934 return -ENODEV; 935 } 936 937 switch (req->id) { 938 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ 939 case _id: { \ 940 struct _rsp_type *rsp; \ 941 int err; \ 942 \ 943 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ 944 &pf->mbox.mbox_up, 0, \ 945 sizeof(struct _rsp_type)); \ 946 if (!rsp) \ 947 return -ENOMEM; \ 948 \ 949 rsp->hdr.id = _id; \ 950 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ 951 rsp->hdr.pcifunc = 0; \ 952 rsp->hdr.rc = 0; \ 953 \ 954 err = otx2_mbox_up_handler_ ## _fn_name( \ 955 pf, (struct _req_type *)req, rsp); \ 956 return err; \ 957 } 958 MBOX_UP_CGX_MESSAGES 959 MBOX_UP_MCS_MESSAGES 960 MBOX_UP_REP_MESSAGES 961 #undef M 962 break; 963 default: 964 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); 965 return -ENODEV; 966 } 967 return 0; 968 } 969 970 static void otx2_pfaf_mbox_up_handler(struct work_struct *work) 971 { 972 struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk); 973 struct otx2_mbox *mbox = &af_mbox->mbox_up; 974 struct otx2_mbox_dev *mdev = &mbox->dev[0]; 975 struct otx2_nic *pf = af_mbox->pfvf; 976 int offset, id, devid = 0; 977 struct mbox_hdr *rsp_hdr; 978 struct mbox_msghdr *msg; 979 u16 num_msgs; 980 981 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 982 num_msgs = rsp_hdr->num_msgs; 983 984 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); 985 986 trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)", 987 
num_msgs); 988 989 for (id = 0; id < num_msgs; id++) { 990 msg = (struct mbox_msghdr *)(mdev->mbase + offset); 991 992 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; 993 /* Skip processing VF's messages */ 994 if (!devid) 995 otx2_process_mbox_msg_up(pf, msg); 996 offset = mbox->rx_start + msg->next_msgoff; 997 } 998 /* Forward to VF iff VFs are really present */ 999 if (devid && pci_num_vf(pf->pdev)) { 1000 otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up, 1001 MBOX_DIR_PFVF_UP, devid - 1, 1002 num_msgs); 1003 return; 1004 } 1005 1006 otx2_mbox_msg_send(mbox, 0); 1007 } 1008 1009 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) 1010 { 1011 struct otx2_nic *pf = (struct otx2_nic *)pf_irq; 1012 struct mbox *mw = &pf->mbox; 1013 struct otx2_mbox_dev *mdev; 1014 struct otx2_mbox *mbox; 1015 struct mbox_hdr *hdr; 1016 u64 mbox_data; 1017 1018 /* Clear the IRQ */ 1019 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); 1020 1021 1022 mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0); 1023 1024 if (mbox_data & MBOX_UP_MSG) { 1025 mbox_data &= ~MBOX_UP_MSG; 1026 otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data); 1027 1028 mbox = &mw->mbox_up; 1029 mdev = &mbox->dev[0]; 1030 otx2_sync_mbox_bbuf(mbox, 0); 1031 1032 hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 1033 if (hdr->num_msgs) 1034 queue_work(pf->mbox_wq, &mw->mbox_up_wrk); 1035 1036 trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", 1037 BIT_ULL(0)); 1038 1039 trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)", 1040 hdr->num_msgs); 1041 } 1042 1043 if (mbox_data & MBOX_DOWN_MSG) { 1044 mbox_data &= ~MBOX_DOWN_MSG; 1045 otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data); 1046 1047 mbox = &mw->mbox; 1048 mdev = &mbox->dev[0]; 1049 otx2_sync_mbox_bbuf(mbox, 0); 1050 1051 hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); 1052 if (hdr->num_msgs) 1053 queue_work(pf->mbox_wq, &mw->mbox_wrk); 1054 1055 trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", 1056 BIT_ULL(0)); 1057 1058 trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)", 1059 hdr->num_msgs); 1060 } 1061 1062 return IRQ_HANDLED; 1063 } 1064 1065 void otx2_disable_mbox_intr(struct otx2_nic *pf) 1066 { 1067 int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); 1068 1069 /* Disable AF => PF mailbox IRQ */ 1070 otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); 1071 free_irq(vector, pf); 1072 } 1073 EXPORT_SYMBOL(otx2_disable_mbox_intr); 1074 1075 int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) 1076 { 1077 struct otx2_hw *hw = &pf->hw; 1078 struct msg_req *req; 1079 char *irq_name; 1080 int err; 1081 1082 /* Register mailbox interrupt handler */ 1083 irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; 1084 snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox"); 1085 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), 1086 otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); 1087 if (err) { 1088 dev_err(pf->dev, 1089 "RVUPF: IRQ registration failed for PFAF mbox irq\n"); 1090 return err; 1091 } 1092 1093 /* Enable mailbox interrupt for msgs coming from AF. 1094 * First clear to avoid spurious interrupts, if any. 
1095 */ 1096 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); 1097 otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); 1098 1099 if (!probe_af) 1100 return 0; 1101 1102 /* Check mailbox communication with AF */ 1103 req = otx2_mbox_alloc_msg_ready(&pf->mbox); 1104 if (!req) { 1105 otx2_disable_mbox_intr(pf); 1106 return -ENOMEM; 1107 } 1108 err = otx2_sync_mbox_msg(&pf->mbox); 1109 if (err) { 1110 dev_warn(pf->dev, 1111 "AF not responding to mailbox, deferring probe\n"); 1112 otx2_disable_mbox_intr(pf); 1113 return -EPROBE_DEFER; 1114 } 1115 1116 return 0; 1117 } 1118 1119 void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) 1120 { 1121 struct mbox *mbox = &pf->mbox; 1122 1123 if (pf->mbox_wq) { 1124 destroy_workqueue(pf->mbox_wq); 1125 pf->mbox_wq = NULL; 1126 } 1127 1128 if (mbox->mbox.hwbase) 1129 iounmap((void __iomem *)mbox->mbox.hwbase); 1130 1131 otx2_mbox_destroy(&mbox->mbox); 1132 otx2_mbox_destroy(&mbox->mbox_up); 1133 } 1134 EXPORT_SYMBOL(otx2_pfaf_mbox_destroy); 1135 1136 int otx2_pfaf_mbox_init(struct otx2_nic *pf) 1137 { 1138 struct mbox *mbox = &pf->mbox; 1139 void __iomem *hwbase; 1140 int err; 1141 1142 mbox->pfvf = pf; 1143 pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox", 1144 WQ_HIGHPRI | WQ_MEM_RECLAIM); 1145 if (!pf->mbox_wq) 1146 return -ENOMEM; 1147 1148 /* Mailbox is a reserved memory (in RAM) region shared between 1149 * admin function (i.e AF) and this PF, shouldn't be mapped as 1150 * device memory to allow unaligned accesses. 1151 */ 1152 hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), 1153 MBOX_SIZE); 1154 if (!hwbase) { 1155 dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); 1156 err = -ENOMEM; 1157 goto exit; 1158 } 1159 1160 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, 1161 MBOX_DIR_PFAF, 1); 1162 if (err) 1163 goto exit; 1164 1165 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, 1166 MBOX_DIR_PFAF_UP, 1); 1167 if (err) 1168 goto exit; 1169 1170 err = otx2_mbox_bbuf_init(mbox, pf->pdev); 1171 if (err) 1172 goto exit; 1173 1174 INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler); 1175 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler); 1176 mutex_init(&mbox->lock); 1177 1178 return 0; 1179 exit: 1180 otx2_pfaf_mbox_destroy(pf); 1181 return err; 1182 } 1183 1184 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) 1185 { 1186 struct msg_req *msg; 1187 int err; 1188 1189 mutex_lock(&pf->mbox.lock); 1190 if (enable) 1191 msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox); 1192 else 1193 msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox); 1194 1195 if (!msg) { 1196 mutex_unlock(&pf->mbox.lock); 1197 return -ENOMEM; 1198 } 1199 1200 err = otx2_sync_mbox_msg(&pf->mbox); 1201 mutex_unlock(&pf->mbox.lock); 1202 return err; 1203 } 1204 1205 int otx2_reset_mac_stats(struct otx2_nic *pfvf) 1206 { 1207 struct msg_req *req; 1208 int err; 1209 1210 mutex_lock(&pfvf->mbox.lock); 1211 req = otx2_mbox_alloc_msg_cgx_stats_rst(&pfvf->mbox); 1212 if (!req) { 1213 mutex_unlock(&pfvf->mbox.lock); 1214 return -ENOMEM; 1215 } 1216 1217 err = otx2_sync_mbox_msg(&pfvf->mbox); 1218 mutex_unlock(&pfvf->mbox.lock); 1219 return err; 1220 } 1221 1222 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) 1223 { 1224 struct msg_req *msg; 1225 int err; 1226 1227 if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap, 1228 pf->flow_cfg->dmacflt_max_flows)) 1229 netdev_warn(pf->netdev, 1230 "CGX/RPM internal loopback might not work as DMAC filters are active\n"); 1231 1232 
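    /* Ask the AF over the PF-AF mailbox to enable or disable CGX/RPM
     * internal loopback for this interface; the request/response
     * exchange below is serialized on mbox.lock.
     */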
mutex_lock(&pf->mbox.lock); 1233 if (enable) 1234 msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); 1235 else 1236 msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox); 1237 1238 if (!msg) { 1239 mutex_unlock(&pf->mbox.lock); 1240 return -ENOMEM; 1241 } 1242 1243 err = otx2_sync_mbox_msg(&pf->mbox); 1244 mutex_unlock(&pf->mbox.lock); 1245 return err; 1246 } 1247 1248 int otx2_set_real_num_queues(struct net_device *netdev, 1249 int tx_queues, int rx_queues) 1250 { 1251 int err; 1252 1253 err = netif_set_real_num_tx_queues(netdev, tx_queues); 1254 if (err) { 1255 netdev_err(netdev, 1256 "Failed to set no of Tx queues: %d\n", tx_queues); 1257 return err; 1258 } 1259 1260 err = netif_set_real_num_rx_queues(netdev, rx_queues); 1261 if (err) 1262 netdev_err(netdev, 1263 "Failed to set no of Rx queues: %d\n", rx_queues); 1264 return err; 1265 } 1266 EXPORT_SYMBOL(otx2_set_real_num_queues); 1267 1268 static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = { 1269 "NIX_SQOPERR_OOR", 1270 "NIX_SQOPERR_CTX_FAULT", 1271 "NIX_SQOPERR_CTX_POISON", 1272 "NIX_SQOPERR_DISABLED", 1273 "NIX_SQOPERR_SIZE_ERR", 1274 "NIX_SQOPERR_OFLOW", 1275 "NIX_SQOPERR_SQB_NULL", 1276 "NIX_SQOPERR_SQB_FAULT", 1277 "NIX_SQOPERR_SQE_SZ_ZERO", 1278 }; 1279 1280 static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = { 1281 "NIX_MNQERR_SQ_CTX_FAULT", 1282 "NIX_MNQERR_SQ_CTX_POISON", 1283 "NIX_MNQERR_SQB_FAULT", 1284 "NIX_MNQERR_SQB_POISON", 1285 "NIX_MNQERR_TOTAL_ERR", 1286 "NIX_MNQERR_LSO_ERR", 1287 "NIX_MNQERR_CQ_QUERY_ERR", 1288 "NIX_MNQERR_MAX_SQE_SIZE_ERR", 1289 "NIX_MNQERR_MAXLEN_ERR", 1290 "NIX_MNQERR_SQE_SIZEM1_ZERO", 1291 }; 1292 1293 static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = { 1294 [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD", 1295 [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT", 1296 [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON", 1297 [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT", 1298 [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON", 1299 [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR", 1300 [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR", 1301 [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT", 1302 [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON", 1303 [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR", 1304 [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR", 1305 [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR", 1306 [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR", 1307 [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC", 1308 [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR", 1309 [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT", 1310 [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON", 1311 [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION", 1312 [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL", 1313 [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR", 1314 [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR", 1315 [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT", 1316 [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", 1317 [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", 1318 [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT", 1319 [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR", 1320 }; 1321 1322 static irqreturn_t otx2_q_intr_handler(int irq, void *data) 1323 { 1324 struct otx2_nic *pf = data; 1325 struct 
otx2_snd_queue *sq;
    u64 val, *ptr;
    u64 qidx = 0;

    /* CQ */
    for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
        ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
        val = otx2_atomic64_add((qidx << 44), ptr);

        otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
                     (val & NIX_CQERRINT_BITS));
        if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
            continue;

        /* Bit 42 set in the result means the read of NIX_LF_CQ_OP_INT
         * itself failed.
         */
        if (val & BIT_ULL(42)) {
            netdev_err(pf->netdev,
                       "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
                       qidx, otx2_read64(pf, NIX_LF_ERR_INT));
        } else {
            if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
                netdev_err(pf->netdev, "CQ%lld: Doorbell error",
                           qidx);
            if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
                netdev_err(pf->netdev,
                           "CQ%lld: Memory fault on CQE write to LLC/DRAM",
                           qidx);
        }

        schedule_work(&pf->reset_task);
    }

    /* SQ */
    for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
        u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
        u8 sq_op_err_code, mnq_err_code, snd_err_code;

        sq = &pf->qset.sq[qidx];
        if (!sq->sqb_ptrs)
            continue;

        /* The debug registers below capture the first error seen on the
         * corresponding register. We don't have to check against the SQ
         * qid as these are fatal errors.
         */

        ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
        val = otx2_atomic64_add((qidx << 44), ptr);
        otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
                     (val & NIX_SQINT_BITS));

        if (val & BIT_ULL(42)) {
            netdev_err(pf->netdev,
                       "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
                       qidx, otx2_read64(pf, NIX_LF_ERR_INT));
            goto done;
        }

        sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
        if (!(sq_op_err_dbg & BIT(44)))
            goto chk_mnq_err_dbg;

        sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
        netdev_err(pf->netdev,
                   "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
                   qidx, sq_op_err_dbg,
                   nix_sqoperr_e_str[sq_op_err_code],
                   sq_op_err_code);

        otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));

        if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
            goto chk_mnq_err_dbg;

        /* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure.
         * TODO: But we are in irq context.
How to call mbox functions which does sleep 1400 */ 1401 1402 chk_mnq_err_dbg: 1403 mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG); 1404 if (!(mnq_err_dbg & BIT(44))) 1405 goto chk_snd_err_dbg; 1406 1407 mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); 1408 netdev_err(pf->netdev, 1409 "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n", 1410 qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code], 1411 mnq_err_code); 1412 otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); 1413 1414 chk_snd_err_dbg: 1415 snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); 1416 if (snd_err_dbg & BIT(44)) { 1417 snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); 1418 netdev_err(pf->netdev, 1419 "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n", 1420 qidx, snd_err_dbg, 1421 nix_snd_status_e_str[snd_err_code], 1422 snd_err_code); 1423 otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); 1424 } 1425 1426 done: 1427 /* Print values and reset */ 1428 if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) 1429 netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", 1430 qidx); 1431 1432 schedule_work(&pf->reset_task); 1433 } 1434 1435 return IRQ_HANDLED; 1436 } 1437 1438 irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) 1439 { 1440 struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq; 1441 struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev; 1442 int qidx = cq_poll->cint_idx; 1443 1444 /* Disable interrupts. 1445 * 1446 * Completion interrupts behave in a level-triggered interrupt 1447 * fashion, and hence have to be cleared only after it is serviced. 1448 */ 1449 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); 1450 1451 /* Schedule NAPI */ 1452 pf->napi_events++; 1453 napi_schedule_irqoff(&cq_poll->napi); 1454 1455 return IRQ_HANDLED; 1456 } 1457 EXPORT_SYMBOL(otx2_cq_intr_handler); 1458 1459 void otx2_disable_napi(struct otx2_nic *pf) 1460 { 1461 struct otx2_qset *qset = &pf->qset; 1462 struct otx2_cq_poll *cq_poll; 1463 struct work_struct *work; 1464 int qidx; 1465 1466 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { 1467 cq_poll = &qset->napi[qidx]; 1468 work = &cq_poll->dim.work; 1469 if (work->func) 1470 cancel_work_sync(work); 1471 napi_disable(&cq_poll->napi); 1472 netif_napi_del(&cq_poll->napi); 1473 } 1474 } 1475 EXPORT_SYMBOL(otx2_disable_napi); 1476 1477 static void otx2_free_cq_res(struct otx2_nic *pf) 1478 { 1479 struct otx2_qset *qset = &pf->qset; 1480 struct otx2_cq_queue *cq; 1481 int qidx; 1482 1483 /* Disable CQs */ 1484 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false); 1485 for (qidx = 0; qidx < qset->cq_cnt; qidx++) { 1486 cq = &qset->cq[qidx]; 1487 qmem_free(pf->dev, cq->cqe); 1488 } 1489 } 1490 1491 static void otx2_free_sq_res(struct otx2_nic *pf) 1492 { 1493 struct otx2_qset *qset = &pf->qset; 1494 struct otx2_snd_queue *sq; 1495 int qidx; 1496 1497 /* Disable SQs */ 1498 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false); 1499 /* Free SQB pointers */ 1500 otx2_sq_free_sqbs(pf); 1501 for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) { 1502 sq = &qset->sq[qidx]; 1503 /* Skip freeing Qos queues if they are not initialized */ 1504 if (!sq->sqe) 1505 continue; 1506 qmem_free(pf->dev, sq->sqe); 1507 qmem_free(pf->dev, sq->sqe_ring); 1508 qmem_free(pf->dev, sq->cpt_resp); 1509 qmem_free(pf->dev, sq->tso_hdrs); 1510 kfree(sq->sg); 1511 kfree(sq->sqb_ptrs); 1512 } 1513 } 1514 1515 static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu) 1516 { 1517 int frame_size; 1518 int total_size; 1519 int rbuf_size; 1520 1521 if (pf->hw.rbuf_len) 1522 return 
ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM; 1523 1524 /* The data transferred by NIX to memory consists of actual packet 1525 * plus additional data which has timestamp and/or EDSA/HIGIG2 1526 * headers if interface is configured in corresponding modes. 1527 * NIX transfers entire data using 6 segments/buffers and writes 1528 * a CQE_RX descriptor with those segment addresses. First segment 1529 * has additional data prepended to packet. Also software omits a 1530 * headroom of 128 bytes in each segment. Hence the total size of 1531 * memory needed to receive a packet with 'mtu' is: 1532 * frame size = mtu + additional data; 1533 * memory = frame_size + headroom * 6; 1534 * each receive buffer size = memory / 6; 1535 */ 1536 frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; 1537 total_size = frame_size + OTX2_HEAD_ROOM * 6; 1538 rbuf_size = total_size / 6; 1539 1540 return ALIGN(rbuf_size, 2048); 1541 } 1542 1543 int otx2_init_hw_resources(struct otx2_nic *pf) 1544 { 1545 struct nix_lf_free_req *free_req; 1546 struct mbox *mbox = &pf->mbox; 1547 struct otx2_hw *hw = &pf->hw; 1548 struct msg_req *req; 1549 int err = 0, lvl; 1550 1551 /* Set required NPA LF's pool counts 1552 * Auras and Pools are used in a 1:1 mapping, 1553 * so, aura count = pool count. 1554 */ 1555 hw->rqpool_cnt = hw->rx_queues; 1556 hw->sqpool_cnt = otx2_get_total_tx_queues(pf); 1557 hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; 1558 1559 if (!otx2_rep_dev(pf->pdev)) { 1560 /* Maximum hardware supported transmit length */ 1561 pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; 1562 pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); 1563 } 1564 1565 mutex_lock(&mbox->lock); 1566 /* NPA init */ 1567 err = otx2_config_npa(pf); 1568 if (err) 1569 goto exit; 1570 1571 /* NIX init */ 1572 err = otx2_config_nix(pf); 1573 if (err) 1574 goto err_free_npa_lf; 1575 1576 /* Default disable backpressure on NIX-CPT */ 1577 otx2_nix_cpt_config_bp(pf, false); 1578 1579 /* Enable backpressure for CGX mapped PF/VFs */ 1580 if (!is_otx2_lbkvf(pf->pdev)) 1581 otx2_nix_config_bp(pf, true); 1582 1583 /* Init Auras and pools used by NIX RQ, for free buffer ptrs */ 1584 err = otx2_rq_aura_pool_init(pf); 1585 if (err) { 1586 mutex_unlock(&mbox->lock); 1587 goto err_free_nix_lf; 1588 } 1589 /* Init Auras and pools used by NIX SQ, for queueing SQEs */ 1590 err = otx2_sq_aura_pool_init(pf); 1591 if (err) { 1592 mutex_unlock(&mbox->lock); 1593 goto err_free_rq_ptrs; 1594 } 1595 1596 err = otx2_txsch_alloc(pf); 1597 if (err) { 1598 mutex_unlock(&mbox->lock); 1599 goto err_free_sq_ptrs; 1600 } 1601 1602 #ifdef CONFIG_DCB 1603 if (pf->pfc_en) { 1604 err = otx2_pfc_txschq_alloc(pf); 1605 if (err) { 1606 mutex_unlock(&mbox->lock); 1607 goto err_free_sq_ptrs; 1608 } 1609 } 1610 #endif 1611 1612 err = otx2_config_nix_queues(pf); 1613 if (err) { 1614 mutex_unlock(&mbox->lock); 1615 goto err_free_txsch; 1616 } 1617 1618 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 1619 int idx; 1620 1621 for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) { 1622 err = otx2_txschq_config(pf, lvl, idx, false); 1623 if (err) { 1624 dev_err(pf->dev, "Failed to config TXSCH\n"); 1625 mutex_unlock(&mbox->lock); 1626 goto err_free_nix_queues; 1627 } 1628 } 1629 } 1630 1631 #ifdef CONFIG_DCB 1632 if (pf->pfc_en) { 1633 err = otx2_pfc_txschq_config(pf); 1634 if (err) { 1635 mutex_unlock(&mbox->lock); 1636 goto err_free_nix_queues; 1637 } 1638 } 1639 #endif 1640 1641 mutex_unlock(&mbox->lock); 1642 return err; 1643 1644 err_free_nix_queues: 1645 
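    /* Error unwind: release resources in roughly the reverse order of
     * their setup above - NIX queues, TX schedulers, SQ/RQ aura pointers,
     * NPA pools/auras, then the NIX and NPA LFs themselves.
     */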
otx2_free_sq_res(pf); 1646 otx2_free_cq_res(pf); 1647 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); 1648 err_free_txsch: 1649 otx2_txschq_stop(pf); 1650 err_free_sq_ptrs: 1651 otx2_sq_free_sqbs(pf); 1652 err_free_rq_ptrs: 1653 otx2_free_aura_ptr(pf, AURA_NIX_RQ); 1654 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); 1655 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); 1656 otx2_aura_pool_free(pf); 1657 err_free_nix_lf: 1658 mutex_lock(&mbox->lock); 1659 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); 1660 if (free_req) { 1661 free_req->flags = NIX_LF_DISABLE_FLOWS; 1662 if (otx2_sync_mbox_msg(mbox)) 1663 dev_err(pf->dev, "%s failed to free nixlf\n", __func__); 1664 } 1665 err_free_npa_lf: 1666 /* Reset NPA LF */ 1667 req = otx2_mbox_alloc_msg_npa_lf_free(mbox); 1668 if (req) { 1669 if (otx2_sync_mbox_msg(mbox)) 1670 dev_err(pf->dev, "%s failed to free npalf\n", __func__); 1671 } 1672 exit: 1673 mutex_unlock(&mbox->lock); 1674 return err; 1675 } 1676 EXPORT_SYMBOL(otx2_init_hw_resources); 1677 1678 void otx2_free_hw_resources(struct otx2_nic *pf) 1679 { 1680 struct otx2_qset *qset = &pf->qset; 1681 struct nix_lf_free_req *free_req; 1682 struct mbox *mbox = &pf->mbox; 1683 struct otx2_cq_queue *cq; 1684 struct msg_req *req; 1685 int qidx; 1686 1687 /* Ensure all SQE are processed */ 1688 otx2_sqb_flush(pf); 1689 1690 /* Stop transmission */ 1691 otx2_txschq_stop(pf); 1692 1693 #ifdef CONFIG_DCB 1694 if (pf->pfc_en) 1695 otx2_pfc_txschq_stop(pf); 1696 #endif 1697 1698 if (!otx2_rep_dev(pf->pdev)) 1699 otx2_clean_qos_queues(pf); 1700 1701 mutex_lock(&mbox->lock); 1702 /* Disable backpressure */ 1703 if (!is_otx2_lbkvf(pf->pdev)) 1704 otx2_nix_config_bp(pf, false); 1705 mutex_unlock(&mbox->lock); 1706 1707 /* Disable RQs */ 1708 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); 1709 1710 /*Dequeue all CQEs */ 1711 for (qidx = 0; qidx < qset->cq_cnt; qidx++) { 1712 cq = &qset->cq[qidx]; 1713 if (cq->cq_type == CQ_RX) 1714 otx2_cleanup_rx_cqes(pf, cq, qidx); 1715 else 1716 otx2_cleanup_tx_cqes(pf, cq); 1717 } 1718 otx2_free_pending_sqe(pf); 1719 1720 otx2_free_sq_res(pf); 1721 1722 /* Free RQ buffer pointers*/ 1723 otx2_free_aura_ptr(pf, AURA_NIX_RQ); 1724 1725 otx2_free_cq_res(pf); 1726 1727 /* Free all ingress bandwidth profiles allocated */ 1728 if (!otx2_rep_dev(pf->pdev)) 1729 cn10k_free_all_ipolicers(pf); 1730 1731 mutex_lock(&mbox->lock); 1732 /* Reset NIX LF */ 1733 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); 1734 if (free_req) { 1735 free_req->flags = NIX_LF_DISABLE_FLOWS; 1736 if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN)) 1737 free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG; 1738 if (otx2_sync_mbox_msg(mbox)) 1739 dev_err(pf->dev, "%s failed to free nixlf\n", __func__); 1740 } 1741 mutex_unlock(&mbox->lock); 1742 1743 /* Disable NPA Pool and Aura hw context */ 1744 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); 1745 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); 1746 otx2_aura_pool_free(pf); 1747 1748 mutex_lock(&mbox->lock); 1749 /* Reset NPA LF */ 1750 req = otx2_mbox_alloc_msg_npa_lf_free(mbox); 1751 if (req) { 1752 if (otx2_sync_mbox_msg(mbox)) 1753 dev_err(pf->dev, "%s failed to free npalf\n", __func__); 1754 } 1755 mutex_unlock(&mbox->lock); 1756 } 1757 EXPORT_SYMBOL(otx2_free_hw_resources); 1758 1759 static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf) 1760 { 1761 int vf; 1762 1763 /* The AF driver will determine whether to allow the VF netdev or not */ 1764 if (is_otx2_vf(pfvf->pcifunc)) 1765 return true; 1766 1767 /* check if there are any trusted VFs associated 
with the PF netdev */ 1768 for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++) 1769 if (pfvf->vf_configs[vf].trusted) 1770 return true; 1771 return false; 1772 } 1773 1774 static void otx2_do_set_rx_mode(struct otx2_nic *pf) 1775 { 1776 struct net_device *netdev = pf->netdev; 1777 struct nix_rx_mode *req; 1778 bool promisc = false; 1779 1780 if (!(netdev->flags & IFF_UP)) 1781 return; 1782 1783 if ((netdev->flags & IFF_PROMISC) || 1784 (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) { 1785 promisc = true; 1786 } 1787 1788 /* Write unicast address to mcam entries or del from mcam */ 1789 if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) 1790 __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); 1791 1792 mutex_lock(&pf->mbox.lock); 1793 req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); 1794 if (!req) { 1795 mutex_unlock(&pf->mbox.lock); 1796 return; 1797 } 1798 1799 req->mode = NIX_RX_MODE_UCAST; 1800 1801 if (promisc) 1802 req->mode |= NIX_RX_MODE_PROMISC; 1803 if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) 1804 req->mode |= NIX_RX_MODE_ALLMULTI; 1805 1806 if (otx2_promisc_use_mce_list(pf)) 1807 req->mode |= NIX_RX_MODE_USE_MCE; 1808 1809 otx2_sync_mbox_msg(&pf->mbox); 1810 mutex_unlock(&pf->mbox.lock); 1811 } 1812 1813 static void otx2_set_irq_coalesce(struct otx2_nic *pfvf) 1814 { 1815 int cint; 1816 1817 for (cint = 0; cint < pfvf->hw.cint_cnt; cint++) 1818 otx2_config_irq_coalescing(pfvf, cint); 1819 } 1820 1821 static void otx2_dim_work(struct work_struct *w) 1822 { 1823 struct dim_cq_moder cur_moder; 1824 struct otx2_cq_poll *cq_poll; 1825 struct otx2_nic *pfvf; 1826 struct dim *dim; 1827 1828 dim = container_of(w, struct dim, work); 1829 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 1830 cq_poll = container_of(dim, struct otx2_cq_poll, dim); 1831 pfvf = (struct otx2_nic *)cq_poll->dev; 1832 pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ? 1833 CQ_TIMER_THRESH_MAX : cur_moder.usec; 1834 pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ? 1835 NAPI_POLL_WEIGHT : cur_moder.pkts; 1836 otx2_set_irq_coalesce(pfvf); 1837 dim->state = DIM_START_MEASURE; 1838 } 1839 1840 void otx2_free_queue_mem(struct otx2_qset *qset) 1841 { 1842 kfree(qset->sq); 1843 qset->sq = NULL; 1844 kfree(qset->cq); 1845 qset->cq = NULL; 1846 kfree(qset->rq); 1847 qset->rq = NULL; 1848 kfree(qset->napi); 1849 qset->napi = NULL; 1850 } 1851 EXPORT_SYMBOL(otx2_free_queue_mem); 1852 1853 int otx2_alloc_queue_mem(struct otx2_nic *pf) 1854 { 1855 struct otx2_qset *qset = &pf->qset; 1856 struct otx2_cq_poll *cq_poll; 1857 1858 1859 /* RQ and SQs are mapped to different CQs, 1860 * so find out max CQ IRQs (i.e CINTs) needed. 1861 */ 1862 pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues; 1863 pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues, 1864 pf->hw.tc_tx_queues); 1865 1866 pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf); 1867 1868 qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL); 1869 if (!qset->napi) 1870 return -ENOMEM; 1871 1872 /* CQ size of RQ */ 1873 qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256); 1874 /* CQ size of SQ */ 1875 qset->sqe_cnt = qset->sqe_cnt ? 
qset->sqe_cnt : Q_COUNT(Q_SIZE_4K); 1876 1877 qset->cq = kcalloc(pf->qset.cq_cnt, 1878 sizeof(struct otx2_cq_queue), GFP_KERNEL); 1879 if (!qset->cq) 1880 goto err_free_mem; 1881 1882 qset->sq = kcalloc(otx2_get_total_tx_queues(pf), 1883 sizeof(struct otx2_snd_queue), GFP_KERNEL); 1884 if (!qset->sq) 1885 goto err_free_mem; 1886 1887 qset->rq = kcalloc(pf->hw.rx_queues, 1888 sizeof(struct otx2_rcv_queue), GFP_KERNEL); 1889 if (!qset->rq) 1890 goto err_free_mem; 1891 1892 return 0; 1893 1894 err_free_mem: 1895 otx2_free_queue_mem(qset); 1896 return -ENOMEM; 1897 } 1898 EXPORT_SYMBOL(otx2_alloc_queue_mem); 1899 1900 int otx2_open(struct net_device *netdev) 1901 { 1902 struct otx2_nic *pf = netdev_priv(netdev); 1903 struct otx2_cq_poll *cq_poll = NULL; 1904 struct otx2_qset *qset = &pf->qset; 1905 int err = 0, qidx, vec; 1906 char *irq_name; 1907 1908 netif_carrier_off(netdev); 1909 1910 err = otx2_alloc_queue_mem(pf); 1911 if (err) 1912 return err; 1913 1914 err = otx2_init_hw_resources(pf); 1915 if (err) 1916 goto err_free_mem; 1917 1918 /* Register NAPI handler */ 1919 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { 1920 cq_poll = &qset->napi[qidx]; 1921 cq_poll->cint_idx = qidx; 1922 /* RQ0 & SQ0 are mapped to CINT0 and so on.. 1923 * 'cq_ids[0]' points to RQ's CQ and 1924 * 'cq_ids[1]' points to SQ's CQ and 1925 * 'cq_ids[2]' points to XDP's CQ and 1926 */ 1927 cq_poll->cq_ids[CQ_RX] = 1928 (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; 1929 cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ? 1930 qidx + pf->hw.rx_queues : CINT_INVALID_CQ; 1931 if (pf->xdp_prog) 1932 cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ? 1933 (qidx + pf->hw.rx_queues + 1934 pf->hw.tx_queues) : 1935 CINT_INVALID_CQ; 1936 else 1937 cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; 1938 1939 cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ? 
1940 (qidx + pf->hw.rx_queues + 1941 pf->hw.non_qos_queues) : 1942 CINT_INVALID_CQ; 1943 1944 cq_poll->dev = (void *)pf; 1945 cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; 1946 INIT_WORK(&cq_poll->dim.work, otx2_dim_work); 1947 netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler); 1948 napi_enable(&cq_poll->napi); 1949 } 1950 1951 /* Set maximum frame size allowed in HW */ 1952 err = otx2_hw_set_mtu(pf, netdev->mtu); 1953 if (err) 1954 goto err_disable_napi; 1955 1956 /* Setup segmentation algorithms, if failed, clear offload capability */ 1957 otx2_setup_segmentation(pf); 1958 1959 /* Initialize RSS */ 1960 err = otx2_rss_init(pf); 1961 if (err) 1962 goto err_disable_napi; 1963 1964 /* Register Queue IRQ handlers */ 1965 vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START; 1966 irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; 1967 1968 snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name); 1969 1970 err = request_irq(pci_irq_vector(pf->pdev, vec), 1971 otx2_q_intr_handler, 0, irq_name, pf); 1972 if (err) { 1973 dev_err(pf->dev, 1974 "RVUPF%d: IRQ registration failed for QERR\n", 1975 rvu_get_pf(pf->pcifunc)); 1976 goto err_disable_napi; 1977 } 1978 1979 /* Enable QINT IRQ */ 1980 otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0)); 1981 1982 /* Register CQ IRQ handlers */ 1983 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; 1984 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { 1985 irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; 1986 int name_len; 1987 1988 name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", 1989 pf->netdev->name, qidx); 1990 if (name_len >= NAME_SIZE) { 1991 dev_err(pf->dev, 1992 "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n", 1993 rvu_get_pf(pf->pcifunc), qidx); 1994 err = -EINVAL; 1995 goto err_free_cints; 1996 } 1997 1998 err = request_irq(pci_irq_vector(pf->pdev, vec), 1999 otx2_cq_intr_handler, 0, irq_name, 2000 &qset->napi[qidx]); 2001 if (err) { 2002 dev_err(pf->dev, 2003 "RVUPF%d: IRQ registration failed for CQ%d\n", 2004 rvu_get_pf(pf->pcifunc), qidx); 2005 goto err_free_cints; 2006 } 2007 vec++; 2008 2009 otx2_config_irq_coalescing(pf, qidx); 2010 2011 /* Enable CQ IRQ */ 2012 otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); 2013 otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); 2014 } 2015 2016 otx2_set_cints_affinity(pf); 2017 2018 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) 2019 otx2_enable_rxvlan(pf, true); 2020 2021 /* When reinitializing enable time stamping if it is enabled before */ 2022 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) { 2023 pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; 2024 otx2_config_hw_tx_tstamp(pf, true); 2025 } 2026 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) { 2027 pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; 2028 otx2_config_hw_rx_tstamp(pf, true); 2029 } 2030 2031 pf->flags &= ~OTX2_FLAG_INTF_DOWN; 2032 pf->flags &= ~OTX2_FLAG_PORT_UP; 2033 /* 'intf_down' may be checked on any cpu */ 2034 smp_wmb(); 2035 2036 /* Enable QoS configuration before starting tx queues */ 2037 otx2_qos_config_txschq(pf); 2038 2039 /* we have already received link status notification */ 2040 if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK)) 2041 otx2_handle_link_event(pf); 2042 2043 /* Install DMAC Filters */ 2044 if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) 2045 otx2_dmacflt_reinstall_flows(pf); 2046 2047 otx2_tc_apply_ingress_police_rules(pf); 2048 2049 err = otx2_rxtx_enable(pf, true); 2050 /* If a mbox communication error happens at this point then interface 2051 * will end up in a state such 
that it is in down state but hardware 2052 * mcam entries are enabled to receive the packets. Hence disable the 2053 * packet I/O. 2054 */ 2055 if (err == -EIO) 2056 goto err_disable_rxtx; 2057 else if (err) 2058 goto err_tx_stop_queues; 2059 2060 otx2_do_set_rx_mode(pf); 2061 2062 return 0; 2063 2064 err_disable_rxtx: 2065 otx2_rxtx_enable(pf, false); 2066 err_tx_stop_queues: 2067 netif_tx_stop_all_queues(netdev); 2068 netif_carrier_off(netdev); 2069 pf->flags |= OTX2_FLAG_INTF_DOWN; 2070 err_free_cints: 2071 otx2_free_cints(pf, qidx); 2072 vec = pci_irq_vector(pf->pdev, 2073 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); 2074 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); 2075 free_irq(vec, pf); 2076 err_disable_napi: 2077 otx2_disable_napi(pf); 2078 otx2_free_hw_resources(pf); 2079 err_free_mem: 2080 otx2_free_queue_mem(qset); 2081 return err; 2082 } 2083 EXPORT_SYMBOL(otx2_open); 2084 2085 int otx2_stop(struct net_device *netdev) 2086 { 2087 struct otx2_nic *pf = netdev_priv(netdev); 2088 struct otx2_cq_poll *cq_poll = NULL; 2089 struct otx2_qset *qset = &pf->qset; 2090 struct otx2_rss_info *rss; 2091 int qidx, vec, wrk; 2092 2093 /* If the DOWN flag is set resources are already freed */ 2094 if (pf->flags & OTX2_FLAG_INTF_DOWN) 2095 return 0; 2096 2097 netif_carrier_off(netdev); 2098 netif_tx_stop_all_queues(netdev); 2099 2100 pf->flags |= OTX2_FLAG_INTF_DOWN; 2101 /* 'intf_down' may be checked on any cpu */ 2102 smp_wmb(); 2103 2104 /* First stop packet Rx/Tx */ 2105 otx2_rxtx_enable(pf, false); 2106 2107 /* Clear RSS enable flag */ 2108 rss = &pf->hw.rss_info; 2109 rss->enable = false; 2110 if (!netif_is_rxfh_configured(netdev)) 2111 kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); 2112 2113 /* Cleanup Queue IRQ */ 2114 vec = pci_irq_vector(pf->pdev, 2115 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); 2116 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); 2117 free_irq(vec, pf); 2118 2119 /* Cleanup CQ NAPI and IRQ */ 2120 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; 2121 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { 2122 /* Disable interrupt */ 2123 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); 2124 2125 synchronize_irq(pci_irq_vector(pf->pdev, vec)); 2126 2127 cq_poll = &qset->napi[qidx]; 2128 napi_synchronize(&cq_poll->napi); 2129 vec++; 2130 } 2131 2132 netif_tx_disable(netdev); 2133 2134 for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) 2135 cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); 2136 devm_kfree(pf->dev, pf->refill_wrk); 2137 2138 otx2_free_hw_resources(pf); 2139 otx2_free_cints(pf, pf->hw.cint_cnt); 2140 otx2_disable_napi(pf); 2141 2142 for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) 2143 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); 2144 2145 otx2_free_queue_mem(qset); 2146 /* Do not clear RQ/SQ ringsize settings */ 2147 memset_startat(qset, 0, sqe_cnt); 2148 return 0; 2149 } 2150 EXPORT_SYMBOL(otx2_stop); 2151 2152 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) 2153 { 2154 struct otx2_nic *pf = netdev_priv(netdev); 2155 int qidx = skb_get_queue_mapping(skb); 2156 struct otx2_snd_queue *sq; 2157 struct netdev_queue *txq; 2158 int sq_idx; 2159 2160 /* XDP SQs are not mapped with TXQs 2161 * advance qid to derive correct sq mapped with QOS 2162 */ 2163 sq_idx = (qidx >= pf->hw.tx_queues) ? 
(qidx + pf->hw.xdp_queues) : qidx; 2164 2165 /* Check for minimum and maximum packet length */ 2166 if (skb->len <= ETH_HLEN || 2167 (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) { 2168 dev_kfree_skb(skb); 2169 return NETDEV_TX_OK; 2170 } 2171 2172 sq = &pf->qset.sq[sq_idx]; 2173 txq = netdev_get_tx_queue(netdev, qidx); 2174 2175 if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) { 2176 netif_tx_stop_queue(txq); 2177 2178 /* Check again, incase SQBs got freed up */ 2179 smp_mb(); 2180 if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) 2181 > sq->sqe_thresh) 2182 netif_tx_wake_queue(txq); 2183 2184 return NETDEV_TX_BUSY; 2185 } 2186 2187 return NETDEV_TX_OK; 2188 } 2189 2190 static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb, 2191 u16 htb_maj_id) 2192 { 2193 u16 classid; 2194 2195 if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id) 2196 classid = TC_H_MIN(skb->priority); 2197 else 2198 classid = READ_ONCE(pf->qos.defcls); 2199 2200 if (!classid) 2201 return 0; 2202 2203 return otx2_get_txq_by_classid(pf, classid); 2204 } 2205 2206 u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb, 2207 struct net_device *sb_dev) 2208 { 2209 struct otx2_nic *pf = netdev_priv(netdev); 2210 bool qos_enabled; 2211 #ifdef CONFIG_DCB 2212 u8 vlan_prio; 2213 #endif 2214 int txq; 2215 2216 qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues; 2217 if (unlikely(qos_enabled)) { 2218 /* This smp_load_acquire() pairs with smp_store_release() in 2219 * otx2_qos_root_add() called from htb offload root creation 2220 */ 2221 u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id); 2222 2223 if (unlikely(htb_maj_id)) { 2224 txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id); 2225 if (txq > 0) 2226 return txq; 2227 goto process_pfc; 2228 } 2229 } 2230 2231 process_pfc: 2232 #ifdef CONFIG_DCB 2233 if (!skb_vlan_tag_present(skb)) 2234 goto pick_tx; 2235 2236 vlan_prio = skb->vlan_tci >> 13; 2237 if ((vlan_prio > pf->hw.tx_queues - 1) || 2238 !pf->pfc_alloc_status[vlan_prio]) 2239 goto pick_tx; 2240 2241 return vlan_prio; 2242 2243 pick_tx: 2244 #endif 2245 txq = netdev_pick_tx(netdev, skb, NULL); 2246 if (unlikely(qos_enabled)) 2247 return txq % pf->hw.tx_queues; 2248 2249 return txq; 2250 } 2251 EXPORT_SYMBOL(otx2_select_queue); 2252 2253 static netdev_features_t otx2_fix_features(struct net_device *dev, 2254 netdev_features_t features) 2255 { 2256 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2257 features |= NETIF_F_HW_VLAN_STAG_RX; 2258 else 2259 features &= ~NETIF_F_HW_VLAN_STAG_RX; 2260 2261 return features; 2262 } 2263 2264 static void otx2_set_rx_mode(struct net_device *netdev) 2265 { 2266 struct otx2_nic *pf = netdev_priv(netdev); 2267 2268 queue_work(pf->otx2_wq, &pf->rx_mode_work); 2269 } 2270 2271 static void otx2_rx_mode_wrk_handler(struct work_struct *work) 2272 { 2273 struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); 2274 2275 otx2_do_set_rx_mode(pf); 2276 } 2277 2278 static int otx2_set_features(struct net_device *netdev, 2279 netdev_features_t features) 2280 { 2281 netdev_features_t changed = features ^ netdev->features; 2282 struct otx2_nic *pf = netdev_priv(netdev); 2283 2284 if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) 2285 return otx2_cgx_config_loopback(pf, 2286 features & NETIF_F_LOOPBACK); 2287 2288 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev)) 2289 return otx2_enable_rxvlan(pf, 2290 features & NETIF_F_HW_VLAN_CTAG_RX); 2291 2292 if (changed & NETIF_F_HW_ESP) 2293 return 
cn10k_ipsec_ethtool_init(netdev, 2294 features & NETIF_F_HW_ESP); 2295 2296 return otx2_handle_ntuple_tc_features(netdev, features); 2297 } 2298 2299 static void otx2_reset_task(struct work_struct *work) 2300 { 2301 struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task); 2302 2303 if (!netif_running(pf->netdev)) 2304 return; 2305 2306 rtnl_lock(); 2307 otx2_stop(pf->netdev); 2308 pf->reset_count++; 2309 otx2_open(pf->netdev); 2310 netif_trans_update(pf->netdev); 2311 rtnl_unlock(); 2312 } 2313 2314 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable) 2315 { 2316 struct msg_req *req; 2317 int err; 2318 2319 if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable) 2320 return 0; 2321 2322 mutex_lock(&pfvf->mbox.lock); 2323 if (enable) 2324 req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox); 2325 else 2326 req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox); 2327 if (!req) { 2328 mutex_unlock(&pfvf->mbox.lock); 2329 return -ENOMEM; 2330 } 2331 2332 err = otx2_sync_mbox_msg(&pfvf->mbox); 2333 if (err) { 2334 mutex_unlock(&pfvf->mbox.lock); 2335 return err; 2336 } 2337 2338 mutex_unlock(&pfvf->mbox.lock); 2339 if (enable) 2340 pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED; 2341 else 2342 pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; 2343 return 0; 2344 } 2345 2346 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) 2347 { 2348 struct msg_req *req; 2349 int err; 2350 2351 if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable) 2352 return 0; 2353 2354 mutex_lock(&pfvf->mbox.lock); 2355 if (enable) 2356 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox); 2357 else 2358 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox); 2359 if (!req) { 2360 mutex_unlock(&pfvf->mbox.lock); 2361 return -ENOMEM; 2362 } 2363 2364 err = otx2_sync_mbox_msg(&pfvf->mbox); 2365 if (err) { 2366 mutex_unlock(&pfvf->mbox.lock); 2367 return err; 2368 } 2369 2370 mutex_unlock(&pfvf->mbox.lock); 2371 if (enable) 2372 pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED; 2373 else 2374 pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; 2375 return 0; 2376 } 2377 2378 int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) 2379 { 2380 struct otx2_nic *pfvf = netdev_priv(netdev); 2381 struct hwtstamp_config config; 2382 2383 if (!pfvf->ptp) 2384 return -ENODEV; 2385 2386 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2387 return -EFAULT; 2388 2389 switch (config.tx_type) { 2390 case HWTSTAMP_TX_OFF: 2391 if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC) 2392 pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC; 2393 2394 cancel_delayed_work(&pfvf->ptp->synctstamp_work); 2395 otx2_config_hw_tx_tstamp(pfvf, false); 2396 break; 2397 case HWTSTAMP_TX_ONESTEP_SYNC: 2398 if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) 2399 return -ERANGE; 2400 pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC; 2401 schedule_delayed_work(&pfvf->ptp->synctstamp_work, 2402 msecs_to_jiffies(500)); 2403 fallthrough; 2404 case HWTSTAMP_TX_ON: 2405 otx2_config_hw_tx_tstamp(pfvf, true); 2406 break; 2407 default: 2408 return -ERANGE; 2409 } 2410 2411 switch (config.rx_filter) { 2412 case HWTSTAMP_FILTER_NONE: 2413 otx2_config_hw_rx_tstamp(pfvf, false); 2414 break; 2415 case HWTSTAMP_FILTER_ALL: 2416 case HWTSTAMP_FILTER_SOME: 2417 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2418 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2419 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2420 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2421 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2422 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 
2423 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2424 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2425 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2426 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2427 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2428 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2429 otx2_config_hw_rx_tstamp(pfvf, true); 2430 config.rx_filter = HWTSTAMP_FILTER_ALL; 2431 break; 2432 default: 2433 return -ERANGE; 2434 } 2435 2436 memcpy(&pfvf->tstamp, &config, sizeof(config)); 2437 2438 return copy_to_user(ifr->ifr_data, &config, 2439 sizeof(config)) ? -EFAULT : 0; 2440 } 2441 EXPORT_SYMBOL(otx2_config_hwtstamp); 2442 2443 int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) 2444 { 2445 struct otx2_nic *pfvf = netdev_priv(netdev); 2446 struct hwtstamp_config *cfg = &pfvf->tstamp; 2447 2448 switch (cmd) { 2449 case SIOCSHWTSTAMP: 2450 return otx2_config_hwtstamp(netdev, req); 2451 case SIOCGHWTSTAMP: 2452 return copy_to_user(req->ifr_data, cfg, 2453 sizeof(*cfg)) ? -EFAULT : 0; 2454 default: 2455 return -EOPNOTSUPP; 2456 } 2457 } 2458 EXPORT_SYMBOL(otx2_ioctl); 2459 2460 static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) 2461 { 2462 struct npc_install_flow_req *req; 2463 int err; 2464 2465 mutex_lock(&pf->mbox.lock); 2466 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); 2467 if (!req) { 2468 err = -ENOMEM; 2469 goto out; 2470 } 2471 2472 ether_addr_copy(req->packet.dmac, mac); 2473 eth_broadcast_addr((u8 *)&req->mask.dmac); 2474 req->features = BIT_ULL(NPC_DMAC); 2475 req->channel = pf->hw.rx_chan_base; 2476 req->intf = NIX_INTF_RX; 2477 req->default_rule = 1; 2478 req->append = 1; 2479 req->vf = vf + 1; 2480 req->op = NIX_RX_ACTION_DEFAULT; 2481 2482 err = otx2_sync_mbox_msg(&pf->mbox); 2483 out: 2484 mutex_unlock(&pf->mbox.lock); 2485 return err; 2486 } 2487 2488 static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2489 { 2490 struct otx2_nic *pf = netdev_priv(netdev); 2491 struct pci_dev *pdev = pf->pdev; 2492 struct otx2_vf_config *config; 2493 int ret; 2494 2495 if (!netif_running(netdev)) 2496 return -EAGAIN; 2497 2498 if (vf >= pf->total_vfs) 2499 return -EINVAL; 2500 2501 if (!is_valid_ether_addr(mac)) 2502 return -EINVAL; 2503 2504 config = &pf->vf_configs[vf]; 2505 ether_addr_copy(config->mac, mac); 2506 2507 ret = otx2_do_set_vf_mac(pf, vf, mac); 2508 if (ret == 0) 2509 dev_info(&pdev->dev, 2510 "Load/Reload VF driver\n"); 2511 2512 return ret; 2513 } 2514 2515 static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos, 2516 __be16 proto) 2517 { 2518 struct otx2_flow_config *flow_cfg = pf->flow_cfg; 2519 struct nix_vtag_config_rsp *vtag_rsp; 2520 struct npc_delete_flow_req *del_req; 2521 struct nix_vtag_config *vtag_req; 2522 struct npc_install_flow_req *req; 2523 struct otx2_vf_config *config; 2524 int err = 0; 2525 u32 idx; 2526 2527 config = &pf->vf_configs[vf]; 2528 2529 if (!vlan && !config->vlan) 2530 goto out; 2531 2532 mutex_lock(&pf->mbox.lock); 2533 2534 /* free old tx vtag entry */ 2535 if (config->vlan) { 2536 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); 2537 if (!vtag_req) { 2538 err = -ENOMEM; 2539 goto out; 2540 } 2541 vtag_req->cfg_type = 0; 2542 vtag_req->tx.free_vtag0 = 1; 2543 vtag_req->tx.vtag0_idx = config->tx_vtag_idx; 2544 2545 err = otx2_sync_mbox_msg(&pf->mbox); 2546 if (err) 2547 goto out; 2548 } 2549 2550 if (!vlan && config->vlan) { 2551 /* rx */ 2552 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); 2553 if (!del_req) { 2554 err = -ENOMEM; 2555 goto out; 2556 } 2557 idx = ((vf * 
OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); 2558 del_req->entry = 2559 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; 2560 err = otx2_sync_mbox_msg(&pf->mbox); 2561 if (err) 2562 goto out; 2563 2564 /* tx */ 2565 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); 2566 if (!del_req) { 2567 err = -ENOMEM; 2568 goto out; 2569 } 2570 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); 2571 del_req->entry = 2572 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; 2573 err = otx2_sync_mbox_msg(&pf->mbox); 2574 2575 goto out; 2576 } 2577 2578 /* rx */ 2579 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); 2580 if (!req) { 2581 err = -ENOMEM; 2582 goto out; 2583 } 2584 2585 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); 2586 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; 2587 req->packet.vlan_tci = htons(vlan); 2588 req->mask.vlan_tci = htons(VLAN_VID_MASK); 2589 /* af fills the destination mac addr */ 2590 eth_broadcast_addr((u8 *)&req->mask.dmac); 2591 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC); 2592 req->channel = pf->hw.rx_chan_base; 2593 req->intf = NIX_INTF_RX; 2594 req->vf = vf + 1; 2595 req->op = NIX_RX_ACTION_DEFAULT; 2596 req->vtag0_valid = true; 2597 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 2598 req->set_cntr = 1; 2599 2600 err = otx2_sync_mbox_msg(&pf->mbox); 2601 if (err) 2602 goto out; 2603 2604 /* tx */ 2605 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); 2606 if (!vtag_req) { 2607 err = -ENOMEM; 2608 goto out; 2609 } 2610 2611 /* configure tx vtag params */ 2612 vtag_req->vtag_size = VTAGSIZE_T4; 2613 vtag_req->cfg_type = 0; /* tx vlan cfg */ 2614 vtag_req->tx.cfg_vtag0 = 1; 2615 vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan; 2616 2617 err = otx2_sync_mbox_msg(&pf->mbox); 2618 if (err) 2619 goto out; 2620 2621 vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp 2622 (&pf->mbox.mbox, 0, &vtag_req->hdr); 2623 if (IS_ERR(vtag_rsp)) { 2624 err = PTR_ERR(vtag_rsp); 2625 goto out; 2626 } 2627 config->tx_vtag_idx = vtag_rsp->vtag0_idx; 2628 2629 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); 2630 if (!req) { 2631 err = -ENOMEM; 2632 goto out; 2633 } 2634 2635 eth_zero_addr((u8 *)&req->mask.dmac); 2636 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); 2637 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; 2638 req->features = BIT_ULL(NPC_DMAC); 2639 req->channel = pf->hw.tx_chan_base; 2640 req->intf = NIX_INTF_TX; 2641 req->vf = vf + 1; 2642 req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT; 2643 req->vtag0_def = vtag_rsp->vtag0_idx; 2644 req->vtag0_op = VTAG_INSERT; 2645 req->set_cntr = 1; 2646 2647 err = otx2_sync_mbox_msg(&pf->mbox); 2648 out: 2649 config->vlan = vlan; 2650 mutex_unlock(&pf->mbox.lock); 2651 return err; 2652 } 2653 2654 static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, 2655 __be16 proto) 2656 { 2657 struct otx2_nic *pf = netdev_priv(netdev); 2658 struct pci_dev *pdev = pf->pdev; 2659 2660 if (!netif_running(netdev)) 2661 return -EAGAIN; 2662 2663 if (vf >= pci_num_vf(pdev)) 2664 return -EINVAL; 2665 2666 /* qos is currently unsupported */ 2667 if (vlan >= VLAN_N_VID || qos) 2668 return -EINVAL; 2669 2670 if (proto != htons(ETH_P_8021Q)) 2671 return -EPROTONOSUPPORT; 2672 2673 if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT)) 2674 return -EOPNOTSUPP; 2675 2676 return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto); 2677 } 2678 2679 static int otx2_get_vf_config(struct net_device *netdev, int vf, 2680 struct 
ifla_vf_info *ivi)
{
        struct otx2_nic *pf = netdev_priv(netdev);
        struct pci_dev *pdev = pf->pdev;
        struct otx2_vf_config *config;

        if (!netif_running(netdev))
                return -EAGAIN;

        if (vf >= pci_num_vf(pdev))
                return -EINVAL;

        config = &pf->vf_configs[vf];
        ivi->vf = vf;
        ether_addr_copy(ivi->mac, config->mac);
        ivi->vlan = config->vlan;
        ivi->trusted = config->trusted;

        return 0;
}

static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
                            int qidx)
{
        u64 dma_addr;
        int err = 0;

        dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
                                     offset_in_page(xdpf->data), xdpf->len,
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(pf->dev, dma_addr))
                return -ENOMEM;

        err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
                                     qidx, OTX2_XDP_REDIRECT);
        if (!err) {
                otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
                xdp_return_frame(xdpf);
                return -ENOMEM;
        }
        return 0;
}

static int otx2_xdp_xmit(struct net_device *netdev, int n,
                         struct xdp_frame **frames, u32 flags)
{
        struct otx2_nic *pf = netdev_priv(netdev);
        int qidx = smp_processor_id();
        struct otx2_snd_queue *sq;
        int drops = 0, i;

        if (!netif_running(netdev))
                return -ENETDOWN;

        qidx += pf->hw.tx_queues;
        sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;

        /* Abort xmit if xdp queue is not setup */
        if (unlikely(!sq))
                return -ENXIO;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                int err;

                err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
                if (err)
                        drops++;
        }
        return n - drops;
}

static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
{
        struct net_device *dev = pf->netdev;
        bool if_up = netif_running(pf->netdev);
        struct bpf_prog *old_prog;

        if (prog && dev->mtu > MAX_XDP_MTU) {
                netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
                return -EOPNOTSUPP;
        }

        if (if_up)
                otx2_stop(pf->netdev);

        old_prog = xchg(&pf->xdp_prog, prog);

        if (old_prog)
                bpf_prog_put(old_prog);

        if (pf->xdp_prog)
                bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);

        /* Network stack and XDP share the same rx queues.
         * Use separate tx queues for XDP and network stack.
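         * Note: setting xdp_queues to rx_queues below is what makes
         * otx2_open() size the SQ array (via otx2_get_total_tx_queues())
         * large enough for the XDP queues that otx2_xdp_xmit() selects
         * as 'tx_queues + smp_processor_id()'.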
2779 */ 2780 if (pf->xdp_prog) { 2781 pf->hw.xdp_queues = pf->hw.rx_queues; 2782 xdp_features_set_redirect_target(dev, false); 2783 } else { 2784 pf->hw.xdp_queues = 0; 2785 xdp_features_clear_redirect_target(dev); 2786 } 2787 2788 if (if_up) 2789 otx2_open(pf->netdev); 2790 2791 return 0; 2792 } 2793 2794 static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp) 2795 { 2796 struct otx2_nic *pf = netdev_priv(netdev); 2797 2798 switch (xdp->command) { 2799 case XDP_SETUP_PROG: 2800 return otx2_xdp_setup(pf, xdp->prog); 2801 case XDP_SETUP_XSK_POOL: 2802 return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id); 2803 default: 2804 return -EINVAL; 2805 } 2806 } 2807 2808 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf, 2809 int req_perm) 2810 { 2811 struct set_vf_perm *req; 2812 int rc; 2813 2814 mutex_lock(&pf->mbox.lock); 2815 req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox); 2816 if (!req) { 2817 rc = -ENOMEM; 2818 goto out; 2819 } 2820 2821 /* Let AF reset VF permissions as sriov is disabled */ 2822 if (req_perm == OTX2_RESET_VF_PERM) { 2823 req->flags |= RESET_VF_PERM; 2824 } else if (req_perm == OTX2_TRUSTED_VF) { 2825 if (pf->vf_configs[vf].trusted) 2826 req->flags |= VF_TRUSTED; 2827 } 2828 2829 req->vf = vf; 2830 rc = otx2_sync_mbox_msg(&pf->mbox); 2831 out: 2832 mutex_unlock(&pf->mbox.lock); 2833 return rc; 2834 } 2835 2836 static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf, 2837 bool enable) 2838 { 2839 struct otx2_nic *pf = netdev_priv(netdev); 2840 struct pci_dev *pdev = pf->pdev; 2841 int rc; 2842 2843 if (vf >= pci_num_vf(pdev)) 2844 return -EINVAL; 2845 2846 if (pf->vf_configs[vf].trusted == enable) 2847 return 0; 2848 2849 pf->vf_configs[vf].trusted = enable; 2850 rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF); 2851 2852 if (rc) { 2853 pf->vf_configs[vf].trusted = !enable; 2854 } else { 2855 netdev_info(pf->netdev, "VF %d is %strusted\n", 2856 vf, enable ? "" : "not "); 2857 otx2_set_rx_mode(netdev); 2858 } 2859 2860 return rc; 2861 } 2862 2863 static const struct net_device_ops otx2_netdev_ops = { 2864 .ndo_open = otx2_open, 2865 .ndo_stop = otx2_stop, 2866 .ndo_start_xmit = otx2_xmit, 2867 .ndo_select_queue = otx2_select_queue, 2868 .ndo_fix_features = otx2_fix_features, 2869 .ndo_set_mac_address = otx2_set_mac_address, 2870 .ndo_change_mtu = otx2_change_mtu, 2871 .ndo_set_rx_mode = otx2_set_rx_mode, 2872 .ndo_set_features = otx2_set_features, 2873 .ndo_tx_timeout = otx2_tx_timeout, 2874 .ndo_get_stats64 = otx2_get_stats64, 2875 .ndo_eth_ioctl = otx2_ioctl, 2876 .ndo_set_vf_mac = otx2_set_vf_mac, 2877 .ndo_set_vf_vlan = otx2_set_vf_vlan, 2878 .ndo_get_vf_config = otx2_get_vf_config, 2879 .ndo_bpf = otx2_xdp, 2880 .ndo_xsk_wakeup = otx2_xsk_wakeup, 2881 .ndo_xdp_xmit = otx2_xdp_xmit, 2882 .ndo_setup_tc = otx2_setup_tc, 2883 .ndo_set_vf_trust = otx2_ndo_set_vf_trust, 2884 }; 2885 2886 int otx2_wq_init(struct otx2_nic *pf) 2887 { 2888 pf->otx2_wq = create_singlethread_workqueue("otx2_wq"); 2889 if (!pf->otx2_wq) 2890 return -ENOMEM; 2891 2892 INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler); 2893 INIT_WORK(&pf->reset_task, otx2_reset_task); 2894 return 0; 2895 } 2896 2897 int otx2_check_pf_usable(struct otx2_nic *nic) 2898 { 2899 u64 rev; 2900 2901 rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM)); 2902 rev = (rev >> 12) & 0xFF; 2903 /* Check if AF has setup revision for RVUM block, 2904 * otherwise this driver probe should be deferred 2905 * until AF driver comes up. 
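 * A zero revision here means the AF driver has not yet configured the
 * RVUM block, so -EPROBE_DEFER is returned below and the PCI core will
 * retry this probe once the AF is up.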
         */
        if (!rev) {
                dev_warn(nic->dev,
                         "AF is not initialized, deferring probe\n");
                return -EPROBE_DEFER;
        }
        return 0;
}

int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
        struct otx2_hw *hw = &pf->hw;
        int num_vec, err;

        /* NPA interrupts are not registered, so alloc only
         * up to NIX vector offset.
         */
        num_vec = hw->nix_msixoff;
        num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

        otx2_disable_mbox_intr(pf);
        pci_free_irq_vectors(hw->pdev);
        err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
                        __func__, num_vec);
                return err;
        }

        return otx2_register_mbox_intr(pf, false);
}
EXPORT_SYMBOL(otx2_realloc_msix_vectors);

static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
{
        int i;

        pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
                                      sizeof(struct otx2_vf_config),
                                      GFP_KERNEL);
        if (!pf->vf_configs)
                return -ENOMEM;

        for (i = 0; i < pf->total_vfs; i++) {
                pf->vf_configs[i].pf = pf;
                pf->vf_configs[i].intf_down = true;
                pf->vf_configs[i].trusted = false;
                INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
                                  otx2_vf_link_event_task);
        }

        return 0;
}

static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
{
        int i;

        if (!pf->vf_configs)
                return;

        for (i = 0; i < pf->total_vfs; i++) {
                cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
                otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
        }
}

int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
{
        struct device *dev = &pdev->dev;
        struct otx2_hw *hw = &pf->hw;
        int num_vec, err;

        num_vec = pci_msix_vec_count(pdev);
        hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
                                          GFP_KERNEL);
        if (!hw->irq_name)
                return -ENOMEM;

        hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
                                         sizeof(cpumask_var_t), GFP_KERNEL);
        if (!hw->affinity_mask)
                return -ENOMEM;

        /* Map CSRs */
        pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!pf->reg_base) {
                dev_err(dev, "Unable to map physical function CSRs, aborting\n");
                return -ENOMEM;
        }

        err = otx2_check_pf_usable(pf);
        if (err)
                return err;

        err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
                                    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
                        __func__, num_vec);
                return err;
        }

        otx2_setup_dev_hw_settings(pf);

        /* Init PF <=> AF mailbox stuff */
        err = otx2_pfaf_mbox_init(pf);
        if (err)
                goto err_free_irq_vectors;

        /* Register mailbox interrupt */
        err = otx2_register_mbox_intr(pf, true);
        if (err)
                goto err_mbox_destroy;

        /* Request AF to attach NPA and NIX LFs to this PF.
         * NIX and NPA LFs are needed for this PF to function as a NIC.
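         * The attach request is sent over the PF <=> AF mailbox set up
         * above; on failure the error path below detaches the LFs and
         * tears down the mailbox and IRQ vectors in reverse order.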
3023 */ 3024 err = otx2_attach_npa_nix(pf); 3025 if (err) 3026 goto err_disable_mbox_intr; 3027 3028 err = otx2_realloc_msix_vectors(pf); 3029 if (err) 3030 goto err_detach_rsrc; 3031 3032 err = cn10k_lmtst_init(pf); 3033 if (err) 3034 goto err_detach_rsrc; 3035 3036 return 0; 3037 3038 err_detach_rsrc: 3039 if (pf->hw.lmt_info) 3040 free_percpu(pf->hw.lmt_info); 3041 if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) 3042 qmem_free(pf->dev, pf->dync_lmt); 3043 otx2_detach_resources(&pf->mbox); 3044 err_disable_mbox_intr: 3045 otx2_disable_mbox_intr(pf); 3046 err_mbox_destroy: 3047 otx2_pfaf_mbox_destroy(pf); 3048 err_free_irq_vectors: 3049 pci_free_irq_vectors(hw->pdev); 3050 3051 return err; 3052 } 3053 EXPORT_SYMBOL(otx2_init_rsrc); 3054 3055 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3056 { 3057 struct device *dev = &pdev->dev; 3058 int err, qcount, qos_txqs; 3059 struct net_device *netdev; 3060 struct otx2_nic *pf; 3061 struct otx2_hw *hw; 3062 3063 err = pcim_enable_device(pdev); 3064 if (err) { 3065 dev_err(dev, "Failed to enable PCI device\n"); 3066 return err; 3067 } 3068 3069 err = pcim_request_all_regions(pdev, DRV_NAME); 3070 if (err) { 3071 dev_err(dev, "PCI request regions failed 0x%x\n", err); 3072 return err; 3073 } 3074 3075 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); 3076 if (err) { 3077 dev_err(dev, "DMA mask config failed, abort\n"); 3078 return err; 3079 } 3080 3081 pci_set_master(pdev); 3082 3083 /* Set number of queues */ 3084 qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT); 3085 qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); 3086 3087 netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount); 3088 if (!netdev) 3089 return -ENOMEM; 3090 3091 pci_set_drvdata(pdev, netdev); 3092 SET_NETDEV_DEV(netdev, &pdev->dev); 3093 pf = netdev_priv(netdev); 3094 pf->netdev = netdev; 3095 pf->pdev = pdev; 3096 pf->dev = dev; 3097 pf->total_vfs = pci_sriov_get_totalvfs(pdev); 3098 pf->flags |= OTX2_FLAG_INTF_DOWN; 3099 3100 hw = &pf->hw; 3101 hw->pdev = pdev; 3102 hw->rx_queues = qcount; 3103 hw->tx_queues = qcount; 3104 hw->non_qos_queues = qcount; 3105 hw->max_queues = qcount; 3106 hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; 3107 /* Use CQE of 128 byte descriptor size by default */ 3108 hw->xqe_size = 128; 3109 3110 err = otx2_init_rsrc(pdev, pf); 3111 if (err) 3112 goto err_free_netdev; 3113 3114 err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues); 3115 if (err) 3116 goto err_detach_rsrc; 3117 3118 /* Assign default mac address */ 3119 otx2_get_mac_from_af(netdev); 3120 3121 /* Don't check for error. Proceed without ptp */ 3122 otx2_ptp_init(pf); 3123 3124 /* NPA's pool is a stack to which SW frees buffer pointers via Aura. 3125 * HW allocates buffer pointer from stack and uses it for DMA'ing 3126 * ingress packet. In some scenarios HW can free back allocated buffer 3127 * pointers to pool. This makes it impossible for SW to maintain a 3128 * parallel list where physical addresses of buffer pointers (IOVAs) 3129 * given to HW can be saved for later reference. 3130 * 3131 * So the only way to convert Rx packet's buffer address is to use 3132 * IOMMU's iova_to_phys() handler which translates the address by 3133 * walking through the translation tables. 
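 * The domain pointer fetched below is cached in the driver private
 * struct so the Rx path can perform that translation without looking
 * up the device's IOMMU domain for every packet.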
3134 */ 3135 pf->iommu_domain = iommu_get_domain_for_dev(dev); 3136 3137 netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | 3138 NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | 3139 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 3140 NETIF_F_GSO_UDP_L4); 3141 netdev->features |= netdev->hw_features; 3142 3143 err = otx2_mcam_flow_init(pf); 3144 if (err) 3145 goto err_ptp_destroy; 3146 3147 otx2_set_hw_capabilities(pf); 3148 3149 err = cn10k_mcs_init(pf); 3150 if (err) 3151 goto err_del_mcam_entries; 3152 3153 if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT) 3154 netdev->hw_features |= NETIF_F_NTUPLE; 3155 3156 if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT) 3157 netdev->priv_flags |= IFF_UNICAST_FLT; 3158 3159 /* Support TSO on tag interface */ 3160 netdev->vlan_features |= netdev->features; 3161 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 3162 NETIF_F_HW_VLAN_STAG_TX; 3163 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) 3164 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | 3165 NETIF_F_HW_VLAN_STAG_RX; 3166 netdev->features |= netdev->hw_features; 3167 3168 /* HW supports tc offload but mutually exclusive with n-tuple filters */ 3169 if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT) 3170 netdev->hw_features |= NETIF_F_HW_TC; 3171 3172 netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; 3173 3174 netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS); 3175 netdev->watchdog_timeo = OTX2_TX_TIMEOUT; 3176 3177 netdev->netdev_ops = &otx2_netdev_ops; 3178 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; 3179 3180 netdev->min_mtu = OTX2_MIN_MTU; 3181 netdev->max_mtu = otx2_get_max_mtu(pf); 3182 hw->max_mtu = netdev->max_mtu; 3183 3184 /* reset CGX/RPM MAC stats */ 3185 otx2_reset_mac_stats(pf); 3186 3187 err = cn10k_ipsec_init(netdev); 3188 if (err) 3189 goto err_mcs_free; 3190 3191 err = register_netdev(netdev); 3192 if (err) { 3193 dev_err(dev, "Failed to register netdevice\n"); 3194 goto err_ipsec_clean; 3195 } 3196 3197 err = otx2_wq_init(pf); 3198 if (err) 3199 goto err_unreg_netdev; 3200 3201 otx2_set_ethtool_ops(netdev); 3202 3203 err = otx2_init_tc(pf); 3204 if (err) 3205 goto err_mcam_flow_del; 3206 3207 err = otx2_register_dl(pf); 3208 if (err) 3209 goto err_mcam_flow_del; 3210 3211 /* Initialize SR-IOV resources */ 3212 err = otx2_sriov_vfcfg_init(pf); 3213 if (err) 3214 goto err_pf_sriov_init; 3215 3216 /* Enable link notifications */ 3217 otx2_cgx_config_linkevents(pf, true); 3218 3219 pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL); 3220 if (!pf->af_xdp_zc_qidx) { 3221 err = -ENOMEM; 3222 goto err_sriov_cleannup; 3223 } 3224 3225 #ifdef CONFIG_DCB 3226 err = otx2_dcbnl_set_ops(netdev); 3227 if (err) 3228 goto err_free_zc_bmap; 3229 #endif 3230 3231 otx2_qos_init(pf, qos_txqs); 3232 3233 return 0; 3234 3235 #ifdef CONFIG_DCB 3236 err_free_zc_bmap: 3237 bitmap_free(pf->af_xdp_zc_qidx); 3238 #endif 3239 err_sriov_cleannup: 3240 otx2_sriov_vfcfg_cleanup(pf); 3241 err_pf_sriov_init: 3242 otx2_shutdown_tc(pf); 3243 err_mcam_flow_del: 3244 otx2_mcam_flow_del(pf); 3245 err_unreg_netdev: 3246 unregister_netdev(netdev); 3247 err_ipsec_clean: 3248 cn10k_ipsec_clean(pf); 3249 err_mcs_free: 3250 cn10k_mcs_free(pf); 3251 err_del_mcam_entries: 3252 otx2_mcam_flow_del(pf); 3253 err_ptp_destroy: 3254 otx2_ptp_destroy(pf); 3255 err_detach_rsrc: 3256 if (pf->hw.lmt_info) 3257 free_percpu(pf->hw.lmt_info); 3258 if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) 3259 qmem_free(pf->dev, pf->dync_lmt); 3260 otx2_detach_resources(&pf->mbox); 3261 otx2_disable_mbox_intr(pf); 3262 
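        /* Mailbox interrupt is disabled above, so the PF-AF mailbox can
         * now be destroyed safely.
         */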
otx2_pfaf_mbox_destroy(pf); 3263 pci_free_irq_vectors(hw->pdev); 3264 err_free_netdev: 3265 pci_set_drvdata(pdev, NULL); 3266 free_netdev(netdev); 3267 return err; 3268 } 3269 3270 static void otx2_vf_link_event_task(struct work_struct *work) 3271 { 3272 struct otx2_vf_config *config; 3273 struct cgx_link_info_msg *req; 3274 struct mbox_msghdr *msghdr; 3275 struct delayed_work *dwork; 3276 struct otx2_nic *pf; 3277 int vf_idx; 3278 3279 config = container_of(work, struct otx2_vf_config, 3280 link_event_work.work); 3281 vf_idx = config - config->pf->vf_configs; 3282 pf = config->pf; 3283 3284 if (config->intf_down) 3285 return; 3286 3287 mutex_lock(&pf->mbox.lock); 3288 3289 dwork = &config->link_event_work; 3290 3291 if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) { 3292 schedule_delayed_work(dwork, msecs_to_jiffies(100)); 3293 mutex_unlock(&pf->mbox.lock); 3294 return; 3295 } 3296 3297 msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx, 3298 sizeof(*req), sizeof(struct msg_rsp)); 3299 if (!msghdr) { 3300 dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx); 3301 mutex_unlock(&pf->mbox.lock); 3302 return; 3303 } 3304 3305 req = (struct cgx_link_info_msg *)msghdr; 3306 req->hdr.id = MBOX_MSG_CGX_LINK_EVENT; 3307 req->hdr.sig = OTX2_MBOX_REQ_SIG; 3308 req->hdr.pcifunc = pf->pcifunc; 3309 memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); 3310 3311 otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx); 3312 3313 otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx); 3314 3315 mutex_unlock(&pf->mbox.lock); 3316 } 3317 3318 static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs) 3319 { 3320 struct net_device *netdev = pci_get_drvdata(pdev); 3321 struct otx2_nic *pf = netdev_priv(netdev); 3322 int ret; 3323 3324 /* Init PF <=> VF mailbox stuff */ 3325 ret = otx2_pfvf_mbox_init(pf, numvfs); 3326 if (ret) 3327 return ret; 3328 3329 ret = otx2_register_pfvf_mbox_intr(pf, numvfs); 3330 if (ret) 3331 goto free_mbox; 3332 3333 ret = otx2_pf_flr_init(pf, numvfs); 3334 if (ret) 3335 goto free_intr; 3336 3337 ret = otx2_register_flr_me_intr(pf, numvfs); 3338 if (ret) 3339 goto free_flr; 3340 3341 ret = pci_enable_sriov(pdev, numvfs); 3342 if (ret) 3343 goto free_flr_intr; 3344 3345 return numvfs; 3346 free_flr_intr: 3347 otx2_disable_flr_me_intr(pf); 3348 free_flr: 3349 otx2_flr_wq_destroy(pf); 3350 free_intr: 3351 otx2_disable_pfvf_mbox_intr(pf, numvfs); 3352 free_mbox: 3353 otx2_pfvf_mbox_destroy(pf); 3354 return ret; 3355 } 3356 3357 static int otx2_sriov_disable(struct pci_dev *pdev) 3358 { 3359 struct net_device *netdev = pci_get_drvdata(pdev); 3360 struct otx2_nic *pf = netdev_priv(netdev); 3361 int numvfs = pci_num_vf(pdev); 3362 3363 if (!numvfs) 3364 return 0; 3365 3366 pci_disable_sriov(pdev); 3367 3368 otx2_disable_flr_me_intr(pf); 3369 otx2_flr_wq_destroy(pf); 3370 otx2_disable_pfvf_mbox_intr(pf, numvfs); 3371 otx2_pfvf_mbox_destroy(pf); 3372 3373 return 0; 3374 } 3375 3376 static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs) 3377 { 3378 if (numvfs == 0) 3379 return otx2_sriov_disable(pdev); 3380 else 3381 return otx2_sriov_enable(pdev, numvfs); 3382 } 3383 3384 static void otx2_ndc_sync(struct otx2_nic *pf) 3385 { 3386 struct mbox *mbox = &pf->mbox; 3387 struct ndc_sync_op *req; 3388 3389 mutex_lock(&mbox->lock); 3390 3391 req = otx2_mbox_alloc_msg_ndc_sync_op(mbox); 3392 if (!req) { 3393 mutex_unlock(&mbox->lock); 3394 return; 3395 } 3396 3397 req->nix_lf_tx_sync = 1; 3398 req->nix_lf_rx_sync = 1; 3399 req->npa_lf_sync 
= 1;

        if (otx2_sync_mbox_msg(mbox))
                dev_err(pf->dev, "NDC sync operation failed\n");

        mutex_unlock(&mbox->lock);
}

static void otx2_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct otx2_nic *pf;

        if (!netdev)
                return;

        pf = netdev_priv(netdev);

        pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

        if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
                otx2_config_hw_tx_tstamp(pf, false);
        if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
                otx2_config_hw_rx_tstamp(pf, false);

        /* Disable 802.3x pause frames */
        if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
            (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
                pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
                pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
                otx2_config_pause_frm(pf);
        }

#ifdef CONFIG_DCB
        /* Disable PFC config */
        if (pf->pfc_en) {
                pf->pfc_en = 0;
                otx2_config_priority_flow_ctrl(pf);
        }
#endif
        cancel_work_sync(&pf->reset_task);
        /* Disable link notifications */
        otx2_cgx_config_linkevents(pf, false);

        otx2_unregister_dl(pf);
        unregister_netdev(netdev);
        cn10k_ipsec_clean(pf);
        cn10k_mcs_free(pf);
        otx2_sriov_disable(pf->pdev);
        otx2_sriov_vfcfg_cleanup(pf);
        if (pf->otx2_wq)
                destroy_workqueue(pf->otx2_wq);

        otx2_ptp_destroy(pf);
        otx2_mcam_flow_del(pf);
        otx2_shutdown_tc(pf);
        otx2_shutdown_qos(pf);
        otx2_ndc_sync(pf);
        otx2_detach_resources(&pf->mbox);
        if (pf->hw.lmt_info)
                free_percpu(pf->hw.lmt_info);
        if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
                qmem_free(pf->dev, pf->dync_lmt);
        otx2_disable_mbox_intr(pf);
        otx2_pfaf_mbox_destroy(pf);
        pci_free_irq_vectors(pf->pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
}

static struct pci_driver otx2_pf_driver = {
        .name = DRV_NAME,
        .id_table = otx2_pf_id_table,
        .probe = otx2_probe,
        .shutdown = otx2_remove,
        .remove = otx2_remove,
        .sriov_configure = otx2_sriov_configure
};

static int __init otx2_rvupf_init_module(void)
{
        pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

        return pci_register_driver(&otx2_pf_driver);
}

static void __exit otx2_rvupf_cleanup_module(void)
{
        pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);