/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/pci.h>
#include <linux/iommu.h>

#include <mbox.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF		0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF		0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM			2
#define PCI_MBOX_BAR_NUM			4

#define NAME_SIZE				32

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START			0x00
#define NIX_LF_CINT_VEC_START			0x40
#define NIX_LF_GINT_VEC				0x80
#define NIX_LF_ERR_VEC				0x81
#define NIX_LF_POISON_VEC			0x82

/* RSS configuration */
struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
	u8  ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE	44	/* 352 bit key */
	u8  key[RSS_HASH_KEY_SIZE];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

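/* These levels and codes are reported in the CQE parse result of a
 * received packet. A sketch of how an RX error handler is expected to
 * consume them (illustrative only; the errlev/errcode field names are
 * assumed from the NIX CQE parse structure, and drv_stats is defined
 * further below in struct otx2_hw):
 *
 *	if (parse->errlev == NPC_ERRLVL_RE &&
 *	    (parse->errcode == ERRCODE_FCS ||
 *	     parse->errcode == ERRCODE_FCS_RCV))
 *		atomic_inc(&pfvf->hw.drv_stats.rx_fcs_errs);
 *	else if (parse->errlev == NPC_ERRLVL_NIX &&
 *		 parse->errcode == ERRCODE_OL4_CSUM)
 *		atomic_inc(&pfvf->hw.drv_stats.rx_csum_errs);
 */
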
/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,
	TX_BCAST	= 0x1,
	TX_MCAST	= 0x2,
	TX_DROP		= 0x3,
	TX_OCTS		= 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,
	RX_UCAST	= 0x1,
	RX_BCAST	= 0x2,
	RX_MCAST	= 0x3,
	RX_DROP		= 0x4,
	RX_DROP_OCTS	= 0x5,
	RX_FCS		= 0x6,
	RX_ERR		= 0x7,
	RX_DRP_BCAST	= 0x8,
	RX_DRP_MCAST	= 0x9,
	RX_DRP_L3BCAST	= 0xa,
	RX_DRP_L3MCAST	= 0xb,
	RX_STATS_ENUM_LAST,
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox	mbox;
	struct work_struct	mbox_wrk;
	struct otx2_mbox	mbox_up;
	struct work_struct	mbox_up_wrk;
	struct otx2_nic		*pfvf;
	void			*bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex		lock;	/* serialize mailbox access */
	int			num_msgs; /* mbox number of messages */
	int			up_num_msgs; /* mbox_up number of messages */
};

struct otx2_hw {
	struct pci_dev		*pdev;
	struct otx2_rss_info	rss_info;
	u16			rx_queues;
	u16			tx_queues;
	u16			max_queues;
	u16			pool_cnt;
	u16			rqpool_cnt;
	u16			sqpool_cnt;

	/* NPA */
	u32			stack_pg_ptrs;  /* No of ptrs per stack page */
	u32			stack_pg_bytes; /* Size of stack page */
	u16			sqb_size;

	/* NIX */
	u16		txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];

	/* HW settings, coalescing etc */
	u16			rx_chan_base;
	u16			tx_chan_base;
	u16			cq_qcount_wait;
	u16			cq_ecount_wait;
	u16			rq_skid;
	u8			cq_time_wait;

	/* For TSO segmentation */
	u8			lso_tsov4_idx;
	u8			lso_tsov6_idx;
	u8			hw_tso;

	/* MSI-X */
	u8			cint_cnt; /* CQ interrupt count */
	u16			npa_msixoff; /* Offset of NPA vectors */
	u16			nix_msixoff; /* Offset of NIX vectors */
	char			*irq_name;
	cpumask_var_t		*affinity_mask;

	/* Stats */
	struct otx2_dev_stats	dev_stats;
	struct otx2_drv_stats	drv_stats;
	u64			cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64			cgx_tx_stats[CGX_TX_STATS_COUNT];
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* interface is either configured or not */
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
};

struct otx2_nic {
	void __iomem		*reg_base;
	struct net_device	*netdev;
	void			*iommu_domain;
	u16			max_frs;
	u16			rbsize; /* Receive buffer size */

#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
	u64			flags;

	struct otx2_qset	qset;
	struct otx2_hw		hw;
	struct pci_dev		*pdev;
	struct device		*dev;

	/* Mbox */
	struct mbox		mbox;
	struct mbox		*mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8			total_vfs;
	u16			pcifunc; /* RVU PF_FUNC */
	u16			bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config	*vf_configs;
	struct cgx_link_user_info linfo;

	u64			reset_count;
	struct work_struct	reset_task;
	struct workqueue_struct	*flr_wq;
	struct flr_work		*flr_wrk;
	struct refill_work	*refill_wrk;
	struct workqueue_struct	*otx2_wq;
	struct work_struct	rx_mode_work;

	/* Ethtool stuff */
	u32			msg_enable;

	/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int			nix_blkaddr;
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	hw->hw_tso = true;

	if (is_96xx_A0(pfvf->pdev)) {
		hw->hw_tso = false;

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, this silicon requires a minimum of
		 * 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
}

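/* Illustrative helper, not part of the original driver: pfvf->flags is a
 * plain u64 bitmask built from the BIT_ULL() values defined alongside
 * struct otx2_nic above, so state checks are simple bit tests.
 */
static inline bool otx2_example_intf_is_up(struct otx2_nic *pfvf)
{
	/* TX and stats paths are expected to bail out while the
	 * interface is marked down.
	 */
	return !(pfvf->flags & OTX2_FLAG_INTF_DOWN);
}
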
/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}

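/* Illustrative helper, not part of the original driver: register offsets
 * passed to the accessors above come from otx2_reg.h with the block type
 * encoded in the upper bits, which otx2_get_regaddr() replaces with the
 * actual block address. NIX_LF_CINTX_ENA_W1S/W1C are assumed here to be
 * the per-CQ-interrupt enable set/clear registers.
 */
static inline void otx2_example_cq_intr_enable(struct otx2_nic *pfvf,
					       int qidx, bool enable)
{
	if (enable)
		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
	else
		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
}
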
/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* In the absence of an API for 128-bit I/O memory accesses on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

static inline u64 otx2_lmt_flush(uint64_t addr)
{
	u64 result = 0;

	__asm__ volatile(".cpu generic+lse\n"
			 "ldeor xzr,%x[rf],[%[rs]]"
			 : [rf]"=r"(result)
			 : [rs]"r"(addr));
	return result;
}

#else
#define otx2_write128(lo, hi, addr)
#define otx2_atomic64_add(incr, ptr)		({ *ptr += incr; })
#define otx2_lmt_flush(addr)			({ 0; })
#endif

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
					   NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
				     int aura, s64 buf)
{
	otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
		      otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
}

/* Update page ref count */
static inline void otx2_get_page(struct otx2_pool *pool)
{
	if (!pool->page)
		return;

	if (pool->pageref)
		page_ref_add(pool->page, pool->pageref);
	pool->pageref = 0;
	pool->page = NULL;
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	return req;							\
}

MBOX_MESSAGES
#undef M

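/* Illustrative helper, not part of the original driver: shows the request/
 * response flow built from the generated allocators above and
 * otx2_sync_mbox_msg(). The nix_lf_start_rx message is assumed to be one
 * of the msg_req/msg_rsp pairs listed in MBOX_MESSAGES (mbox.h), and
 * mbox.lock serializes access as noted in struct mbox.
 */
static inline int otx2_example_start_rx(struct otx2_nic *pfvf)
{
	struct msg_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	/* Request is prepared in the mbox bounce buffer */
	req = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	/* Send to AF, then wait for and validate the response */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
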
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);		\

MBOX_UP_CGX_MESSAGES
#undef M

/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define	RVU_PFVF_PF_SHIFT	10
#define	RVU_PFVF_PF_MASK	0x3F
#define	RVU_PFVF_FUNC_SHIFT	0
#define	RVU_PFVF_FUNC_MASK	0x3FF

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

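/* Illustrative helper, not part of the original driver: an RVU PF_FUNC
 * packs the PF number into bits 15:10 and the function number into bits
 * 9:0, where func 0 is the PF itself and func N is VF N-1 (convention
 * assumed from the RVU AF driver). For example, pcifunc 0x0c01 decodes
 * to PF 3, VF index 0.
 */
static inline int otx2_example_get_vf_idx(u16 pcifunc)
{
	int func = pcifunc & RVU_PFVF_FUNC_MASK;

	return func ? func - 1 : -1;	/* -1: the PF itself, not a VF */
}
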
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			   gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
#endif /* OTX2_COMMON_H */