/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
#include <net/page_pool/helpers.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
#include "cn10k_ipsec.h"

/* IPv4 "more fragments" flag bit */
#define IPV4_FLAG_MORE				0x20

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF		0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF		0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF	0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00

#define PCI_DEVID_OCTEONTX2_SDP_REP		0xA0F7

/* PCI BAR numbers */
#define PCI_CFG_REG_BAR_NUM			2
#define PCI_MBOX_BAR_NUM			4

#define NAME_SIZE				32

#ifdef CONFIG_DCB
/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX			8
#endif

/* Number of segments per SG structure */
#define MAX_SEGS_PER_SG				3

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START			0x00
#define NIX_LF_CINT_VEC_START			0x40
#define NIX_LF_GINT_VEC				0x80
#define NIX_LF_ERR_VEC				0x81
#define NIX_LF_POISON_VEC			0x82
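
/* Illustrative sketch (not part of this header's API): the NIX_LF_*
 * values above are offsets from the NIX MSI-X vector base that the AF
 * reports for this LF, so the IRQ for the completion interrupt of
 * queue 'qidx' is typically resolved as:
 *
 *	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START + qidx;
 *	irq = pci_irq_vector(pf->pdev, vec);
 *
 * 'nix_msixoff' is the offset returned in the MSIX_OFFSET mbox
 * response (see struct otx2_hw below).
 */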

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID	2000

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))

struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};

/* RSS configuration */
struct otx2_rss_ctx {
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
#define RSS_HASH_KEY_SIZE	44   /* 352 bit key */
	u8 key[RSS_HASH_KEY_SIZE];
	struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

enum otx2_xdp_action {
	OTX2_XDP_TX	  = BIT(0),
	OTX2_XDP_REDIRECT = BIT(1),
	OTX2_AF_XDP_FRAME = BIT(2),
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox mbox;
	struct work_struct mbox_wrk;
	struct otx2_mbox mbox_up;
	struct work_struct mbox_up_wrk;
	struct otx2_nic *pfvf;
	void *bbuf_base;	/* Bounce buffer for mbox memory */
	struct mutex lock;	/* serialize mailbox access */
	int num_msgs;		/* mbox number of messages */
	int up_num_msgs;	/* mbox_up number of messages */
};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)
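
/* Illustrative sketch, assuming FIELD_PREP() from <linux/bitfield.h>:
 * a shaping profile is packed into NIX_AF_TL*_PIR using the masks
 * above, roughly as
 *
 *	regval = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
 *		 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
 *		 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
 *		 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
 *		 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
 *
 * with the low bit set to enable the profile.
 */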

struct otx2_hw {
	struct pci_dev *pdev;
	struct otx2_rss_info rss_info;
	u16 rx_queues;
	u16 tx_queues;
	u16 xdp_queues;
	u16 tc_tx_queues;
	u16 non_qos_queues;	/* tx queues plus xdp queues */
	u16 max_queues;
	u16 pool_cnt;
	u16 rqpool_cnt;
	u16 sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN	2048
	u16 rbuf_len;
	u32 xqe_size;

	/* NPA */
	u32 stack_pg_ptrs;	/* No of ptrs per stack page */
	u32 stack_pg_bytes;	/* Size of stack page */
	u16 sqb_size;

	/* NIX */
	u8 txschq_link_cfg_lvl;
	u8 txschq_cnt[NIX_TXSCH_LVL_CNT];
	u8 txschq_aggr_lvl_rr_prio;
	u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16 matchall_ipolicer;
	u32 dwrr_mtu;
	u32 max_mtu;
	u8 smq_link_type;

	/* HW settings, coalescing etc */
	u16 rx_chan_base;
	u16 tx_chan_base;
	u8 rx_chan_cnt;
	u8 tx_chan_cnt;
	u16 cq_qcount_wait;
	u16 cq_ecount_wait;
	u16 rq_skid;
	u8 cq_time_wait;

	/* Segmentation */
	u8 lso_tsov4_idx;
	u8 lso_tsov6_idx;
	u8 lso_udpv4_idx;
	u8 lso_udpv6_idx;

	/* RSS */
	u8 flowkey_alg_idx;

	/* MSI-X */
	u8 cint_cnt;		/* CQ interrupt count */
	u16 npa_msixoff;	/* Offset of NPA vectors */
	u16 nix_msixoff;	/* Offset of NIX vectors */
	char *irq_name;
	cpumask_var_t *affinity_mask;

	/* Stats */
	struct otx2_dev_stats dev_stats;
	struct otx2_drv_stats drv_stats;
	u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64 cgx_fec_corr_blks;
	u64 cgx_fec_uncorr_blks;
	u8 cgx_links;		/* No. of CGX links present in HW */
	u8 lbk_links;		/* No. of LBK links present in HW */
	u8 tx_link;		/* Transmit channel link number */
#define HW_TSO			0
#define CN10K_MBOX		1
#define CN10K_LMTST		2
#define CN10K_RPM		3
#define CN10K_PTP_ONESTEP	4
#define CN10K_HW_MACSEC		5
#define QOS_CIR_PIR_SUPPORT	6
	unsigned long cap_flag;

#define LMT_LINE_SIZE		128
#define LMT_BURST_SIZE		32 /* 32 LMTST lines for burst SQE flush */
	u64 *lmt_base;
	struct otx2_lmt_info __percpu *lmt_info;
};

enum vfperm {
	OTX2_RESET_VF_PERM,
	OTX2_TRUSTED_VF,
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down;		/* true while the interface is down (not configured) */
	u8 mac[ETH_ALEN];
	u16 vlan;
	int tx_vtag_idx;
	bool trusted;
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
	struct napi_struct *napi;
};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
	__be16 seconds_msb;	/* 16 bits + */
	__be32 seconds_lsb;	/* 32 bits = 48 bits */
	__be32 nanoseconds;
} __packed;
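
/* A minimal sketch of consuming the structure above: the 48-bit
 * seconds field spans two big-endian words, so reassembly looks like
 *
 *	u64 sec  = ((u64)be16_to_cpu(ts->seconds_msb) << 32) |
 *		   be32_to_cpu(ts->seconds_lsb);
 *	u32 nsec = be32_to_cpu(ts->nanoseconds);
 */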

struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
	u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
	struct delayed_work synctstamp_work;
	u64 tstamp;
	u32 base_ns;
};

#define OTX2_HW_TIMESTAMP_LEN	8

struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

struct otx2_flow_config {
	u16 *flow_ent;
	u16 *def_ent;
	u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT		16
#define OTX2_DEFAULT_UNICAST_FLOWS	4
#define OTX2_MAX_VLAN_FLOWS		1
#define OTX2_MAX_TC_FLOWS		OTX2_DEFAULT_FLOWCOUNT
	u16 unicast_offset;
	u16 rx_vlan_offset;
	u16 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS	2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX	0
#define OTX2_VF_VLAN_TX_INDEX	1
	u32 *bmap_to_dmacindex;
	unsigned long *dmacflt_bmap;
	struct list_head flow_list;
	u32 dmacflt_max_flows;
	u16 max_flows;
	refcount_t mark_flows;
	struct list_head flow_list_tc;
	u8 ucast_flt_cnt;
	bool ntuple;
	u16 ntuple_cnt;
};

struct dev_hw_ops {
	int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			  int size, int qidx);
	int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
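
/* Illustrative sketch: silicon-specific behaviour is reached through
 * this ops table rather than branched on at each call site. At probe
 * time the driver points hw_ops at the table for the detected silicon,
 * after which callers simply dispatch, e.g.:
 *
 *	pfvf->hw_ops->sqe_flush(pfvf, sq, size, qidx);
 *	pfvf->hw_ops->aura_freeptr(pfvf, aura_id, bufptr);
 *
 * (otx2_aura_freeptr() and cn10k_aura_freeptr() further below are the
 * two aura_freeptr implementations.)
 */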

#define CN10K_MCS_SA_PER_SC	4

/* Stats which need to be accumulated in software because
 * of shared counters in hardware.
 */
struct cn10k_txsc_stats {
	u64 InPktsUntagged;
	u64 InPktsNoTag;
	u64 InPktsBadTag;
	u64 InPktsUnknownSCI;
	u64 InPktsNoSCI;
	u64 InPktsOverrun;
};

struct cn10k_rxsc_stats {
	u64 InOctetsValidated;
	u64 InOctetsDecrypted;
	u64 InPktsUnchecked;
	u64 InPktsDelayed;
	u64 InPktsOK;
	u64 InPktsInvalid;
	u64 InPktsLate;
	u64 InPktsNotValid;
	u64 InPktsNotUsingSA;
	u64 InPktsUnusedSA;
};

struct cn10k_mcs_txsc {
	struct macsec_secy *sw_secy;
	struct cn10k_txsc_stats stats;
	struct list_head entry;
	enum macsec_validation_type last_validate_frames;
	bool last_replay_protect;
	u16 hw_secy_id_tx;
	u16 hw_secy_id_rx;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 encoding_sa;
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
	bool vlan_dev;		/* MACsec running on a VLAN device? */
};

struct cn10k_mcs_rxsc {
	struct macsec_secy *sw_secy;
	struct macsec_rx_sc *sw_rxsc;
	struct cn10k_rxsc_stats stats;
	struct list_head entry;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
};

struct cn10k_mcs_cfg {
	struct list_head txsc_list;
	struct list_head rxsc_list;
};

struct otx2_nic {
	void __iomem *reg_base;
	struct net_device *netdev;
	struct dev_hw_ops *hw_ops;
	void *iommu_domain;
	u16 tx_max_pktlen;
	u16 rbsize;		/* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED		BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC		BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT		BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT		BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT		BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT		BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN			BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT		BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC		BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED	BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED		BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED		BIT_ULL(18)
#define OTX2_FLAG_PORT_UP			BIT_ULL(19)
#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED		BIT_ULL(20)
	u64 flags;
	u64 *cq_op_addr;

	struct bpf_prog *xdp_prog;
	struct otx2_qset qset;
	struct otx2_hw hw;
	struct pci_dev *pdev;
	struct device *dev;

	/* Mbox */
	struct mbox mbox;
	struct mbox *mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8 total_vfs;
	u16 pcifunc;		/* RVU PF_FUNC */
	u16 bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config *vf_configs;
	struct cgx_link_user_info linfo;

	/* NPC MCAM */
	struct otx2_flow_config *flow_cfg;
	struct otx2_mac_table *mac_table;

	u64 reset_count;
	struct work_struct reset_task;
	struct workqueue_struct *flr_wq;
	struct flr_work *flr_wrk;
	struct refill_work *refill_wrk;
	struct workqueue_struct *otx2_wq;
	struct work_struct rx_mode_work;

	/* Ethtool stuff */
	u32 msg_enable;

	/* Block address of NIX: either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int nix_blkaddr;
	/* LMTST Lines info */
	struct qmem *dync_lmt;
	u16 tot_lmt_lines;
	u16 npa_lmt_lines;
	u32 nix_lmt_size;

	struct otx2_ptp *ptp;
	struct hwtstamp_config tstamp;

	unsigned long rq_bmap;

	/* Devlink */
	struct otx2_devlink *dl;
	/* PFC */
	u8 pfc_en;
#ifdef CONFIG_DCB
	u8 *queue_to_pfc_map;
	u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
	/* qos */
	struct otx2_qos qos;

	/* napi event count. It is needed for adaptive irq coalescing. */
	u32 napi_events;

#if IS_ENABLED(CONFIG_MACSEC)
	struct cn10k_mcs_cfg *macsec_cfg;
#endif

#if IS_ENABLED(CONFIG_RVU_ESWITCH)
	struct rep_dev **reps;
	int rep_cnt;
	u16 rep_pf_map[RVU_MAX_REP];
	u16 esw_mode;
#endif

	/* Inline ipsec */
	struct cn10k_ipsec ipsec;
	/* af_xdp zero-copy */
	unsigned long *af_xdp_zc_qidx;
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
	       (pdev->device == PCI_DEVID_RVU_REP);
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass,
 * bits 7..4: midr id.
 */
#define PCI_REVISION_ID_96XX		0x00
#define PCI_REVISION_ID_95XX		0x10
#define PCI_REVISION_ID_95XXN		0x20
#define PCI_REVISION_ID_98XX		0x30
#define PCI_REVISION_ID_95XXMM		0x40
#define PCI_REVISION_ID_95XXO		0xE0

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}

static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
	    (pdev->revision & 0xFF) == 0x54)
		return true;

	return false;
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, previous silicons require a minimum
		 * of 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
	}
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	case BLKTYPE_CPT:
		blkaddr = BLKADDR_CPT0;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}
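
/* A short worked example, assuming the NIX_LF_* register macros from
 * otx2_reg.h encode the block type in the offset: a write such as
 *
 *	otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
 *
 * goes through otx2_get_regaddr(), which replaces the block-type field
 * in the offset with the actual block address (BLKADDR_NIX0 or
 * BLKADDR_NIX1 for this PF/VF) before adding reg_base.
 */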

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* In the absence of an API for 128-bit I/O memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu  generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr)	writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr)	({ *ptr += incr; })
#endif
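
/* Usage note (illustrative): otx2_atomic64_add() is not only used on
 * normal memory. The LSE LDADD instruction atomically adds and returns
 * the *original* value at the target, and the driver also issues it
 * against NPA/NIX "operation" addresses, where the hardware performs
 * the requested operation and returns its result as the load data.
 * otx2_aura_allocptr() below reads the allocated buffer pointer
 * straight out of the atomic's return value this way.
 */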

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if (!count_eot)
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	get_cpu();
	/* Free only one buffer at time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
	put_cpu();
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
	u16 pcifunc = mbox->pfvf->pcifunc;				\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	req->hdr.pcifunc = pcifunc;					\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
	return req;							\
}

MBOX_MESSAGES
#undef M

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
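
/* A minimal usage sketch of the generated otx2_mbox_alloc_msg_*()
 * helpers above (exact names depend on the MBOX_MESSAGES table in
 * mbox.h); the mbox lock serializes the whole request/response cycle:
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
 *	if (!req) {
 *		mutex_unlock(&pfvf->mbox.lock);
 *		return -ENOMEM;
 *	}
 *	// ... fill request fields ...
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 */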

/* Time to wait before the Tx watchdog kicks in */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF

static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
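
/* Worked example (layout per the defines above): pcifunc is
 * bits [15:10] = PF number, bits [9:0] = function. func == 0 denotes
 * the PF itself; VFs use func = VF index + 1. So pcifunc 0x0403
 * decodes to PF 1, VF 2.
 */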

static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
	u16 smq;
	int idx;

#ifdef CONFIG_DCB
	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
	/* check if qidx falls under QOS queues */
	if (qidx >= pfvf->hw.non_qos_queues) {
		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
	} else {
		idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
	}

	return smq;
}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}

static inline u64 otx2_convert_rate(u64 rate)
{
	u64 converted_rate;

	/* Convert bytes per second to Mbps */
	converted_rate = rate * 8;
	converted_rate = max_t(u64, converted_rate / 1000000, 1);

	return converted_rate;
}
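
/* Worked example for otx2_convert_rate(): a TC rate of 125000000
 * bytes/sec becomes 125000000 * 8 = 1e9 bits/sec, i.e. 1000 Mbps;
 * anything below 1 Mbps is clamped up to 1.
 */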

static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{
	/* return here if MCAM entries not allocated */
	if (!pfvf->flow_cfg)
		return 0;

	return pfvf->flow_cfg->nr_flows;
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
int otx2_reset_mac_stats(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);
int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
void otx2_free_queue_mem(struct otx2_qset *qset);
int otx2_alloc_queue_mem(struct otx2_nic *pf);
int otx2_init_hw_resources(struct otx2_nic *pfvf);
void otx2_free_hw_resources(struct otx2_nic *pf);
int otx2_wq_init(struct otx2_nic *pf);
int otx2_check_pf_usable(struct otx2_nic *pf);
int otx2_pfaf_mbox_init(struct otx2_nic *pf);
int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
int otx2_realloc_msix_vectors(struct otx2_nic *pf);
void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
int otx2_set_hw_capabilities(struct otx2_nic *pfvf);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
			    u64 iova, int len, u16 qidx, u16 flags);
void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
			 u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);
int otx2_mcam_entry_init(struct otx2_nic *pfvf);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio,
			       int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
/* PFC support */
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

/* qos support */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
	struct otx2_hw *hw = &pfvf->hw;

	hw->tc_tx_queues = qos_txqs;
	INIT_LIST_HEAD(&pfvf->qos.qos_tree);
	mutex_init(&pfvf->qos.qos_lock);
}

static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
	mutex_destroy(&pfvf->qos.qos_lock);
}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
			     struct flow_cls_offload *cls_flower);

static inline int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
				 struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
#endif /* OTX2_COMMON_H */