/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
#include <net/page_pool/helpers.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
#include "cn10k_ipsec.h"

/* IPv4 'more fragments' flag bit */
#define IPV4_FLAG_MORE				0x20

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF		0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF		0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF	0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00

#define PCI_DEVID_OCTEONTX2_SDP_REP		0xA0F7

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM			2
#define PCI_MBOX_BAR_NUM			4

#define NAME_SIZE				32

#ifdef CONFIG_DCB
/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX			8
#endif

/* Number of segments per SG structure */
#define MAX_SEGS_PER_SG				3

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START			0x00
#define NIX_LF_CINT_VEC_START			0x40
#define NIX_LF_GINT_VEC				0x80
#define NIX_LF_ERR_VEC				0x81
#define NIX_LF_POISON_VEC			0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID				2000
#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))

struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};

/* RSS configuration */
struct otx2_rss_ctx {
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
#define RSS_HASH_KEY_SIZE	44 /* 352 bit key */
	u8 key[RSS_HASH_KEY_SIZE];
	struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

enum otx2_xdp_action {
	OTX2_XDP_TX	  = BIT(0),
	OTX2_XDP_REDIRECT = BIT(1),
	OTX2_AF_XDP_FRAME = BIT(2),
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox	mbox;
	struct work_struct	mbox_wrk;
	struct otx2_mbox	mbox_up;
	struct work_struct	mbox_up_wrk;
	struct otx2_nic		*pfvf;
	void			*bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex		lock;	/* serialize mailbox access */
	int			num_msgs; /* mbox number of messages */
	int			up_num_msgs; /* mbox_up number of messages */
};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)
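
/* A minimal sketch of how the rate/burst bitfields above are packed into a
 * shaper register value. It assumes <linux/bitfield.h> for FIELD_PREP() and
 * already-computed exponent/mantissa values (the derivation from a rate in
 * Mbps lives in the driver's .c files); the function name is illustrative
 * only.
 */
static inline u64 otx2_example_tlx_pir_regval(u64 rate_exp, u64 rate_mantissa,
					      u64 rate_div_exp, u64 burst_exp,
					      u64 burst_mantissa)
{
	/* Bit 0 is the enable bit, set here as the driver does when
	 * programming a shaper.
	 */
	return FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
	       FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
	       FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, rate_div_exp) |
	       FIELD_PREP(TLX_RATE_EXPONENT, rate_exp) |
	       FIELD_PREP(TLX_RATE_MANTISSA, rate_mantissa) |
	       BIT_ULL(0);
}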
struct otx2_hw {
	struct pci_dev		*pdev;
	struct otx2_rss_info	rss_info;
	u16			rx_queues;
	u16			tx_queues;
	u16			xdp_queues;
	u16			tc_tx_queues;
	u16			non_qos_queues; /* tx queues plus xdp queues */
	u16			max_queues;
	u16			pool_cnt;
	u16			rqpool_cnt;
	u16			sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN	2048
	u16			rbuf_len;
	u32			xqe_size;

	/* NPA */
	u32			stack_pg_ptrs;  /* No of ptrs per stack page */
	u32			stack_pg_bytes; /* Size of stack page */
	u16			sqb_size;

	/* NIX */
	u8			txschq_link_cfg_lvl;
	u8			txschq_cnt[NIX_TXSCH_LVL_CNT];
	u8			txschq_aggr_lvl_rr_prio;
	u16			txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16			matchall_ipolicer;
	u32			dwrr_mtu;
	u32			max_mtu;
	u8			smq_link_type;

	/* HW settings, coalescing etc */
	u16			rx_chan_base;
	u16			tx_chan_base;
	u8			rx_chan_cnt;
	u8			tx_chan_cnt;
	u16			cq_qcount_wait;
	u16			cq_ecount_wait;
	u16			rq_skid;
	u8			cq_time_wait;

	/* Segmentation */
	u8			lso_tsov4_idx;
	u8			lso_tsov6_idx;
	u8			lso_udpv4_idx;
	u8			lso_udpv6_idx;

	/* RSS */
	u8			flowkey_alg_idx;

	/* MSI-X */
	u8			cint_cnt; /* CQ interrupt count */
	u16			npa_msixoff; /* Offset of NPA vectors */
	u16			nix_msixoff; /* Offset of NIX vectors */
	char			*irq_name;
	cpumask_var_t		*affinity_mask;

	/* Stats */
	struct otx2_dev_stats	dev_stats;
	struct otx2_drv_stats	drv_stats;
	u64			cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64			cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64			cgx_fec_corr_blks;
	u64			cgx_fec_uncorr_blks;
	u8			cgx_links;  /* No. of CGX links present in HW */
	u8			lbk_links;  /* No. of LBK links present in HW */
	u8			tx_link;    /* Transmit channel link number */
#define HW_TSO			0
#define CN10K_MBOX		1
#define CN10K_LMTST		2
#define CN10K_RPM		3
#define CN10K_PTP_ONESTEP	4
#define CN10K_HW_MACSEC		5
#define QOS_CIR_PIR_SUPPORT	6
	unsigned long		cap_flag;

#define LMT_LINE_SIZE		128
#define LMT_BURST_SIZE		32 /* 32 LMTST lines for burst SQE flush */
	u64			*lmt_base;
	struct otx2_lmt_info	__percpu *lmt_info;
};

enum vfperm {
	OTX2_RESET_VF_PERM,
	OTX2_TRUSTED_VF,
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* interface was either configured or not */
	u8 mac[ETH_ALEN];
	u16 vlan;
	int tx_vtag_idx;
	bool trusted;
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
	struct napi_struct *napi;
};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
	__be16 seconds_msb; /* 16 bits + */
	__be32 seconds_lsb; /* 32 bits = 48 bits */
	__be32 nanoseconds;
} __packed;

struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
	u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
	struct delayed_work synctstamp_work;
	u64 tstamp;
	u32 base_ns;
};

#define OTX2_HW_TIMESTAMP_LEN	8

struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

struct otx2_flow_config {
	u16			*flow_ent;
	u16			*def_ent;
	u16			nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT		16
#define OTX2_DEFAULT_UNICAST_FLOWS	4
#define OTX2_MAX_VLAN_FLOWS		1
#define OTX2_MAX_TC_FLOWS		OTX2_DEFAULT_FLOWCOUNT
	u16			unicast_offset;
	u16			rx_vlan_offset;
	u16			vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS	2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX	0
#define OTX2_VF_VLAN_TX_INDEX	1
	u32			*bmap_to_dmacindex;
	unsigned long		*dmacflt_bmap;
	struct list_head	flow_list;
	u32			dmacflt_max_flows;
	u16			max_flows;
	refcount_t		mark_flows;
	struct list_head	flow_list_tc;
	u8			ucast_flt_cnt;
	bool			ntuple;
};

struct dev_hw_ops {
	int	(*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset,
			      u16 sqb_aura);
	void	(*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			     int size, int qidx);
	int	(*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void	(*aura_freeptr)(void *dev, int aura, u64 buf);
};

#define CN10K_MCS_SA_PER_SC	4

/* Stats which need to be accumulated in software because
 * of shared counters in hardware.
 */
struct cn10k_txsc_stats {
	u64 InPktsUntagged;
	u64 InPktsNoTag;
	u64 InPktsBadTag;
	u64 InPktsUnknownSCI;
	u64 InPktsNoSCI;
	u64 InPktsOverrun;
};

struct cn10k_rxsc_stats {
	u64 InOctetsValidated;
	u64 InOctetsDecrypted;
	u64 InPktsUnchecked;
	u64 InPktsDelayed;
	u64 InPktsOK;
	u64 InPktsInvalid;
	u64 InPktsLate;
	u64 InPktsNotValid;
	u64 InPktsNotUsingSA;
	u64 InPktsUnusedSA;
};

struct cn10k_mcs_txsc {
	struct macsec_secy *sw_secy;
	struct cn10k_txsc_stats stats;
	struct list_head entry;
	enum macsec_validation_type last_validate_frames;
	bool last_replay_protect;
	u16 hw_secy_id_tx;
	u16 hw_secy_id_rx;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 encoding_sa;
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
	bool vlan_dev; /* macsec running on VLAN ? */
};

struct cn10k_mcs_rxsc {
	struct macsec_secy *sw_secy;
	struct macsec_rx_sc *sw_rxsc;
	struct cn10k_rxsc_stats stats;
	struct list_head entry;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
};

struct cn10k_mcs_cfg {
	struct list_head txsc_list;
	struct list_head rxsc_list;
};

struct otx2_nic {
	void __iomem		*reg_base;
	struct net_device	*netdev;
	struct dev_hw_ops	*hw_ops;
	void			*iommu_domain;
	u16			tx_max_pktlen;
	u16			rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED		BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC		BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT		BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT		BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT		BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT		BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN			BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT		BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC		BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED	BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED		BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED		BIT_ULL(18)
#define OTX2_FLAG_PORT_UP			BIT_ULL(19)
#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED		BIT_ULL(20)
	u64			flags;
	u64			*cq_op_addr;

	struct bpf_prog		*xdp_prog;
	struct otx2_qset	qset;
	struct otx2_hw		hw;
	struct pci_dev		*pdev;
	struct device		*dev;

	/* Mbox */
	struct mbox		mbox;
	struct mbox		*mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u8			total_vfs;
	u16			pcifunc; /* RVU PF_FUNC */
	u16			bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config	*vf_configs;
	struct cgx_link_user_info linfo;

	/* NPC MCAM */
	struct otx2_flow_config	*flow_cfg;
	struct otx2_mac_table	*mac_table;

	u64			reset_count;
	struct work_struct	reset_task;
	struct workqueue_struct	*flr_wq;
	struct flr_work		*flr_wrk;
	struct refill_work	*refill_wrk;
	struct workqueue_struct	*otx2_wq;
	struct work_struct	rx_mode_work;

	/* Ethtool stuff */
	u32			msg_enable;

	/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int			nix_blkaddr;
	/* LMTST Lines info */
	struct qmem		*dync_lmt;
	u16			tot_lmt_lines;
	u16			npa_lmt_lines;
	u32			nix_lmt_size;

	struct otx2_ptp		*ptp;
	struct hwtstamp_config	tstamp;

	unsigned long		rq_bmap;

	/* Devlink */
	struct otx2_devlink	*dl;
	/* PFC */
	u8			pfc_en;
#ifdef CONFIG_DCB
	u8			*queue_to_pfc_map;
	u16			pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	bool			pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
	/* qos */
	struct otx2_qos		qos;

	/* napi event count. It is needed for adaptive irq coalescing. */
	u32			napi_events;

#if IS_ENABLED(CONFIG_MACSEC)
	struct cn10k_mcs_cfg	*macsec_cfg;
#endif

#if IS_ENABLED(CONFIG_RVU_ESWITCH)
	struct rep_dev		**reps;
	int			rep_cnt;
	u16			rep_pf_map[RVU_MAX_REP];
	u16			esw_mode;
#endif

	/* Inline ipsec */
	struct cn10k_ipsec	ipsec;
	/* af_xdp zero-copy */
	unsigned long		*af_xdp_zc_qidx;
};
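
/* A minimal sketch of the silicon-agnostic dispatch through the dev_hw_ops
 * table declared earlier: common code calls through pfvf->hw_ops so the same
 * path drives both OcteonTx2 and CN10K silicon. The function name is
 * illustrative only.
 */
static inline void otx2_example_hw_ops_freeptr(struct otx2_nic *pfvf,
					       int aura, u64 buf)
{
	/* Resolves to the OcteonTx2 or CN10K implementation at runtime */
	pfvf->hw_ops->aura_freeptr(pfvf, aura, buf);
}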
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
	       (pdev->device == PCI_DEVID_RVU_REP);
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX		0x00
#define PCI_REVISION_ID_95XX		0x10
#define PCI_REVISION_ID_95XXN		0x20
#define PCI_REVISION_ID_98XX		0x30
#define PCI_REVISION_ID_95XXMM		0x40
#define PCI_REVISION_ID_95XXO		0xE0

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}

static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
	    (pdev->revision & 0xFF) == 0x54)
		return true;

	return false;
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, previous silicons required a minimum
		 * of 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
	}

	if (is_dev_cn10kb(pfvf->pdev))
		__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	case BLKTYPE_CPT:
		blkaddr = BLKADDR_CPT0;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}
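
/* A minimal usage sketch for the register helpers above: reading NIX LF RX
 * stat counter 0 through otx2_read64(). The OTX2_GET_RX_STATS() macro earlier
 * in this header wraps the same access but expects a local named 'pfvf'. The
 * function name is illustrative, and the assumption that stat index 0 is the
 * received-octets counter should be checked against the hardware manual.
 */
static inline u64 otx2_example_read_rx_stat0(struct otx2_nic *pfvf)
{
	return otx2_read64(pfvf, NIX_LF_RX_STATX(0));
}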
/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in the bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* In the absence of an API for 128-bit I/O memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu   generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr)	writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr)	({ *ptr += incr; })
#endif

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if (!count_eot)
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	get_cpu();
	/* Free only one buffer at time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
	put_cpu();
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));	\
	return req;							\
}

MBOX_MESSAGES
#undef M

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);		\

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
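
/* A minimal sketch of the request/response pattern built on the M() helpers
 * above, loosely modelled on the driver's NIX LF configuration path. It
 * assumes the nix_lf_alloc message and its rq_cnt/sq_cnt request fields from
 * mbox.h; the function name is illustrative and most request fields are left
 * out for brevity.
 */
static inline int otx2_example_nix_lf_alloc(struct otx2_nic *pfvf)
{
	struct nix_lf_alloc_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	/* Allocate a request slot in the PF/VF to AF mailbox */
	req = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->rq_cnt = pfvf->hw.rx_queues;
	req->sq_cnt = pfvf->hw.tx_queues;

	/* Send the request to AF, wait for and validate the response */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	return err;
}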
/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define	RVU_PFVF_PF_SHIFT	10
#define	RVU_PFVF_PF_MASK	0x3F
#define	RVU_PFVF_FUNC_SHIFT	0
#define	RVU_PFVF_FUNC_MASK	0x3FF

static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
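
/* A small worked example of the PF_FUNC layout above; the value is made up.
 * FUNC 0 denotes the PF itself and FUNC n denotes VF n-1, which is why a
 * non-zero FUNC field identifies a VF. The function name is illustrative.
 */
static inline bool otx2_example_decode_pcifunc(void)
{
	u16 pcifunc = (3 << RVU_PFVF_PF_SHIFT) | 2;	/* PF 3, VF index 1 */

	/* rvu_get_pf() extracts 3; the non-zero FUNC field marks a VF */
	return rvu_get_pf(pcifunc) == 3 && is_otx2_vf(pcifunc);
}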
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
	u16 smq;
	int idx;

#ifdef CONFIG_DCB
	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
	/* check if qidx falls under QOS queues */
	if (qidx >= pfvf->hw.non_qos_queues) {
		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
	} else {
		idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
	}

	return smq;
}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}

static inline u64 otx2_convert_rate(u64 rate)
{
	u64 converted_rate;

	/* Convert bytes per second to Mbps */
	converted_rate = rate * 8;
	converted_rate = max_t(u64, converted_rate / 1000000, 1);

	return converted_rate;
}

static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{
	/* return here if MCAM entries not allocated */
	if (!pfvf->flow_cfg)
		return 0;

	return pfvf->flow_cfg->nr_flows;
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
int otx2_reset_mac_stats(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);
int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
void otx2_free_queue_mem(struct otx2_qset *qset);
int otx2_alloc_queue_mem(struct otx2_nic *pf);
int otx2_init_hw_resources(struct otx2_nic *pfvf);
void otx2_free_hw_resources(struct otx2_nic *pf);
int otx2_wq_init(struct otx2_nic *pf);
int otx2_check_pf_usable(struct otx2_nic *pf);
int otx2_pfaf_mbox_init(struct otx2_nic *pf);
int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
int otx2_realloc_msix_vectors(struct otx2_nic *pf);
void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
			    u64 iova, int len, u16 qidx, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);
int otx2_mcam_entry_init(struct otx2_nic *pfvf);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
/* PFC support */
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

/* qos support */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
	struct otx2_hw *hw = &pfvf->hw;

	hw->tc_tx_queues = qos_txqs;
	INIT_LIST_HEAD(&pfvf->qos.qos_tree);
	mutex_init(&pfvf->qos.qos_lock);
}

static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
	mutex_destroy(&pfvf->qos.qos_lock);
}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
			     struct flow_cls_offload *cls_flower);

static inline int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
				 struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
#endif /* OTX2_COMMON_H */