/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/silicons.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
#include <net/page_pool/helpers.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu.h>
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
#include "cn10k_ipsec.h"
#include "cn20k.h"

/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00

#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
#define PCI_MBOX_BAR_NUM 4

#define NAME_SIZE 32

#ifdef CONFIG_DCB
/* Max priority supported for PFC */
#define NIX_PF_PFC_PRIO_MAX 8
#endif

/* Number of segments per SG structure */
#define MAX_SEGS_PER_SG 3

irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq);
irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq);
irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq);
irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq);
irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq);

enum arua_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START 0x00
#define NIX_LF_CINT_VEC_START 0x40
#define NIX_LF_GINT_VEC 0x80
#define NIX_LF_ERR_VEC 0x81
#define NIX_LF_POISON_VEC 0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
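
/* Note: the stats macros above read NIX LF counters relative to a
 * 'struct otx2_nic *pfvf' variable that must already be in scope at the
 * call site. A minimal usage sketch (RX_OCTS/TX_OCTS are assumed to come
 * from the NIX LF statistics enums in mbox.h):
 *
 *	dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
 *	dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
 */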

struct otx2_lmt_info {
	u64 lmt_addr;
	u16 lmt_id;
};

struct otx2_rss_info {
	u8 enable;
	u32 flowkey_cfg;
	u16 rss_size;
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
	u8 key[RSS_HASH_KEY_SIZE];
	u32 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_LID_LA,
	NPC_ERRLVL_LID_LB,
	NPC_ERRLVL_LID_LC,
	NPC_ERRLVL_LID_LD,
	NPC_ERRLVL_LID_LE,
	NPC_ERRLVL_LID_LF,
	NPC_ERRLVL_LID_LG,
	NPC_ERRLVL_LID_LH,
	NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS = 0x7,
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};

enum otx2_xdp_action {
	OTX2_XDP_TX = BIT(0),
	OTX2_XDP_REDIRECT = BIT(1),
	OTX2_AF_XDP_FRAME = BIT(2),
};

struct otx2_dev_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
	atomic_long_t tx_discards;
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};

struct mbox {
	struct otx2_mbox mbox;
	struct work_struct mbox_wrk;
	struct otx2_mbox mbox_up;
	struct work_struct mbox_up_wrk;
	struct otx2_nic *pfvf;
	void *bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex lock; /* serialize mailbox access */
	int num_msgs; /* mbox number of messages */
	int up_num_msgs; /* mbox_up number of messages */
};

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT 0x0FULL
#define MAX_BURST_MANTISSA 0xFFULL
#define MAX_BURST_SIZE 130816ULL
#define MAX_RATE_DIVIDER_EXPONENT 12ULL
#define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
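
/* Worked example (illustrative; the exact encoding is implemented by the
 * egress rate limiting helpers in otx2_common.c, so treat the formula as an
 * assumption): with a burst encoded as ((256 + mantissa) << (1 + exponent)) / 256,
 * the largest programmable values above give
 *
 *	((256 + MAX_BURST_MANTISSA) << (1 + MAX_BURST_EXPONENT)) / 256
 *	= (511 << 16) / 256 = 130816 bytes = MAX_BURST_SIZE
 *
 * i.e. MAX_BURST_SIZE is the burst produced by the largest mantissa and
 * exponent that fit in the TLX_BURST_* bitfields.
 */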

struct otx2_hw {
	struct pci_dev *pdev;
	struct otx2_rss_info rss_info;
	u16 rx_queues;
	u16 tx_queues;
	u16 xdp_queues;
	u16 tc_tx_queues;
	u16 non_qos_queues; /* tx queues plus xdp queues */
	u16 max_queues;
	u16 pool_cnt;
	u16 rqpool_cnt;
	u16 sqpool_cnt;

#define OTX2_DEFAULT_RBUF_LEN 2048
	u16 rbuf_len;
	u32 xqe_size;

	/* NPA */
	u32 stack_pg_ptrs;  /* No of ptrs per stack page */
	u32 stack_pg_bytes; /* Size of stack page */
	u16 sqb_size;

	/* NIX */
	u8 txschq_link_cfg_lvl;
	u8 txschq_cnt[NIX_TXSCH_LVL_CNT];
	u8 txschq_aggr_lvl_rr_prio;
	u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	u16 matchall_ipolicer;
	u32 dwrr_mtu;
	u32 max_mtu;
	u8 smq_link_type;

	/* HW settings, coalescing etc */
	u16 rx_chan_base;
	u16 tx_chan_base;
	u8 rx_chan_cnt;
	u8 tx_chan_cnt;
	u16 cq_qcount_wait;
	u16 cq_ecount_wait;
	u16 rq_skid;
	u8 cq_time_wait;

	/* Segmentation */
	u8 lso_tsov4_idx;
	u8 lso_tsov6_idx;
	u8 lso_udpv4_idx;
	u8 lso_udpv6_idx;

	/* RSS */
	u8 flowkey_alg_idx;

	/* MSI-X */
	u8 cint_cnt; /* CQ interrupt count */
	u16 npa_msixoff; /* Offset of NPA vectors */
	u16 nix_msixoff; /* Offset of NIX vectors */
	char *irq_name;
	cpumask_var_t *affinity_mask;
	struct pf_irq_data *pfvf_irq_devid[4];

	/* Stats */
	struct otx2_dev_stats dev_stats;
	struct otx2_drv_stats drv_stats;
	u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64 cgx_fec_corr_blks;
	u64 cgx_fec_uncorr_blks;
	u8 cgx_links;  /* No. of CGX links present in HW */
	u8 lbk_links;  /* No. of LBK links present in HW */
	u8 tx_link;    /* Transmit channel link number */
#define HW_TSO 0
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
#define CN10K_PTP_ONESTEP 4
#define CN10K_HW_MACSEC 5
#define QOS_CIR_PIR_SUPPORT 6
	unsigned long cap_flag;

#define LMT_LINE_SIZE 128
#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
	u64 *lmt_base;
	struct otx2_lmt_info __percpu *lmt_info;
};

enum vfperm {
	OTX2_RESET_VF_PERM,
	OTX2_TRUSTED_VF,
};

struct otx2_vf_config {
	struct otx2_nic *pf;
	struct delayed_work link_event_work;
	bool intf_down; /* interface was either configured or not */
	u8 mac[ETH_ALEN];
	u16 vlan;
	int tx_vtag_idx;
	bool trusted;
};

struct flr_work {
	struct work_struct work;
	struct otx2_nic *pf;
};

struct refill_work {
	struct delayed_work pool_refill_work;
	struct otx2_nic *pf;
	struct napi_struct *napi;
};

/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
	__be16 seconds_msb; /* 16 bits + */
	__be32 seconds_lsb; /* 32 bits = 48 bits */
	__be32 nanoseconds;
} __packed;

struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;

	struct delayed_work extts_work;
	u64 last_extts;
	u64 thresh;

	struct ptp_pin_desc extts_config;
	u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
	u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
	u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
	struct delayed_work synctstamp_work;
	u64 tstamp;
	u32 base_ns;
};

#define OTX2_HW_TIMESTAMP_LEN 8

struct otx2_mac_table {
	u8 addr[ETH_ALEN];
	u16 mcam_entry;
	bool inuse;
};

struct otx2_flow_config {
	u16 *flow_ent;
	u16 *def_ent;
	u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT 16
#define OTX2_DEFAULT_UNICAST_FLOWS 4
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
	u16 unicast_offset;
	u16 rx_vlan_offset;
	u16 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
	u32 *bmap_to_dmacindex;
	unsigned long *dmacflt_bmap;
	struct list_head flow_list;
	u32 dmacflt_max_flows;
	u16 max_flows;
	refcount_t mark_flows;
	struct list_head flow_list_tc;
	u8 ucast_flt_cnt;
	bool ntuple;
	u16 ntuple_cnt;
};

struct dev_hw_ops {
	int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			  int size, int qidx);
	int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void (*aura_freeptr)(void *dev, int aura, u64 buf);
	irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq);
	irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq);
	irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq);
	int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id,
			    int pool_id, int numptrs);
	int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id,
			    int stack_pages, int numptrs, int buf_size,
			    int type);
};
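
/* These ops let the common code stay silicon-agnostic: callers go through
 * pfvf->hw_ops instead of calling the OTX2 or CN10K variants (for example
 * otx2_sq_aq_init() vs cn10k_sq_aq_init(), both declared later in this file)
 * directly. A minimal dispatch sketch (the surrounding context is
 * hypothetical):
 *
 *	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
 *	err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
 */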

#define CN10K_MCS_SA_PER_SC 4

/* Stats which need to be accumulated in software because
 * of shared counters in hardware.
 */
struct cn10k_txsc_stats {
	u64 InPktsUntagged;
	u64 InPktsNoTag;
	u64 InPktsBadTag;
	u64 InPktsUnknownSCI;
	u64 InPktsNoSCI;
	u64 InPktsOverrun;
};

struct cn10k_rxsc_stats {
	u64 InOctetsValidated;
	u64 InOctetsDecrypted;
	u64 InPktsUnchecked;
	u64 InPktsDelayed;
	u64 InPktsOK;
	u64 InPktsInvalid;
	u64 InPktsLate;
	u64 InPktsNotValid;
	u64 InPktsNotUsingSA;
	u64 InPktsUnusedSA;
};

struct cn10k_mcs_txsc {
	struct macsec_secy *sw_secy;
	struct cn10k_txsc_stats stats;
	struct list_head entry;
	enum macsec_validation_type last_validate_frames;
	bool last_replay_protect;
	u16 hw_secy_id_tx;
	u16 hw_secy_id_rx;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 encoding_sa;
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
	bool vlan_dev; /* macsec running on VLAN ? */
};

struct cn10k_mcs_rxsc {
	struct macsec_secy *sw_secy;
	struct macsec_rx_sc *sw_rxsc;
	struct cn10k_rxsc_stats stats;
	struct list_head entry;
	u16 hw_flow_id;
	u16 hw_sc_id;
	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
	u8 sa_bmap;
	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
	u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
	ssci_t ssci[CN10K_MCS_SA_PER_SC];
};

struct cn10k_mcs_cfg {
	struct list_head txsc_list;
	struct list_head rxsc_list;
};

struct pf_irq_data {
	u64 intr_status;
	void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw,
				   int first, int mdevs, u64 intr);
	struct otx2_nic *pf;
	int vec_num;
	int start;
	int mdevs;
};

struct otx2_nic {
	void __iomem *reg_base;
	struct net_device *netdev;
	struct dev_hw_ops *hw_ops;
	void *iommu_domain;
	u16 tx_max_pktlen;
	u16 rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED		BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC		BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT		BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT		BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT		BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT		BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN			BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT		BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC		BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED	BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED		BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED		BIT_ULL(18)
#define OTX2_FLAG_PORT_UP			BIT_ULL(19)
#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED		BIT_ULL(20)
	u64 flags;
	u64 *cq_op_addr;

	struct bpf_prog *xdp_prog;
	struct otx2_qset qset;
	struct otx2_hw hw;
	struct pci_dev *pdev;
	struct device *dev;

	/* Mbox */
	struct mbox mbox;
	struct mbox *mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;
	struct qmem *pfvf_mbox_addr;

	u8 total_vfs;
	u16 pcifunc; /* RVU PF_FUNC */
	u16 bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config *vf_configs;
	struct cgx_link_user_info linfo;

	/* NPC MCAM */
	struct otx2_flow_config *flow_cfg;
	struct otx2_mac_table *mac_table;

	u64 reset_count;
	struct work_struct reset_task;
	struct workqueue_struct *flr_wq;
	struct flr_work *flr_wrk;
	struct refill_work *refill_wrk;
	struct workqueue_struct *otx2_wq;
	struct work_struct rx_mode_work;

	/* Ethtool stuff */
	u32 msg_enable;

	/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int nix_blkaddr;
	/* LMTST Lines info */
	struct qmem *dync_lmt;
	u16 tot_lmt_lines;
	u16 npa_lmt_lines;
	u32 nix_lmt_size;

	struct otx2_ptp *ptp;
	struct kernel_hwtstamp_config tstamp;

	unsigned long rq_bmap;

	/* Devlink */
	struct otx2_devlink *dl;
	/* PFC */
	u8 pfc_en;
#ifdef CONFIG_DCB
	u8 *queue_to_pfc_map;
	u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
	/* qos */
	struct otx2_qos qos;

	/* napi event count. It is needed for adaptive irq coalescing. */
	u32 napi_events;

#if IS_ENABLED(CONFIG_MACSEC)
	struct cn10k_mcs_cfg *macsec_cfg;
#endif

#if IS_ENABLED(CONFIG_RVU_ESWITCH)
	struct rep_dev **reps;
	int rep_cnt;
	u16 rep_pf_map[RVU_MAX_REP];
	u16 esw_mode;
#endif

	/* Inline ipsec */
	struct cn10k_ipsec ipsec;
	/* af_xdp zero-copy */
	unsigned long *af_xdp_zc_qidx;
};
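
/* The OTX2_FLAG_* values above are bit masks (BIT_ULL), not bit numbers, so
 * the u64 'flags' member is tested and updated with plain bit operations.
 * A minimal sketch (the surrounding context is hypothetical):
 *
 *	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
 *		return;
 *	pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
 */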

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
	       (pdev->device == PCI_DEVID_RVU_REP);
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX 0x00
#define PCI_REVISION_ID_95XX 0x10
#define PCI_REVISION_ID_95XXN 0x20
#define PCI_REVISION_ID_98XX 0x30
#define PCI_REVISION_ID_95XXMM 0x40
#define PCI_REVISION_ID_95XXO 0xE0

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}

static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
	    (pdev->revision & 0xFF) == 0x54)
		return true;

	return false;
}
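
/* Decoding the 0x54 check above against the REVID layout documented before
 * the PCI_REVISION_ID_* defines: bits 7..4 = 0x5 are the midr id of the
 * silicon, bits 3..2 = 0b01 the major pass and bits 1..0 = 0b00 the minor
 * pass, i.e. revision 0x54 identifies the B0 pass of that CN10KA part.
 */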

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, older silicons require a minimum of
		 * 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
	}
}
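
/* cap_flag is set with the non-atomic bitops used above and consumed with
 * the matching test_bit() pattern elsewhere in the driver. A minimal sketch
 * (the two helpers are hypothetical placeholders for the CN10K and pre-CN10K
 * code paths):
 *
 *	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
 *		use_cn10k_lmtst_path();
 *	else
 *		use_legacy_path();
 */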

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	case BLKTYPE_CPT:
		blkaddr = BLKADDR_CPT0;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}
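
/* Register offsets from otx2_reg.h carry a block *type* in the
 * RVU_FUNC_BLKADDR bits; otx2_get_regaddr() rewrites those bits to the block
 * *address* the LF is actually attached to (e.g. BLKADDR_NIX0 vs BLKADDR_NIX1
 * for a NIX offset) before forming the MMIO address. So a call such as
 *
 *	otx2_read64(pfvf, NIX_LF_RX_STATX(0));
 *
 * lands on whichever NIX block this function was attached to, without the
 * caller having to know which one that is.
 */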

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
	 * prepare all mbox messages in bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;
	return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* In the absence of an API for 128-bit I/O memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
{
	u64 __iomem *ptr = addr;
	u64 result;

	__asm__ volatile(".cpu generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}

#else
#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)

static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
{
	return 0;
}
#endif

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs)
{
	struct otx2_lmt_info *lmt_info;
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if (!count_eot)
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	dma_wmb();
	memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	u64 ptrs[2];

	ptrs[1] = buf;
	get_cpu();
	/* Free only one buffer at time during init and teardown */
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
	put_cpu();
}
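
/* Worked example of the tar_addr[6:4] encoding above: for num_ptrs = 5
 * (one aura word in ptrs[0] plus four buffer pointers), count_eot is 0
 * because num_ptrs is odd, so size = (8 * 5) / 16 = 2 and is then bumped to
 * 3; tar_addr[6:4] is therefore programmed with size - 1 = 2, telling the
 * hardware that three 128-bit LMTST words are valid. For the common
 * cn10k_aura_freeptr() case of num_ptrs = 2, the "num_ptrs > 2" branch is
 * skipped and a single 128-bit word is flushed.
 */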

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ */
	return idx;
}

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
	u16 pcifunc = mbox->pfvf->pcifunc;				\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	req->hdr.pcifunc = pcifunc;					\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
	return req;							\
}

MBOX_MESSAGES
#undef M
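
/* Typical request flow built on the helpers and generated allocators above
 * (a minimal sketch; the concrete message, struct and field names are an
 * assumption taken from mbox.h and are illustrative only):
 *
 *	struct nix_frs_cfg *req;
 *	int err;
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
 *	if (!req) {
 *		mutex_unlock(&pfvf->mbox.lock);
 *		return -ENOMEM;
 *	}
 *	req->maxlen = new_frame_size;
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 *
 * mbox.lock serializes access to the single shared PF/AF mailbox region.
 */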

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);		\

MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M

/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT (100 * HZ)

static inline bool is_otx2_vf(u16 pcifunc)
{
	return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
	u16 smq;
	int idx;

#ifdef CONFIG_DCB
	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
	/* check if qidx falls under QOS queues */
	if (qidx >= pfvf->hw.non_qos_queues) {
		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
	} else {
		idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
	}

	return smq;
}

static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}

static inline u64 otx2_convert_rate(u64 rate)
{
	u64 converted_rate;

	/* Convert bytes per second to Mbps */
	converted_rate = rate * 8;
	converted_rate = max_t(u64, converted_rate / 1000000, 1);

	return converted_rate;
}
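
/* Example of the conversion above: a rate of 125,000,000 bytes/sec becomes
 * 125,000,000 * 8 / 1,000,000 = 1000 Mbps; anything below 125,000 bytes/sec
 * is clamped up to the 1 Mbps minimum by the max_t().
 */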

static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{
	/* return here if MCAM entries not allocated */
	if (!pfvf->flow_cfg)
		return 0;

	return pfvf->flow_cfg->nr_flows;
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
int otx2_reset_mac_stats(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);
int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
void otx2_free_queue_mem(struct otx2_qset *qset);
int otx2_alloc_queue_mem(struct otx2_nic *pf);
int otx2_init_hw_resources(struct otx2_nic *pfvf);
void otx2_free_hw_resources(struct otx2_nic *pf);
int otx2_wq_init(struct otx2_nic *pf);
int otx2_check_pf_usable(struct otx2_nic *pf);
int otx2_pfaf_mbox_init(struct otx2_nic *pf);
int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
int otx2_realloc_msix_vectors(struct otx2_nic *pf);
void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
		      int pool_id, int numptrs);
int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
		      int stack_pages, int numptrs, int buf_size, int type);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
int otx2_config_hwtstamp_get(struct net_device *netdev,
			     struct kernel_hwtstamp_config *config);
int otx2_config_hwtstamp_set(struct net_device *netdev,
			     struct kernel_hwtstamp_config *config,
			     struct netlink_ext_ack *extack);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
			    u64 iova, int len, u16 qidx, u16 flags);
void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
			 u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);
int otx2_mcam_entry_init(struct otx2_nic *pfvf);

/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);

#ifdef CONFIG_DCB
/* DCB support */
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
/* PFC support */
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif

#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
					  struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */

/* qos support */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
	struct otx2_hw *hw = &pfvf->hw;

	hw->tc_tx_queues = qos_txqs;
	INIT_LIST_HEAD(&pfvf->qos.qos_tree);
	mutex_init(&pfvf->qos.qos_lock);
}

static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
	mutex_destroy(&pfvf->qos.qos_lock);
}

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
			     struct flow_cls_offload *cls_flower);

static inline int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
				 struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
			int first, int mdevs, u64 intr);
#endif /* OTX2_COMMON_H */