/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Definitions for the Microsoft Azure Network Adapter (MANA)
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE	((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE	512
#define MANA_INDIRECT_TABLE_DEF_SIZE	64

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE	40

#define COMP_ENTRY_SIZE		64

/* This max value for RX buffers is derived from __alloc_page()'s max page
 * allocation calculation, which allows at most 2^(MAX_ORDER - 1) pages. RX
 * buffer sizes beyond this value are rejected by the __alloc_page() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE	8192
#define DEF_RX_BUFFERS_PER_QUEUE	1024
#define MIN_RX_BUFFERS_PER_QUEUE	128

/* This max value for TX buffers is derived as the maximum number of
 * allocatable pages supported on the host per guest, found through testing.
 * TX buffer sizes beyond this value are rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE	16384
#define DEF_TX_BUFFERS_PER_QUEUE	256
#define MIN_TX_BUFFERS_PER_QUEUE	128

#define EQ_SIZE			(8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE	3

#define MAX_PORTS_IN_MANA_DEV	256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT	5
#define MANA_STATS_TX_COUNT	11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};
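
/* Illustrative only (not part of the driver API): a minimal sketch of how a
 * datapath might update the per-queue counters above. The counters are plain
 * u64s; writers serialize 64-bit updates against readers with the
 * u64_stats_sync helpers from <linux/u64_stats_sync.h>, and readers pair
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() around their copies.
 */
static inline void mana_example_account_tx(struct mana_stats_tx *tx_stats,
					   unsigned int bytes)
{
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += bytes;
	u64_stats_update_end(&tx_stats->syncp);
}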

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
};

/* skb data and frags DMA mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag	: 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset	: 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
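
/* Illustrative only (not part of the driver API): a minimal sketch of how the
 * completion bits above typically map onto an skb. The 9-bit NDIS hash type
 * is reported in rx_hashtype, the per-packet Toeplitz hash itself is in
 * ppi[].pkt_hash, and the *_csum_succeed bits allow CHECKSUM_UNNECESSARY.
 * The real RX path does more (VLAN, XDP, coalescing) than shown here.
 * Assumes <linux/skbuff.h>.
 */
static inline void mana_example_fill_skb(struct sk_buff *skb,
					 const struct mana_rxcomp_oob *oob)
{
	if (oob->rx_hashtype & MANA_HASH_L4)
		skb_set_hash(skb, oob->ppi[0].pkt_hash, PKT_HASH_TYPE_L4);
	else if (oob->rx_hashtype & MANA_HASH_L3)
		skb_set_hash(skb, oob->ppi[0].pkt_hash, PKT_HASH_TYPE_L3);

	if (oob->rx_iphdr_csum_succeed &&
	    (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}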

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify that each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer into which the CQ handler can copy the CQEs. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			 + ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
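
/* Illustrative only: a minimal sketch of how MANA_RXBUF_PAD and
 * MANA_XDP_MTU_MAX fit together. An RX buffer must hold the Ethernet frame
 * (MTU + ETH_HLEN), the headroom reserved for XDP, and the trailing
 * skb_shared_info, so an XDP-capable buffer only fits in a single page when
 * the MTU does not exceed MANA_XDP_MTU_MAX. The driver's own sizing helper
 * may differ in details (alignment, non-XDP headroom).
 */
static inline void mana_example_rxbuf_size(int mtu, u32 *datasize,
					   u32 *alloc_size, u32 *headroom)
{
	*headroom = (mtu <= MANA_XDP_MTU_MAX) ? XDP_PACKET_HEADROOM : 0;
	*datasize = mtu + ETH_HLEN;
	*alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
}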

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not the gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reuse */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;
	struct dentry *mana_rx_debugfs;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;

	struct dentry *mana_tx_debugfs;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_ethtool_phy_stats {
	/* Drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per-TC traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per-TC pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;
	u8 bm_hostmode;

	struct mana_eq *eqs;
	struct dentry *mana_eqs_debugfs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes. */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	unsigned int rx_queue_size;
	unsigned int tx_queue_size;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;

	struct mana_ethtool_phy_stats phy_stats;

	/* Debugfs */
	struct dentry *mana_port_debugfs;
};
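
/* Illustrative only: how a receive hash selects an entry in the power-of-2
 * indirection table above. In practice the NIC performs this lookup in
 * hardware against rxobj_table; the sketch merely shows the relationship
 * between indir_table_sz, indir_table and the per-port rxqs array.
 */
static inline struct mana_rxq *
mana_example_pick_rxq(struct mana_port_context *apc, u32 pkt_hash)
{
	u32 ent = pkt_hash & (apc->indir_table_sz - 1); /* size is a power of 2 */

	return apc->rxqs[apc->indir_table[ent]];
}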

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

int mana_rdma_probe(struct gdma_dev *gd);
void mana_rdma_remove(struct gdma_dev *gd);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ	0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
	MANA_QUERY_PHY_STAT	= 0x2000c,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
	u8 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */
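
/* Illustrative only: a minimal sketch of issuing MANA_QUERY_DEV_CONFIG over
 * the HW channel, assuming the GDMA request helpers declared in gdma.h
 * (mana_gd_init_req_hdr() and mana_gd_send_request()). The real probe path
 * additionally negotiates the protocol version and validates hdr.status and
 * the reported limits before using them.
 */
static inline int
mana_example_query_dev_cfg(struct gdma_dev *gd,
			   struct mana_query_device_cfg_resp *resp)
{
	struct mana_query_device_cfg_req req = {};

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(*resp));

	req.proto_major_ver = MANA_MAJOR_VERSION;
	req.proto_minor_ver = MANA_MINOR_VERSION;
	req.proto_micro_ver = MANA_MICRO_VERSION;

	return mana_gd_send_request(gd->gdma_context, sizeof(req), &req,
				    sizeof(*resp), resp);
}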

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Query phy stats */
struct mana_query_phy_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_phy_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;

	/* Aggregate drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per-TC (Traffic Class) traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per-TC (Traffic Class) pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
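
/* Illustrative only: sizing a variable-length RX steering request. The
 * indirection entries live in the trailing flexible array, so the request
 * length is struct_size() over num_indir_entries, and indir_tab_offset tells
 * the hardware where that array starts within the message. Assumes
 * struct_size() from <linux/overflow.h> and kzalloc() from <linux/slab.h>.
 */
static inline struct mana_cfg_rx_steer_req_v2 *
mana_example_alloc_rx_steer_req(u16 num_entries, u32 *req_len)
{
	struct mana_cfg_rx_steer_req_v2 *req;

	*req_len = struct_size(req, indir_tab, num_entries);
	req = kzalloc(*req_len, GFP_KERNEL);
	if (!req)
		return NULL;

	req->num_indir_entries = num_entries;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	return req;
}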

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* RX discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* RX bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* TX errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* TX bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* TX error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
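
/* Illustrative only: req_stats in mana_query_gf_stat_req is a bitmask of the
 * STATISTICS_FLAGS_* values above. The reported_stats field in the response
 * presumably indicates which of the requested counters were actually filled
 * in, so callers should check it before trusting individual fields.
 */
static inline void
mana_example_request_hc_byte_stats(struct mana_query_gf_stat_req *req)
{
	req->req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
			 STATISTICS_FLAGS_HC_TX_BYTES;
}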

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev(struct mana_context *ac,
					   u32 port_index,
					   netdevice_tracker *tracker);
#endif /* _MANA_H */