/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>
#include <net/net_shaper.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

/* This max value for RX buffers is derived from __alloc_page()'s max page
 * allocation calculation. It allows a maximum of 2^(MAX_ORDER - 1) pages.
 * RX buffer sizes beyond this value are rejected by the __alloc_page() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE 8192
#define DEF_RX_BUFFERS_PER_QUEUE 1024
#define MIN_RX_BUFFERS_PER_QUEUE 128

/* This max value for TX buffers is derived as the maximum number of
 * allocatable pages supported on host per guest through testing. TX buffer
 * sizes beyond this value are rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE 16384
#define DEF_TX_BUFFERS_PER_QUEUE 256
#define MIN_TX_BUFFERS_PER_QUEUE 128

#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

#define MANA_RX_FRAG_ALIGNMENT 64

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};
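/* Editorial sketch (not part of the upstream driver): the syncp member above
 * is the standard u64_stats_sync sequence counter, so a reader must retry
 * until it observes a consistent 64-bit snapshot. Assumes
 * <linux/u64_stats_sync.h> is reachable through the includes above; the
 * helper name is hypothetical.
 */
static inline void mana_stats_rx_read_example(struct mana_stats_rx *stats,
					      u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->packets;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}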
struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3;  /* 802.1Q */
	u32 dei : 1;  /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
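/* Editorial sketch (hypothetical helper, not the driver's TX path): how the
 * short-form OOB above could be populated for an outer-IPv4 TCP packet with
 * TCP checksum offload. The long-form l_oob is only consulted when pkt_fmt
 * is MANA_LONG_PKT_FMT. Assumes memset() is available via <linux/string.h>.
 */
static inline void mana_tx_short_oob_fill_example(struct mana_tx_oob *oob,
						  u32 vsq_frame, u32 vp_offset,
						  u32 transport_off)
{
	memset(oob, 0, sizeof(*oob));
	oob->s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
	oob->s_oob.is_outer_ipv4 = 1;
	oob->s_oob.comp_tcp_csum = 1;		/* HW computes the TCP csum */
	oob->s_oob.trans_off = transport_off;	/* transport header offset */
	oob->s_oob.vsq_frame = vsq_frame;
	oob->s_oob.short_vp_offset = vp_offset;	/* must fit in 8 bits */
}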
enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID = 0,
	CQE_RX_OKAY = 1,
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_OKAY = 32,
	CQE_TX_SA_DROP = 33,
	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */
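/* Editorial sketch (hypothetical helper): minimal parsing of the RX
 * completion OOB above. A real handler also covers CQE_RX_COALESCED_4,
 * CQE_RX_TRUNCATED, the VLAN tag and the checksum-result bits.
 */
static inline int mana_rx_cqe_parse_example(const struct mana_rxcomp_oob *oob,
					    u32 *pkt_len, u32 *pkt_hash,
					    bool *is_l4_hash)
{
	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
		return -1;	/* caller dispatches the other CQE types */

	/* A CQE_RX_OKAY completion carries a single per-packet info entry */
	*pkt_len = oob->ppi[0].pkt_len;
	*pkt_hash = oob->ppi[0].pkt_hash;
	*is_l4_hash = (oob->rx_hashtype & MANA_HASH_L4) != 0;
	return 0;
}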
struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify that each CQE comes to the right CQ) */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer into which the CQ handler can copy the CQEs. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not the gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;
	u32 frag_count;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;
	struct dentry *mana_rx_debugfs;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
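/* Editorial sketch: rx_oobs[] is a flexible array counted by num_rx_buf, so
 * a mana_rxq must be sized with struct_size(); a plain sizeof(struct mana_rxq)
 * would omit the per-buffer OOB entries. This mirrors the common kernel
 * pattern; kzalloc() and struct_size() come from <linux/slab.h> and
 * <linux/overflow.h>, assumed reachable here.
 */
static inline struct mana_rxq *mana_rxq_alloc_example(u32 num_rx_buf)
{
	struct mana_rxq *rxq;

	rxq = kzalloc(struct_size(rxq, rx_oobs, num_rx_buf), GFP_KERNEL);
	if (rxq)
		rxq->num_rx_buf = num_rx_buf; /* keeps __counted_by() consistent */

	return rxq;
}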
struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;

	struct dentry *mana_tx_debugfs;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_ethtool_phy_stats {
	/* Drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per-TC traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per-TC pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;
	u8 bm_hostmode;

	struct mana_eq *eqs;
	struct dentry *mana_eqs_debugfs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer arrays */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;
	u32 rxbpre_frag_count;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	unsigned int rx_queue_size;
	unsigned int tx_queue_size;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	/* Net shaper handle */
	struct net_shaper_handle handle;

	u16 port_idx;
	/* Currently configured speed (mbps) */
	u32 speed;
	/* Maximum speed supported by the SKU (mbps) */
	u32 max_speed;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;

	struct mana_ethtool_phy_stats phy_stats;

	/* Debugfs */
	struct dentry *mana_port_debugfs;
};
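/* Editorial sketch (hypothetical helper): indir_table above holds RX queue
 * indexes, so a uniform default simply spreads the MANA_INDIRECT_TABLE_*
 * entries round-robin across the active queues.
 */
static inline void mana_indir_table_fill_example(u32 *indir_table,
						 u32 indir_table_sz,
						 unsigned int num_queues)
{
	u32 i;

	for (i = 0; i < indir_table_sz; i++)
		indir_table[i] = i % num_queues;
}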
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

int mana_rdma_probe(struct gdma_dev *gd);
void mana_rdma_remove(struct gdma_dev *gd);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
int mana_query_link_cfg(struct mana_port_context *apc);
int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
		      int enable_clamping);
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;

/* A CQ can be created not associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
	MANA_QUERY_LINK_CONFIG	= 0x2000A,
	MANA_SET_BW_CLAMP	= 0x2000B,
	MANA_QUERY_PHY_STAT	= 0x2000C,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Link Configuration */
struct mana_query_link_config_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
}; /* HW DATA */

struct mana_query_link_config_resp {
	struct gdma_resp_hdr hdr;
	u32 qos_speed_mbps;
	u8 qos_unconfigured;
	u8 reserved1[3];
	u32 link_speed_mbps;
	u8 reserved2[4];
}; /* HW DATA */

/* Set Bandwidth Clamp */
struct mana_set_bw_clamp_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	enum TRI_STATE enable_clamping;
	u32 link_speed_mbps;
}; /* HW DATA */

struct mana_set_bw_clamp_resp {
	struct gdma_resp_hdr hdr;
	u8 qos_unconfigured;
	u8 reserved[7];
}; /* HW DATA */
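/* Editorial sketch (hypothetical helper): the request/response pattern the
 * message structs above are designed for, shown for MANA_QUERY_LINK_CONFIG.
 * mana_gd_init_req_hdr() and mana_gd_send_request() are declared in gdma.h;
 * obtaining the gdma_context and the full error handling are simplified.
 */
static inline int mana_query_link_cfg_example(struct gdma_context *gc,
					      mana_handle_t vport,
					      u32 *link_speed_mbps)
{
	struct mana_query_link_config_resp resp = {};
	struct mana_query_link_config_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
			     sizeof(req), sizeof(resp));
	req.vport = vport;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err)
		return err;
	if (resp.hdr.status)
		return -EPROTO;

	*link_speed_mbps = resp.link_speed_mbps;
	return 0;
}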
/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
	u8 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Query phy stats */
struct mana_query_phy_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_phy_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;

	/* Aggregate drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per-TC (traffic class) traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per-TC (traffic class) pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
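/* Editorial sketch (hypothetical helper): the steering request above ends in
 * a flexible array, so its wire length must be computed with struct_size()
 * and indir_tab_offset must record where indir_tab[] sits in the message.
 * Assumes kzalloc()/offsetof() are available via the usual kernel headers.
 */
static inline struct mana_cfg_rx_steer_req_v2 *
mana_rx_steer_req_alloc_example(u16 num_indir_entries, u32 *req_buf_size)
{
	struct mana_cfg_rx_steer_req_v2 *req;

	*req_buf_size = struct_size(req, indir_tab, num_indir_entries);
	req = kzalloc(*req_buf_size, GFP_KERNEL);
	if (!req)
		return NULL;

	req->num_indir_entries = num_indir_entries;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	return req;
}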
/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* RX discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* RX bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* TX errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* TX bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* TX error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
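/* Editorial sketch: req_stats in mana_query_gf_stat_req is a bitmask built
 * from the STATISTICS_FLAGS_* values above, and reported_stats in the
 * response is read here as the subset the device actually filled. The
 * particular mask below (byte and unicast counters in both directions) is
 * only an illustration.
 */
static inline u64 mana_gf_stats_mask_example(void)
{
	return STATISTICS_FLAGS_HC_RX_BYTES |
	       STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
	       STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
	       STATISTICS_FLAGS_HC_TX_BYTES |
	       STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
	       STATISTICS_FLAGS_HC_TX_UCAST_BYTES;
}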
#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev(struct mana_context *ac,
					   u32 port_index,
					   netdevice_tracker *tracker);
#endif /* _MANA_H */