/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE	((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power
 * of 2.
 */
#define MANA_INDIRECT_TABLE_MAX_SIZE	512
#define MANA_INDIRECT_TABLE_DEF_SIZE	64

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE	40

#define COMP_ENTRY_SIZE		64

#define RX_BUFFERS_PER_QUEUE	512

#define MAX_SEND_BUFFERS_PER_QUEUE	256

#define EQ_SIZE			(8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE	3

#define MAX_PORTS_IN_MANA_DEV	256

/* Update these counts whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT	5
#define MANA_STATS_TX_COUNT	11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};
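
/* Illustrative sketch (not part of this header): the per-queue stats above
 * are updated under u64_stats_sync so that 64-bit counters read coherently
 * on 32-bit hosts.  "txq" and "skb" are assumed locals.
 *
 *	u64_stats_update_begin(&txq->stats.syncp);
 *	txq->stats.packets++;
 *	txq->stats.bytes += skb->len;
 *	u64_stats_update_end(&txq->stats.syncp);
 *
 * A reader loops until it observes a consistent snapshot:
 *
 *	do {
 *		start = u64_stats_fetch_begin(&txq->stats.syncp);
 *		packets = txq->stats.packets;
 *		bytes = txq->stats.bytes;
 *	} while (u64_stats_fetch_retry(&txq->stats.syncp, start));
 */
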
struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag	: 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset	: 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION	1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
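
/* Illustrative sketch (not part of this header): rx_hashtype above carries
 * the NDIS hash type bits, so the L3/L4 masks map directly to the kernel's
 * packet hash levels.  "cqe" and "skb" are assumed locals.
 *
 *	if (cqe->rx_hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, cqe->ppi[0].pkt_hash, PKT_HASH_TYPE_L4);
 *	else if (cqe->rx_hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, cqe->ppi[0].pkt_hash, PKT_HASH_TYPE_L3);
 *	else
 *		skb_set_hash(skb, cqe->ppi[0].pkt_hash, PKT_HASH_TYPE_NONE);
 */
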
struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER	512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ) */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer sent as part of the work request */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			 + ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
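
/* Illustrative sketch (not part of this header): one plausible way the RX
 * buffer sizing fields in struct mana_rxq below relate to the MTU,
 * mirroring the macros above.  MANA_XDP_MTU_MAX exists so that the frame,
 * its headroom and the skb_shared_info tail all fit in one page when XDP
 * is attached.  "mtu" and "is_xdp" are assumed locals.
 *
 *	datasize   = mtu + ETH_HLEN;
 *	headroom   = is_xdp ? XDP_PACKET_HEADROOM : 0;
 *	alloc_size = mtu + MANA_RXBUF_PAD + headroom;
 */
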
struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reuse */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
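
/* Illustrative sketch (not part of this header): because rx_oobs[] is a
 * flexible array counted by num_rx_buf, a queue is allocated in one shot
 * with struct_size(), which also guards the size math against overflow.
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
 *		      GFP_KERNEL);
 *	if (!rxq)
 *		return NULL;
 *	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
 */
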
struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject Handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers */
	struct mana_rxq **rxqs;

	/* Pre-allocated rx buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};
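
/* Illustrative sketch (not part of this header): indir_table above holds
 * queue indexes; a default RSS spread fills it round-robin before
 * mana_config_rss() pushes it and the Toeplitz key to the HW.  "apc" and
 * "i" are assumed locals.
 *
 *	for (i = 0; i < apc->indir_table_sz; i++)
 *		apc->indir_table[i] =
 *			ethtool_rxfh_indir_default(i, apc->num_queues);
 */
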
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ	0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC Driver Capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */
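
/* Illustrative sketch (not part of this header): the request/response
 * pattern shared by the commands above, using the GDMA helpers declared
 * in gdma.h.  "gc" is an assumed struct gdma_context pointer; error
 * handling is abbreviated.
 *
 *	struct mana_query_device_cfg_resp resp = {};
 *	struct mana_query_device_cfg_req req = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 *			     sizeof(req), sizeof(resp));
 *	req.proto_major_ver = MANA_MAJOR_VERSION;
 *	req.proto_minor_ver = MANA_MINOR_VERSION;
 *	req.proto_micro_ver = MANA_MICRO_VERSION;
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
 *				   &resp);
 *	if (err || resp.hdr.status)
 *		return err ? err : -EPROTO;
 */
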
/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
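
/* Illustrative sketch (not part of this header): the RX steering request
 * above ends in a flexible indirection table, so the request buffer is
 * sized with struct_size() and the table copied in after the fixed part.
 * "num_queues" is an assumed local.
 *
 *	req_buf_size = struct_size(req, indir_tab, num_queues);
 *	req = kzalloc(req_buf_size, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->num_indir_entries = num_queues;
 *	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
 *					 indir_tab);
 */
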
/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* Rx discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS		\
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT			\
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000

#define MANA_MAX_NUM_QUEUES		64

#define MANA_SHORT_VPORT_OFFSET_MAX	((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac,
					       u32 port_index);
#endif /* _MANA_H */