/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* Number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define ADAPTER_MTU_SIZE 1500
#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	struct u64_stats_sync syncp;
};
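
/* Illustrative sketch only (not part of the driver API): the syncp member of
 * mana_stats_rx/mana_stats_tx is the standard <linux/u64_stats_sync.h>
 * sequence counter that lets 32-bit readers observe consistent 64-bit
 * counters. A hypothetical producer/reader pair would look roughly like the
 * helpers below; the mana_example_* names are made up for illustration and
 * assume u64_stats_sync.h is reachable via the existing includes.
 */
static inline void mana_example_count_rx(struct mana_stats_rx *stats, u32 len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static inline void mana_example_read_rx(struct mana_stats_rx *stats,
					u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->packets;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}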

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3;  /* 802.1Q */
	u32 dei : 1;  /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID	= 0,
	CQE_RX_OKAY	= 1,
	CQE_RX_COALESCED_4	= 2,
	CQE_RX_OBJECT_FENCE	= 3,
	CQE_RX_TRUNCATED	= 4,

	CQE_TX_OKAY	= 32,
	CQE_TX_SA_DROP	= 33,
	CQE_TX_MTU_DROP	= 34,
	CQE_TX_INVALID_OOB	= 35,
	CQE_TX_INVALID_ETH_TYPE	= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED	= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED	= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id	: 12;
	u32 rx_vlantag_present	: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1	: 1;
	u32 rx_hashtype	: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail	: 1;
	u32 rx_tcp_csum_succeed	: 1;
	u32 rx_tcp_csum_fail	: 1;
	u32 rx_udp_csum_succeed	: 1;
	u32 rx_udp_csum_fail	: 1;
	u32 reserved2	: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
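
/* Illustrative sketch only (not part of the driver API): rx_hashtype in
 * mana_rxcomp_oob carries the NDIS hash-type bits defined above, so a receive
 * path could map it to the kernel's packet hash levels roughly as below before
 * calling skb_set_hash(). The helper name is made up; enum pkt_hash_types is
 * assumed to be reachable via <linux/skbuff.h> through the existing includes.
 */
static inline enum pkt_hash_types mana_example_hash_level(u32 rx_hashtype)
{
	if (rx_hashtype & MANA_HASH_L4)
		return PKT_HASH_TYPE_L4;	/* TCP/UDP over IPv4/IPv6 */
	if (rx_hashtype & MANA_HASH_L3)
		return PKT_HASH_TYPE_L3;	/* IP header only */
	return PKT_HASH_TYPE_NONE;
}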

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	dma_addr_t buf_dma_addr;

	/* SGL of the buffer to be posted, as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	struct page *xdp_save_page;
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};
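
/* Illustrative sketch only (not part of the driver API): rx_oobs[] is a
 * flexible array member, so a mana_rxq for n receive buffers is allocated as
 * one block of the size computed below. Real code would preferably use the
 * overflow-checking struct_size() helper from <linux/overflow.h>; the helper
 * name here is made up for illustration.
 */
static inline size_t mana_example_rxq_size(u32 num_rx_buf)
{
	return sizeof(struct mana_rxq) +
	       (size_t)num_rx_buf * sizeof(struct mana_recv_buf_oob);
}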

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq **rxqs;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */
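
/* Illustrative sketch only (not part of the driver API): RSS steering walks
 * the indirection tables in mana_port_context. MANA_INDIRECT_TABLE_SIZE is a
 * power of 2, so the table slot is just the low bits of the packet hash;
 * indir_table[] holds the queue index and rxobj_table[] the matching RX
 * object handle. The helper name below is made up for illustration.
 */
static inline mana_handle_t
mana_example_rxobj_for_hash(struct mana_port_context *apc, u32 pkt_hash)
{
	u32 slot = pkt_hash & MANA_INDIRECT_TABLE_MASK;

	return apc->rxobj_table[slot];
}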

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
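
/* Illustrative sketch only (not part of the driver API): mana_cfg_vport()
 * pairs with mana_uncfg_vport(), and each WQ object created by
 * mana_create_wq_obj() is released with mana_destroy_wq_obj() using the same
 * queue type. One plausible teardown ordering for a TX queue pair is shown
 * below; the helper name is made up, and GDMA_SQ is assumed to be the send
 * queue type defined in gdma.h.
 */
static inline void mana_example_teardown_tx_qp(struct mana_port_context *apc,
					       struct mana_tx_qp *tx_qp)
{
	if (tx_qp->tx_object != INVALID_MANA_HANDLE) {
		mana_destroy_wq_obj(apc, GDMA_SQ, tx_qp->tx_object);
		tx_qp->tx_object = INVALID_MANA_HANDLE;
	}

	mana_uncfg_vport(apc);
}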

#endif /* _MANA_H */