/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_COM
#define ENA_COM

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/netdevice.h>

#include "ena_common_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
#include "ena_regs_defs.h"

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))

#define ENA_MAX_HANDLERS 256

#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48

/* Unit in usec */
#define ENA_REG_READ_TIMEOUT 200000

/* Byte sizes of the admin submission/completion/async-event rings for a
 * given ring depth (number of entries).
 */
#define ADMIN_SQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aq_entry))
#define ADMIN_CQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth)	((depth) * sizeof(struct ena_admin_aenq_entry))

#define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512

/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */

#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 20
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1

#define ENA_HASH_KEY_SIZE 40

#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF

#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1

/* Driver-side default LLQ (Low Latency Queue) parameters; applied to the
 * device via ena_com_config_dev_mode().
 */
struct ena_llq_configurations {
	enum ena_admin_llq_header_location llq_header_location;
	enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
	enum ena_admin_llq_stride_ctrl llq_stride_ctrl;
	/* (sic: "decs") number of descriptors placed before the packet header */
	enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
	u16 llq_ring_entry_size_value;
};

/* Direction of an IO queue */
enum queue_direction {
	ENA_COM_IO_QUEUE_DIRECTION_TX,
	ENA_COM_IO_QUEUE_DIRECTION_RX
};

struct ena_com_buf {
	dma_addr_t paddr; /**< Buffer physical address */
	u16 len; /**< Buffer length in bytes */
};

/* Per-buffer Rx completion info: received length and the request id that
 * identifies the posted buffer it completes.
 */
struct ena_com_rx_buf_info {
	u16 len;
	u16 req_id;
};

/* Addresses of a descriptor ring: device-side (LLQ), kernel virtual and DMA */
struct ena_com_io_desc_addr {
	u8 __iomem *pbuf_dev_addr; /* LLQ address */
	u8 *virt_addr;
	dma_addr_t phys_addr;
};

/* Tx packet metadata (mss and L3/L4 header lengths/offsets) */
struct ena_com_tx_meta {
	u16 mss;
	u16 l3_hdr_len;
	u16 l3_hdr_offset;
	u16 l4_hdr_len; /* In words */
};

/* LLQ parameters in use, negotiated between the driver defaults and the
 * device's LLQ feature descriptor.
 */
struct ena_com_llq_info {
	u16 header_location_ctrl;
	u16 desc_stride_ctrl;
	u16 desc_list_entry_size_ctrl;
	u16 desc_list_entry_size;
	u16 descs_num_before_header;
	u16 descs_per_entry;
	u16 max_entries_in_tx_burst;
	bool disable_meta_caching;
};

/* IO completion queue state */
struct ena_com_io_cq {
	struct ena_com_io_desc_addr cdesc_addr;

	/* Interrupt unmask register */
	u32 __iomem *unmask_reg;

	/* numa configuration register (for TPH) */
	u32 __iomem *numa_node_cfg_reg;

	/* The value to write to the above register to unmask
	 * the interrupt of this queue
	 */
	u32 msix_vector ____cacheline_aligned;

	enum queue_direction direction;

	/* holds the number of cdesc of the current packet */
	u16 cur_rx_pkt_cdesc_count;
	/* save the first cdesc idx of the current packet */
	u16 cur_rx_pkt_cdesc_start_idx;

	u16 q_depth;
	/* Caller qid */
	u16 qid;

	/* Device queue index */
	u16 idx;
	u16 head;
	u8 phase;
	u8 cdesc_entry_size_in_bytes;

} ____cacheline_aligned;

/* Pool of bounce buffers used when copying Tx descriptors/headers into LLQ
 * memory.
 */
struct ena_com_io_bounce_buffer_control {
	u8 *base_buffer;
	u16 next_to_use;
	u16 buffer_size;
	u16 buffers_num;  /* Must be a power of 2 */
};

/* This struct is to keep
 * tracking the current location of the next llq entry */
struct ena_com_llq_pkt_ctrl {
	u8 *curr_bounce_buf;
	u16 idx;
	u16 descs_left_in_line;
};

/* IO submission queue state */
struct ena_com_io_sq {
	struct ena_com_io_desc_addr desc_addr;

	/* Doorbell register for this queue */
	u32 __iomem *db_addr;

	enum queue_direction direction;
	enum ena_admin_placement_policy_type mem_queue_type;

	bool disable_meta_caching;

	u32 msix_vector;
	struct ena_com_tx_meta cached_tx_meta;
	struct ena_com_llq_info llq_info;
	struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
	struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;

	u16 q_depth;
	u16 qid;

	u16 idx;
	u16 tail;
	u16 next_to_comp;
	u16 llq_last_copy_tail;
	u32 tx_max_header_size;
	u8 phase;
	u8 desc_entry_size;
	u8 dma_addr_bits;
	u16 entries_in_tx_burst_left;
} ____cacheline_aligned;

/* Admin completion queue ring */
struct ena_com_admin_cq {
	struct ena_admin_acq_entry *entries;
	dma_addr_t dma_addr;

	u16 head;
	u8 phase;
};

/* Admin submission queue ring */
struct ena_com_admin_sq {
	struct ena_admin_aq_entry *entries;
	dma_addr_t dma_addr;

	u32 __iomem *db_addr;

	u16 head;
	u16 tail;
	u8 phase;

};

/* Admin queue statistics kept by the driver */
struct ena_com_stats_admin {
	u64 aborted_cmd;
	u64 submitted_cmd;
	u64 completed_cmd;
	u64 out_of_space;
	u64 no_completion;
};

/* PHC request statistics kept by the driver */
struct ena_com_stats_phc {
	u64 phc_cnt;
	u64 phc_exp;
	u64 phc_skp;
	u64 phc_err_dv;
	u64 phc_err_ts;
};

struct ena_com_admin_queue {
	void *q_dmadev;
	struct ena_com_dev *ena_dev;
	spinlock_t q_lock; /* spinlock for the admin queue */

	struct ena_comp_ctx *comp_ctx;
	u32 completion_timeout;
	u16 q_depth;
	struct ena_com_admin_cq cq;
	struct ena_com_admin_sq sq;

	/* Indicate if the admin queue should poll for completion */
	bool polling;

	u16 curr_cmd_id;

	/* Indicate that the ena was initialized and can
	 * process new admin commands
	 */
	bool running_state;

	/* Count the number of outstanding admin commands */
	atomic_t outstanding_cmds;

	struct ena_com_stats_admin stats;
};

struct ena_aenq_handlers;

/* Asynchronous event notification queue */
struct ena_com_aenq {
	u16 head;
	u8 phase;
	struct ena_admin_aenq_entry *entries;
	dma_addr_t dma_addr;
	u16 q_depth;
	struct ena_aenq_handlers *aenq_handlers;
};

/* State for the indirect ("readless") register read mechanism */
struct ena_com_mmio_read {
	struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
	dma_addr_t read_resp_dma_addr;
	u32 reg_read_to; /* in us */
	u16 seq_num;
	bool readless_supported;
	/* spin lock to ensure a single outstanding read */
	spinlock_t lock;
};

/* PTP hardware clock (PHC) MMIO read data info */
struct ena_com_phc_info {
	/* Internal PHC statistics */
	struct ena_com_stats_phc stats;

	/* PHC shared memory - virtual address */
	struct ena_admin_phc_resp *virt_addr;

	/* System time of last PHC request */
	ktime_t system_time;

	/* Spin lock to ensure a single outstanding PHC read */
	spinlock_t lock;

	/* PHC doorbell address as an offset to PCIe MMIO REG BAR */
	u32 doorbell_offset;

	/* Shared memory read expire timeout (usec)
	 * Max time for valid PHC retrieval, passing this threshold will fail
	 * the get time request and block new PHC requests for block_timeout_usec
	 * in order to prevent floods on busy device
	 */
	u32 expire_timeout_usec;

	/* Shared memory read abort timeout (usec)
	 * PHC requests block period, blocking starts once PHC request expired
	 * in order to prevent floods on busy device,
	 * any PHC requests during block period will be skipped
	 */
	u32 block_timeout_usec;

	/* PHC shared memory - physical address */
	dma_addr_t phys_addr;

	/* Request id sent to the device */
	u16 req_id;

	/* True if PHC is active in the device */
	bool active;
};

/* RSS configuration state (indirection table, hash key, hash control) */
struct ena_rss {
	/* Indirect table */
	u16 *host_rss_ind_tbl;
	struct ena_admin_rss_ind_table_entry *rss_ind_tbl;
	dma_addr_t rss_ind_tbl_dma_addr;
	u16 tbl_log_size;

	/* Hash key */
	enum ena_admin_hash_functions hash_func;
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	dma_addr_t hash_key_dma_addr;
	u32 hash_init_val;

	/* Flow Control */
	struct ena_admin_feature_rss_hash_control *hash_ctrl;
	dma_addr_t hash_ctrl_dma_addr;

};

/* DMA buffer used to retrieve customer metrics from the device */
struct ena_customer_metrics {
	/* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
	 * and ena_admin_customer_metrics_id
	 */
	u64 supported_metrics;
	dma_addr_t buffer_dma_addr;
	void *buffer_virt_addr;
	u32 buffer_len;
};

/* Host attributes (debug area and host info) shared with the device */
struct ena_host_attribute {
	/* Debug area */
	u8 *debug_area_virt_addr;
	dma_addr_t debug_area_dma_addr;
	u32 debug_area_size;

	/* Host information */
	struct ena_admin_host_info *host_info;
	dma_addr_t host_info_dma_addr;
};

/* Each ena_dev is a PCI function.
 */
struct ena_com_dev {
	struct ena_com_admin_queue admin_queue;
	struct ena_com_aenq aenq;
	struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES];
	struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES];
	u8 __iomem *reg_bar;
	void __iomem *mem_bar;
	void *dmadev;
	struct net_device *net_device;

	enum ena_admin_placement_policy_type tx_mem_queue_type;
	u32 tx_max_header_size;
	u16 stats_func; /* Selected function for extended statistic dump */
	u16 stats_queue; /* Selected queue for extended statistic dump */

	u32 ena_min_poll_delay_us;

	struct ena_com_mmio_read mmio_read;
	struct ena_com_phc_info phc;

	struct ena_rss rss;
	u32 supported_features;
	u32 capabilities;
	u32 dma_addr_bits;

	struct ena_host_attribute host_attr;
	bool adaptive_coalescing;
	u16 intr_delay_resolution;

	/* interrupt moderation intervals are in usec divided by
	 * intr_delay_resolution, which is supplied by the device.
	 */
	u32 intr_moder_tx_interval;
	u32 intr_moder_rx_interval;

	struct ena_intr_moder_entry *intr_moder_tbl;

	struct ena_com_llq_info llq_info;

	struct ena_customer_metrics customer_metrics;
};

/* Aggregated device features, filled by ena_com_get_dev_attr_feat() */
struct ena_com_dev_get_features_ctx {
	struct ena_admin_queue_feature_desc max_queues;
	struct ena_admin_queue_ext_feature_desc max_queue_ext;
	struct ena_admin_device_attr_feature_desc dev_attr;
	struct ena_admin_feature_aenq_desc aenq;
	struct ena_admin_feature_offload_desc offload;
	struct ena_admin_ena_hw_hints hw_hints;
	struct ena_admin_feature_llq_desc llq;
};

/* Parameters for creating an IO queue via ena_com_create_io_queue() */
struct ena_com_create_io_ctx {
	enum ena_admin_placement_policy_type mem_queue_type;
	enum queue_direction direction;
	int numa_node;
	u32 msix_vector;
	u16 queue_size;
	u16 qid;
};

/* AENQ event callback: @data is driver context, @aenq_e the raw event entry */
typedef void (*ena_aenq_handler)(void *data,
				 struct ena_admin_aenq_entry *aenq_e);

/* Holds aenq handlers.
Indexed by AENQ event group */ 416 struct ena_aenq_handlers { 417 ena_aenq_handler handlers[ENA_MAX_HANDLERS]; 418 ena_aenq_handler unimplemented_handler; 419 }; 420 421 /*****************************************************************************/ 422 /*****************************************************************************/ 423 424 /* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism 425 * @ena_dev: ENA communication layer struct 426 * 427 * Initialize the register read mechanism. 428 * 429 * @note: This method must be the first stage in the initialization sequence. 430 * 431 * @return - 0 on success, negative value on failure. 432 */ 433 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); 434 435 /* ena_com_phc_init - Allocate and initialize PHC feature 436 * @ena_dev: ENA communication layer struct 437 * @note: This method assumes PHC is supported by the device 438 * @return - 0 on success, negative value on failure 439 */ 440 int ena_com_phc_init(struct ena_com_dev *ena_dev); 441 442 /* ena_com_phc_supported - Return if PHC feature is supported by the device 443 * @ena_dev: ENA communication layer struct 444 * @note: This method must be called after getting supported features 445 * @return - supported or not 446 */ 447 bool ena_com_phc_supported(struct ena_com_dev *ena_dev); 448 449 /* ena_com_phc_config - Configure PHC feature 450 * @ena_dev: ENA communication layer struct 451 * Configure PHC feature in driver and device 452 * @note: This method assumes PHC is supported by the device 453 * @return - 0 on success, negative value on failure 454 */ 455 int ena_com_phc_config(struct ena_com_dev *ena_dev); 456 457 /* ena_com_phc_destroy - Destroy PHC feature 458 * @ena_dev: ENA communication layer struct 459 */ 460 void ena_com_phc_destroy(struct ena_com_dev *ena_dev); 461 462 /* ena_com_phc_get_timestamp - Retrieve PHC timestamp 463 * @ena_dev: ENA communication layer struct 464 * @timestamp: Retrieved PHC timestamp 465 
* @return - 0 on success, negative value on failure 466 */ 467 int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp); 468 469 /* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism 470 * @ena_dev: ENA communication layer struct 471 * @readless_supported: readless mode (enable/disable) 472 */ 473 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, 474 bool readless_supported); 475 476 /* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return 477 * value physical address. 478 * @ena_dev: ENA communication layer struct 479 */ 480 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev); 481 482 /* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism 483 * @ena_dev: ENA communication layer struct 484 */ 485 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); 486 487 /* ena_com_admin_init - Init the admin and the async queues 488 * @ena_dev: ENA communication layer struct 489 * @aenq_handlers: Those handlers to be called upon event. 490 * 491 * Initialize the admin submission and completion queues. 492 * Initialize the asynchronous events notification queues. 493 * 494 * @return - 0 on success, negative value on failure. 495 */ 496 int ena_com_admin_init(struct ena_com_dev *ena_dev, 497 struct ena_aenq_handlers *aenq_handlers); 498 499 /* ena_com_admin_destroy - Destroy the admin and the async events queues. 500 * @ena_dev: ENA communication layer struct 501 * 502 * @note: Before calling this method, the caller must validate that the device 503 * won't send any additional admin completions/aenq. 504 * To achieve that, a FLR is recommended. 505 */ 506 void ena_com_admin_destroy(struct ena_com_dev *ena_dev); 507 508 /* ena_com_dev_reset - Perform device FLR to the device. 509 * @ena_dev: ENA communication layer struct 510 * @reset_reason: Specify what is the trigger for the reset in case of an error. 
511 * 512 * @return - 0 on success, negative value on failure. 513 */ 514 int ena_com_dev_reset(struct ena_com_dev *ena_dev, 515 enum ena_regs_reset_reason_types reset_reason); 516 517 /* ena_com_create_io_queue - Create io queue. 518 * @ena_dev: ENA communication layer struct 519 * @ctx - create context structure 520 * 521 * Create the submission and the completion queues. 522 * 523 * @return - 0 on success, negative value on failure. 524 */ 525 int ena_com_create_io_queue(struct ena_com_dev *ena_dev, 526 struct ena_com_create_io_ctx *ctx); 527 528 /* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid. 529 * @ena_dev: ENA communication layer struct 530 * @qid - the caller virtual queue id. 531 */ 532 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid); 533 534 /* ena_com_get_io_handlers - Return the io queue handlers 535 * @ena_dev: ENA communication layer struct 536 * @qid - the caller virtual queue id. 537 * @io_sq - IO submission queue handler 538 * @io_cq - IO completion queue handler. 539 * 540 * @return - 0 on success, negative value on failure. 541 */ 542 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 543 struct ena_com_io_sq **io_sq, 544 struct ena_com_io_cq **io_cq); 545 546 /* ena_com_admin_aenq_enable - ENAble asynchronous event notifications 547 * @ena_dev: ENA communication layer struct 548 * 549 * After this method, aenq event can be received via AENQ. 
550 */ 551 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev); 552 553 /* ena_com_set_admin_running_state - Set the state of the admin queue 554 * @ena_dev: ENA communication layer struct 555 * 556 * Change the state of the admin queue (enable/disable) 557 */ 558 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state); 559 560 /* ena_com_get_admin_running_state - Get the admin queue state 561 * @ena_dev: ENA communication layer struct 562 * 563 * Retrieve the state of the admin queue (enable/disable) 564 * 565 * @return - current polling mode (enable/disable) 566 */ 567 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev); 568 569 /* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode 570 * @ena_dev: ENA communication layer struct 571 * @polling: ENAble/Disable polling mode 572 * 573 * Set the admin completion mode. 574 */ 575 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling); 576 577 /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler 578 * @ena_dev: ENA communication layer struct 579 * 580 * This method goes over the admin completion queue and wakes up all the pending 581 * threads that wait on the commands wait event. 582 * 583 * @note: Should be called after MSI-X interrupt. 584 */ 585 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); 586 587 /* ena_com_aenq_intr_handler - AENQ interrupt handler 588 * @ena_dev: ENA communication layer struct 589 * 590 * This method goes over the async event notification queue and calls the proper 591 * aenq handler. 592 */ 593 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data); 594 595 /* ena_com_abort_admin_commands - Abort all the outstanding admin commands. 596 * @ena_dev: ENA communication layer struct 597 * 598 * This method aborts all the outstanding admin commands. 
599 * The caller should then call ena_com_wait_for_abort_completion to make sure 600 * all the commands were completed. 601 */ 602 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); 603 604 /* ena_com_wait_for_abort_completion - Wait for admin commands abort. 605 * @ena_dev: ENA communication layer struct 606 * 607 * This method waits until all the outstanding admin commands are completed. 608 */ 609 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); 610 611 /* ena_com_validate_version - Validate the device parameters 612 * @ena_dev: ENA communication layer struct 613 * 614 * This method verifies the device parameters are the same as the saved 615 * parameters in ena_dev. 616 * This method is useful after device reset, to validate the device mac address 617 * and the device offloads are the same as before the reset. 618 * 619 * @return - 0 on success negative value otherwise. 620 */ 621 int ena_com_validate_version(struct ena_com_dev *ena_dev); 622 623 /* ena_com_get_link_params - Retrieve physical link parameters. 624 * @ena_dev: ENA communication layer struct 625 * @resp: Link parameters 626 * 627 * Retrieve the physical link parameters, 628 * like speed, auto-negotiation and full duplex support. 629 * 630 * @return - 0 on Success negative value otherwise. 631 */ 632 int ena_com_get_link_params(struct ena_com_dev *ena_dev, 633 struct ena_admin_get_feat_resp *resp); 634 635 /* ena_com_get_dma_width - Retrieve physical dma address width the device 636 * supports. 637 * @ena_dev: ENA communication layer struct 638 * 639 * Retrieve the maximum physical address bits the device can handle. 640 * 641 * @return: > 0 on Success and negative value otherwise. 642 */ 643 int ena_com_get_dma_width(struct ena_com_dev *ena_dev); 644 645 /* ena_com_set_aenq_config - Set aenq groups configurations 646 * @ena_dev: ENA communication layer struct 647 * @groups flag: bit fields flags of enum ena_admin_aenq_group. 
648 * 649 * Configure which aenq event group the driver would like to receive. 650 * 651 * @return: 0 on Success and negative value otherwise. 652 */ 653 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag); 654 655 /* ena_com_get_dev_attr_feat - Get device features 656 * @ena_dev: ENA communication layer struct 657 * @get_feat_ctx: returned context that contain the get features. 658 * 659 * @return: 0 on Success and negative value otherwise. 660 */ 661 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, 662 struct ena_com_dev_get_features_ctx *get_feat_ctx); 663 664 /* ena_com_get_eni_stats - Get extended network interface statistics 665 * @ena_dev: ENA communication layer struct 666 * @stats: stats return value 667 * 668 * @return: 0 on Success and negative value otherwise. 669 */ 670 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, 671 struct ena_admin_eni_stats *stats); 672 673 /* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics 674 * @ena_dev: ENA communication layer struct 675 * @info: ena srd stats and flags 676 * 677 * @return: 0 on Success and negative value otherwise. 678 */ 679 int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev, 680 struct ena_admin_ena_srd_info *info); 681 682 /* ena_com_get_customer_metrics - Get customer metrics for network interface 683 * @ena_dev: ENA communication layer struct 684 * @buffer: buffer for returned customer metrics 685 * @len: size of the buffer 686 * 687 * @return: 0 on Success and negative value otherwise. 688 */ 689 int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len); 690 691 /* ena_com_set_dev_mtu - Configure the device mtu. 692 * @ena_dev: ENA communication layer struct 693 * @mtu: mtu value 694 * 695 * @return: 0 on Success and negative value otherwise. 
 */
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);

/* ena_com_rss_init - Init RSS
 * @ena_dev: ENA communication layer struct
 * @log_size: indirection log size
 *
 * Allocate RSS/RFS resources.
 * The caller then can configure rss using ena_com_set_hash_function,
 * ena_com_set_hash_ctrl and ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);

/* ena_com_rss_destroy - Destroy rss
 * @ena_dev: ENA communication layer struct
 *
 * Free all the RSS/RFS resources.
 */
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);

/* ena_com_get_current_hash_function - Get RSS hash function
 * @ena_dev: ENA communication layer struct
 *
 * Return the current hash function.
 * @return: 0 or one of the ena_admin_hash_functions values.
 */
int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);

/* ena_com_fill_hash_function - Fill RSS hash function
 * @ena_dev: ENA communication layer struct
 * @func: The hash function (Toeplitz or crc)
 * @key: Hash key (for toeplitz hash)
 * @key_len: key length (max length 10 DW)
 * @init_val: initial value for the hash function
 *
 * Fill the ena_dev resources with the desired hash function, hash key, key_len
 * and key initial value (if needed by the hash function).
 * To flush the key into the device the caller should call
 * ena_com_set_hash_function.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val);

/* ena_com_set_hash_function - Flush the hash function and its dependencies to
 * the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the hash function and its dependencies (key, key length and
 * initial value) if needed.
 *
 * @note: Prior to this method the caller should call ena_com_fill_hash_function
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);

/* ena_com_get_hash_function - Retrieve the hash function from the device.
 * @ena_dev: ENA communication layer struct
 * @func: hash function
 *
 * Retrieve the hash function from the device.
 *
 * @note: If the caller called ena_com_fill_hash_function but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func);

/* ena_com_get_hash_key - Retrieve the hash key
 * @ena_dev: ENA communication layer struct
 * @key: hash key
 *
 * Retrieve the hash key.
 *
 * @note: If the caller called ena_com_fill_hash_key but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
/* ena_com_fill_hash_ctrl - Fill RSS hash control
 * @ena_dev: ENA communication layer struct.
 * @proto: The protocol to configure.
 * @hash_fields: bit mask of ena_admin_flow_hash_fields
 *
 * Fill the ena_dev resources with the desired hash control (the ethernet
 * fields that take part of the hash) for a specific protocol.
 * To flush the hash control to the device, the caller should call
 * ena_com_set_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
794 */ 795 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, 796 enum ena_admin_flow_hash_proto proto, 797 u16 hash_fields); 798 799 /* ena_com_set_hash_ctrl - Flush the hash control resources to the device. 800 * @ena_dev: ENA communication layer struct 801 * 802 * Flush the hash control (the ethernet fields that take part of the hash) 803 * 804 * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl. 805 * 806 * @return: 0 on Success and negative value otherwise. 807 */ 808 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev); 809 810 /* ena_com_get_hash_ctrl - Retrieve the hash control from the device. 811 * @ena_dev: ENA communication layer struct 812 * @proto: The protocol to retrieve. 813 * @fields: bit mask of ena_admin_flow_hash_fields. 814 * 815 * Retrieve the hash control from the device. 816 * 817 * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush 818 * it to the device, the new configuration will be lost. 819 * 820 * @return: 0 on Success and negative value otherwise. 821 */ 822 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, 823 enum ena_admin_flow_hash_proto proto, 824 u16 *fields); 825 826 /* ena_com_set_default_hash_ctrl - Set the hash control to a default 827 * configuration. 828 * @ena_dev: ENA communication layer struct 829 * 830 * Fill the ena_dev resources with the default hash control configuration. 831 * To flush the hash control to the device, the caller should call 832 * ena_com_set_hash_ctrl. 833 * 834 * @return: 0 on Success and negative value otherwise. 835 */ 836 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev); 837 838 /* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS 839 * indirection table 840 * @ena_dev: ENA communication layer struct. 841 * @entry_idx - indirection table entry. 842 * @entry_value - redirection value 843 * 844 * Fill a single entry of the RSS indirection table in the ena_dev resources. 
 * To flush the indirection table to the device, the caller should call
 * ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value);

/* ena_com_indirect_table_set - Flush the indirection table to the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the indirection hash control to the device.
 * Prior to this method the caller should call ena_com_indirect_table_fill_entry
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);

/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
 * @ena_dev: ENA communication layer struct
 * @ind_tbl: indirection table
 *
 * Retrieve the RSS indirection table from the device.
 *
 * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);

/* ena_com_allocate_host_info - Allocate host info resources.
 * @ena_dev: ENA communication layer struct
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);

/* ena_com_allocate_debug_area - Allocate debug area.
 * @ena_dev: ENA communication layer struct
 * @debug_area_size - debug area size.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size);

/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
893 * @ena_dev: ENA communication layer struct 894 * 895 * @return: 0 on Success and negative value otherwise. 896 */ 897 int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev); 898 899 /* ena_com_delete_debug_area - Free the debug area resources. 900 * @ena_dev: ENA communication layer struct 901 * 902 * Free the allocated debug area. 903 */ 904 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); 905 906 /* ena_com_delete_host_info - Free the host info resources. 907 * @ena_dev: ENA communication layer struct 908 * 909 * Free the allocated host info. 910 */ 911 void ena_com_delete_host_info(struct ena_com_dev *ena_dev); 912 913 /* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources. 914 * @ena_dev: ENA communication layer struct 915 * 916 * Free the allocated customer metrics area. 917 */ 918 void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev); 919 920 /* ena_com_set_host_attributes - Update the device with the host 921 * attributes (debug area and host info) base address. 922 * @ena_dev: ENA communication layer struct 923 * 924 * @return: 0 on Success and negative value otherwise. 925 */ 926 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev); 927 928 /* ena_com_create_io_cq - Create io completion queue. 929 * @ena_dev: ENA communication layer struct 930 * @io_cq - io completion queue handler 931 932 * Create IO completion queue. 933 * 934 * @return - 0 on success, negative value on failure. 935 */ 936 int ena_com_create_io_cq(struct ena_com_dev *ena_dev, 937 struct ena_com_io_cq *io_cq); 938 939 /* ena_com_destroy_io_cq - Destroy io completion queue. 940 * @ena_dev: ENA communication layer struct 941 * @io_cq - io completion queue handler 942 943 * Destroy IO completion queue. 944 * 945 * @return - 0 on success, negative value on failure. 
946 */ 947 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 948 struct ena_com_io_cq *io_cq); 949 950 /* ena_com_execute_admin_command - Execute admin command 951 * @admin_queue: admin queue. 952 * @cmd: the admin command to execute. 953 * @cmd_size: the command size. 954 * @cmd_completion: command completion return value. 955 * @cmd_comp_size: command completion size. 956 957 * Submit an admin command and then wait until the device returns a 958 * completion. 959 * The completion will be copied into cmd_comp. 960 * 961 * @return - 0 on success, negative value on failure. 962 */ 963 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, 964 struct ena_admin_aq_entry *cmd, 965 size_t cmd_size, 966 struct ena_admin_acq_entry *cmd_comp, 967 size_t cmd_comp_size); 968 969 /* ena_com_init_interrupt_moderation - Init interrupt moderation 970 * @ena_dev: ENA communication layer struct 971 * 972 * @return - 0 on success, negative value on failure. 973 */ 974 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev); 975 976 /* ena_com_interrupt_moderation_supported - Return if interrupt moderation 977 * capability is supported by the device. 978 * 979 * @return - supported or not. 980 */ 981 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev); 982 983 /* ena_com_update_nonadaptive_moderation_interval_tx - Update the 984 * non-adaptive interval in Tx direction. 985 * @ena_dev: ENA communication layer struct 986 * @tx_coalesce_usecs: Interval in usec. 987 * 988 * @return - 0 on success, negative value on failure. 989 */ 990 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, 991 u32 tx_coalesce_usecs); 992 993 /* ena_com_update_nonadaptive_moderation_interval_rx - Update the 994 * non-adaptive interval in Rx direction. 995 * @ena_dev: ENA communication layer struct 996 * @rx_coalesce_usecs: Interval in usec. 997 * 998 * @return - 0 on success, negative value on failure. 
 */
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs);

/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);

/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);

/* ena_com_config_dev_mode - Configure the placement policy of the device.
 * @ena_dev: ENA communication layer struct
 * @llq_features: LLQ feature descriptor, retrieve via
 *		  ena_com_get_dev_attr_feat.
 * @llq_default_config: The default driver LLQ parameters configurations
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_config);

/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
 * @io_sq: IO submit queue struct
 *
 * @return - ena_com_dev struct extracted from io_sq
 */
static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
{
	/* io_sq is stored by value in ena_dev->io_sq_queues[qid], so
	 * container_of() recovers the owning device from the queue pointer.
	 */
	return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
}

/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
 * @io_cq: IO completion queue struct
 *
 * @return - ena_com_dev struct extracted from io_cq
 */
static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
{
	/* io_cq is stored by value in ena_dev->io_cq_queues[qid], so
	 * container_of() recovers the owning device from the queue pointer.
	 */
	return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
}

/* ena_com_get_adaptive_moderation_enabled - Query whether adaptive
 * interrupt coalescing is currently enabled.
 * @ena_dev: ENA communication layer struct
 *
 * @return - true if adaptive moderation is enabled, false otherwise
 */
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
	return ena_dev->adaptive_coalescing;
}

/* ena_com_enable_adaptive_moderation - Enable adaptive interrupt coalescing.
 * @ena_dev: ENA communication layer struct
 */
static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = true;
}

/* ena_com_disable_adaptive_moderation - Disable adaptive interrupt coalescing.
 * @ena_dev: ENA communication layer struct
 */
static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = false;
}

/* ena_com_get_cap - query whether device supports a capability.
 * @ena_dev: ENA communication layer struct
 * @cap_id: enum value representing the capability
 *
 * @return - true if capability is supported or false otherwise
 */
static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
				   enum ena_admin_aq_caps_id cap_id)
{
	/* capabilities is a bitmap indexed by cap_id */
	return !!(ena_dev->capabilities & BIT(cap_id));
}

/* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
 * @ena_dev: ENA communication layer struct
 * @metric_id: enum value representing the customer metric
 *
 * @return - true if customer metric is supported or false otherwise
 */
static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
						       enum ena_admin_customer_metrics_id metric_id)
{
	/* supported_metrics is a bitmap indexed by metric_id */
	return !!(ena_dev->customer_metrics.supported_metrics & BIT(metric_id));
}

/* ena_com_get_customer_metric_count - return the number of supported customer metrics.
 * @ena_dev: ENA communication layer struct
 *
 * @return - the number of supported customer metrics
 */
static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
{
	/* Popcount of the supported-metrics bitmap */
	return hweight64(ena_dev->customer_metrics.supported_metrics);
}

/* ena_com_update_intr_reg - Prepare interrupt register
 * @intr_reg: interrupt register to update.
 * @rx_delay_interval: Rx interval in usecs
 * @tx_delay_interval: Tx interval in usecs
 * @unmask: unmask enable/disable
 *
 * Prepare interrupt update register with the supplied parameters.
 */
static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
					   u32 rx_delay_interval,
					   u32 tx_delay_interval,
					   bool unmask)
{
	/* Pack the Rx delay, Tx delay and unmask flag into their respective
	 * bit fields of the single intr_control word.
	 */
	intr_reg->intr_control = 0;
	intr_reg->intr_control |= rx_delay_interval &
		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;

	intr_reg->intr_control |=
		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;

	if (unmask)
		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}

/* ena_com_get_next_bounce_buffer - Return the next LLQ bounce buffer.
 * @bounce_buf_ctrl: bounce buffer control block
 *
 * Returns the buffer at next_to_use and post-increments the index; the
 * wrap-around uses a (buffers_num - 1) mask, which relies on buffers_num
 * being a power of 2 (documented on
 * struct ena_com_io_bounce_buffer_control). The buffer after the returned
 * one is prefetched for write ahead of the next call.
 *
 * NOTE(review): no locking here - presumably the caller serializes access
 * to a given bounce buffer control block; verify against callers.
 *
 * @return - pointer to the next bounce buffer
 */
static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
{
	u16 size, buffers_num;
	u8 *buf;

	size = bounce_buf_ctrl->buffer_size;
	buffers_num = bounce_buf_ctrl->buffers_num;

	buf = bounce_buf_ctrl->base_buffer +
	      (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;

	/* next_to_use was just advanced, so this prefetches the buffer the
	 * following call will hand out.
	 */
	prefetchw(bounce_buf_ctrl->base_buffer +
		  (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);

	return buf;
}

#endif /* !(ENA_COM) */