1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of copyright holder nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #ifndef ENA_COM 35 #define ENA_COM 36 37 #include "ena_plat.h" 38 39 #define ENA_MAX_NUM_IO_QUEUES 128U 40 /* We need to queues for each IO (on for Tx and one for Rx) */ 41 #define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES)) 42 43 #define ENA_MAX_HANDLERS 256 44 45 #define ENA_MAX_PHYS_ADDR_SIZE_BITS 48 46 47 /* Unit in usec */ 48 #define ENA_REG_READ_TIMEOUT 200000 49 50 #define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry)) 51 #define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry)) 52 #define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry)) 53 54 /* Macros used to extract LSB/MSB from the 55 * enums defining the reset reasons 56 */ 57 #define ENA_RESET_REASON_LSB_OFFSET 0 58 #define ENA_RESET_REASON_LSB_MASK 0xf 59 #define ENA_RESET_REASON_MSB_OFFSET 4 60 #define ENA_RESET_REASON_MSB_MASK 0xf0 61 62 #define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512 63 64 /*****************************************************************************/ 65 /*****************************************************************************/ 66 /* ENA adaptive interrupt moderation settings */ 67 68 #define ENA_INTR_INITIAL_TX_INTERVAL_USECS ENA_INTR_INITIAL_TX_INTERVAL_USECS_PLAT 69 #define ENA_INTR_INITIAL_RX_INTERVAL_USECS ENA_INTR_INITIAL_RX_INTERVAL_USECS_PLAT 70 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 71 72 #define ENA_HASH_KEY_SIZE 40 73 74 #define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF 75 76 #define ENA_FEATURE_MAX_QUEUE_EXT_VER 1 77 78 struct ena_llq_configurations { 79 enum ena_admin_llq_header_location llq_header_location; 80 enum ena_admin_llq_ring_entry_size llq_ring_entry_size; 81 enum ena_admin_llq_stride_ctrl llq_stride_ctrl; 82 enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header; 83 u16 llq_ring_entry_size_value; 84 }; 85 86 enum queue_direction { 87 ENA_COM_IO_QUEUE_DIRECTION_TX, 88 ENA_COM_IO_QUEUE_DIRECTION_RX 89 }; 90 91 struct ena_com_buf { 92 dma_addr_t paddr; /**< Buffer physical 
address */ 93 u16 len; /**< Buffer length in bytes */ 94 }; 95 96 struct ena_com_rx_buf_info { 97 u16 len; 98 u16 req_id; 99 }; 100 101 struct ena_com_io_desc_addr { 102 u8 __iomem *pbuf_dev_addr; /* LLQ address */ 103 u8 *virt_addr; 104 dma_addr_t phys_addr; 105 ena_mem_handle_t mem_handle; 106 }; 107 108 struct ena_com_tx_meta { 109 u16 mss; 110 u16 l3_hdr_len; 111 u16 l3_hdr_offset; 112 u16 l4_hdr_len; /* In words */ 113 }; 114 115 struct ena_com_llq_info { 116 u16 header_location_ctrl; 117 u16 desc_stride_ctrl; 118 u16 desc_list_entry_size_ctrl; 119 u16 desc_list_entry_size; 120 u16 descs_num_before_header; 121 u16 descs_per_entry; 122 u16 max_entries_in_tx_burst; 123 bool disable_meta_caching; 124 }; 125 126 struct ena_com_io_cq { 127 struct ena_com_io_desc_addr cdesc_addr; 128 void *bus; 129 130 /* Interrupt unmask register */ 131 u32 __iomem *unmask_reg; 132 133 134 /* numa configuration register (for TPH) */ 135 u32 __iomem *numa_node_cfg_reg; 136 137 /* The value to write to the above register to unmask 138 * the interrupt of this queue 139 */ 140 u32 msix_vector ____cacheline_aligned; 141 142 enum queue_direction direction; 143 144 /* holds the number of cdesc of the current packet */ 145 u16 cur_rx_pkt_cdesc_count; 146 /* save the first cdesc idx of the current packet */ 147 u16 cur_rx_pkt_cdesc_start_idx; 148 149 u16 q_depth; 150 /* Caller qid */ 151 u16 qid; 152 153 /* Device queue index */ 154 u16 idx; 155 u16 head; 156 u8 phase; 157 u8 cdesc_entry_size_in_bytes; 158 159 } ____cacheline_aligned; 160 161 struct ena_com_io_bounce_buffer_control { 162 u8 *base_buffer; 163 u16 next_to_use; 164 u16 buffer_size; 165 u16 buffers_num; /* Must be a power of 2 */ 166 }; 167 168 /* This struct is to keep tracking the current location of the next llq entry */ 169 struct ena_com_llq_pkt_ctrl { 170 u8 *curr_bounce_buf; 171 u16 idx; 172 u16 descs_left_in_line; 173 }; 174 175 struct ena_com_io_sq { 176 struct ena_com_io_desc_addr desc_addr; 177 void *bus; 178 179 u32 
__iomem *db_addr; 180 181 enum queue_direction direction; 182 enum ena_admin_placement_policy_type mem_queue_type; 183 184 bool disable_meta_caching; 185 186 u32 msix_vector; 187 struct ena_com_tx_meta cached_tx_meta; 188 struct ena_com_llq_info llq_info; 189 struct ena_com_llq_pkt_ctrl llq_buf_ctrl; 190 struct ena_com_io_bounce_buffer_control bounce_buf_ctrl; 191 192 u16 q_depth; 193 u16 qid; 194 195 u16 idx; 196 u16 tail; 197 u16 next_to_comp; 198 u16 llq_last_copy_tail; 199 u32 tx_max_header_size; 200 u8 phase; 201 u8 desc_entry_size; 202 u8 dma_addr_bits; 203 u16 entries_in_tx_burst_left; 204 } ____cacheline_aligned; 205 206 struct ena_com_admin_cq { 207 struct ena_admin_acq_entry *entries; 208 ena_mem_handle_t mem_handle; 209 dma_addr_t dma_addr; 210 211 u16 head; 212 u8 phase; 213 }; 214 215 struct ena_com_admin_sq { 216 struct ena_admin_aq_entry *entries; 217 ena_mem_handle_t mem_handle; 218 dma_addr_t dma_addr; 219 220 u32 __iomem *db_addr; 221 222 u16 head; 223 u16 tail; 224 u8 phase; 225 226 }; 227 228 struct ena_com_stats_admin { 229 u64 aborted_cmd; 230 u64 submitted_cmd; 231 u64 completed_cmd; 232 u64 out_of_space; 233 u64 no_completion; 234 }; 235 236 struct ena_com_stats_phc { 237 u64 phc_cnt; 238 u64 phc_exp; 239 u64 phc_skp; 240 u64 phc_err; 241 }; 242 243 struct ena_com_admin_queue { 244 void *q_dmadev; 245 void *bus; 246 struct ena_com_dev *ena_dev; 247 ena_spinlock_t q_lock; /* spinlock for the admin queue */ 248 249 struct ena_comp_ctx *comp_ctx; 250 u32 completion_timeout; 251 u16 q_depth; 252 struct ena_com_admin_cq cq; 253 struct ena_com_admin_sq sq; 254 255 /* Indicate if the admin queue should poll for completion */ 256 bool polling; 257 258 /* Define if fallback to polling mode should occur */ 259 bool auto_polling; 260 261 u16 curr_cmd_id; 262 263 /* Indicate that the ena was initialized and can 264 * process new admin commands 265 */ 266 bool running_state; 267 268 bool is_missing_admin_interrupt; 269 270 /* Count the number of 
outstanding admin commands */ 271 ena_atomic32_t outstanding_cmds; 272 273 struct ena_com_stats_admin stats; 274 }; 275 276 struct ena_aenq_handlers; 277 278 struct ena_com_aenq { 279 u16 head; 280 u8 phase; 281 struct ena_admin_aenq_entry *entries; 282 dma_addr_t dma_addr; 283 ena_mem_handle_t mem_handle; 284 u16 q_depth; 285 struct ena_aenq_handlers *aenq_handlers; 286 }; 287 288 struct ena_com_mmio_read { 289 struct ena_admin_ena_mmio_req_read_less_resp *read_resp; 290 dma_addr_t read_resp_dma_addr; 291 ena_mem_handle_t read_resp_mem_handle; 292 u32 reg_read_to; /* in us */ 293 u16 seq_num; 294 bool readless_supported; 295 /* spin lock to ensure a single outstanding read */ 296 ena_spinlock_t lock; 297 }; 298 299 /* PTP hardware clock (PHC) MMIO read data info */ 300 struct ena_com_phc_info { 301 /* Internal PHC statistics */ 302 struct ena_com_stats_phc stats; 303 304 /* PHC shared memory - virtual address */ 305 struct ena_admin_phc_resp *virt_addr; 306 307 /* System time of last PHC request */ 308 ena_time_high_res_t system_time; 309 310 /* Spin lock to ensure a single outstanding PHC read */ 311 ena_spinlock_t lock; 312 313 /* PHC doorbell address as an offset to PCIe MMIO REG BAR */ 314 u32 doorbell_offset; 315 316 /* Shared memory read expire timeout (usec) 317 * Max time for valid PHC retrieval, passing this threshold will fail the get time request 318 * and block new PHC requests for block_timeout_usec in order to prevent floods on busy 319 * device 320 */ 321 u32 expire_timeout_usec; 322 323 /* Shared memory read abort timeout (usec) 324 * PHC requests block period, blocking starts once PHC request expired in order to prevent 325 * floods on busy device, any PHC requests during block period will be skipped 326 */ 327 u32 block_timeout_usec; 328 329 /* PHC shared memory - physical address */ 330 dma_addr_t phys_addr; 331 332 /* PHC shared memory handle */ 333 ena_mem_handle_t mem_handle; 334 335 /* Cached error bound per timestamp sample */ 336 u32 
error_bound; 337 338 /* Request id sent to the device */ 339 u16 req_id; 340 341 /* True if PHC is active in the device */ 342 bool active; 343 }; 344 345 struct ena_rss { 346 /* Indirect table */ 347 u16 *host_rss_ind_tbl; 348 struct ena_admin_rss_ind_table_entry *rss_ind_tbl; 349 dma_addr_t rss_ind_tbl_dma_addr; 350 ena_mem_handle_t rss_ind_tbl_mem_handle; 351 u16 tbl_log_size; 352 353 /* Hash key */ 354 enum ena_admin_hash_functions hash_func; 355 struct ena_admin_feature_rss_flow_hash_control *hash_key; 356 dma_addr_t hash_key_dma_addr; 357 ena_mem_handle_t hash_key_mem_handle; 358 u32 hash_init_val; 359 360 /* Flow Control */ 361 struct ena_admin_feature_rss_hash_control *hash_ctrl; 362 dma_addr_t hash_ctrl_dma_addr; 363 ena_mem_handle_t hash_ctrl_mem_handle; 364 365 }; 366 367 struct ena_customer_metrics { 368 /* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK 369 * and ena_admin_customer_metrics_id 370 */ 371 uint64_t supported_metrics; 372 dma_addr_t buffer_dma_addr; 373 void *buffer_virt_addr; 374 ena_mem_handle_t buffer_dma_handle; 375 u32 buffer_len; 376 }; 377 378 struct ena_host_attribute { 379 /* Debug area */ 380 u8 *debug_area_virt_addr; 381 dma_addr_t debug_area_dma_addr; 382 ena_mem_handle_t debug_area_dma_handle; 383 u32 debug_area_size; 384 385 /* Host information */ 386 struct ena_admin_host_info *host_info; 387 dma_addr_t host_info_dma_addr; 388 ena_mem_handle_t host_info_dma_handle; 389 }; 390 391 /* Each ena_dev is a PCI function. 
*/ 392 struct ena_com_dev { 393 struct ena_com_admin_queue admin_queue; 394 struct ena_com_aenq aenq; 395 struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES]; 396 struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES]; 397 u8 __iomem *reg_bar; 398 void __iomem *mem_bar; 399 void *dmadev; 400 void *bus; 401 ena_netdev *net_device; 402 403 enum ena_admin_placement_policy_type tx_mem_queue_type; 404 u32 tx_max_header_size; 405 u16 stats_func; /* Selected function for extended statistic dump */ 406 u16 stats_queue; /* Selected queue for extended statistic dump */ 407 408 u32 ena_min_poll_delay_us; 409 410 struct ena_com_mmio_read mmio_read; 411 struct ena_com_phc_info phc; 412 413 struct ena_rss rss; 414 u32 supported_features; 415 u32 capabilities; 416 u32 dma_addr_bits; 417 418 struct ena_host_attribute host_attr; 419 bool adaptive_coalescing; 420 u16 intr_delay_resolution; 421 422 /* interrupt moderation intervals are in usec divided by 423 * intr_delay_resolution, which is supplied by the device. 424 */ 425 u32 intr_moder_tx_interval; 426 u32 intr_moder_rx_interval; 427 428 struct ena_intr_moder_entry *intr_moder_tbl; 429 430 struct ena_com_llq_info llq_info; 431 432 struct ena_customer_metrics customer_metrics; 433 }; 434 435 struct ena_com_dev_get_features_ctx { 436 struct ena_admin_queue_feature_desc max_queues; 437 struct ena_admin_queue_ext_feature_desc max_queue_ext; 438 struct ena_admin_device_attr_feature_desc dev_attr; 439 struct ena_admin_feature_aenq_desc aenq; 440 struct ena_admin_feature_offload_desc offload; 441 struct ena_admin_ena_hw_hints hw_hints; 442 struct ena_admin_feature_llq_desc llq; 443 }; 444 445 struct ena_com_create_io_ctx { 446 enum ena_admin_placement_policy_type mem_queue_type; 447 enum queue_direction direction; 448 int numa_node; 449 u32 msix_vector; 450 u16 queue_size; 451 u16 qid; 452 }; 453 454 typedef void (*ena_aenq_handler)(void *data, 455 struct ena_admin_aenq_entry *aenq_e); 456 457 /* Holds aenq handlers. 
Indexed by AENQ event group */ 458 struct ena_aenq_handlers { 459 ena_aenq_handler handlers[ENA_MAX_HANDLERS]; 460 ena_aenq_handler unimplemented_handler; 461 }; 462 463 /*****************************************************************************/ 464 /*****************************************************************************/ 465 #if defined(__cplusplus) 466 extern "C" { 467 #endif 468 469 /* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism 470 * @ena_dev: ENA communication layer struct 471 * 472 * Initialize the register read mechanism. 473 * 474 * @note: This method must be the first stage in the initialization sequence. 475 * 476 * @return - 0 on success, negative value on failure. 477 */ 478 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); 479 480 /* ena_com_phc_init - Allocate and initialize PHC feature 481 * @ena_dev: ENA communication layer struct 482 * @note: This method assumes PHC is supported by the device 483 * @return - 0 on success, negative value on failure 484 */ 485 int ena_com_phc_init(struct ena_com_dev *ena_dev); 486 487 /* ena_com_phc_supported - Return if PHC feature is supported by the device 488 * @ena_dev: ENA communication layer struct 489 * @note: This method must be called after getting supported features 490 * @return - supported or not 491 */ 492 bool ena_com_phc_supported(struct ena_com_dev *ena_dev); 493 494 /* ena_com_phc_config - Configure PHC feature 495 * @ena_dev: ENA communication layer struct 496 * Configure PHC feature in driver and device 497 * @note: This method assumes PHC is supported by the device 498 * @return - 0 on success, negative value on failure 499 */ 500 int ena_com_phc_config(struct ena_com_dev *ena_dev); 501 502 /* ena_com_phc_destroy - Destroy PHC feature 503 * @ena_dev: ENA communication layer struct 504 */ 505 void ena_com_phc_destroy(struct ena_com_dev *ena_dev); 506 507 /* ena_com_phc_get_timestamp - Retrieve PHC timestamp 508 * @ena_dev: ENA communication 
layer struct 509 * @timestamp: Retrieved PHC timestamp 510 * @return - 0 on success, negative value on failure 511 */ 512 int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp); 513 514 /* ena_com_phc_get_error_bound - Retrieve cached PHC error bound 515 * @ena_dev: ENA communication layer struct 516 * @error_bound: Cached PHC error bound 517 * @return - 0 on success, negative value on failure 518 */ 519 int ena_com_phc_get_error_bound(struct ena_com_dev *ena_dev, u32 *error_bound); 520 521 /* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism 522 * @ena_dev: ENA communication layer struct 523 * @readless_supported: readless mode (enable/disable) 524 */ 525 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, 526 bool readless_supported); 527 528 /* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return 529 * value physical address. 530 * @ena_dev: ENA communication layer struct 531 */ 532 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev); 533 534 /* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism 535 * @ena_dev: ENA communication layer struct 536 */ 537 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); 538 539 /* ena_com_admin_init - Init the admin and the async queues 540 * @ena_dev: ENA communication layer struct 541 * @aenq_handlers: Those handlers to be called upon event. 542 * 543 * Initialize the admin submission and completion queues. 544 * Initialize the asynchronous events notification queues. 545 * 546 * @return - 0 on success, negative value on failure. 547 */ 548 int ena_com_admin_init(struct ena_com_dev *ena_dev, 549 struct ena_aenq_handlers *aenq_handlers); 550 551 /* ena_com_admin_destroy - Destroy the admin and the async events queues. 
552 * @ena_dev: ENA communication layer struct 553 * 554 * @note: Before calling this method, the caller must validate that the device 555 * won't send any additional admin completions/aenq. 556 * To achieve that, a FLR is recommended. 557 */ 558 void ena_com_admin_destroy(struct ena_com_dev *ena_dev); 559 560 /* ena_com_dev_reset - Perform device FLR to the device. 561 * @ena_dev: ENA communication layer struct 562 * @reset_reason: Specify what is the trigger for the reset in case of an error. 563 * 564 * @return - 0 on success, negative value on failure. 565 */ 566 int ena_com_dev_reset(struct ena_com_dev *ena_dev, 567 enum ena_regs_reset_reason_types reset_reason); 568 569 /* ena_com_create_io_queue - Create io queue. 570 * @ena_dev: ENA communication layer struct 571 * @ctx - create context structure 572 * 573 * Create the submission and the completion queues. 574 * 575 * @return - 0 on success, negative value on failure. 576 */ 577 int ena_com_create_io_queue(struct ena_com_dev *ena_dev, 578 struct ena_com_create_io_ctx *ctx); 579 580 /* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid. 581 * @ena_dev: ENA communication layer struct 582 * @qid - the caller virtual queue id. 583 */ 584 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid); 585 586 /* ena_com_get_io_handlers - Return the io queue handlers 587 * @ena_dev: ENA communication layer struct 588 * @qid - the caller virtual queue id. 589 * @io_sq - IO submission queue handler 590 * @io_cq - IO completion queue handler. 591 * 592 * @return - 0 on success, negative value on failure. 593 */ 594 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 595 struct ena_com_io_sq **io_sq, 596 struct ena_com_io_cq **io_cq); 597 598 /* ena_com_admin_aenq_enable - ENAble asynchronous event notifications 599 * @ena_dev: ENA communication layer struct 600 * 601 * After this method, aenq event can be received via AENQ. 
602 */ 603 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev); 604 605 /* ena_com_set_admin_running_state - Set the state of the admin queue 606 * @ena_dev: ENA communication layer struct 607 * 608 * Change the state of the admin queue (enable/disable) 609 */ 610 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state); 611 612 /* ena_com_get_admin_running_state - Get the admin queue state 613 * @ena_dev: ENA communication layer struct 614 * 615 * Retrieve the state of the admin queue (enable/disable) 616 * 617 * @return - current polling mode (enable/disable) 618 */ 619 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev); 620 621 /* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode 622 * @ena_dev: ENA communication layer struct 623 * @polling: ENAble/Disable polling mode 624 * 625 * Set the admin completion mode. 626 */ 627 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling); 628 629 /* ena_com_get_admin_polling_mode - Get the admin completion queue polling mode 630 * @ena_dev: ENA communication layer struct 631 * 632 * Get the admin completion mode. 633 * If polling mode is on, ena_com_execute_admin_command will perform a 634 * polling on the admin completion queue for the commands completion, 635 * otherwise it will wait on wait event. 636 * 637 * @return state 638 */ 639 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev); 640 641 /* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode 642 * @ena_dev: ENA communication layer struct 643 * @polling: Enable/Disable polling mode 644 * 645 * Set the autopolling mode. 646 * If autopolling is on: 647 * In case of missing interrupt when data is available switch to polling. 
648 */ 649 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, 650 bool polling); 651 652 /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler 653 * @ena_dev: ENA communication layer struct 654 * 655 * This method goes over the admin completion queue and wakes up all the pending 656 * threads that wait on the commands wait event. 657 * 658 * @note: Should be called after MSI-X interrupt. 659 */ 660 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); 661 662 /* ena_com_aenq_intr_handler - AENQ interrupt handler 663 * @ena_dev: ENA communication layer struct 664 * 665 * This method goes over the async event notification queue and calls the proper 666 * aenq handler. 667 */ 668 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data); 669 670 /* ena_com_aenq_has_keep_alive - Retrieve if there is a keep alive notification in the aenq 671 * @ena_dev: ENA communication layer struct 672 * 673 * This method goes over the async event notification queue and returns if there 674 * is a keep alive notification. 675 * 676 * @return - true if there is a keep alive notification in the aenq or false otherwise 677 */ 678 bool ena_com_aenq_has_keep_alive(struct ena_com_dev *ena_dev); 679 680 /* ena_com_abort_admin_commands - Abort all the outstanding admin commands. 681 * @ena_dev: ENA communication layer struct 682 * 683 * This method aborts all the outstanding admin commands. 684 * The caller should then call ena_com_wait_for_abort_completion to make sure 685 * all the commands were completed. 686 */ 687 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); 688 689 /* ena_com_wait_for_abort_completion - Wait for admin commands abort. 690 * @ena_dev: ENA communication layer struct 691 * 692 * This method waits until all the outstanding admin commands are completed. 
693 */ 694 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); 695 696 /* ena_com_validate_version - Validate the device parameters 697 * @ena_dev: ENA communication layer struct 698 * 699 * This method verifies the device parameters are the same as the saved 700 * parameters in ena_dev. 701 * This method is useful after device reset, to validate the device mac address 702 * and the device offloads are the same as before the reset. 703 * 704 * @return - 0 on success negative value otherwise. 705 */ 706 int ena_com_validate_version(struct ena_com_dev *ena_dev); 707 708 /* ena_com_get_link_params - Retrieve physical link parameters. 709 * @ena_dev: ENA communication layer struct 710 * @resp: Link parameters 711 * 712 * Retrieve the physical link parameters, 713 * like speed, auto-negotiation and full duplex support. 714 * 715 * @return - 0 on Success negative value otherwise. 716 */ 717 int ena_com_get_link_params(struct ena_com_dev *ena_dev, 718 struct ena_admin_get_feat_resp *resp); 719 720 /* ena_com_get_dma_width - Retrieve physical dma address width the device 721 * supports. 722 * @ena_dev: ENA communication layer struct 723 * 724 * Retrieve the maximum physical address bits the device can handle. 725 * 726 * @return: > 0 on Success and negative value otherwise. 727 */ 728 int ena_com_get_dma_width(struct ena_com_dev *ena_dev); 729 730 /* ena_com_set_aenq_config - Set aenq groups configurations 731 * @ena_dev: ENA communication layer struct 732 * @groups flag: bit fields flags of enum ena_admin_aenq_group. 733 * 734 * Configure which aenq event group the driver would like to receive. 735 * 736 * @return: 0 on Success and negative value otherwise. 737 */ 738 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag); 739 740 /* ena_com_get_dev_attr_feat - Get device features 741 * @ena_dev: ENA communication layer struct 742 * @get_feat_ctx: returned context that contain the get features. 
743 * 744 * @return: 0 on Success and negative value otherwise. 745 */ 746 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, 747 struct ena_com_dev_get_features_ctx *get_feat_ctx); 748 749 /* ena_com_get_dev_basic_stats - Get device basic statistics 750 * @ena_dev: ENA communication layer struct 751 * @stats: stats return value 752 * 753 * @return: 0 on Success and negative value otherwise. 754 */ 755 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, 756 struct ena_admin_basic_stats *stats); 757 758 /* ena_com_get_eni_stats - Get extended network interface statistics 759 * @ena_dev: ENA communication layer struct 760 * @stats: stats return value 761 * 762 * @return: 0 on Success and negative value otherwise. 763 */ 764 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, 765 struct ena_admin_eni_stats *stats); 766 767 /* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics 768 * @ena_dev: ENA communication layer struct 769 * @info: ena srd stats and flags 770 * 771 * @return: 0 on Success and negative value otherwise. 772 */ 773 int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev, 774 struct ena_admin_ena_srd_info *info); 775 776 /* ena_com_get_customer_metrics - Get customer metrics for network interface 777 * @ena_dev: ENA communication layer struct 778 * @buffer: buffer for returned customer metrics 779 * @len: size of the buffer 780 * 781 * @return: 0 on Success and negative value otherwise. 782 */ 783 int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len); 784 785 /* ena_com_set_dev_mtu - Configure the device mtu. 786 * @ena_dev: ENA communication layer struct 787 * @mtu: mtu value 788 * 789 * @return: 0 on Success and negative value otherwise. 
790 */ 791 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu); 792 793 /* ena_com_get_offload_settings - Retrieve the device offloads capabilities 794 * @ena_dev: ENA communication layer struct 795 * @offlad: offload return value 796 * 797 * @return: 0 on Success and negative value otherwise. 798 */ 799 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, 800 struct ena_admin_feature_offload_desc *offload); 801 802 /* ena_com_rss_init - Init RSS 803 * @ena_dev: ENA communication layer struct 804 * @log_size: indirection log size 805 * 806 * Allocate RSS/RFS resources. 807 * The caller then can configure rss using ena_com_set_hash_function, 808 * ena_com_set_hash_ctrl and ena_com_indirect_table_set. 809 * 810 * @return: 0 on Success and negative value otherwise. 811 */ 812 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size); 813 814 /* ena_com_rss_destroy - Destroy rss 815 * @ena_dev: ENA communication layer struct 816 * 817 * Free all the RSS/RFS resources. 818 */ 819 void ena_com_rss_destroy(struct ena_com_dev *ena_dev); 820 821 /* ena_com_get_current_hash_function - Get RSS hash function 822 * @ena_dev: ENA communication layer struct 823 * 824 * Return the current hash function. 825 * @return: 0 or one of the ena_admin_hash_functions values. 826 */ 827 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev); 828 829 /* ena_com_fill_hash_function - Fill RSS hash function 830 * @ena_dev: ENA communication layer struct 831 * @func: The hash function (Toeplitz or crc) 832 * @key: Hash key (for toeplitz hash) 833 * @key_len: key length (max length 10 DW) 834 * @init_val: initial value for the hash function 835 * 836 * Fill the ena_dev resources with the desire hash function, hash key, key_len 837 * and key initial value (if needed by the hash function). 838 * To flush the key into the device the caller should call 839 * ena_com_set_hash_function. 840 * 841 * @return: 0 on Success and negative value otherwise. 
 */
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val);

/* ena_com_set_hash_function - Flush the hash function and its dependencies to
 * the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the hash function and its dependencies (key, key length and
 * initial value) if needed.
 *
 * @note: Prior to this method the caller should call ena_com_fill_hash_function
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);

/* ena_com_get_hash_function - Retrieve the hash function from the device.
 * @ena_dev: ENA communication layer struct
 * @func: hash function
 *
 * Retrieve the hash function from the device.
 *
 * @note: If the caller called ena_com_fill_hash_function but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func);

/* ena_com_get_hash_key - Retrieve the hash key
 * @ena_dev: ENA communication layer struct
 * @key: hash key
 *
 * Retrieve the hash key.
 *
 * @note: If the caller called ena_com_fill_hash_key but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
/* ena_com_fill_hash_ctrl - Fill RSS hash control
 * @ena_dev: ENA communication layer struct.
 * @proto: The protocol to configure.
 * @hash_fields: bit mask of ena_admin_flow_hash_fields
 *
 * Fill the ena_dev resources with the desired hash control (the ethernet
 * fields that take part of the hash) for a specific protocol.
 * To flush the hash control to the device, the caller should call
 * ena_com_set_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields);

/* ena_com_set_hash_ctrl - Flush the hash control resources to the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the hash control (the ethernet fields that take part of the hash)
 *
 * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);

/* ena_com_get_hash_ctrl - Retrieve the hash control from the device.
 * @ena_dev: ENA communication layer struct
 * @proto: The protocol to retrieve.
 * @fields: bit mask of ena_admin_flow_hash_fields.
 *
 * Retrieve the hash control from the device.
 *
 * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields);

/* ena_com_set_default_hash_ctrl - Set the hash control to a default
 * configuration.
 * @ena_dev: ENA communication layer struct
 *
 * Fill the ena_dev resources with the default hash control configuration.
 * To flush the hash control to the device, the caller should call
 * ena_com_set_hash_ctrl.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev);

/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS
 * indirection table
 * @ena_dev: ENA communication layer struct.
 * @entry_idx - indirection table entry.
 * @entry_value - redirection value
 *
 * Fill a single entry of the RSS indirection table in the ena_dev resources.
 * To flush the indirection table to the device, the caller should call
 * ena_com_indirect_table_set.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value);

/* ena_com_indirect_table_set - Flush the indirection table to the device.
 * @ena_dev: ENA communication layer struct
 *
 * Flush the indirection hash control to the device.
 * Prior to this method the caller should call ena_com_indirect_table_fill_entry
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);

/* ena_com_indirect_table_get - Retrieve the indirection table from the device.
 * @ena_dev: ENA communication layer struct
 * @ind_tbl: indirection table
 *
 * Retrieve the RSS indirection table from the device.
 *
 * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
 * it to the device, the new configuration will be lost.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl);

/* ena_com_allocate_host_info - Allocate host info resources.
 * @ena_dev: ENA communication layer struct
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);

/* ena_com_allocate_debug_area - Allocate debug area.
 * @ena_dev: ENA communication layer struct
 * @debug_area_size - debug area size.
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size);

/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
 * @ena_dev: ENA communication layer struct
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev);

/* ena_com_delete_debug_area - Free the debug area resources.
 * @ena_dev: ENA communication layer struct
 *
 * Free the allocated debug area.
 */
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);

/* ena_com_delete_host_info - Free the host info resources.
 * @ena_dev: ENA communication layer struct
 *
 * Free the allocated host info.
 */
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);

/* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources.
 * @ena_dev: ENA communication layer struct
 *
 * Free the allocated customer metrics area.
 */
void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev);

/* ena_com_set_host_attributes - Update the device with the host
 * attributes (debug area and host info) base address.
 * @ena_dev: ENA communication layer struct
 *
 * @return: 0 on Success and negative value otherwise.
 */
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev);

/* ena_com_create_io_cq - Create io completion queue.
 * @ena_dev: ENA communication layer struct
 * @io_cq - io completion queue handler
 *
 * Create IO completion queue.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq);

/* ena_com_destroy_io_cq - Destroy io completion queue.
 * @ena_dev: ENA communication layer struct
 * @io_cq - io completion queue handler
 *
 * Destroy IO completion queue.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq);

/* ena_com_execute_admin_command - Execute admin command
 * @admin_queue: admin queue.
 * @cmd: the admin command to execute.
 * @cmd_size: the command size.
 * @cmd_completion: command completion return value.
 * @cmd_comp_size: command completion size.
 *
 * Submit an admin command and then wait until the device returns a
 * completion.
 * The completion will be copied into cmd_comp.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *cmd_comp,
				  size_t cmd_comp_size);

/* ena_com_init_interrupt_moderation - Init interrupt moderation
 * @ena_dev: ENA communication layer struct
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);

/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
 * capability is supported by the device.
 *
 * @return - supported or not.
 */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);

/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 * @tx_coalesce_usecs: Interval in usec.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs);

/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 * @rx_coalesce_usecs: Interval in usec.
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs);

/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
 * non-adaptive interval in Tx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);

/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
 * non-adaptive interval in Rx direction.
 * @ena_dev: ENA communication layer struct
 *
 * @return - interval in usec
 */
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);

/* ena_com_config_dev_mode - Configure the placement policy of the device.
 * @ena_dev: ENA communication layer struct
 * @llq_features: LLQ feature descriptor, retrieve via
 *		  ena_com_get_dev_attr_feat.
 * @llq_default_config: The default driver LLQ parameters configurations
 *
 * @return - 0 on success, negative value on failure.
 */
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_config);

/* ena_com_get_missing_admin_interrupt - Return if there is a missing admin interrupt
 * @ena_dev: ENA communication layer struct
 *
 * @return - true if there is a missing admin interrupt or false otherwise
 */
static inline bool ena_com_get_missing_admin_interrupt(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.is_missing_admin_interrupt;
}

/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
 * @io_sq: IO submit queue struct
 *
 * @return - ena_com_dev struct extracted from io_sq
 */
static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
{
	return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
}

/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
 * @io_cq: IO completion queue struct
 *
 * @return - ena_com_dev struct extracted from io_cq
 */
static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
{
	return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
}

/* ena_com_get_adaptive_moderation_enabled - Query whether adaptive interrupt
 * moderation is currently enabled.
 * @ena_dev: ENA communication layer struct
 *
 * @return - true if adaptive moderation is enabled or false otherwise
 */
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
	return ena_dev->adaptive_coalescing;
}

/* ena_com_enable_adaptive_moderation - Enable adaptive interrupt moderation.
 * @ena_dev: ENA communication layer struct
 */
static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = true;
}

/* ena_com_disable_adaptive_moderation - Disable adaptive interrupt moderation.
 * @ena_dev: ENA communication layer struct
 */
static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	ena_dev->adaptive_coalescing = false;
}

/* ena_com_get_cap - query whether device supports a capability.
 * @ena_dev: ENA communication layer struct
 * @cap_id: enum value representing the capability
 *
 * @return - true if capability is supported or false otherwise
 */
static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
				   enum ena_admin_aq_caps_id cap_id)
{
	return !!(ena_dev->capabilities & BIT(cap_id));
}

/* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
 * @ena_dev: ENA communication layer struct
 * @metric_id: enum value representing the customer metric
 *
 * @return - true if customer metric is supported or false otherwise
 */
static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
						       enum ena_admin_customer_metrics_id metric_id)
{
	return !!(ena_dev->customer_metrics.supported_metrics & BIT64(metric_id));
}

/* ena_com_get_customer_metric_count - return the number of supported customer metrics.
 * @ena_dev: ENA communication layer struct
 *
 * @return - the number of supported customer metrics
 */
static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
{
	return ENA_BITS_PER_U64(ena_dev->customer_metrics.supported_metrics);
}

/* ena_com_update_intr_reg - Prepare interrupt register
 * @intr_reg: interrupt register to update.
 * @rx_delay_interval: Rx interval in usecs
 * @tx_delay_interval: Tx interval in usecs
 * @unmask: unmask enable/disable
 * @no_moderation_update: false - Indicates that any of the TX/RX intervals was
 * updated, true - otherwise
 *
 * Prepare interrupt update register with the supplied parameters.
 */
static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
					   u32 rx_delay_interval,
					   u32 tx_delay_interval,
					   bool unmask,
					   bool no_moderation_update)
{
	intr_reg->intr_control = 0;
	/* Rx delay occupies the low bits of the control word. */
	intr_reg->intr_control |= rx_delay_interval &
		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;

	/* Tx delay is shifted into its own field. */
	intr_reg->intr_control |=
		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;

	if (unmask)
		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;

	intr_reg->intr_control |=
		(((u32)no_moderation_update) << ENA_ETH_IO_INTR_REG_NO_MODERATION_UPDATE_SHIFT) &
		ENA_ETH_IO_INTR_REG_NO_MODERATION_UPDATE_MASK;
}

/* ena_com_get_next_bounce_buffer - Return the next LLQ bounce buffer to use.
 * @bounce_buf_ctrl: bounce buffer control block
 *
 * Returns the current buffer and post-increments next_to_use; the buffer
 * after the returned one is prefetched for write.
 * NOTE(review): the index masking assumes buffers_num is a power of 2 —
 * confirm this invariant at the allocation site.
 */
static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
{
	u16 size, buffers_num;
	u8 *buf;

	size = bounce_buf_ctrl->buffer_size;
	buffers_num = bounce_buf_ctrl->buffers_num;

	buf = bounce_buf_ctrl->base_buffer +
		(bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;

	/* Prefetch the next buffer while the caller fills the current one. */
	prefetchw(bounce_buf_ctrl->base_buffer +
		(bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);

	return buf;
}

#ifdef ENA_EXTENDED_STATS
int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len);

int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 funct_queue);
#endif
#if defined(__cplusplus)
}
#endif /* __cplusplus */
#endif /* !(ENA_COM) */