ena.c (b1c38df05d79c81ee1e9fd0942774820a4ffcb63) vs. ena.c (d0419551d96c8f995bdf6388a8e69684be33f9b5)
Common to both revisions:

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

--- 2817 unchanged lines hidden (view full) ---

	    max_queues->max_sq_depth);

	ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
	    max_queues->max_packet_tx_descs);
	ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
	    max_queues->max_packet_rx_descs);
}

ena.c (b1c38df05d79c81ee1e9fd0942774820a4ffcb63):

	/* round down to the nearest power of 2 */
	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);

	/*
	 * When using large headers, we multiply the entry size by 2,
	 * and therefore divide the queue size by 2, leaving the amount
	 * of memory used by the queues unchanged.
	 */
	if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size /= 2;
			ena_log(ctx->pdev, INFO,
			    "Using large headers and decreasing maximum Tx queue size to %d\n",
			    max_tx_queue_size);
		} else {
			ena_log(ctx->pdev, WARN,
			    "Using large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}
ena.c (d0419551d96c8f995bdf6388a8e69684be33f9b5):

	if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			if (llq->max_wide_llq_depth != max_tx_queue_size) {
				if (llq->max_wide_llq_depth == 0) {
					/* if there is no large llq max depth from device, we divide
					 * the queue size by 2, leaving the amount of memory
					 * used by the queues unchanged.
					 */
					max_tx_queue_size /= 2;
				} else {
					max_tx_queue_size = llq->max_wide_llq_depth;
				}
				ena_log(ctx->pdev, INFO,
				    "Using large LLQ headers and decreasing maximum Tx queue size to %d\n",
				    max_tx_queue_size);
			} else {
				ena_log(ctx->pdev, INFO, "Using large LLQ headers\n");
			}
		} else {
			ena_log(ctx->pdev, WARN,
			    "Using large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	/* round down to the nearest power of 2 */
	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
Common to both revisions:

	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
	    max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
	    max_rx_queue_size);

	tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
	rx_queue_size = 1 << (flsl(rx_queue_size) - 1);

--- 1384 unchanged lines hidden ---
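
For orientation, and not part of the diff: the d0419551d96c8f995bdf6388a8e69684be33f9b5 revision prefers the device-reported llq->max_wide_llq_depth over simply halving the maximum Tx queue size, and it moves the power-of-two round-down after that adjustment, presumably because the reported wide-LLQ depth need not itself be a power of two. The standalone sketch below walks through that ordering with made-up numbers; the helper functions, sample values, and the ENA_MIN_RING_SIZE value of 256 are assumptions for illustration (the driver itself uses the kernel's flsl() and clamp_val()).

/*
 * Standalone sketch of the Tx ring-size derivation in the newer revision.
 * Not driver code: inputs and helpers are hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ENA_MIN_RING_SIZE 256	/* assumed minimum ring size */

/* Round down to the nearest power of 2 (the driver uses 1 << (flsl(x) - 1)). */
static uint32_t
round_down_pow2(uint32_t x)
{
	uint32_t p = 1;

	while (p * 2 <= x)
		p *= 2;
	return (p);
}

/* Stand-in for the kernel's clamp_val(). */
static uint32_t
clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return (v < lo ? lo : (v > hi ? hi : v));
}

int
main(void)
{
	/* Hypothetical inputs. */
	uint32_t max_tx_queue_size = 1024;	/* negotiated maximum */
	uint32_t tx_queue_size = 1024;		/* requested ring size */
	uint32_t max_wide_llq_depth = 512;	/* 0 if the device reports none */
	int wide_llq = 1;			/* 256B LLQ entries + LLQ placement */

	if (wide_llq && max_wide_llq_depth != max_tx_queue_size) {
		if (max_wide_llq_depth == 0)
			max_tx_queue_size /= 2;	/* keep queue memory unchanged */
		else
			max_tx_queue_size = max_wide_llq_depth;
	}

	/* Only after the wide-LLQ adjustment is the maximum rounded down. */
	max_tx_queue_size = round_down_pow2(max_tx_queue_size);

	tx_queue_size = clamp_u32(tx_queue_size, ENA_MIN_RING_SIZE,
	    max_tx_queue_size);
	tx_queue_size = round_down_pow2(tx_queue_size);

	printf("max %" PRIu32 ", effective Tx ring size %" PRIu32 "\n",
	    max_tx_queue_size, tx_queue_size);
	return (0);
}

With these sample numbers the sketch prints "max 512, effective Tx ring size 512": the device-reported wide-LLQ depth replaces the halving, and the requested size is then clamped and rounded to a power of two.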