Lines Matching +full:queue +full:- +full:rx
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
4 * Copyright (C) 2015-2024 Google LLC
10 #include <linux/dma-mapping.h>
35 /* 1 for management, 1 for rx, 1 for tx */
38 /* Number of gve tx/rx stats in stats report. */
45 /* Number of NIC tx/rx stats in stats report. */
51 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
73 (GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
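The two macros above rely on stock C idioms: GVE_DATA_SLOT_ADDR_PAGE_MASK (line 51) clears the low-order bits of a slot address to recover its page-aligned base, and line 73 uses the sizeof(((struct T *)0)->member) trick to size a struct member with no instance at hand, so the buffer capacity stays correct if the member's type ever changes. A minimal userspace sketch of both idioms; the struct and constants here are hypothetical stand-ins, not the driver's:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))   /* zeroes the in-page offset bits */

    struct flow_rule { uint32_t location; };   /* hypothetical stand-in */
    /* entries per buffer, sized through a NULL pointer (never dereferenced) */
    #define RULES_PER_BUF (4096 / sizeof(((struct flow_rule *)0)->location))

    int main(void)
    {
        uint64_t addr = 0x12345678abcULL;
        printf("page base %#llx, offset %llu, rules/buf %zu\n",
               (unsigned long long)(addr & PAGE_MASK),
               (unsigned long long)(addr & (PAGE_SIZE - 1)),
               (size_t)RULES_PER_BUF);
        return 0;
    }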
91 /* 2K buffers for DQO-QPL */
97 * allocs and uses a non-qpl page on the receive path of DQO QPL to free
110 /* The page info for a single slot in the RX data queue */
123 u16 pad; /* adjustment for rx padding */
127 /* A list of pages registered with the device during setup and used by a queue
142 struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
148 /* RX buffer queue for posting buffers to HW.
149 * Each RX (completion) queue has a corresponding buffer queue.
159 /* RX completion queue to receive packets from HW. */
165 * post more buffers than the queue size to avoid HW overrunning the
166 * queue.
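Lines 148-166 describe the producer-side invariant: the driver must never have more than queue-size buffers outstanding to hardware at once. With free-running posted/consumed counters that check reduces to a single subtraction, as in this hedged sketch (field and function names are illustrative, not gve's):

    /* Hypothetical ring bookkeeping, relying on unsigned wraparound. */
    struct bufq {
        unsigned int size;      /* ring entries, power of two */
        unsigned int mask;      /* size - 1 */
        unsigned int posted;    /* free-running: buffers handed to HW */
        unsigned int consumed;  /* free-running: buffers HW completed */
    };

    static int bufq_post(struct bufq *q)
    {
        if (q->posted - q->consumed >= q->size)
            return -1;                        /* HW already owns size bufs */
        /* write a descriptor at slot (q->posted & q->mask) here */
        q->posted++;
        return 0;
    }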
204 /* Linked list index to next element in the list, or -1 if none */
208 /* `head` and `tail` are indices into an array, or -1 if empty. */
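The -1 sentinels at lines 204 and 208 (and in the buf_states lists further down) describe array-backed linked lists: each element stores the array index of its successor instead of a pointer, and -1 doubles as both "empty list" and "end of chain". An illustrative sketch with hypothetical names:

    struct index_list { int head, tail; };       /* both -1 when empty */
    struct buf_state  { int next; };             /* -1 if last in list */

    static void ilist_push(struct index_list *l, struct buf_state *arr, int idx)
    {
        arr[idx].next = -1;
        if (l->head == -1)
            l->head = idx;                  /* was empty */
        else
            arr[l->tail].next = idx;        /* chain onto old tail */
        l->tail = idx;
    }

    static int ilist_pop(struct index_list *l, struct buf_state *arr)
    {
        int idx = l->head;
        if (idx == -1)
            return -1;                      /* empty */
        l->head = arr[idx].next;
        if (l->head == -1)
            l->tail = -1;                   /* drained */
        return idx;
    }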
234 /* Contains datapath state used to represent an RX queue. */
239 u16 packet_buffer_truesize; /* Total size of RX buffer */
265 * buf_states, or -1 if empty.
270 * buf_states, or -1 if empty.
282 * buf_states, or -1 if empty.
289 /* qpl assigned to this queue */
292 /* index into queue page list */
298 /* Address info of the buffers for header-split */
305 u64 rbytes; /* free-running bytes received */
306 u64 rx_hsplit_bytes; /* free-running header bytes received */
307 u64 rpackets; /* free-running packets received */
308 u32 cnt; /* free-running total number of completed packets */
309 u32 fill_cnt; /* free-running total number of descs and buffs posted */
311 u64 rx_hsplit_pkt; /* free-running packets with headers split */
312 u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
313 u64 rx_copied_pkt; /* free-running total number of copied packets */
314 u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
315 u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
316 u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
317 /* free-running count of unsplit packets due to header buffer overflow or zero hdr_len */
319 u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
320 u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
321 u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
322 u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
327 u32 q_num; /* queue index */
330 dma_addr_t q_resources_bus; /* dma address for the queue resources */
376 /* A TX buffer - each queue has one */
400 * re-injection completion.
440 /* Linked list index to next element in the list, or -1 if none */
443 /* Linked list index to prev element in the list, or -1 if none.
458 * freed if the corresponding re-injection completion is not received
464 /* Contains datapath state used to represent a TX queue. */
466 /* Cacheline 0 -- Accessed & dirtied during transmit */
481 * pending_packets, or -1 if empty.
507 * tx_qpl_buf_next, or -1 if empty.
529 /* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
547 * pending_packets, or -1 if empty.
576 * tx_qpl_buf_next, or -1 if empty.
592 u64 pkt_done; /* free-running - total packets completed */
593 u64 bytes_done; /* free-running - total bytes completed */
594 u64 dropped_pkt; /* free-running - total packets dropped */
597 /* Cacheline 2 -- Read-mostly fields */
621 /* qpl assigned to this queue */
638 u32 mask; /* masks req and done down to queue size */
641 /* Slow-path fields */
642 u32 q_num ____cacheline_aligned; /* queue idx */
643 u32 stop_queue; /* count of queue stops */
644 u32 wake_queue; /* count of queue wakes */
645 u32 queue_timeout; /* count of queue timeouts */
647 u32 last_kick_msec; /* Last time the queue was kicked */
649 dma_addr_t q_resources_bus; /* dma address of the queue resources */
667 struct gve_rx_ring *rx; /* rx rings on this block */ member
671 /* Tracks allowed and current rx queue settings */
678 /* Tracks allowed and current tx queue settings */
717 /* Parameters for allocating resources for rx queues */
731 struct gve_rx_ring *rx; member
793 struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */ member
821 unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
822 u32 num_ntfy_blks; /* split between TX and RX so must be even */
833 /* Admin queue - see gve_adminq.h */
839 u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
840 u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
841 u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
842 /* free-running count of each AQ cmd executed */
869 u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
901 u16 header_buf_size; /* device configured, header-split supported if non-zero */
943 return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); in gve_get_do_reset()
948 set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); in gve_set_do_reset()
953 clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags); in gve_clear_do_reset()
959 &priv->service_task_flags); in gve_get_reset_in_progress()
964 set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); in gve_set_reset_in_progress()
969 clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags); in gve_clear_reset_in_progress()
975 &priv->service_task_flags); in gve_get_probe_in_progress()
980 set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); in gve_set_probe_in_progress()
985 clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags); in gve_clear_probe_in_progress()
991 &priv->service_task_flags); in gve_get_do_report_stats()
996 set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); in gve_set_do_report_stats()
1001 clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags); in gve_clear_do_report_stats()
1006 return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); in gve_get_admin_queue_ok()
1011 set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); in gve_set_admin_queue_ok()
1016 clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags); in gve_clear_admin_queue_ok()
1021 return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); in gve_get_device_resources_ok()
1026 set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); in gve_set_device_resources_ok()
1031 clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags); in gve_clear_device_resources_ok()
1036 return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); in gve_get_device_rings_ok()
1041 set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); in gve_set_device_rings_ok()
1046 clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags); in gve_clear_device_rings_ok()
1051 return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); in gve_get_napi_enabled()
1056 set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); in gve_set_napi_enabled()
1061 clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags); in gve_clear_napi_enabled()
1066 return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); in gve_get_report_stats()
1071 clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags); in gve_clear_report_stats()
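The run of helpers from line 943 down to here is one repeating pattern: each service-task, state, or ethtool flag gets an atomic test_bit/set_bit/clear_bit triad, so producers (e.g. an interrupt or timer) can request work and the service task can consume it without extra locking. A hedged sketch of the consuming side, assuming a hypothetical service function built only from the getters/clearers shown above:

    /* Illustrative consumer; not the driver's actual service task. */
    static void service_task_example(struct gve_priv *priv)
    {
        if (gve_get_do_reset(priv)) {
            gve_clear_do_reset(priv);        /* ack before acting */
            /* ... perform the reset ... */
        }
        if (gve_get_do_report_stats(priv)) {
            gve_clear_do_report_stats(priv);
            /* ... refresh the stats report ... */
        }
    }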
1079 return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)]; in gve_irq_doorbell()
1089 /* Returns the index into ntfy_blocks of the given rx ring's block
1093 return (priv->num_ntfy_blks / 2) + queue_idx; in gve_rx_idx_to_ntfy()
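Line 1093 pairs with the comment at line 822 ("split between TX and RX so must be even"): TX rings take the first half of the notification blocks and RX rings the second, so RX queue i lands at num_ntfy_blks/2 + i. A tiny standalone sketch of the layout (helper name is hypothetical):

    /* Notification block layout (illustrative):
     *   [0 .. n/2)  TX rings,  [n/2 .. n)  RX rings
     */
    static unsigned int rx_idx_to_ntfy(unsigned int num_ntfy_blks,
                                       unsigned int queue_idx)
    {
        return num_ntfy_blks / 2 + queue_idx;
    }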
1098 return priv->queue_format == GVE_GQI_QPL_FORMAT || in gve_is_qpl()
1099 priv->queue_format == GVE_DQO_QPL_FORMAT; in gve_is_qpl()
1102 /* Returns the number of tx queue page lists */
1108 return tx_cfg->num_queues + tx_cfg->num_xdp_queues; in gve_num_tx_qpls()
1111 /* Returns the number of rx queue page lists */
1117 return rx_cfg->num_queues; in gve_num_rx_qpls()
1127 return priv->tx_cfg.max_queues + rx_qid; in gve_rx_qpl_id()
1133 return tx_cfg->max_queues + rx_qid; in gve_get_rx_qpl_id()
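Lines 1127 and 1133 imply one flat QPL id space: TX queues own ids [0, max_queues) and RX queue i maps to max_queues + i, which is also what lets gve_qpl_dma_dir() at line 1158 pick the DMA direction by comparing an id against the RX start. A sketch of that split, with hypothetical names:

    /* Flat QPL id space (illustrative):
     *   [0 .. max_tx)            TX qpls, DMA_TO_DEVICE
     *   [max_tx .. max_tx + rx)  RX qpls, DMA_FROM_DEVICE
     */
    static int qpl_id_is_rx(unsigned int qpl_id, unsigned int max_tx)
    {
        return qpl_id >= max_tx;
    }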
1149 * out-of-order completions. Set it to twice the ring size. in gve_get_rx_pages_per_qpl_dqo()
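Sizing the receive QPL at twice the ring size means a full ring's worth of buffers can still be outstanding while another ring's worth absorbs completions that return out of order; e.g. a 1024-entry ring would get a 2048-page QPL. A one-line sketch of that calculation, with an illustrative name:

    static unsigned int rx_pages_per_qpl(unsigned int ring_size)
    {
        return 2 * ring_size;   /* e.g. 1024 entries -> 2048 pages */
    }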
1154 /* Returns the correct dma direction for tx and rx qpls */
1158 if (id < gve_rx_start_qpl_id(&priv->tx_cfg)) in gve_qpl_dma_dir()
1166 return priv->queue_format == GVE_GQI_RDA_FORMAT || in gve_is_gqi()
1167 priv->queue_format == GVE_GQI_QPL_FORMAT; in gve_is_gqi()
1172 return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues; in gve_num_tx_queues()
1177 return priv->tx_cfg.num_queues + queue_id; in gve_xdp_tx_queue_id()
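Lines 1172 and 1177 lay out the TX ring namespace: netdev TX queues occupy [0, num_queues) and XDP TX queues are appended after them, so XDP queue i transmits on ring num_queues + i. Sketch with hypothetical names:

    /* TX ring array layout (illustrative):
     *   [0 .. num_queues)                      netdev TX rings
     *   [num_queues .. num_queues + num_xdp)   XDP TX rings
     */
    static unsigned int xdp_tx_ring_idx(unsigned int num_queues,
                                        unsigned int xdp_qid)
    {
        return num_queues + xdp_qid;
    }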
1187 switch (priv->queue_format) { in gve_supports_xdp_xmit()
1234 /* rx handling */
1235 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1237 bool gve_rx_work_pending(struct gve_rx_ring *rx);
1240 struct gve_rx_ring *rx,
1242 void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
1253 /* rx buffer handling */
1257 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
1258 bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
1260 void gve_free_buf_state(struct gve_rx_ring *rx,
1262 struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
1264 void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
1266 struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
1267 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
1269 void gve_free_to_page_pool(struct gve_rx_ring *rx,
1272 int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
1275 void gve_reuse_buffer(struct gve_rx_ring *rx,
1277 void gve_free_buffer(struct gve_rx_ring *rx,
1279 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
1281 struct gve_rx_ring *rx,
1313 return -EOPNOTSUPP; in gve_clock_nic_ts_read()