xref: /linux/drivers/net/ethernet/amazon/ena/ena_eth_com.h (revision 9dbbc3b9d09d6deba9f3b9e1d5b355032ed46a75)
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4

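/* TX packet context handed to ena_com_prepare_tx(): buffer list, offload
 * flags and, for LLQ, the header that is pushed to the device memory space.
 */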
struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device memory space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For a regular queue, indicates the size of the header.
	 * For LLQ, indicates the size of the pushed buffer.
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

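/* RX packet context filled by ena_com_rx_pkt(): received buffers, parsed L3/L4
 * protocols, checksum state, hash value and the number of completion descriptors.
 */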
struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

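/* Re-enable (unmask) interrupts for this IO completion queue by writing the
 * prepared interrupt control value to the queue's unmask register.
 */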
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

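/* Number of free descriptors left in the submission queue: queue depth minus
 * one, minus the descriptors issued (tail) but not yet completed (next_to_comp).
 */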
static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just subtract 2 lines from the free descs
	 * (one for the header line and one to compensate for the division
	 * rounding down).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}

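/* Return true when the packet's TX meta differs from the meta currently cached
 * on the submission queue, i.e. a fresh meta descriptor has to be issued.
 */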
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

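/* True when the queue uses the device placement policy (LLQ) and the device
 * advertised a limit on the number of entries per TX burst.
 */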
static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

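/* For LLQ with a TX burst limit: compute how many LLQ entries this packet
 * needs (data descriptors plus a possible meta descriptor, split across
 * descs_per_entry sized entries) and return true when that exceeds the
 * entries left in the current burst, i.e. the doorbell must be rung first.
 */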
static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Queue: %d num_descs: %d num_entries_needed: %d\n",
		   io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

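/* Ring the submission queue doorbell by writing the current tail, letting the
 * device fetch the newly queued descriptors; for LLQ this also refills the
 * per-burst entry budget.
 */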
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Write submission queue doorbell for queue: %d tail: %d\n",
		   io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Reset available entries in tx burst for queue %d to %d\n",
			   io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

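/* Report the completion queue head to the device, but only when the queue has
 * a head doorbell register and more than q_depth / ENA_COMP_HEAD_THRESH
 * completions accumulated since the last report, to limit doorbell writes.
 */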
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
				   "Write completion queue doorbell for queue %d: head: %d\n",
				   io_cq->qid, head);
			writel(head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

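/* Report the NUMA node handling this queue to the device through the optional
 * NUMA node configuration register.
 */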
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

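/* Acknowledge elem completed TX descriptors by advancing the submission
 * queue's completion pointer, freeing their slots for reuse.
 */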
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

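/* Advance the completion queue head and flip the expected phase bit whenever
 * the head wraps around the end of the queue.
 */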
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

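/* Poll the next TX completion descriptor: return -EAGAIN when the phase bit
 * shows the device hasn't written it yet, -EINVAL for an out-of-range req_id,
 * or 0 with the completed request id stored in *req_id.
 */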
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase doesn't match the
	 * expected phase, the device hasn't updated this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	/* Make sure req_id is read only after the phase bit was validated */
	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Invalid req id %d\n", cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

#endif /* ENA_ETH_COM_H_ */