/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* GVE DQO Descriptor formats */

#ifndef _GVE_DESC_DQO_H_
#define _GVE_DESC_DQO_H_

#include "gve_plat.h"

#define GVE_ITR_ENABLE_BIT_DQO BIT(0)
#define GVE_ITR_NO_UPDATE_DQO (3 << 3)
#define GVE_ITR_INTERVAL_DQO_SHIFT 5
#define GVE_ITR_INTERVAL_DQO_MASK ((1 << 12) - 1)
#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
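
/*
 * Illustrative sketch only (not part of the device ABI): one way a driver
 * might compose an interrupt-moderation doorbell value from the ITR macros
 * above. The helper name is hypothetical; it just shows that the interval
 * occupies a 12-bit field starting at bit 5, with bit 0 acting as the enable.
 */
static inline uint32_t
gve_example_itr_val_dqo(uint32_t interval_us)
{
	/* Mask the interval down to its 12-bit field, then shift it into place. */
	interval_us &= GVE_ITR_INTERVAL_DQO_MASK;
	return (GVE_ITR_ENABLE_BIT_DQO |
	    (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT));
}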

#define GVE_TX_MAX_HDR_SIZE_DQO 255
#define GVE_TX_MIN_TSO_MSS_DQO 88

/*
 * Ringing the doorbell too often can hurt performance.
 *
 * HW requires this value to be at least 8.
 */
#define GVE_RX_BUF_THRESH_DQO 32
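
/*
 * Illustrative sketch only: GVE_RX_BUF_THRESH_DQO is meant to batch buffer
 * queue doorbell writes. The counter argument and helper below are
 * hypothetical; they only capture "ring once per batch of posted buffers,
 * not once per buffer".
 */
static inline bool
gve_example_should_ring_rx_db_dqo(uint32_t bufs_posted_since_db)
{
	return (bufs_posted_since_db >= GVE_RX_BUF_THRESH_DQO);
}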

/*
 * Start dropping RX fragments if at least this many
 * buffers cannot be posted to the NIC.
 */
#define GVE_RX_DQO_MIN_PENDING_BUFS 128

#define GVE_DQ_NUM_FRAGS_IN_PAGE (PAGE_SIZE / GVE_DEFAULT_RX_BUFFER_SIZE)

/*
 * gve_rx_qpl_buf_id_dqo's 11-bit wide buf_id field limits the total
 * number of pages per QPL to 2048.
 */
#define GVE_RX_NUM_QPL_PAGES_DQO 2048

/* 2K TX buffers for DQO-QPL */
#define GVE_TX_BUF_SHIFT_DQO 11
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)

#define GVE_TX_NUM_QPL_PAGES_DQO 512
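
/*
 * Illustrative sketch only: with 2K buffers carved out of each QPL page, a TX
 * buffer index maps to a (page, offset) pair as in this hypothetical helper.
 */
static inline void
gve_example_tx_buf_to_page_off_dqo(uint32_t buf_idx, uint32_t *page_idx,
    uint32_t *offset_in_page)
{
	*page_idx = buf_idx / GVE_TX_BUFS_PER_PAGE_DQO;
	*offset_in_page = (buf_idx % GVE_TX_BUFS_PER_PAGE_DQO) *
	    GVE_TX_BUF_SIZE_DQO;
}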

/* Basic TX descriptor (DTYPE 0x0C) */
struct gve_tx_pkt_desc_dqo {
	__le64 buf_addr;

	/* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
	uint8_t dtype:5;

	/* Denotes the last descriptor of a packet. */
	uint8_t end_of_packet:1;
	uint8_t checksum_offload_enable:1;

	/* If set, will generate a descriptor completion for this descriptor. */
	uint8_t report_event:1;
	uint8_t reserved0;
	__le16 reserved1;

	/* The TX completion for this packet will contain this tag. */
	__le16 compl_tag;
	uint16_t buf_size:14;
	uint16_t reserved2:2;
} __packed;
_Static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16,
    "gve: bad dqo desc struct length");

#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc
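
/*
 * Illustrative sketch only: filling in a basic TX packet descriptor for one
 * buffer of a packet. The helper and its arguments are hypothetical; fields
 * not shown (report_event, reserved*) are assumed to be already zeroed.
 * htole16/htole64 are the usual FreeBSD byte-order helpers, since buf_addr
 * and compl_tag are little-endian on the wire.
 */
static inline void
gve_example_fill_tx_pkt_desc_dqo(struct gve_tx_pkt_desc_dqo *desc,
    uint64_t dma_addr, uint16_t len, uint16_t compl_tag, bool last, bool csum)
{
	desc->buf_addr = htole64(dma_addr);
	desc->dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
	desc->end_of_packet = last ? 1 : 0;
	desc->checksum_offload_enable = csum ? 1 : 0;
	desc->compl_tag = htole16(compl_tag);
	desc->buf_size = len;	/* must not exceed GVE_TX_MAX_BUF_SIZE_DQO */
}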

/*
 * Maximum number of data descriptors allowed per packet, or per-TSO segment.
 */
#define GVE_TX_MAX_DATA_DESCS_DQO 10
#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)
#define GVE_TSO_MAXSIZE_DQO IP_MAXPACKET

_Static_assert(GVE_TX_MAX_BUF_SIZE_DQO * GVE_TX_MAX_DATA_DESCS_DQO >=
    GVE_TSO_MAXSIZE_DQO,
    "gve: bad tso parameters");

/*
 * "report_event" may only be set on the last descriptor of a TX packet, and
 * descriptors that set it must be spaced at least this many descriptors
 * apart.
 */
#define GVE_TX_MIN_RE_INTERVAL 32
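
/*
 * Illustrative sketch only: one way to honor the spacing rule above. The
 * caller would track how many descriptors have been posted since the last
 * report_event request; the helper name is hypothetical.
 */
static inline bool
gve_example_may_request_re_dqo(uint32_t descs_since_last_re)
{
	return (descs_since_last_re >= GVE_TX_MIN_RE_INTERVAL);
}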

struct gve_tx_context_cmd_dtype {
	uint8_t dtype:5;
	uint8_t tso:1;
	uint8_t reserved1:2;
	uint8_t reserved2;
};

_Static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2,
    "gve: bad dqo desc struct length");

/*
 * TX Native TSO Context DTYPE (0x05)
 *
 * "flex" fields allow the driver to send additional packet context to HW.
 */
struct gve_tx_tso_context_desc_dqo {
	/* The L4 payload bytes that should be segmented. */
	uint32_t tso_total_len:24;
	uint32_t flex10:8;

	/* Max segment size in TSO excluding headers. */
	uint16_t mss:14;
	uint16_t reserved:2;

	uint8_t header_len; /* Header length to use for TSO offload */
	uint8_t flex11;
	struct gve_tx_context_cmd_dtype cmd_dtype;
	uint8_t flex0;
	uint8_t flex5;
	uint8_t flex6;
	uint8_t flex7;
	uint8_t flex8;
	uint8_t flex9;
} __packed;
_Static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16,
    "gve: bad dqo desc struct length");

#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5
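
/*
 * Illustrative sketch only: filling in a TSO context descriptor. The helper is
 * hypothetical; it shows how (payload length, MSS, header length) map onto the
 * fields above, with the dtype set to the TSO context value.
 */
static inline void
gve_example_fill_tso_ctx_desc_dqo(struct gve_tx_tso_context_desc_dqo *desc,
    uint32_t l4_payload_len, uint16_t mss, uint8_t hdr_len)
{
	desc->tso_total_len = l4_payload_len;	/* headers excluded */
	desc->mss = mss;		/* expected >= GVE_TX_MIN_TSO_MSS_DQO */
	desc->header_len = hdr_len;	/* <= GVE_TX_MAX_HDR_SIZE_DQO */
	desc->cmd_dtype.dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO;
	desc->cmd_dtype.tso = 1;
}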

/* General context descriptor for sending metadata. */
struct gve_tx_general_context_desc_dqo {
	uint8_t flex4;
	uint8_t flex5;
	uint8_t flex6;
	uint8_t flex7;
	uint8_t flex8;
	uint8_t flex9;
	uint8_t flex10;
	uint8_t flex11;
	struct gve_tx_context_cmd_dtype cmd_dtype;
	uint16_t reserved;
	uint8_t flex0;
	uint8_t flex1;
	uint8_t flex2;
	uint8_t flex3;
} __packed;
_Static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16,
    "gve: bad dqo desc struct length");

#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4

/*
 * Logical structure of metadata which is packed into context descriptor flex
 * fields.
 */
struct gve_tx_metadata_dqo {
	union {
		struct {
			uint8_t version;

			/*
			 * A zero value means no l4_hash was associated with the
			 * mbuf.
			 */
			uint16_t path_hash:15;

			/*
			 * Should be set to 1 if the flow associated with the
			 * mbuf had a rehash from the TCP stack.
			 */
			uint16_t rehash_event:1;
		}  __packed;
		uint8_t bytes[12];
	};
}  __packed;
_Static_assert(sizeof(struct gve_tx_metadata_dqo) == 12,
    "gve: bad dqo desc struct length");

#define GVE_TX_METADATA_VERSION_DQO 0
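
/*
 * Illustrative sketch only: the metadata above reaches HW by copying its raw
 * bytes into the flex fields of the general context descriptor. The helper
 * below is hypothetical and uses a naive bytes[i] -> flex<i> layout; the byte
 * placement actually used by the driver is not specified by this header.
 */
static inline void
gve_example_pack_tx_metadata_dqo(struct gve_tx_general_context_desc_dqo *desc,
    const struct gve_tx_metadata_dqo *md)
{
	desc->cmd_dtype.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO;
	desc->flex0 = md->bytes[0];
	desc->flex1 = md->bytes[1];
	desc->flex2 = md->bytes[2];
	desc->flex3 = md->bytes[3];
	desc->flex4 = md->bytes[4];
	desc->flex5 = md->bytes[5];
	desc->flex6 = md->bytes[6];
	desc->flex7 = md->bytes[7];
	desc->flex8 = md->bytes[8];
	desc->flex9 = md->bytes[9];
	desc->flex10 = md->bytes[10];
	desc->flex11 = md->bytes[11];
}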

/* TX completion descriptor */
struct gve_tx_compl_desc_dqo {
	/* For types 0-4 this is the TX queue ID associated with this
	 * completion.
	 */
	uint16_t id:11;

	/* See: GVE_COMPL_TYPE_DQO* */
	uint16_t type:3;
	uint16_t reserved0:1;

	/* Flipped by HW to notify the descriptor is populated. */
	uint16_t generation:1;
	union {
		/* For descriptor completions, this is the last index fetched
		 * by HW + 1.
		 */
		__le16 tx_head;

		/* For packet completions, this is the completion tag set on the
		 * TX packet descriptors.
		 */
		__le16 completion_tag;
	};
	__le32 reserved1;
} __packed;
_Static_assert(sizeof(struct gve_tx_compl_desc_dqo) == 8,
    "gve: bad dqo desc struct length");

union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */
#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */
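
/*
 * Illustrative sketch only: minimal consumption of a TX completion. HW flips
 * the generation bit when it populates a slot, so SW compares it against the
 * generation it expects for the current pass over the ring, then dispatches on
 * the completion type. The helper and its return convention are hypothetical.
 */
static inline int
gve_example_parse_tx_compl_dqo(const struct gve_tx_compl_desc_dqo *desc,
    uint16_t expected_gen, uint16_t *tag_or_head)
{
	if (desc->generation != expected_gen)
		return (-1);	/* HW has not written this slot yet */

	switch (desc->type) {
	case GVE_COMPL_TYPE_DQO_PKT:
		/* Echoes the compl_tag from the TX packet descriptors. */
		*tag_or_head = le16toh(desc->completion_tag);
		break;
	case GVE_COMPL_TYPE_DQO_DESC:
		/* Last descriptor index fetched by HW, plus one. */
		*tag_or_head = le16toh(desc->tx_head);
		break;
	default:
		return (-1);	/* other completion types not handled here */
	}
	return (desc->type);
}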

/* Descriptor to post buffers to HW on buffer queue. */
struct gve_rx_desc_dqo {
	__le16 buf_id; /* ID returned in Rx completion descriptor */
	__le16 reserved0;
	__le32 reserved1;
	__le64 buf_addr; /* DMA address of the buffer */
	__le64 header_buf_addr;
	__le64 reserved2;
} __packed;
_Static_assert(sizeof(struct gve_rx_desc_dqo) == 32,
    "gve: bad dqo desc struct length");

/* Descriptor for HW to notify SW of new packets received on RX queue. */
struct gve_rx_compl_desc_dqo {
	/* Must be 1 */
	uint8_t rxdid:4;
	uint8_t reserved0:4;

	/* Packet originated from this system rather than the network. */
	uint8_t loopback:1;
	/* Set when IPv6 packet contains a destination options header or routing
	 * header.
	 */
	uint8_t ipv6_ex_add:1;
	/* Invalid packet was received. */
	uint8_t rx_error:1;
	uint8_t reserved1:5;

	uint16_t packet_type:10;
	uint16_t ip_hdr_err:1;
	uint16_t udp_len_err:1;
	uint16_t raw_cs_invalid:1;
	uint16_t reserved2:3;

	uint16_t packet_len:14;
	/* Flipped by HW to notify the descriptor is populated. */
	uint16_t generation:1;
	/* Should be zero. */
	uint16_t buffer_queue_id:1;

	uint16_t header_len:10;
	uint16_t rsc:1;
	uint16_t split_header:1;
	uint16_t reserved3:4;

	uint8_t descriptor_done:1;
	uint8_t end_of_packet:1;
	uint8_t header_buffer_overflow:1;
	uint8_t l3_l4_processed:1;
	uint8_t csum_ip_err:1;
	uint8_t csum_l4_err:1;
	uint8_t csum_external_ip_err:1;
	uint8_t csum_external_udp_err:1;

	uint8_t status_error1;

	__le16 reserved5;
	__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */

	union {
		/* Packet checksum. */
		__le16 raw_cs;
		/* Segment length for RSC packets. */
		__le16 rsc_seg_len;
	};
	__le32 hash;
	__le32 reserved6;
	__le64 reserved7;
} __packed;

_Static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32,
    "gve: bad dqo desc struct length");

#endif /* _GVE_DESC_DQO_H_ */