/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* GVE DQO Descriptor formats */

#ifndef _GVE_DESC_DQO_H_
#define _GVE_DESC_DQO_H_

#include "gve_plat.h"

#define GVE_ITR_ENABLE_BIT_DQO BIT(0)
#define GVE_ITR_NO_UPDATE_DQO (3 << 3)
#define GVE_ITR_INTERVAL_DQO_SHIFT 5
#define GVE_ITR_INTERVAL_DQO_MASK ((1 << 12) - 1)
#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
#define GVE_RX_IRQ_RATELIMIT_US_DQO 20

#define GVE_TX_MAX_HDR_SIZE_DQO 255
#define GVE_TX_MIN_TSO_MSS_DQO 88

/*
 * Ringing the doorbell too often can hurt performance.
 *
 * HW requires this value to be at least 8.
 */
#define GVE_RX_BUF_THRESH_DQO 32

/*
 * Start dropping RX fragments if at least this many
 * buffers cannot be posted to the NIC.
 */
#define GVE_RX_DQO_MIN_PENDING_BUFS 128

/*
 * gve_rx_qpl_buf_id_dqo's 11-bit-wide buf_id field limits the total
 * number of pages per QPL to 2048.
 */
#define GVE_RX_NUM_QPL_PAGES_DQO 2048

/* 2K TX buffers for DQO-QPL */
#define GVE_TX_BUF_SHIFT_DQO 11
#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)

#define GVE_TX_NUM_QPL_PAGES_DQO 512
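
/*
 * Illustrative sketch, not part of the original header: one way the DQO-QPL
 * TX buffer macros above can map a flat 2K-buffer index onto a QPL page and
 * an offset within that page. The helper names are hypothetical and assume
 * buffers are carved linearly from the QPL pages, as GVE_TX_BUF_SHIFT_DQO
 * and GVE_TX_BUFS_PER_PAGE_DQO imply.
 */
static inline uint32_t
gve_tx_buf_to_page_example(uint32_t buf_idx)
{
	/* Each QPL page holds GVE_TX_BUFS_PER_PAGE_DQO fixed-size buffers. */
	return (buf_idx / GVE_TX_BUFS_PER_PAGE_DQO);
}

static inline uint32_t
gve_tx_buf_to_offset_example(uint32_t buf_idx)
{
	/* Byte offset of the buffer within its page. */
	return ((buf_idx % GVE_TX_BUFS_PER_PAGE_DQO) << GVE_TX_BUF_SHIFT_DQO);
}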

/* Basic TX descriptor (DTYPE 0x0C) */
struct gve_tx_pkt_desc_dqo {
	__le64 buf_addr;

	/* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
	uint8_t dtype:5;

	/* Denotes the last descriptor of a packet. */
	uint8_t end_of_packet:1;
	uint8_t checksum_offload_enable:1;

	/* If set, will generate a descriptor completion for this descriptor. */
	uint8_t report_event:1;
	uint8_t reserved0;
	__le16 reserved1;

	/* The TX completion for this packet will contain this tag. */
	__le16 compl_tag;
	uint16_t buf_size:14;
	uint16_t reserved2:2;
} __packed;
_Static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16,
    "gve: bad dqo desc struct length");

#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc

/*
 * Maximum number of data descriptors allowed per packet, or per TSO segment.
 */
#define GVE_TX_MAX_DATA_DESCS_DQO 10
#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)
#define GVE_TSO_MAXSIZE_DQO IP_MAXPACKET

_Static_assert(GVE_TX_MAX_BUF_SIZE_DQO * GVE_TX_MAX_DATA_DESCS_DQO >=
    GVE_TSO_MAXSIZE_DQO,
    "gve: bad tso parameters");

/*
 * The "report_event" bit may only be set on the last descriptor of a TX
 * packet, and descriptors carrying it must be spaced at least this many
 * descriptors apart.
 */
#define GVE_TX_MIN_RE_INTERVAL 32

struct gve_tx_context_cmd_dtype {
	uint8_t dtype:5;
	uint8_t tso:1;
	uint8_t reserved1:2;
	uint8_t reserved2;
};

_Static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2,
    "gve: bad dqo desc struct length");

/*
 * TX Native TSO Context DTYPE (0x05)
 *
 * "flex" fields allow the driver to send additional packet context to HW.
 */
struct gve_tx_tso_context_desc_dqo {
	/* The L4 payload bytes that should be segmented. */
	uint32_t tso_total_len:24;
	uint32_t flex10:8;

	/* Max segment size in TSO excluding headers. */
	uint16_t mss:14;
	uint16_t reserved:2;

	uint8_t header_len; /* Header length to use for TSO offload */
	uint8_t flex11;
	struct gve_tx_context_cmd_dtype cmd_dtype;
	uint8_t flex0;
	uint8_t flex5;
	uint8_t flex6;
	uint8_t flex7;
	uint8_t flex8;
	uint8_t flex9;
} __packed;
_Static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16,
    "gve: bad dqo desc struct length");

#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5

/* General context descriptor for sending metadata. */
struct gve_tx_general_context_desc_dqo {
	uint8_t flex4;
	uint8_t flex5;
	uint8_t flex6;
	uint8_t flex7;
	uint8_t flex8;
	uint8_t flex9;
	uint8_t flex10;
	uint8_t flex11;
	struct gve_tx_context_cmd_dtype cmd_dtype;
	uint16_t reserved;
	uint8_t flex0;
	uint8_t flex1;
	uint8_t flex2;
	uint8_t flex3;
} __packed;
_Static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16,
    "gve: bad dqo desc struct length");

#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4

/*
 * Logical structure of metadata which is packed into context descriptor flex
 * fields.
 */
struct gve_tx_metadata_dqo {
	union {
		struct {
			uint8_t version;

			/*
			 * A zero value means no l4_hash was associated with
			 * the mbuf.
			 */
			uint16_t path_hash:15;

			/*
			 * Should be set to 1 if the flow associated with the
			 * mbuf had a rehash from the TCP stack.
			 */
			uint16_t rehash_event:1;
		} __packed;
		uint8_t bytes[12];
	};
} __packed;
_Static_assert(sizeof(struct gve_tx_metadata_dqo) == 12,
    "gve: bad dqo desc struct length");

#define GVE_TX_METADATA_VERSION_DQO 0

/* Used to access the generation bit within a TX completion descriptor. */
#define GVE_TX_DESC_DQO_GEN_BYTE_OFFSET 1
#define GVE_TX_DESC_DQO_GEN_BIT_MASK 0x80
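
/*
 * Illustrative sketch, not part of the original header: using the byte offset
 * and bit mask above to peek at the generation bit of a raw TX completion
 * descriptor. The helper name is hypothetical; a driver would typically
 * compare this bit against the ring's current generation to decide whether
 * HW has populated the entry yet.
 */
static inline uint8_t
gve_tx_compl_desc_gen_example(const uint8_t *compl_desc_bytes)
{
	return ((compl_desc_bytes[GVE_TX_DESC_DQO_GEN_BYTE_OFFSET] &
	    GVE_TX_DESC_DQO_GEN_BIT_MASK) != 0);
}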

/* TX completion descriptor */
struct gve_tx_compl_desc_dqo {
	/*
	 * For types 0-4 this is the TX queue ID associated with this
	 * completion.
	 */
	uint16_t id:11;

	/* See: GVE_COMPL_TYPE_DQO* */
	uint16_t type:3;
	uint16_t reserved0:1;

	/* Flipped by HW to notify that the descriptor is populated. */
	uint16_t generation:1;
	union {
		/*
		 * For descriptor completions, this is the last index fetched
		 * by HW + 1.
		 */
		__le16 tx_head;

		/*
		 * For packet completions, this is the completion tag set on
		 * the TX packet descriptors.
		 */
		__le16 completion_tag;
	};
	__le32 reserved1;
} __packed;
_Static_assert(sizeof(struct gve_tx_compl_desc_dqo) == 8,
    "gve: bad dqo desc struct length");

union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

#define GVE_COMPL_TYPE_DQO_PKT 0x2	/* Packet completion */
#define GVE_COMPL_TYPE_DQO_DESC 0x4	/* Descriptor completion */

/* Descriptor to post buffers to HW on buffer queue. */
struct gve_rx_desc_dqo {
	__le16 buf_id; /* ID returned in Rx completion descriptor */
	__le16 reserved0;
	__le32 reserved1;
	__le64 buf_addr; /* DMA address of the buffer */
	__le64 header_buf_addr;
	__le64 reserved2;
} __packed;
_Static_assert(sizeof(struct gve_rx_desc_dqo) == 32,
    "gve: bad dqo desc struct length");

/* Used to access the generation bit within an RX completion descriptor. */
#define GVE_RX_DESC_DQO_GEN_BYTE_OFFSET 5
#define GVE_RX_DESC_DQO_GEN_BIT_MASK 0x40

/* Descriptor for HW to notify SW of new packets received on RX queue. */
struct gve_rx_compl_desc_dqo {
	/* Must be 1 */
	uint8_t rxdid:4;
	uint8_t reserved0:4;

	/* Packet originated from this system rather than the network. */
	uint8_t loopback:1;
	/*
	 * Set when an IPv6 packet contains a destination options header or
	 * routing header.
	 */
	uint8_t ipv6_ex_add:1;
	/* Invalid packet was received. */
	uint8_t rx_error:1;
	uint8_t reserved1:5;

	uint16_t packet_type:10;
	uint16_t ip_hdr_err:1;
	uint16_t udp_len_err:1;
	uint16_t raw_cs_invalid:1;
	uint16_t reserved2:3;

	uint16_t packet_len:14;
	/* Flipped by HW to notify that the descriptor is populated. */
	uint16_t generation:1;
	/* Should be zero. */
	uint16_t buffer_queue_id:1;

	uint16_t header_len:10;
	uint16_t rsc:1;
	uint16_t split_header:1;
	uint16_t reserved3:4;

	uint8_t descriptor_done:1;
	uint8_t end_of_packet:1;
	uint8_t header_buffer_overflow:1;
	uint8_t l3_l4_processed:1;
	uint8_t csum_ip_err:1;
	uint8_t csum_l4_err:1;
	uint8_t csum_external_ip_err:1;
	uint8_t csum_external_udp_err:1;

	uint8_t status_error1;

	__le16 reserved5;
	__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */

	union {
		/* Packet checksum. */
		__le16 raw_cs;
		/* Segment length for RSC packets. */
		__le16 rsc_seg_len;
	};
	__le32 hash;
	__le32 reserved6;
	__le64 reserved7;
} __packed;

_Static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32,
    "gve: bad dqo desc struct length");

/* Number of fixed-size RX buffers that fit in a single QPL page. */
static inline uint8_t
gve_get_dq_num_frags_in_page(struct gve_priv *priv)
{
	return (PAGE_SIZE / priv->rx_buf_size_dqo);
}
#endif /* _GVE_DESC_DQO_H_ */