/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Oxide Computer Company
 */

/*
 * This file declares all constants and structures dealing with the
 * physical ENA device. It is based on the ena_com code of the public
 * Linux and FreeBSD drivers. While this file is based on the common
 * code it doesn't share the same type names. Where it is useful, a
 * "common" reference is added to include the name of the type as
 * defined in the common code.
 *
 * The Linux driver defines ena_admin_aq_entry as the top-level type
 * for admin command descriptors. From this type you can access the
 * common bits shared by every descriptor (ena_admin_aq_common_desc)
 * as well as the control buffer (ena_admin_ctrl_buff_info) which is
 * present for _some_ commands. Other than that, this top-level type
 * treats the rest of the data as an opaque array of unsigned 32-bit
 * integers. Then, for each individual command, the Linux driver
 * defines a dedicated type, each of which contains the following:
 *
 * 1. The common descriptor: ena_admin_aq_common_desc.
 *
 * 2. The optional control buffer desc: ena_admin_ctrl_buff_info.
 *
 * 3. The command-specific data.
 *
 * 4. Optional padding to make sure all commands are 64 bytes in size.
 *
 * Furthermore, there may be further common types for commands which
 * are made up of several sub-commands, e.g. the get/set feature
 * commands.
 *
 * Finally, when a command is passed to the common function for
 * executing commands (ena_com_execute_admin_command()), it is cast as
 * a pointer to the top-level type: ena_admin_aq_entry.
 *
 * This works for the Linux driver just fine, but it causes lots of
 * repetition in the structure definitions and also means there is no
 * easy way to determine all valid commands. This ENA driver has
 * turned the Linux approach inside out -- the top-level type is a
 * union of all possible commands: enahw_cmd_desc_t. Each command may
 * then further sub-type via unions to represent its sub-commands.
 * This same treatment was given to the response descriptor:
 * enahw_resp_desc_t.
 *
 * What is the point of knowing all this? Well, when referencing the
 * common type in the comment above the enahw_ type, you need to keep
 * in mind that the Linux/common type will include all the common
 * descriptor bits, whereas these types do not.
 *
 * The common code DOES NOT pack any of these structures, and thus
 * neither do we. That means these structures all rely on natural
 * compiler alignment, just as the common code does. In ena.c you will
 * find CTASSERTs for many of these structures, to verify they are of
 * the expected size.
 */

#ifndef	_ENA_HW_H
#define	_ENA_HW_H

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/types.h>
#include <sys/debug.h>
#include <sys/ethernet.h>

/*
 * The common code sets the upper limit of I/O queues to 128. In this
 * case a "queue" is an SQ+CQ pair that forms a logical queue or ring
 * for sending or receiving packets.
Thus, at maximum, we may expect 83 * 128 Tx rings, and 128 Rx rings; though, practically speaking, the 84 * number of rings will often be limited by number of CPUs or 85 * available interrupts. 86 * 87 * common: ENA_MAX_NUM_IO_QUEUES 88 */ 89 #define ENAHW_MAX_NUM_IO_QUEUES 128 90 91 /* 92 * Generate a 32-bit bitmask where the bits between high (inclusive) 93 * and low (inclusive) are set to 1. 94 */ 95 #define GENMASK(h, l) (((~0U) - (1U << (l)) + 1) & (~0U >> (32 - 1 - (h)))) 96 97 /* 98 * Generate a 64-bit bitmask where bit b is set to 1. 99 */ 100 #define BIT(b) (1UL << (b)) 101 102 #define ENAHW_DMA_ADMINQ_ALIGNMENT 8 103 104 #define ENAHW_ADMIN_CQ_DESC_BUF_ALIGNMENT 8 105 #define ENAHW_ADMIN_SQ_DESC_BUF_ALIGNMENT 8 106 #define ENAHW_AENQ_DESC_BUF_ALIGNMENT 8 107 #define ENAHW_HOST_INFO_ALIGNMENT 8 108 #define ENAHW_HOST_INFO_ALLOC_SZ 4096 109 #define ENAHW_IO_CQ_DESC_BUF_ALIGNMENT 4096 110 #define ENAHW_IO_SQ_DESC_BUF_ALIGNMENT 8 111 112 /* 113 * BAR0 register offsets. 114 * 115 * Any register not defined in the common code was marked as a gap, 116 * using the hex address of the register as suffix to make it clear 117 * where the gaps are. 118 */ 119 #define ENAHW_REG_VERSION 0x0 120 #define ENAHW_REG_CONTROLLER_VERSION 0x4 121 #define ENAHW_REG_CAPS 0x8 122 #define ENAHW_REG_CAPS_EXT 0xc 123 #define ENAHW_REG_ASQ_BASE_LO 0x10 124 #define ENAHW_REG_ASQ_BASE_HI 0x14 125 #define ENAHW_REG_ASQ_CAPS 0x18 126 #define ENAHW_REG_GAP_1C 0x1c 127 #define ENAHW_REG_ACQ_BASE_LO 0x20 128 #define ENAHW_REG_ACQ_BASE_HI 0x24 129 #define ENAHW_REG_ACQ_CAPS 0x28 130 #define ENAHW_REG_ASQ_DB 0x2c 131 #define ENAHW_REG_ACQ_TAIL 0x30 132 #define ENAHW_REG_AENQ_CAPS 0x34 133 #define ENAHW_REG_AENQ_BASE_LO 0x38 134 #define ENAHW_REG_AENQ_BASE_HI 0x3c 135 #define ENAHW_REG_AENQ_HEAD_DB 0x40 136 #define ENAHW_REG_AENQ_TAIL 0x44 137 #define ENAHW_REG_GAP_48 0x48 138 #define ENAHW_REG_INTERRUPT_MASK 0x4c 139 #define ENAHW_REG_GAP_50 0x50 140 #define ENAHW_REG_DEV_CTL 0x54 141 #define ENAHW_REG_DEV_STS 0x58 142 #define ENAHW_REG_MMIO_REG_READ 0x5c 143 #define ENAHW_REG_MMIO_RESP_LO 0x60 144 #define ENAHW_REG_MMIO_RESP_HI 0x64 145 #define ENAHW_REG_RSS_IND_ENTRY_UPDATE 0x68 146 #define ENAHW_NUM_REGS ((ENAHW_REG_RSS_IND_ENTRY_UPDATE / 4) + 1) 147 148 /* 149 * Device Version (Register 0x0) 150 */ 151 #define ENAHW_DEV_MINOR_VSN_MASK 0xff 152 #define ENAHW_DEV_MAJOR_VSN_SHIFT 8 153 #define ENAHW_DEV_MAJOR_VSN_MASK 0xff00 154 155 #define ENAHW_DEV_MAJOR_VSN(vsn) \ 156 (((vsn) & ENAHW_DEV_MAJOR_VSN_MASK) >> ENAHW_DEV_MAJOR_VSN_SHIFT) 157 #define ENAHW_DEV_MINOR_VSN(vsn) \ 158 ((vsn) & ENAHW_DEV_MINOR_VSN_MASK) 159 160 /* 161 * Controller Version (Register 0x4) 162 */ 163 #define ENAHW_CTRL_SUBMINOR_VSN_MASK 0xff 164 #define ENAHW_CTRL_MINOR_VSN_SHIFT 8 165 #define ENAHW_CTRL_MINOR_VSN_MASK 0xff00 166 #define ENAHW_CTRL_MAJOR_VSN_SHIFT 16 167 #define ENAHW_CTRL_MAJOR_VSN_MASK 0xff0000 168 #define ENAHW_CTRL_IMPL_ID_SHIFT 24 169 #define ENAHW_CTRL_IMPL_ID_MASK 0xff000000 170 171 #define ENAHW_CTRL_MAJOR_VSN(vsn) \ 172 (((vsn) & ENAHW_CTRL_MAJOR_VSN_MASK) >> ENAHW_CTRL_MAJOR_VSN_SHIFT) 173 #define ENAHW_CTRL_MINOR_VSN(vsn) \ 174 (((vsn) & ENAHW_CTRL_MINOR_VSN_MASK) >> ENAHW_CTRL_MINOR_VSN_SHIFT) 175 #define ENAHW_CTRL_SUBMINOR_VSN(vsn) \ 176 ((vsn) & ENAHW_CTRL_SUBMINOR_VSN_MASK) 177 #define ENAHW_CTRL_IMPL_ID(vsn) \ 178 (((vsn) & ENAHW_CTRL_IMPL_ID_MASK) >> ENAHW_CTRL_IMPL_ID_SHIFT) 179 180 /* 181 * Device Caps (Register 0x8) 182 */ 183 #define ENAHW_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 184 #define 
ENAHW_CAPS_RESET_TIMEOUT_SHIFT 1 185 #define ENAHW_CAPS_RESET_TIMEOUT_MASK 0x3e 186 #define ENAHW_CAPS_RESET_TIMEOUT(v) \ 187 (((v) & ENAHW_CAPS_RESET_TIMEOUT_MASK) >> \ 188 ENAHW_CAPS_RESET_TIMEOUT_SHIFT) 189 #define ENAHW_CAPS_DMA_ADDR_WIDTH_SHIFT 8 190 #define ENAHW_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 191 #define ENAHW_CAPS_DMA_ADDR_WIDTH(v) \ 192 (((v) & ENAHW_CAPS_DMA_ADDR_WIDTH_MASK) >> \ 193 ENAHW_CAPS_DMA_ADDR_WIDTH_SHIFT) 194 #define ENAHW_CAPS_ADMIN_CMD_TIMEOUT_SHIFT 16 195 #define ENAHW_CAPS_ADMIN_CMD_TIMEOUT_MASK 0xf0000 196 #define ENAHW_CAPS_ADMIN_CMD_TIMEOUT(v) \ 197 (((v) & ENAHW_CAPS_ADMIN_CMD_TIMEOUT_MASK) >> \ 198 ENAHW_CAPS_ADMIN_CMD_TIMEOUT_SHIFT) 199 200 typedef enum enahw_reset_reason_types { 201 ENAHW_RESET_NORMAL = 0, 202 ENAHW_RESET_KEEP_ALIVE_TO = 1, 203 ENAHW_RESET_ADMIN_TO = 2, 204 ENAHW_RESET_MISS_TX_CMPL = 3, 205 ENAHW_RESET_INV_RX_REQ_ID = 4, 206 ENAHW_RESET_INV_TX_REQ_ID = 5, 207 ENAHW_RESET_TOO_MANY_RX_DESCS = 6, 208 ENAHW_RESET_INIT_ERR = 7, 209 ENAHW_RESET_DRIVER_INVALID_STATE = 8, 210 ENAHW_RESET_OS_TRIGGER = 9, 211 ENAHW_RESET_OS_NETDEV_WD = 10, 212 ENAHW_RESET_SHUTDOWN = 11, 213 ENAHW_RESET_USER_TRIGGER = 12, 214 ENAHW_RESET_GENERIC = 13, 215 ENAHW_RESET_MISS_INTERRUPT = 14, 216 ENAHW_RESET_SUSPECTED_POLL_STARVATION = 15, 217 ENAHW_RESET_RX_DESCRIPTOR_MALFORMED = 16, 218 ENAHW_RESET_TX_DESCRIPTOR_MALFORMED = 17, 219 ENAHW_RESET_MISSING_ADMIN_INTERRUPT = 18, 220 ENAHW_RESET_DEVICE_REQUEST = 19, 221 ENAHW_RESET_LAST, 222 } enahw_reset_reason_t; 223 224 #define ENAHW_RESET_REASON_LSB_SHIFT 0 225 #define ENAHW_RESET_REASON_LSB_MASK 0xf 226 #define ENAHW_RESET_REASON_MSB_SHIFT 4 227 #define ENAHW_RESET_REASON_MSB_MASK 0xf0 228 #define ENAHW_RESET_REASON_LSB(v) \ 229 (((v) & ENAHW_RESET_REASON_LSB_MASK) >> ENAHW_RESET_REASON_LSB_SHIFT) 230 #define ENAHW_RESET_REASON_MSB(v) \ 231 (((v) & ENAHW_RESET_REASON_MSB_MASK) >> ENAHW_RESET_REASON_MSB_SHIFT) 232 233 /* 234 * Admin Submission Queue Caps (Register 0x18) 235 */ 236 #define ENAHW_ASQ_CAPS_DEPTH_MASK 0xffff 237 #define ENAHW_ASQ_CAPS_ENTRY_SIZE_SHIFT 16 238 #define ENAHW_ASQ_CAPS_ENTRY_SIZE_MASK 0xffff0000 239 240 #define ENAHW_ASQ_CAPS_DEPTH(x) ((x) & ENAHW_ASQ_CAPS_DEPTH_MASK) 241 242 #define ENAHW_ASQ_CAPS_ENTRY_SIZE(x) \ 243 (((x) << ENAHW_ASQ_CAPS_ENTRY_SIZE_SHIFT) & \ 244 ENAHW_ASQ_CAPS_ENTRY_SIZE_MASK) 245 246 /* 247 * Admin Completion Queue Caps (Register 0x28) 248 */ 249 #define ENAHW_ACQ_CAPS_DEPTH_MASK 0xffff 250 #define ENAHW_ACQ_CAPS_ENTRY_SIZE_SHIFT 16 251 #define ENAHW_ACQ_CAPS_ENTRY_SIZE_MASK 0xffff0000 252 253 #define ENAHW_ACQ_CAPS_DEPTH(x) ((x) & ENAHW_ACQ_CAPS_DEPTH_MASK) 254 255 #define ENAHW_ACQ_CAPS_ENTRY_SIZE(x) \ 256 (((x) << ENAHW_ACQ_CAPS_ENTRY_SIZE_SHIFT) & \ 257 ENAHW_ACQ_CAPS_ENTRY_SIZE_MASK) 258 259 /* 260 * Asynchronous Event Notification Queue Caps (Register 0x34) 261 */ 262 #define ENAHW_AENQ_CAPS_DEPTH_MASK 0xffff 263 #define ENAHW_AENQ_CAPS_ENTRY_SIZE_SHIFT 16 264 #define ENAHW_AENQ_CAPS_ENTRY_SIZE_MASK 0xffff0000 265 266 #define ENAHW_AENQ_CAPS_DEPTH(x) ((x) & ENAHW_AENQ_CAPS_DEPTH_MASK) 267 268 #define ENAHW_AENQ_CAPS_ENTRY_SIZE(x) \ 269 (((x) << ENAHW_AENQ_CAPS_ENTRY_SIZE_SHIFT) & \ 270 ENAHW_AENQ_CAPS_ENTRY_SIZE_MASK) 271 272 /* 273 * Interrupt Mask (Register 0x4c) 274 */ 275 #define ENAHW_INTR_UNMASK 0x0 276 #define ENAHW_INTR_MASK 0x1 277 278 /* 279 * Device Control (Register 0x54) 280 */ 281 #define ENAHW_DEV_CTL_DEV_RESET_MASK 0x1 282 #define ENAHW_DEV_CTL_AQ_RESTART_SHIFT 1 283 #define ENAHW_DEV_CTL_AQ_RESTART_MASK 0x2 284 #define ENAHW_DEV_CTL_QUIESCENT_SHIFT 2 285 
#define	ENAHW_DEV_CTL_QUIESCENT_MASK		0x4
#define	ENAHW_DEV_CTL_IO_RESUME_SHIFT		3
#define	ENAHW_DEV_CTL_IO_RESUME_MASK		0x8
#define	ENAHW_DEV_CTL_RESET_REASON_EXT_SHIFT	24
#define	ENAHW_DEV_CTL_RESET_REASON_EXT_MASK	0xf000000
#define	ENAHW_DEV_CTL_RESET_REASON_SHIFT	28
#define	ENAHW_DEV_CTL_RESET_REASON_MASK		0xf0000000

/*
 * Device Status (Register 0x58)
 */
#define	ENAHW_DEV_STS_READY_MASK			0x1
#define	ENAHW_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT	1
#define	ENAHW_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK	0x2
#define	ENAHW_DEV_STS_AQ_RESTART_FINISHED_SHIFT		2
#define	ENAHW_DEV_STS_AQ_RESTART_FINISHED_MASK		0x4
#define	ENAHW_DEV_STS_RESET_IN_PROGRESS_SHIFT		3
#define	ENAHW_DEV_STS_RESET_IN_PROGRESS_MASK		0x8
#define	ENAHW_DEV_STS_RESET_FINISHED_SHIFT		4
#define	ENAHW_DEV_STS_RESET_FINISHED_MASK		0x10
#define	ENAHW_DEV_STS_FATAL_ERROR_SHIFT			5
#define	ENAHW_DEV_STS_FATAL_ERROR_MASK			0x20
#define	ENAHW_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT	6
#define	ENAHW_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK	0x40
#define	ENAHW_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT	7
#define	ENAHW_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK	0x80

/* common: ena_admin_aenq_common_desc */
typedef struct enahw_aenq_desc {
	uint16_t	ead_group;
	uint16_t	ead_syndrome;
	uint8_t		ead_flags;
	uint8_t		ead_rsvd1[3];
	uint32_t	ead_ts_low;
	uint32_t	ead_ts_high;

	union {
		uint32_t	raw[12];

		struct {
			uint32_t flags;
		} link_change;

		struct {
			uint32_t rx_drops_low;
			uint32_t rx_drops_high;
			uint32_t tx_drops_low;
			uint32_t tx_drops_high;
			uint32_t rx_overruns_low;
			uint32_t rx_overruns_high;
		} keep_alive;
	} ead_payload;
} enahw_aenq_desc_t;

#define	ENAHW_AENQ_DESC_PHASE_MASK	BIT(0)

#define	ENAHW_AENQ_DESC_PHASE(desc)	\
	((desc)->ead_flags & ENAHW_AENQ_DESC_PHASE_MASK)

#define	ENAHW_AENQ_LINK_CHANGE_LINK_STATUS_MASK	BIT(0)

/*
 * Asynchronous Event Notification Queue groups.
 *
 * Note: These values represent the bit position of each feature as
 * returned by ENAHW_FEAT_AENQ_CONFIG. We encode them this way so that
 * they can double as an index into the AENQ handlers array.
 *
 * common: ena_admin_aenq_group
 */
typedef enum enahw_aenq_groups {
	ENAHW_AENQ_GROUP_LINK_CHANGE		= 0,
	ENAHW_AENQ_GROUP_FATAL_ERROR		= 1,
	ENAHW_AENQ_GROUP_WARNING		= 2,
	ENAHW_AENQ_GROUP_NOTIFICATION		= 3,
	ENAHW_AENQ_GROUP_KEEP_ALIVE		= 4,
	ENAHW_AENQ_GROUP_REFRESH_CAPABILITIES	= 5,
	ENAHW_AENQ_GROUP_CONF_NOTIFICATIONS	= 6,
	ENAHW_AENQ_GROUP_DEVICE_REQUEST_RESET	= 7,
	ENAHW_AENQ_GROUPS_ARR_NUM		= 8,
} enahw_aenq_groups_t;

/*
 * The reason for ENAHW_AENQ_GROUP_NOTIFICATION.
 *
 * common: ena_admin_aenq_notification_syndrome
 */
typedef enum enahw_aenq_syndrome {
	ENAHW_AENQ_SYNDROME_UPDATE_HINTS	= 2,
} enahw_aenq_syndrome_t;

/*
 * ENA devices use a 48-bit memory space.
 *
 * common: ena_common_mem_addr
 */
typedef struct enahw_addr {
	uint32_t	ea_low;
	uint16_t	ea_high;
	uint16_t	ea_rsvd;	/* must be zero */
} enahw_addr_t;

/* common: ena_admin_ctrl_buff_info */
struct enahw_ctrl_buff {
	uint32_t	ecb_length;
	enahw_addr_t	ecb_addr;
};

/* common: ena_admin_get_set_feature_common_desc */
struct enahw_feat_common {
	/*
	 * 1:0 Select which value you want.
	 *
	 *	0x1 = Current value.
399 * 0x3 = Default value. 400 * 401 * Note: Linux seems to set this to 0 to get the value, 402 * not sure if that's a bug or just another way to get the 403 * current value. 404 * 405 * 7:3 Reserved. 406 */ 407 uint8_t efc_flags; 408 409 /* An id from enahw_feature_id_t. */ 410 uint8_t efc_id; 411 412 /* 413 * Each feature is versioned, allowing upgrades to the feature 414 * set without breaking backwards compatibility. The driver 415 * uses this field to specify which version it supports 416 * (starting from zero). Linux doesn't document this very well 417 * and sets this value to 0 for most features. We define a set 418 * of macros, underneath the enahw_feature_id_t type, clearly 419 * documenting the version we support for each feature. 420 */ 421 uint8_t efc_version; 422 uint8_t efc_rsvd; 423 }; 424 425 /* common: ena_admin_get_feat_cmd */ 426 typedef struct enahw_cmd_get_feat { 427 struct enahw_ctrl_buff ecgf_ctrl_buf; 428 struct enahw_feat_common ecgf_comm; 429 uint32_t egcf_unused[11]; 430 } enahw_cmd_get_feat_t; 431 432 /* 433 * N.B. Linux sets efc_flags to 0 (via memset) when reading the 434 * current value, but the comments say it should be 0x1. We follow the 435 * comments. 436 */ 437 #define ENAHW_GET_FEAT_FLAGS_GET_CURR_VAL(desc) \ 438 ((desc)->ecgf_comm.efc_flags) |= 0x1 439 #define ENAHW_GET_FEAT_FLAGS_GET_DEF_VAL(desc) \ 440 ((desc)->ecgf_comm.efc_flags) |= 0x3 441 442 /* 443 * Set the MTU of the device. This value does not include the L2 444 * headers or trailers, only the payload. 445 * 446 * common: ena_admin_set_feature_mtu_desc 447 */ 448 typedef struct enahw_feat_mtu { 449 uint32_t efm_mtu; 450 } enahw_feat_mtu_t; 451 452 /* common: ena_admin_set_feature_host_attr_desc */ 453 typedef struct enahw_feat_host_attr { 454 enahw_addr_t efha_os_addr; 455 enahw_addr_t efha_debug_addr; 456 uint32_t efha_debug_sz; 457 } enahw_feat_host_attr_t; 458 459 /* 460 * ENAHW_FEAT_AENQ_CONFIG 461 * 462 * common: ena_admin_feature_aenq_desc 463 */ 464 typedef struct enahw_feat_aenq { 465 /* Bitmask of AENQ groups this device supports. */ 466 uint32_t efa_supported_groups; 467 468 /* Bitmask of AENQ groups currently enabled. */ 469 uint32_t efa_enabled_groups; 470 } enahw_feat_aenq_t; 471 472 /* common: ena_admin_set_feat_cmd */ 473 typedef struct enahw_cmd_set_feat { 474 struct enahw_ctrl_buff ecsf_ctrl_buf; 475 struct enahw_feat_common ecsf_comm; 476 477 union { 478 uint32_t ecsf_raw[11]; 479 enahw_feat_host_attr_t ecsf_host_attr; 480 enahw_feat_mtu_t ecsf_mtu; 481 enahw_feat_aenq_t ecsf_aenq; 482 } ecsf_feat; 483 } enahw_cmd_set_feat_t; 484 485 /* 486 * Used to populate the host information buffer which the Nitro 487 * hypervisor supposedly uses for display, debugging, and possibly 488 * other purposes. 
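 *
 * As an illustrative sketch only (an assumption about intended use,
 * not a verbatim excerpt from ena.c), a driver might encode its
 * version and PCI BDF with the ENAHW_HOST_INFO_* shifts and masks
 * defined below, where "hi", "major", "minor", "patch", "bus", "dev"
 * and "func" are hypothetical locals:
 *
 *	hi->ehi_driver_ver = major |
 *	    (minor << ENAHW_HOST_INFO_MINOR_SHIFT) |
 *	    (patch << ENAHW_HOST_INFO_SUB_MINOR_SHIFT);
 *	hi->ehi_bdf = (func & ENAHW_HOST_INFO_FUNCTION_MASK) |
 *	    (dev << ENAHW_HOST_INFO_DEVICE_SHIFT) |
 *	    (bus << ENAHW_HOST_INFO_BUS_SHIFT);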
489 * 490 * common: ena_admin_host_info 491 */ 492 typedef struct enahw_host_info { 493 uint32_t ehi_os_type; 494 uint8_t ehi_os_dist_str[128]; 495 uint32_t ehi_os_dist; 496 uint8_t ehi_kernel_ver_str[32]; 497 uint32_t ehi_kernel_ver; 498 uint32_t ehi_driver_ver; 499 uint32_t ehi_supported_net_features[2]; 500 uint16_t ehi_ena_spec_version; 501 uint16_t ehi_bdf; 502 uint16_t ehi_num_cpus; 503 uint16_t ehi_rsvd; 504 uint32_t ehi_driver_supported_features; 505 } enahw_host_info_t; 506 507 #define ENAHW_HOST_INFO_MAJOR_MASK GENMASK(7, 0) 508 #define ENAHW_HOST_INFO_MINOR_SHIFT 8 509 #define ENAHW_HOST_INFO_MINOR_MASK GENMASK(15, 8) 510 #define ENAHW_HOST_INFO_SUB_MINOR_SHIFT 16 511 #define ENAHW_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) 512 #define ENAHW_HOST_INFO_SPEC_MAJOR_SHIFT 8 513 #define ENAHW_HOST_INFO_MODULE_TYPE_SHIFT 24 514 #define ENAHW_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) 515 #define ENAHW_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) 516 #define ENAHW_HOST_INFO_DEVICE_SHIFT 3 517 #define ENAHW_HOST_INFO_DEVICE_MASK GENMASK(7, 3) 518 #define ENAHW_HOST_INFO_BUS_SHIFT 8 519 #define ENAHW_HOST_INFO_BUS_MASK GENMASK(15, 8) 520 #define ENAHW_HOST_INFO_RX_OFFSET_SHIFT 1 521 #define ENAHW_HOST_INFO_RX_OFFSET_MASK BIT(1) 522 #define ENAHW_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2 523 #define ENAHW_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) 524 #define ENAHW_HOST_INFO_RX_BUF_MIRRORING_SHIFT 3 525 #define ENAHW_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3) 526 #define ENAHW_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4 527 #define ENAHW_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4) 528 #define ENAHW_HOST_INFO_RX_PAGE_REUSE_SHIFT 6 529 #define ENAHW_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6) 530 #define ENAHW_HOST_INFO_TX_IPV6_CSUM_OFFLOAD_SHIFT 7 531 #define ENAHW_HOST_INFO_TX_IPV6_CSUM_OFFLOAD_MASK BIT(7) 532 #define ENAHW_HOST_INFO_INFO_PHC_SHIFT 8 533 #define ENAHW_HOST_INFO_INFO_PHC_MASK BIT(8) 534 535 /* common: ena_admin_os_type */ 536 enum enahw_os_type { 537 ENAHW_OS_LINUX = 1, 538 ENAHW_OS_WIN = 2, 539 ENAHW_OS_DPDK = 3, 540 ENAHW_OS_FREEBSD = 4, 541 ENAHW_OS_IPXE = 5, 542 ENAHW_OS_ESXI = 6, 543 ENAHW_OS_MACOS = 7, 544 ENAHW_OS_GROUPS_NUM = 7, 545 }; 546 547 /* 548 * Create I/O Completion Queue 549 * 550 * A completion queue is where the device writes responses to I/O 551 * requests. The admin completion queue must be created before such a 552 * command can be issued, see ena_admin_cq_init(). 553 * 554 * common: ena_admin_aq_create_cq_cmd 555 */ 556 typedef struct enahw_cmd_create_cq { 557 /* 558 * 7-6 reserved 559 * 560 * 5 interrupt mode: when set the device sends an interrupt 561 * for each completion, otherwise the driver must poll 562 * the queue. 563 * 564 * 4-0 reserved 565 */ 566 uint8_t ecq_caps_1; 567 568 /* 569 * 7-5 reserved 570 * 571 * 4-0 CQ entry size (in words): the size of a single CQ entry 572 * in multiples of 32-bit words. 573 * 574 * NOTE: According to the common code the "valid" values 575 * are 4 or 8 -- this is incorrect. The valid values are 576 * 2 and 4. The common code does have an "extended" Rx 577 * completion descriptor, ena_eth_io_rx_cdesc_ext, that 578 * is 32 bytes and thus would use a value of 8, but it is 579 * not used by the Linux or FreeBSD drivers, so we do not 580 * bother with it. 581 * 582 * Type Bytes Value 583 * enahw_tx_cdesc_t 8 2 584 * enahw_rx_cdesc_t 16 4 585 */ 586 uint8_t ecq_caps_2; 587 588 /* The number of CQ entries, must be a power of 2. */ 589 uint16_t ecq_num_descs; 590 591 /* The MSI-X vector assigned to this CQ. 
*/ 592 uint32_t ecq_msix_vector; 593 594 /* 595 * The CQ's physical base address. The CQ memory must be 596 * physically contiguous. 597 */ 598 enahw_addr_t ecq_addr; 599 } enahw_cmd_create_cq_t; 600 601 #define ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLED_SHIFT 5 602 #define ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLED_MASK (BIT(5)) 603 #define ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS_MASK (GENMASK(4, 0)) 604 605 #define ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLE(cmd) \ 606 ((cmd)->ecq_caps_1 |= ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLED_MASK) 607 608 #define ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS(cmd, val) \ 609 (((cmd)->ecq_caps_2) |= \ 610 ((val) & ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS_MASK)) 611 612 /* 613 * Destroy Completion Queue 614 * 615 * common: ena_admin_aq_destroy_cq_cmd 616 */ 617 typedef struct enahw_cmd_destroy_cq { 618 uint16_t edcq_idx; 619 uint16_t edcq_rsvd; 620 } enahw_cmd_destroy_cq_t; 621 622 /* 623 * common: ena_admin_aq_create_sq_cmd 624 */ 625 typedef struct enahw_cmd_create_sq { 626 /* 627 * 7-5 direction: 0x1 = Tx, 0x2 = Rx 628 * 4-0 reserved 629 */ 630 uint8_t ecsq_dir; 631 uint8_t ecsq_rsvd1; 632 633 /* 634 * 7 reserved 635 * 636 * 6-4 completion policy: How are completion events generated. 637 * 638 * See enahw_completion_policy_type_t for a description of 639 * the various values. 640 * 641 * 3-0 placement policy: Where the descriptor ring and 642 * headers reside. 643 * 644 * See enahw_placement_policy_t for a description of the 645 * various values. 646 */ 647 uint8_t ecsq_caps_2; 648 649 /* 650 * 7-1 reserved 651 * 652 * 0 physically contiguous: When set indicates the descriptor 653 * ring memory is physically contiguous. 654 */ 655 uint8_t ecsq_caps_3; 656 657 /* 658 * The index of the associated Completion Queue (CQ). The CQ 659 * must be created before the SQ. 660 */ 661 uint16_t ecsq_cq_idx; 662 663 /* The number of descriptors in this SQ. */ 664 uint16_t ecsq_num_descs; 665 666 /* 667 * The base physical address of the SQ. This should not be set 668 * for LLQ. Must be page aligned. 669 */ 670 enahw_addr_t ecsq_base; 671 672 /* 673 * The physical address of the head write-back pointer. Valid 674 * only when the completion policy is set to one of the head 675 * write-back modes (0x2 or 0x3). Must be cacheline size 676 * aligned. 677 */ 678 enahw_addr_t ecsq_head_wb; 679 uint32_t ecsq_rsvdw2; 680 uint32_t ecsq_rsvdw3; 681 } enahw_cmd_create_sq_t; 682 683 typedef enum enahw_sq_direction { 684 ENAHW_SQ_DIRECTION_TX = 1, 685 ENAHW_SQ_DIRECTION_RX = 2, 686 } enahw_sq_direction_t; 687 688 typedef enum enahw_placement_policy { 689 /* Descriptors and headers are in host memory. */ 690 ENAHW_PLACEMENT_POLICY_HOST = 1, 691 692 /* 693 * Descriptors and headers are in device memory (a.k.a Low 694 * Latency Queue). 695 */ 696 ENAHW_PLACEMENT_POLICY_DEV = 3, 697 } enahw_placement_policy_t; 698 699 /* 700 * DESC: Write a CQ entry for each SQ descriptor. 701 * 702 * DESC_ON_DEMAND: Write a CQ entry when requested by the SQ descriptor. 703 * 704 * HEAD_ON_DEMAND: Update head pointer when requested by the SQ 705 * descriptor. 706 * 707 * HEAD: Update head pointer for each SQ descriptor. 
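 *
 * As a sketch of how these policies pair with the macros defined
 * below (illustrative only; "cmd" is a hypothetical
 * enahw_cmd_create_sq_t *, and this is not necessarily how ena.c
 * builds the command), a host-resident Tx SQ requesting a completion
 * entry per descriptor might be described as:
 *
 *	ENAHW_CMD_CREATE_SQ_DIR(cmd, ENAHW_SQ_DIRECTION_TX);
 *	ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY(cmd,
 *	    ENAHW_PLACEMENT_POLICY_HOST);
 *	ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY(cmd,
 *	    ENAHW_COMPLETION_POLICY_DESC);
 *	ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG(cmd);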
708 * 709 */ 710 typedef enum enahw_completion_policy_type { 711 ENAHW_COMPLETION_POLICY_DESC = 0, 712 ENAHW_COMPLETION_POLICY_DESC_ON_DEMAND = 1, 713 ENAHW_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, 714 ENAHW_COMPLETION_POLICY_HEAD = 3, 715 } enahw_completion_policy_type_t; 716 717 #define ENAHW_CMD_CREATE_SQ_DIR_SHIFT 5 718 #define ENAHW_CMD_CREATE_SQ_DIR_MASK GENMASK(7, 5) 719 #define ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY_MASK GENMASK(3, 0) 720 #define ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY_SHIFT 4 721 #define ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY_MASK GENMASK(6, 4) 722 #define ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG_MASK BIT(0) 723 724 #define ENAHW_CMD_CREATE_SQ_DIR(cmd, val) \ 725 (((cmd)->ecsq_dir) |= (((val) << ENAHW_CMD_CREATE_SQ_DIR_SHIFT) & \ 726 ENAHW_CMD_CREATE_SQ_DIR_MASK)) 727 728 #define ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY(cmd, val) \ 729 (((cmd)->ecsq_caps_2) |= \ 730 ((val) & ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY_MASK)) 731 732 #define ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY(cmd, val) \ 733 (((cmd)->ecsq_caps_2) |= \ 734 (((val) << ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY_SHIFT) & \ 735 ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY_MASK)) 736 737 #define ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG(cmd) \ 738 ((cmd)->ecsq_caps_3 |= ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG_MASK) 739 740 /* common: ena_admin_sq */ 741 typedef struct enahw_cmd_destroy_sq { 742 uint16_t edsq_idx; 743 uint8_t edsq_dir; /* Tx/Rx */ 744 uint8_t edsq_rsvd; 745 } enahw_cmd_destroy_sq_t; 746 747 #define ENAHW_CMD_DESTROY_SQ_DIR_SHIFT 5 748 #define ENAHW_CMD_DESTROY_SQ_DIR_MASK GENMASK(7, 5) 749 750 #define ENAHW_CMD_DESTROY_SQ_DIR(cmd, val) \ 751 (((cmd)->edsq_dir) |= (((val) << ENAHW_CMD_DESTROY_SQ_DIR_SHIFT) & \ 752 ENAHW_CMD_DESTROY_SQ_DIR_MASK)) 753 754 /* common: ena_admin_aq_get_stats_cmd */ 755 typedef struct enahw_cmd_get_stats { 756 struct enahw_ctrl_buff ecgs_ctrl_buf; 757 uint8_t ecgs_type; 758 uint8_t ecgs_scope; 759 uint16_t ecgs_rsvd; 760 uint16_t ecgs_queue_idx; 761 762 /* 763 * The device ID for which to query stats from. The sentinel 764 * value 0xFFFF indicates a query of the current device. 765 * According to the common docs, a "privileged device" may 766 * query stats for other ENA devices. However the definition 767 * of this "privilege device" is not expanded upon. 768 */ 769 uint16_t ecgs_device_id; 770 } enahw_cmd_get_stats_t; 771 772 /* Query the stats for my device. */ 773 #define ENAHW_CMD_GET_STATS_MY_DEVICE_ID 0xFFFF 774 775 /* 776 * BASIC: Returns enahw_resp_basic_stats. 777 * 778 * EXTENDED: According to the Linux documentation returns a buffer in 779 * "string format" with additional statistics per queue and per device ID. 780 * 781 * ENI: According to the Linux documentation it returns "extra HW 782 * stats for specific network interface". 
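 *
 * For example, a basic device-wide stats query might be built along
 * these lines (a sketch only; "cmd" is a hypothetical
 * enahw_cmd_get_stats_t * and any control buffer setup is omitted):
 *
 *	cmd->ecgs_type = ENAHW_GET_STATS_TYPE_BASIC;
 *	cmd->ecgs_scope = ENAHW_GET_STATS_SCOPE_ETH;
 *	cmd->ecgs_device_id = ENAHW_CMD_GET_STATS_MY_DEVICE_ID;
 *
 * The device answers such a query with an enahw_resp_basic_stats_t.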
783 * 784 * common: ena_admin_get_stats_type 785 */ 786 typedef enum enahw_get_stats_type { 787 ENAHW_GET_STATS_TYPE_BASIC = 0, 788 ENAHW_GET_STATS_TYPE_EXTENDED = 1, 789 ENAHW_GET_STATS_TYPE_ENI = 2, 790 } enahw_get_stats_type_t; 791 792 /* common: ena_admin_get_stats_scope */ 793 typedef enum enahw_get_stats_scope { 794 ENAHW_GET_STATS_SCOPE_QUEUE = 0, 795 ENAHW_GET_STATS_SCOPE_ETH = 1, 796 } enahw_get_stats_scope_t; 797 798 /* common: ena_admin_aq_entry */ 799 typedef struct enahw_cmd_desc { 800 uint16_t ecd_cmd_id; 801 uint8_t ecd_opcode; 802 uint8_t ecd_flags; 803 804 union { 805 uint32_t ecd_raw[15]; 806 enahw_cmd_get_feat_t ecd_get_feat; 807 enahw_cmd_set_feat_t ecd_set_feat; 808 enahw_cmd_create_cq_t ecd_create_cq; 809 enahw_cmd_destroy_cq_t ecd_destroy_cq; 810 enahw_cmd_create_sq_t ecd_create_sq; 811 enahw_cmd_destroy_sq_t ecd_destroy_sq; 812 enahw_cmd_get_stats_t ecd_get_stats; 813 } ecd_cmd; 814 815 } enahw_cmd_desc_t; 816 817 /* 818 * top level commands that may be sent to the Admin Queue. 819 * 820 * common: ena_admin_aq_opcode 821 */ 822 typedef enum ena_cmd_opcode { 823 ENAHW_CMD_NONE = 0, 824 ENAHW_CMD_CREATE_SQ = 1, 825 ENAHW_CMD_DESTROY_SQ = 2, 826 ENAHW_CMD_CREATE_CQ = 3, 827 ENAHW_CMD_DESTROY_CQ = 4, 828 ENAHW_CMD_GET_FEATURE = 8, 829 ENAHW_CMD_SET_FEATURE = 9, 830 ENAHW_CMD_GET_STATS = 11, 831 } enahw_cmd_opcode_t; 832 833 /* common: ENA_ADMIN_AQ_COMMON_DESC */ 834 #define ENAHW_CMD_ID_MASK GENMASK(11, 0) 835 #define ENAHW_CMD_PHASE_MASK BIT(0) 836 837 #define ENAHW_CMD_ID(desc, id) \ 838 (((desc)->ecd_cmd_id) |= ((id) & ENAHW_CMD_ID_MASK)) 839 840 /* 841 * Subcommands for ENA_ADMIN_{GET,SET}_FEATURE. 842 * 843 * common: ena_admin_aq_feature_id 844 */ 845 typedef enum enahw_feature_id { 846 ENAHW_FEAT_DEVICE_ATTRIBUTES = 1, 847 ENAHW_FEAT_MAX_QUEUES_NUM = 2, 848 ENAHW_FEAT_HW_HINTS = 3, 849 ENAHW_FEAT_LLQ = 4, 850 ENAHW_FEAT_EXTRA_PROPERTIES_STRINGS = 5, 851 ENAHW_FEAT_EXTRA_PROPERTIES_FLAGS = 6, 852 ENAHW_FEAT_MAX_QUEUES_EXT = 7, 853 ENAHW_FEAT_RSS_HASH_FUNCTION = 10, 854 ENAHW_FEAT_STATELESS_OFFLOAD_CONFIG = 11, 855 ENAHW_FEAT_RSS_INDIRECTION_TABLE_CONFIG = 12, 856 ENAHW_FEAT_MTU = 14, 857 ENAHW_FEAT_RSS_HASH_INPUT = 18, 858 ENAHW_FEAT_INTERRUPT_MODERATION = 20, 859 ENAHW_FEAT_AENQ_CONFIG = 26, 860 ENAHW_FEAT_LINK_CONFIG = 27, 861 ENAHW_FEAT_HOST_ATTR_CONFIG = 28, 862 ENAHW_FEAT_PHC_CONFIG = 29, 863 ENAHW_FEAT_NUM = 32, 864 } enahw_feature_id_t; 865 866 /* 867 * Device capabilities. 868 * 869 * common: ena_admin_aq_caps_id 870 */ 871 typedef enum enahw_capability_id { 872 ENAHW_CAP_ENI_STATS = 0, 873 ENAHW_CAP_ENA_SRD_INFO = 1, 874 ENAHW_CAP_CUSTOMER_METRICS = 2, 875 ENAHW_CAP_EXTENDED_RESET_REASONS = 3, 876 ENAHW_CAP_CDESC_MBZ = 4, 877 ENAHW_CAP_NUM 878 } enahw_capability_id_t; 879 880 /* 881 * The following macros define the maximum version we support for each 882 * feature. These are the feature versions we use to communicate with 883 * the feature command. Linux has these values spread throughout the 884 * code at the various callsites of ena_com_get_feature(). We choose 885 * to centralize our feature versions to make it easier to audit. 
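 *
 * As a sketch of the intended pairing ("desc" being a hypothetical
 * enahw_cmd_get_feat_t *, not a verbatim excerpt from ena.c), a
 * get-feature request for the extended queue limits would combine the
 * feature id with its version macro:
 *
 *	desc->ecgf_comm.efc_id = ENAHW_FEAT_MAX_QUEUES_EXT;
 *	desc->ecgf_comm.efc_version = ENAHW_FEAT_MAX_QUEUES_EXT_VER;
 *
 * Most features below use version 0; MAX_QUEUES_EXT is the exception
 * at version 1.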
886 */ 887 #define ENAHW_FEAT_DEVICE_ATTRIBUTES_VER 0 888 #define ENAHW_FEAT_MAX_QUEUES_NUM_VER 0 889 #define ENAHW_FEAT_HW_HINTS_VER 0 890 #define ENAHW_FEAT_LLQ_VER 0 891 #define ENAHW_FEAT_EXTRA_PROPERTIES_STRINGS_VER 0 892 #define ENAHW_FEAT_EXTRA_PROPERTIES_FLAGS_VER 0 893 #define ENAHW_FEAT_MAX_QUEUES_EXT_VER 1 894 #define ENAHW_FEAT_RSS_HASH_FUNCTION_VER 0 895 #define ENAHW_FEAT_STATELESS_OFFLOAD_CONFIG_VER 0 896 #define ENAHW_FEAT_RSS_INDIRECTION_TABLE_CONFIG_VER 0 897 #define ENAHW_FEAT_MTU_VER 0 898 #define ENAHW_FEAT_RSS_HASH_INPUT_VER 0 899 #define ENAHW_FEAT_INTERRUPT_MODERATION_VER 0 900 #define ENAHW_FEAT_AENQ_CONFIG_VER 0 901 #define ENAHW_FEAT_LINK_CONFIG_VER 0 902 #define ENAHW_FEAT_HOST_ATTR_CONFIG_VER 0 903 904 /* common: ena_admin_link_types */ 905 typedef enum enahw_link_speeds { 906 ENAHW_LINK_SPEED_1G = 0x1, 907 ENAHW_LINK_SPEED_2_HALF_G = 0x2, 908 ENAHW_LINK_SPEED_5G = 0x4, 909 ENAHW_LINK_SPEED_10G = 0x8, 910 ENAHW_LINK_SPEED_25G = 0x10, 911 ENAHW_LINK_SPEED_40G = 0x20, 912 ENAHW_LINK_SPEED_50G = 0x40, 913 ENAHW_LINK_SPEED_100G = 0x80, 914 ENAHW_LINK_SPEED_200G = 0x100, 915 ENAHW_LINK_SPEED_400G = 0x200, 916 } enahw_link_speeds_t; 917 918 /* 919 * Response to ENAHW_FEAT_HW_HINTS. 920 * 921 * Hints from the device to the driver about what values to use for 922 * various communications between the two. A value of 0 indicates 923 * there is no hint and the driver should provide its own default. All 924 * timeout values are in milliseconds. 925 * 926 * common: ena_admin_ena_hw_hints 927 */ 928 929 #define ENAHW_HINTS_NO_TIMEOUT 0xffff 930 931 typedef struct enahw_device_hints { 932 /* 933 * The amount of time the driver should wait for an MMIO read 934 * reply before giving up and returning an error. 935 */ 936 uint16_t edh_mmio_read_timeout; 937 938 /* 939 * If the driver has not seen an AENQ keep alive in this 940 * timeframe, then consider the device hung and perform a 941 * reset. 942 * common: driver_watchdog_timeout 943 */ 944 uint16_t edh_keep_alive_timeout; 945 946 /* 947 * The timeperiod in which we expect a Tx to report 948 * completion, otherwise it is considered "missed". Initiate a 949 * device reset when the number of missed completions is 950 * greater than the threshold. 951 */ 952 uint16_t edh_tx_comp_timeout; 953 uint16_t edh_missed_tx_reset_threshold; 954 955 /* 956 * The timeperiod in which we expect an admin command to 957 * report completion. 958 */ 959 uint16_t edh_admin_comp_timeout; 960 961 /* 962 * Used by Linux to set the netdevice 'watchdog_timeo' value. 963 * This value is used by the networking stack to determine 964 * when a pending transmission has stalled. This is similar to 965 * the keep alive timeout, except its viewing progress from 966 * the perspective of the network stack itself. This difference 967 * is subtle but important: the device could be in a state 968 * where it has a functioning keep alive heartbeat, but has a 969 * stuck Tx queue impeding forward progress of the networking 970 * stack (which in many cases results in a scenario 971 * indistinguishable from a complete host hang). 972 * 973 * The mac layer does not currently provide such 974 * functionality, though it could and should be extended to 975 * support such a feature. 976 */ 977 uint16_t edh_net_wd_timeout; 978 979 /* 980 * The maximum number of cookies/segments allowed in a DMA 981 * scatter-gather list. 
982 */ 983 uint16_t edh_max_tx_sgl; 984 uint16_t edh_max_rx_sgl; 985 986 uint16_t reserved[8]; 987 } enahw_device_hints_t; 988 989 /* 990 * Response to ENAHW_FEAT_DEVICE_ATTRIBUTES. 991 * 992 * common: ena_admin_device_attr_feature_desc 993 */ 994 typedef struct enahw_feat_dev_attr { 995 uint32_t efda_impl_id; 996 uint32_t efda_device_version; 997 998 /* 999 * Bitmap representing supported get/set feature subcommands 1000 * (enahw_feature_id). 1001 */ 1002 uint32_t efda_supported_features; 1003 1004 /* 1005 * Bitmap representing device capabilities. 1006 * (enahw_capability_id) 1007 */ 1008 uint32_t efda_capabilities; 1009 1010 /* Number of bits used for physical/virtual address. */ 1011 uint32_t efda_phys_addr_width; 1012 uint32_t efda_virt_addr_with; 1013 1014 /* The unicast MAC address in network byte order. */ 1015 uint8_t efda_mac_addr[6]; 1016 uint8_t efda_rsvd2[2]; 1017 uint32_t efda_max_mtu; 1018 } enahw_feat_dev_attr_t; 1019 1020 /* 1021 * Response to ENAHW_FEAT_MAX_QUEUES_NUM. 1022 * 1023 * common: ena_admin_queue_feature_desc 1024 */ 1025 typedef struct enahw_feat_max_queue { 1026 uint32_t efmq_max_sq_num; 1027 uint32_t efmq_max_sq_depth; 1028 uint32_t efmq_max_cq_num; 1029 uint32_t efmq_max_cq_depth; 1030 uint32_t efmq_max_legacy_llq_num; 1031 uint32_t efmq_max_legacy_llq_depth; 1032 uint32_t efmq_max_header_size; 1033 1034 /* 1035 * The maximum number of descriptors a single Tx packet may 1036 * span. This includes the meta descriptor. 1037 */ 1038 uint16_t efmq_max_per_packet_tx_descs; 1039 1040 /* 1041 * The maximum number of descriptors a single Rx packet may span. 1042 */ 1043 uint16_t efmq_max_per_packet_rx_descs; 1044 } enahw_feat_max_queue_t; 1045 1046 /* 1047 * Response to ENAHW_FEAT_MAX_QUEUES_EXT. 1048 * 1049 * common: ena_admin_queue_ext_feature_desc 1050 */ 1051 typedef struct enahw_feat_max_queue_ext { 1052 uint8_t efmqe_version; 1053 uint8_t efmqe_rsvd[3]; 1054 1055 uint32_t efmqe_max_tx_sq_num; 1056 uint32_t efmqe_max_tx_cq_num; 1057 uint32_t efmqe_max_rx_sq_num; 1058 uint32_t efmqe_max_rx_cq_num; 1059 uint32_t efmqe_max_tx_sq_depth; 1060 uint32_t efmqe_max_tx_cq_depth; 1061 uint32_t efmqe_max_rx_sq_depth; 1062 uint32_t efmqe_max_rx_cq_depth; 1063 uint32_t efmqe_max_tx_header_size; 1064 1065 /* 1066 * The maximum number of descriptors a single Tx packet may 1067 * span. This includes the meta descriptor. 1068 */ 1069 uint16_t efmqe_max_per_packet_tx_descs; 1070 1071 /* 1072 * The maximum number of descriptors a single Rx packet may span. 1073 */ 1074 uint16_t efmqe_max_per_packet_rx_descs; 1075 } enahw_feat_max_queue_ext_t; 1076 1077 /* 1078 * Response to ENA_ADMIN_LINK_CONFIG. 1079 * 1080 * common: ena_admin_get_feature_link_desc 1081 */ 1082 typedef struct enahw_feat_link_conf { 1083 /* Link speed in Mbit/s. */ 1084 uint32_t eflc_speed; 1085 1086 /* Bit field of enahw_link_speeds_t. */ 1087 uint32_t eflc_supported; 1088 1089 /* 1090 * 31-2: reserved 1091 * 1: duplex - Full Duplex 1092 * 0: autoneg 1093 */ 1094 uint32_t eflc_flags; 1095 } enahw_feat_link_conf_t; 1096 1097 #define ENAHW_FEAT_LINK_CONF_AUTONEG_MASK BIT(0) 1098 #define ENAHW_FEAT_LINK_CONF_DUPLEX_SHIFT 1 1099 #define ENAHW_FEAT_LINK_CONF_DUPLEX_MASK BIT(1) 1100 1101 #define ENAHW_FEAT_LINK_CONF_AUTONEG(f) \ 1102 ((f)->eflc_flags & ENAHW_FEAT_LINK_CONF_AUTONEG_MASK) 1103 1104 #define ENAHW_FEAT_LINK_CONF_FULL_DUPLEX(f) \ 1105 ((((f)->eflc_flags & ENAHW_FEAT_LINK_CONF_DUPLEX_MASK) >> \ 1106 ENAHW_FEAT_LINK_CONF_DUPLEX_SHIFT) == 1) 1107 1108 /* 1109 * Response to ENAHW_FEAT_STATELESS_OFFLOAD_CONFIG. 
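 *
 * As a hedged illustration of how the accessor macros below are meant
 * to be read (not a statement of what ena.c currently advertises), a
 * driver deciding whether it may offload IPv4 L4 checksums could test
 * the Tx bits like so, with "feat" being a hypothetical
 * enahw_feat_offload_t * and "tx_l4_csum_ok" a hypothetical
 * boolean_t:
 *
 *	if (ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART(feat) ||
 *	    ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL(feat))
 *		tx_l4_csum_ok = B_TRUE;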
1110 * 1111 * common: ena_admin_feature_offload_desc 1112 */ 1113 typedef struct enahw_feat_offload { 1114 /* 1115 * 0 : Tx IPv4 Header Checksum 1116 * 1 : Tx L4/IPv4 Partial Checksum 1117 * 1118 * The L4 checksum field should be initialized with pseudo 1119 * header checksum. 1120 * 1121 * 2 : Tx L4/IPv4 Checksum Full 1122 * 3 : Tx L4/IPv6 Partial Checksum 1123 * 1124 * The L4 checksum field should be initialized with pseudo 1125 * header checksum. 1126 * 1127 * 4 : Tx L4/IPv6 Checksum Full 1128 * 5 : TCP/IPv4 LSO (aka TSO) 1129 * 6 : TCP/IPv6 LSO (aka TSO) 1130 * 7 : LSO ECN 1131 */ 1132 uint32_t efo_tx; 1133 1134 /* 1135 * Receive side supported stateless offload. 1136 * 1137 * 0 : Rx IPv4 Header Checksum 1138 * 1 : Rx TCP/UDP + IPv4 Full Checksum 1139 * 2 : Rx TCP/UDP + IPv6 Full Checksum 1140 * 3 : Rx hash calculation 1141 */ 1142 uint32_t efo_rx_supported; 1143 1144 /* Linux seems to only check rx_supported. */ 1145 uint32_t efo_rx_enabled; 1146 } enahw_feat_offload_t; 1147 1148 /* Feature Offloads */ 1149 #define ENAHW_FEAT_OFFLOAD_TX_L3_IPV4_CSUM_MASK BIT(0) 1150 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART_SHIFT 1 1151 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART_MASK BIT(1) 1152 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL_SHIFT 2 1153 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL_MASK BIT(2) 1154 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_PART_SHIFT 3 1155 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_PART_MASK BIT(3) 1156 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_FULL_SHIFT 4 1157 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) 1158 #define ENAHW_FEAT_OFFLOAD_TSO_IPV4_SHIFT 5 1159 #define ENAHW_FEAT_OFFLOAD_TSO_IPV4_MASK BIT(5) 1160 #define ENAHW_FEAT_OFFLOAD_TSO_IPV6_SHIFT 6 1161 #define ENAHW_FEAT_OFFLOAD_TSO_IPV6_MASK BIT(6) 1162 #define ENAHW_FEAT_OFFLOAD_TSO_ECN_SHIFT 7 1163 #define ENAHW_FEAT_OFFLOAD_TSO_ECN_MASK BIT(7) 1164 #define ENAHW_FEAT_OFFLOAD_RX_L3_IPV4_CSUM_MASK BIT(0) 1165 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM_SHIFT 1 1166 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM_MASK BIT(1) 1167 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM_SHIFT 2 1168 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM_MASK BIT(2) 1169 #define ENAHW_FEAT_OFFLOAD_RX_HASH_SHIFT 3 1170 #define ENAHW_FEAT_OFFLOAD_RX_HASH_MASK BIT(3) 1171 1172 #define ENAHW_FEAT_OFFLOAD_TX_L3_IPV4_CSUM(f) \ 1173 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L3_IPV4_CSUM_MASK) != 0) 1174 1175 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART(f) \ 1176 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART_MASK) != 0) 1177 1178 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL(f) \ 1179 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL_MASK) != 0) 1180 1181 #define ENAHW_FEAT_OFFLOAD_TSO_IPV4(f) \ 1182 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TSO_IPV4_MASK) != 0) 1183 1184 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_PART(f) \ 1185 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_PART_MASK) != 0) 1186 1187 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_FULL(f) \ 1188 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_FULL_MASK) != 0) 1189 1190 #define ENAHW_FEAT_OFFLOAD_TSO_IPV6(f) \ 1191 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TSO_IPV6_MASK) != 0) 1192 1193 #define ENAHW_FEAT_OFFLOAD_RX_L3_IPV4_CSUM(f) \ 1194 (((f)->efo_rx_supported & ENAHW_FEAT_OFFLOAD_RX_L3_IPV4_CSUM_MASK) != 0) 1195 1196 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM(f) \ 1197 (((f)->efo_rx_supported & ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM_MASK) != 0) 1198 1199 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM(f) \ 1200 (((f)->efo_rx_supported & 
ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM_MASK) != 0) 1201 1202 typedef union enahw_resp_get_feat { 1203 uint32_t ergf_raw[14]; 1204 enahw_feat_dev_attr_t ergf_dev_attr; 1205 enahw_feat_max_queue_t ergf_max_queue; 1206 enahw_feat_max_queue_ext_t ergf_max_queue_ext; 1207 enahw_feat_aenq_t ergf_aenq; 1208 enahw_feat_link_conf_t ergf_link_conf; 1209 enahw_feat_offload_t ergf_offload; 1210 enahw_device_hints_t ergf_hints; 1211 } enahw_resp_get_feat_u; 1212 1213 /* 1214 * common: ena_admin_acq_create_cq_resp_desc 1215 */ 1216 typedef struct enahw_resp_create_cq { 1217 /* 1218 * The hardware's index for this queue. 1219 */ 1220 uint16_t ercq_idx; 1221 1222 /* 1223 * Apparently the number of descriptors granted may be 1224 * different than that requested. 1225 */ 1226 uint16_t ercq_actual_num_descs; 1227 uint32_t ercq_numa_node_reg_offset; 1228 /* CQ doorbell register - no longer supported by any ENA adapter */ 1229 uint32_t ercq_head_db_reg_offset; 1230 uint32_t ercq_interrupt_mask_reg_offset; /* stop intr */ 1231 } enahw_resp_create_cq_t; 1232 1233 /* common: ena_admin_acq_create_sq_resp_desc */ 1234 typedef struct enahw_resp_create_sq { 1235 uint16_t ersq_idx; 1236 uint16_t ersq_rsvdw1; 1237 uint32_t ersq_db_reg_offset; 1238 uint32_t ersq_llq_descs_reg_offset; 1239 uint32_t ersq_llq_headers_reg_offset; 1240 } enahw_resp_create_sq_t; 1241 1242 /* common: ena_admin_basic_stats */ 1243 typedef struct enahw_resp_basic_stats { 1244 uint32_t erbs_tx_bytes_low; 1245 uint32_t erbs_tx_bytes_high; 1246 uint32_t erbs_tx_pkts_low; 1247 uint32_t erbs_tx_pkts_high; 1248 uint32_t erbs_rx_bytes_low; 1249 uint32_t erbs_rx_bytes_high; 1250 uint32_t erbs_rx_pkts_low; 1251 uint32_t erbs_rx_pkts_high; 1252 uint32_t erbs_rx_drops_low; 1253 uint32_t erbs_rx_drops_high; 1254 uint32_t erbs_tx_drops_low; 1255 uint32_t erbs_tx_drops_high; 1256 uint32_t erbs_rx_overruns_low; 1257 uint32_t erbs_rx_overruns_high; 1258 } enahw_resp_basic_stats_t; 1259 1260 /* common: ena_admin_eni_stats */ 1261 typedef struct enahw_resp_eni_stats { 1262 /* 1263 * The number of inbound packets dropped due to aggregate 1264 * inbound bandwidth allowance being exceeded. 1265 */ 1266 uint64_t eres_bw_in_exceeded; 1267 1268 /* 1269 * The number of outbound packets dropped due to aggregated outbound 1270 * bandwidth allowance being exceeded. 1271 */ 1272 uint64_t eres_bw_out_exceeded; 1273 1274 /* 1275 * The number of packets dropped due to the Packets Per Second 1276 * allowance being exceeded. 1277 */ 1278 uint64_t eres_pps_exceeded; 1279 1280 /* 1281 * The number of packets dropped due to connection tracking 1282 * allowance being exceeded and leading to failure in 1283 * establishment of new connections. 1284 */ 1285 uint64_t eres_conns_exceeded; 1286 1287 /* 1288 * The number of packets dropped due to linklocal packet rate 1289 * allowance being exceeded. 1290 */ 1291 uint64_t eres_linklocal_exceeded; 1292 } enahw_resp_eni_stats_t; 1293 1294 /* 1295 * common: ena_admin_acq_entry 1296 */ 1297 typedef struct enahw_resp_desc { 1298 /* The index of the completed command. */ 1299 uint16_t erd_cmd_id; 1300 1301 /* The status of the command (enahw_resp_status_t). */ 1302 uint8_t erd_status; 1303 1304 /* 1305 * 7-1 Reserved 1306 * 0 Phase 1307 */ 1308 uint8_t erd_flags; 1309 1310 /* Extended status. */ 1311 uint16_t erd_ext_status; 1312 1313 /* 1314 * The AQ entry (enahw_cmd_desc) index which has been consumed 1315 * by the device and can be reused. 
However, this field is not 1316 * used in the other drivers, and it seems to be redundant 1317 * with the erd_idx field. 1318 */ 1319 uint16_t erd_sq_head_idx; 1320 1321 union { 1322 uint32_t raw[14]; 1323 enahw_resp_get_feat_u erd_get_feat; 1324 enahw_resp_create_cq_t erd_create_cq; 1325 /* destroy_cq: No command-specific response. */ 1326 enahw_resp_create_sq_t erd_create_sq; 1327 /* destroy_sq: No command-specific response. */ 1328 enahw_resp_basic_stats_t erd_basic_stats; 1329 enahw_resp_eni_stats_t erd_eni_stats; 1330 } erd_resp; 1331 } enahw_resp_desc_t; 1332 1333 /* common: ENA_ADMIN_ACQ_COMMON_DESC */ 1334 #define ENAHW_RESP_CMD_ID_MASK GENMASK(11, 0) 1335 #define ENAHW_RESP_PHASE_MASK 0x1 1336 1337 #define ENAHW_RESP_CMD_ID(desc) \ 1338 (((desc)->erd_cmd_id) & ENAHW_RESP_CMD_ID_MASK) 1339 1340 /* 1341 * The response status of an Admin Queue command. 1342 * 1343 * common: ena_admin_aq_completion_status 1344 */ 1345 typedef enum enahw_resp_status { 1346 ENAHW_RESP_SUCCESS = 0, 1347 ENAHW_RESP_RESOURCE_ALLOCATION_FAILURE = 1, 1348 ENAHW_RESP_BAD_OPCODE = 2, 1349 ENAHW_RESP_UNSUPPORTED_OPCODE = 3, 1350 ENAHW_RESP_MALFORMED_REQUEST = 4, 1351 /* 1352 * At this place in the common code it mentions that there is 1353 * "additional status" in the response descriptor's 1354 * erd_ext_status field. As the common code never actually 1355 * uses this field it's hard to know the exact meaning of the 1356 * comment. My best guess is the illegal parameter error 1357 * stores additional context in the erd_ext_status field. But 1358 * how to interpret that additional context is anyone's guess. 1359 */ 1360 ENAHW_RESP_ILLEGAL_PARAMETER = 5, 1361 ENAHW_RESP_UNKNOWN_ERROR = 6, 1362 ENAHW_RESP_RESOURCE_BUSY = 7, 1363 } enahw_resp_status_t; 1364 1365 /* 1366 * I/O macros and structures. 1367 * ------------------------- 1368 */ 1369 1370 /* 1371 * The device's L3 and L4 protocol numbers. These are specific to the 1372 * ENA device and not to be confused with IANA protocol numbers. 1373 * 1374 * common: ena_eth_io_l3_proto_index 1375 */ 1376 typedef enum enahw_io_l3_proto { 1377 ENAHW_IO_L3_PROTO_UNKNOWN = 0, 1378 ENAHW_IO_L3_PROTO_IPV4 = 8, 1379 ENAHW_IO_L3_PROTO_IPV6 = 11, 1380 ENAHW_IO_L3_PROTO_FCOE = 21, 1381 ENAHW_IO_L3_PROTO_ROCE = 22, 1382 } enahw_io_l3_proto_t; 1383 1384 /* common: ena_eth_io_l4_proto_index */ 1385 typedef enum enahw_io_l4_proto { 1386 ENAHW_IO_L4_PROTO_UNKNOWN = 0, 1387 ENAHW_IO_L4_PROTO_TCP = 12, 1388 ENAHW_IO_L4_PROTO_UDP = 13, 1389 ENAHW_IO_L4_PROTO_ROUTEABLE_ROCE = 23, 1390 } enahw_io_l4_proto_t; 1391 1392 /* common: ena_eth_io_tx_desc */ 1393 typedef struct enahw_tx_data_desc { 1394 /* 1395 * 15-0 Buffer Length (LENGTH) 1396 * 1397 * The buffer length in bytes. This should NOT include the 1398 * Ethernet FCS bytes. 1399 * 1400 * 21-16 Request ID High Bits [15-10] (REQ_ID_HI) 1401 * 22 Reserved Zero 1402 * 23 Metadata Flag always zero (META_DESC) 1403 * 1404 * This flag indicates if the descriptor is a metadata 1405 * descriptor or not. In this case we are defining the Tx 1406 * descriptor, so it's always zero. 1407 * 1408 * 24 Phase bit (PHASE) 1409 * 25 Reserved Zero 1410 * 26 First Descriptor Bit (FIRST) 1411 * 1412 * Indicates this is the first descriptor for the frame. 1413 * 1414 * 27 Last Descriptor Bit (LAST) 1415 * 1416 * Indicates this is the last descriptor for the frame. 1417 * 1418 * 28 Completion Request Bit (COMP_REQ) 1419 * 1420 * Indicates if completion should be posted after the 1421 * frame is transmitted. 
This bit is only valid on the 1422 * first descriptor. 1423 * 1424 * 31-29 Reserved Zero 1425 */ 1426 uint32_t etd_len_ctrl; 1427 1428 /* 1429 * 3-0 L3 Protocol Number (L3_PROTO_IDX) 1430 * 1431 * The L3 protocol type, one of enahw_io_l3_proto_t. This 1432 * field is required when L3_CSUM_EN or TSO_EN is set. 1433 * 1434 * 4 Don't Fragment Bit (DF) 1435 * 1436 * The value of IPv4 DF. This value must copy the value 1437 * found in the packet's IPv4 header. 1438 * 1439 * 6-5 Reserved Zero 1440 * 7 TSO Bit (TSO_EN) 1441 * 1442 * Enable TCP Segment Offload. 1443 * 1444 * 12-8 L4 Protocol Number (L4_PROTO_IDX) 1445 * 1446 * The L4 protocol type, one of enahw_io_l4_proto_t. This 1447 * field is required when L4_CSUM_EN or TSO_EN are 1448 * set. 1449 * 1450 * 13 L3 Checksum Offload (L3_CSUM_EN) 1451 * 1452 * Enable IPv4 header checksum offload. 1453 * 1454 * 14 L4 Checksum Offload (L4_CSUM_EN) 1455 * 1456 * Enable TCP/UDP checksum offload. 1457 * 1458 * 15 Ethernet FCS Disable (ETHERNET_FCS_DIS) 1459 * 1460 * Disable the device's Ethernet Frame Check sequence. 1461 * 1462 * 16 Reserved Zero 1463 * 17 L4 Partial Checksum Present (L4_CSUM_PARTIAL) 1464 * 1465 * When set it indicates the host has already provided 1466 * the pseudo-header checksum. Otherwise, it is up to the 1467 * device to calculate it. 1468 * 1469 * When set and using TSO the host stack must remember 1470 * not to include the TCP segment length in the supplied 1471 * pseudo-header. 1472 * 1473 * The host stack should provide the pseudo-header 1474 * checksum when using IPv6 with Routing Headers. 1475 * 1476 * 21-18 Reserved Zero 1477 * 31-22 Request ID Low [9-0] (REQ_ID_LO) 1478 */ 1479 uint32_t etd_meta_ctrl; 1480 1481 /* The low 32 bits of the buffer address. */ 1482 uint32_t etd_buff_addr_lo; 1483 1484 /* 1485 * address high and header size 1486 * 1487 * 15-0 Buffer Address High [47-32] (ADDR_HI) 1488 * 1489 * The upper 15 bits of the buffer address. 1490 * 1491 * 23-16 Reserved Zero 1492 * 31-24 Header Length (HEADER_LENGTH) 1493 * 1494 * This field has dubious documentation in the 1495 * common/Linux driver code, even contradicting itself in 1496 * the same sentence. Here's what it says, verbatim: 1497 * 1498 * > Header length. For Low Latency Queues, this fields 1499 * > indicates the number of bytes written to the 1500 * > headers' memory. For normal queues, if packet is TCP 1501 * > or UDP, and longer than max_header_size, then this 1502 * > field should be set to the sum of L4 header offset 1503 * > and L4 header size(without options), otherwise, this 1504 * > field should be set to 0. For both modes, this field 1505 * > must not exceed the max_header_size. max_header_size 1506 * > value is reported by the Max Queues Feature 1507 * > descriptor 1508 * 1509 * Here's what one _might_ ascertain from the above. 1510 * 1511 * 1. This field should always be set in the case of 1512 * LLQs/device placement. 1513 * 1514 * 2. This field must _never_ exceed the max header size 1515 * as reported by feature detection. In our code this 1516 * would be efmq_max_header_size for older ENA devices 1517 * and efmqe_max_tx_header_size for newer ones. One 1518 * empirical data point from a t3.small (with newer 1519 * device) is a max Tx header size of 128 bytes. 1520 * 1521 * 3. If the packet is TCP or UDP, and the packet (or the 1522 * headers?) is longer than the max header size, then 1523 * this field should be set to the total header size 1524 * with the exception of TCP header options. 
	 *    Otherwise, if the packet is not TCP or UDP, or if
	 *    the packet (or header length?) _does not_ exceed
	 *    the max header size, then set this value to 0.
	 *
	 * One might think, based on (3), that when the header
	 * size exceeds the max this field needs to be set, but
	 * that contradicts (2), which dictates that the total
	 * header size can never exceed the max. Sure enough, the
	 * Linux code drops all packets with headers that exceed
	 * the max. So in that case it would mean that "and
	 * longer than max_header_size" is referring to the total
	 * packet length. So for most workloads, the TCP/UDP
	 * packets should have this field set, to indicate their
	 * header length. This matches with Linux, which seems to
	 * set header length regardless of IP protocol.
	 *
	 * However, the FreeBSD code tells a different story. In
	 * its non-LLQ Tx path it has the following comment,
	 * verbatim:
	 *
	 * > header_len is just a hint for the device. Because
	 * > FreeBSD is not giving us information about packet
	 * > header length and it is not guaranteed that all
	 * > packet headers will be in the 1st mbuf, setting
	 * > header_len to 0 is making the device ignore this
	 * > value and resolve header on it's own.
	 *
	 * According to this we can just set the value to zero
	 * and let the device figure it out. This maps better to
	 * illumos, where we also allow the header to potentially
	 * span multiple mblks (though we do have access to the
	 * header sizes via mac_ether_offload_info_t).
	 *
	 * The upshot: for now we take advantage of the device's
	 * ability to determine the header length on its own, at
	 * the potential cost of some performance (not measured).
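	 *
	 * If we ever choose to supply the length instead, a sketch
	 * of the alternative (with "meo" a hypothetical
	 * mac_ether_offload_info_t for the packet and "hdr_len"
	 * already validated against the device's max header size)
	 * might look like:
	 *
	 *	hdr_len = meo.meoi_l2hlen + meo.meoi_l3hlen +
	 *	    meo.meoi_l4hlen;
	 *	ENAHW_TX_DESC_HEADER_LENGTH(desc, hdr_len);
	 *
	 * Until then the field is simply left at zero, as described
	 * above.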
1561 */ 1562 uint32_t etd_buff_addr_hi_hdr_sz; 1563 } enahw_tx_data_desc_t; 1564 1565 #define ENAHW_TX_DESC_LENGTH_MASK GENMASK(15, 0) 1566 #define ENAHW_TX_DESC_REQ_ID_HI_SHIFT 16 1567 #define ENAHW_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) 1568 #define ENAHW_TX_DESC_META_DESC_SHIFT 23 1569 #define ENAHW_TX_DESC_META_DESC_MASK BIT(23) 1570 #define ENAHW_TX_DESC_PHASE_SHIFT 24 1571 #define ENAHW_TX_DESC_PHASE_MASK BIT(24) 1572 #define ENAHW_TX_DESC_FIRST_SHIFT 26 1573 #define ENAHW_TX_DESC_FIRST_MASK BIT(26) 1574 #define ENAHW_TX_DESC_LAST_SHIFT 27 1575 #define ENAHW_TX_DESC_LAST_MASK BIT(27) 1576 #define ENAHW_TX_DESC_COMP_REQ_SHIFT 28 1577 #define ENAHW_TX_DESC_COMP_REQ_MASK BIT(28) 1578 #define ENAHW_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) 1579 #define ENAHW_TX_DESC_DF_SHIFT 4 1580 #define ENAHW_TX_DESC_DF_MASK BIT(4) 1581 #define ENAHW_TX_DESC_TSO_EN_SHIFT 7 1582 #define ENAHW_TX_DESC_TSO_EN_MASK BIT(7) 1583 #define ENAHW_TX_DESC_L4_PROTO_IDX_SHIFT 8 1584 #define ENAHW_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) 1585 #define ENAHW_TX_DESC_L3_CSUM_EN_SHIFT 13 1586 #define ENAHW_TX_DESC_L3_CSUM_EN_MASK BIT(13) 1587 #define ENAHW_TX_DESC_L4_CSUM_EN_SHIFT 14 1588 #define ENAHW_TX_DESC_L4_CSUM_EN_MASK BIT(14) 1589 #define ENAHW_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 1590 #define ENAHW_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) 1591 #define ENAHW_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 1592 #define ENAHW_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) 1593 #define ENAHW_TX_DESC_REQ_ID_LO_SHIFT 22 1594 #define ENAHW_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) 1595 #define ENAHW_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) 1596 #define ENAHW_TX_DESC_HEADER_LENGTH_SHIFT 24 1597 #define ENAHW_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) 1598 1599 #define ENAHW_TX_DESC_LENGTH(desc, len) \ 1600 (((desc)->etd_len_ctrl) |= ((len) & ENAHW_TX_DESC_LENGTH_MASK)) 1601 1602 #define ENAHW_TX_DESC_FIRST_ON(desc) \ 1603 (((desc)->etd_len_ctrl) |= ENAHW_TX_DESC_FIRST_MASK) 1604 1605 #define ENAHW_TX_DESC_FIRST_OFF(desc) \ 1606 (((desc)->etd_len_ctrl) &= ~ENAHW_TX_DESC_FIRST_MASK) 1607 1608 #define ENAHW_TX_DESC_REQID_HI(desc, reqid) \ 1609 (((desc)->etd_len_ctrl) |= \ 1610 ((((reqid) >> 10) << ENAHW_TX_DESC_REQ_ID_HI_SHIFT) & \ 1611 ENAHW_TX_DESC_REQ_ID_HI_MASK)) 1612 1613 #define ENAHW_TX_DESC_REQID_LO(desc, reqid) \ 1614 (((desc)->etd_meta_ctrl) |= \ 1615 (((reqid) << ENAHW_TX_DESC_REQ_ID_LO_SHIFT) & \ 1616 ENAHW_TX_DESC_REQ_ID_LO_MASK)) 1617 1618 #define ENAHW_TX_DESC_PHASE(desc, phase) \ 1619 (((desc)->etd_len_ctrl) |= (((phase) << ENAHW_TX_DESC_PHASE_SHIFT) & \ 1620 ENAHW_TX_DESC_PHASE_MASK)) 1621 1622 #define ENAHW_TX_DESC_LAST_ON(desc) \ 1623 (((desc)->etd_len_ctrl) |= ENAHW_TX_DESC_LAST_MASK) 1624 1625 #define ENAHW_TX_DESC_LAST_OFF(desc) \ 1626 (((desc)->etd_len_ctrl) &= ~ENAHW_TX_DESC_LAST_MASK) 1627 1628 #define ENAHW_TX_DESC_COMP_REQ_ON(desc) \ 1629 (((desc)->etd_len_ctrl) |= ENAHW_TX_DESC_COMP_REQ_MASK) 1630 1631 #define ENAHW_TX_DESC_COMP_REQ_OFF(desc) \ 1632 (((desc)->etd_len_ctrl) &= ~ENAHW_TX_DESC_COMP_REQ_MASK) 1633 1634 #define ENAHW_TX_DESC_META_DESC_ON(desc) \ 1635 (((desc)->etd_len_ctrl) |= ENAHW_TX_DESC_META_DESC_MASK) 1636 1637 #define ENAHW_TX_DESC_META_DESC_OFF(desc) \ 1638 (((desc)->etd_len_ctrl) &= ~ENAHW_TX_DESC_META_DESC_MASK) 1639 1640 #define ENAHW_TX_DESC_ADDR_LO(desc, addr) \ 1641 (((desc)->etd_buff_addr_lo) = (addr)) 1642 1643 #define ENAHW_TX_DESC_ADDR_HI(desc, addr) \ 1644 (((desc)->etd_buff_addr_hi_hdr_sz) |= \ 1645 (((addr) >> 32) & ENAHW_TX_DESC_ADDR_HI_MASK)) 1646 1647 #define ENAHW_TX_DESC_HEADER_LENGTH(desc, len) \ 
	(((desc)->etd_buff_addr_hi_hdr_sz) |=				\
	    (((len) << ENAHW_TX_DESC_HEADER_LENGTH_SHIFT) &		\
	    ENAHW_TX_DESC_HEADER_LENGTH_MASK))

#define	ENAHW_TX_DESC_DF_ON(desc)					\
	((desc)->etd_meta_ctrl |= ENAHW_TX_DESC_DF_MASK)

#define	ENAHW_TX_DESC_TSO_OFF(desc)					\
	(((desc)->etd_meta_ctrl) &= ~ENAHW_TX_DESC_TSO_EN_MASK)

#define	ENAHW_TX_DESC_L3_CSUM_OFF(desc)					\
	(((desc)->etd_meta_ctrl) &= ~ENAHW_TX_DESC_L3_CSUM_EN_MASK)

#define	ENAHW_TX_DESC_L4_CSUM_OFF(desc)					\
	(((desc)->etd_meta_ctrl) &= ~ENAHW_TX_DESC_L4_CSUM_EN_MASK)

#define	ENAHW_TX_DESC_L4_CSUM_PARTIAL_ON(desc)				\
	(((desc)->etd_meta_ctrl) |= ENAHW_TX_DESC_L4_CSUM_PARTIAL_MASK)

/* common: ena_eth_io_tx_meta_desc */
typedef struct enahw_tx_meta_desc {
	/*
	 * 9-0   Request ID Low [9-0] (REQ_ID_LO)
	 * 13-10 Reserved Zero
	 * 14    Extended Metadata Valid (EXT_VALID)
	 *
	 *	When set this descriptor contains valid extended
	 *	metadata. The extended metadata includes the L3/L4
	 *	length and offset fields as well as the MSS bits. This
	 *	is needed for TSO.
	 *
	 * 15    Reserved Zero
	 * 19-16 MSS High Bits (MSS_HI)
	 * 20    Meta Type (ETH_META_TYPE)
	 *
	 *	If enabled this is an extended metadata descriptor.
	 *	This seems redundant with EXT_VALID.
	 *
	 * 21    Meta Store (META_STORE)
	 *
	 *	Store the extended metadata in the queue cache.
	 *
	 * 22    Reserved Zero
	 * 23    Metadata Flag (META_DESC) -- always one
	 * 24    Phase (PHASE)
	 * 25    Reserved Zero
	 * 26    First Descriptor Bit (FIRST)
	 * 27    Last Descriptor Bit (LAST)
	 * 28    Completion Request Bit (COMP_REQ)
	 * 31-29 Reserved Zero
	 */
	uint32_t	etmd_len_ctrl;

	/*
	 * 5-0  Request ID High Bits [15-10] (REQ_ID_HI)
	 * 31-6 Reserved Zero
	 */
	uint32_t	etmd_word1;

	/*
	 * 7-0   L3 Header Length (L3_HDR_LEN)
	 * 15-8  L3 Header Offset (L3_HDR_OFF)
	 * 21-16 L4 Header Length in Words (L4_HDR_LEN_IN_WORDS)
	 *
	 *	Specifies the L4 header length in words. The device
	 *	assumes the L4 header follows directly after the L3
	 *	header and that the L4 offset is equal to L3_HDR_OFF +
	 *	L3_HDR_LEN.
	 *
	 * 31-22 MSS Low Bits (MSS_LO)
	 */
	uint32_t	etmd_word2;
	uint32_t	etmd_reserved;
} enahw_tx_meta_desc_t;

/* common: N/A */
typedef union enahw_tx_desc {
	enahw_tx_data_desc_t	etd_data;
	enahw_tx_meta_desc_t	etd_meta;
} enahw_tx_desc_t;

/* common: ena_eth_io_tx_cdesc */
typedef struct enahw_tx_cdesc {
	/*
	 * 15-0 Request ID Bits
	 * 16   Reserved Zero
	 */
	uint16_t	etc_req_id;

	/*
	 * Presumably the status of the Tx, though the Linux driver
	 * never checks this field.
	 */
	uint8_t		etc_status;

	/*
	 * 0   Phase
	 * 7-1 Reserved Zero
	 */
	uint8_t		etc_flags;

	/*
	 * This isn't documented or used in the Linux driver, but
	 * these probably store the submission queue ID and the
	 * submission queue head index.
/* common: ena_eth_io_tx_meta_desc */
typedef struct enahw_tx_meta_desc {
	/*
	 * 9-0	 Request ID Low [9-0] (REQ_ID_LO)
	 * 13-10 Reserved Zero
	 * 14	 Extended Metadata Valid (EXT_VALID)
	 *
	 *	When set this descriptor contains valid extended
	 *	metadata. The extended metadata includes the L3/L4
	 *	length and offset fields as well as the MSS bits. This
	 *	is needed for TSO.
	 *
	 * 15	 Reserved Zero
	 * 19-16 MSS High Bits (MSS_HI)
	 * 20	 Meta Type (ETH_META_TYPE)
	 *
	 *	If enabled this is an extended metadata descriptor.
	 *	This seems redundant with EXT_VALID.
	 *
	 * 21	 Meta Store (META_STORE)
	 *
	 *	Store the extended metadata in the queue cache.
	 *
	 * 22	 Reserved Zero
	 * 23	 Metadata Flag (META_DESC) -- always one
	 * 24	 Phase (PHASE)
	 * 25	 Reserved Zero
	 * 26	 First Descriptor Bit (FIRST)
	 * 27	 Last Descriptor Bit (LAST)
	 * 28	 Completion Request Bit (COMP_REQ)
	 * 31-29 Reserved Zero
	 */
	uint32_t etmd_len_ctrl;

	/*
	 * 5-0	Request ID High Bits [15-10] (REQ_ID_HI)
	 * 31-6	Reserved Zero
	 */
	uint32_t etmd_word1;

	/*
	 * 7-0	 L3 Header Length (L3_HDR_LEN)
	 * 15-8	 L3 Header Offset (L3_HDR_OFF)
	 * 21-16 L4 Header Length in Words (L4_HDR_LEN_IN_WORDS)
	 *
	 *	Specifies the L4 header length in words. The device
	 *	assumes the L4 header follows directly after the L3
	 *	header and that the L4 offset is equal to L3_HDR_OFF +
	 *	L3_HDR_LEN.
	 *
	 * 31-22 MSS Low Bits (MSS_LO)
	 */
	uint32_t etmd_word2;
	uint32_t etmd_reserved;
} enahw_tx_meta_desc_t;

/* common: N/A */
typedef union enahw_tx_desc {
	enahw_tx_data_desc_t etd_data;
	enahw_tx_meta_desc_t etd_meta;
} enahw_tx_desc_t;

/* common: ena_eth_io_tx_cdesc */
typedef struct enahw_tx_cdesc {
	/*
	 * 15-0	Request ID Bits
	 * 16	Reserved Zero
	 */
	uint16_t etc_req_id;

	/*
	 * Presumably the status of the Tx, though the Linux driver
	 * never checks this field.
	 */
	uint8_t etc_status;

	/*
	 * 0	Phase
	 * 7-1	Reserved Zero
	 */
	uint8_t etc_flags;

	/*
	 * This isn't documented or used in the Linux driver, but
	 * these probably store the submission queue ID and the
	 * submission queue head index.
	 */
	uint16_t etc_sub_qid;
	uint16_t etc_sq_head_idx;
} enahw_tx_cdesc_t;

#define	ENAHW_TX_CDESC_PHASE_SHIFT	0
#define	ENAHW_TX_CDESC_PHASE_MASK	BIT(0)

#define	ENAHW_TX_CDESC_GET_PHASE(cdesc)			\
	((cdesc)->etc_flags & ENAHW_TX_CDESC_PHASE_MASK)

/* common: ena_eth_io_rx_desc */
typedef struct enahw_rx_desc {
	/*
	 * The length of the buffer provided by the host, in bytes.
	 * Use the value of 0 to indicate 64K.
	 */
	uint16_t erd_length;
	uint8_t erd_reserved1;

	/*
	 * 0	Phase (PHASE)
	 * 1	Reserved Zero
	 * 2	First (FIRST)
	 *
	 *	Indicates this is the first descriptor for the frame.
	 *
	 * 3	Last (LAST)
	 *
	 *	Indicates this is the last descriptor for the frame.
	 *
	 * 4	Completion Request (COMP_REQ)
	 *
	 *	Indicates that a completion request should be generated
	 *	for this descriptor.
	 *
	 * 7-5	Reserved Zero
	 */
	uint8_t erd_ctrl;

	/*
	 * 15-0	Request ID
	 * 16	Reserved 0
	 */
	uint16_t erd_req_id;
	uint16_t erd_reserved2;

	/* The physical address of the buffer provided by the host. */
	uint32_t erd_buff_addr_lo;
	uint16_t erd_buff_addr_hi;
	uint16_t erd_reserved3;
} enahw_rx_desc_t;

#define	ENAHW_RX_DESC_PHASE_MASK	BIT(0)
#define	ENAHW_RX_DESC_FIRST_SHIFT	2
#define	ENAHW_RX_DESC_FIRST_MASK	BIT(2)
#define	ENAHW_RX_DESC_LAST_SHIFT	3
#define	ENAHW_RX_DESC_LAST_MASK		BIT(3)
#define	ENAHW_RX_DESC_COMP_REQ_SHIFT	4
#define	ENAHW_RX_DESC_COMP_REQ_MASK	BIT(4)

#define	ENAHW_RX_DESC_CLEAR_CTRL(desc)	((desc)->erd_ctrl = 0)
#define	ENAHW_RX_DESC_SET_PHASE(desc, val)			\
	((desc)->erd_ctrl |= ((val) & ENAHW_RX_DESC_PHASE_MASK))

#define	ENAHW_RX_DESC_SET_FIRST(desc)				\
	((desc)->erd_ctrl |= ENAHW_RX_DESC_FIRST_MASK)

#define	ENAHW_RX_DESC_SET_LAST(desc)				\
	((desc)->erd_ctrl |= ENAHW_RX_DESC_LAST_MASK)

#define	ENAHW_RX_DESC_SET_COMP_REQ(desc)			\
	((desc)->erd_ctrl |= ENAHW_RX_DESC_COMP_REQ_MASK)
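
/*
 * Illustrative sketch (not part of the driver): posting one receive
 * buffer to the Rx SQ using the macros above. The ring, tail, req_id,
 * phase, buf_len, and dma_addr variables are hypothetical.
 *
 *	enahw_rx_desc_t *desc = &ring[tail];
 *
 *	desc->erd_length = (uint16_t)buf_len;
 *	desc->erd_req_id = req_id;
 *	desc->erd_buff_addr_lo = (uint32_t)(dma_addr & 0xffffffff);
 *	desc->erd_buff_addr_hi = (uint16_t)(dma_addr >> 32);
 *	ENAHW_RX_DESC_CLEAR_CTRL(desc);
 *	ENAHW_RX_DESC_SET_PHASE(desc, phase);
 *	ENAHW_RX_DESC_SET_FIRST(desc);
 *	ENAHW_RX_DESC_SET_LAST(desc);
 *	ENAHW_RX_DESC_SET_COMP_REQ(desc);
 */
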
/*
 * Ethernet parsing information is only valid when last == 1.
 *
 * common: ena_eth_io_rx_cdesc_base
 */
typedef struct enahw_rx_cdesc {
	/*
	 * 4-0	 L3 Protocol Number (L3_PROTO)
	 *
	 *	The L3 protocol type, one of enahw_io_l3_proto_t.
	 *
	 * 6-5	 (SRC_VLAN_CNT)
	 * 7	 Reserved Zero
	 * 12-8	 L4 Protocol Number (L4_PROTO)
	 * 13	 L3 Checksum Error (L3_CSUM_ERR)
	 *
	 *	When set either the L3 checksum failed to match or the
	 *	controller didn't attempt to validate the checksum.
	 *	This bit is valid only when L3_PROTO indicates an IPv4
	 *	packet.
	 *
	 * 14	 L4 Checksum Error (L4_CSUM_ERR)
	 *
	 *	When set either the L4 checksum failed to match or the
	 *	controller didn't attempt to validate the checksum.
	 *	This bit is valid only when L4_PROTO indicates a
	 *	TCP/UDP packet, IPV4_FRAG is not set, and
	 *	L4_CSUM_CHECKED is set.
	 *
	 * 15	 IPv4 Fragmented (IPV4_FRAG)
	 * 16	 L4 Checksum Validated (L4_CSUM_CHECKED)
	 *
	 *	When set it indicates the device attempted to validate
	 *	the L4 checksum.
	 *
	 * 23-17 Reserved Zero
	 * 24	 Phase (PHASE)
	 * 25	 (L3_CSUM2)
	 *
	 *	According to the Linux source this is the "second
	 *	checksum engine result". It's never checked.
	 *
	 * 26	 First Descriptor Bit (FIRST)
	 *
	 *	Indicates the first descriptor for the frame.
	 *
	 * 27	 Last Descriptor Bit (LAST)
	 *
	 *	Indicates the last descriptor for the frame.
	 *
	 * 29-28 Reserved Zero
	 * 30	 Buffer Type (BUFFER)
	 *
	 *	When enabled indicates this is a data descriptor.
	 *	Otherwise, it is a metadata descriptor.
	 *
	 * 31	 Reserved Zero
	 */
	uint32_t erc_status;
	uint16_t erc_length;
	uint16_t erc_req_id;

	/* 32-bit hash result */
	uint32_t erc_hash;
	uint16_t erc_sub_qid;

	/*
	 * The device may choose to offset the start of the header
	 * data (which implies this value only applies to the first
	 * descriptor). When and why the device does this is not
	 * documented in the common code. The most likely case would
	 * be for IP header alignment.
	 */
	uint8_t erc_offset;
	uint8_t erc_reserved;
} enahw_rx_cdesc_t;

#define	ENAHW_RX_CDESC_L3_PROTO_MASK		GENMASK(4, 0)
#define	ENAHW_RX_CDESC_SRC_VLAN_CNT_SHIFT	5
#define	ENAHW_RX_CDESC_SRC_VLAN_CNT_MASK	GENMASK(6, 5)
#define	ENAHW_RX_CDESC_L4_PROTO_SHIFT		8
#define	ENAHW_RX_CDESC_L4_PROTO_MASK		GENMASK(12, 8)
#define	ENAHW_RX_CDESC_L3_CSUM_ERR_SHIFT	13
#define	ENAHW_RX_CDESC_L3_CSUM_ERR_MASK		BIT(13)
#define	ENAHW_RX_CDESC_L4_CSUM_ERR_SHIFT	14
#define	ENAHW_RX_CDESC_L4_CSUM_ERR_MASK		BIT(14)
#define	ENAHW_RX_CDESC_IPV4_FRAG_SHIFT		15
#define	ENAHW_RX_CDESC_IPV4_FRAG_MASK		BIT(15)
#define	ENAHW_RX_CDESC_L4_CSUM_CHECKED_SHIFT	16
#define	ENAHW_RX_CDESC_L4_CSUM_CHECKED_MASK	BIT(16)
#define	ENAHW_RX_CDESC_PHASE_SHIFT		24
#define	ENAHW_RX_CDESC_PHASE_MASK		BIT(24)
#define	ENAHW_RX_CDESC_L3_CSUM2_SHIFT		25
#define	ENAHW_RX_CDESC_L3_CSUM2_MASK		BIT(25)
#define	ENAHW_RX_CDESC_FIRST_SHIFT		26
#define	ENAHW_RX_CDESC_FIRST_MASK		BIT(26)
#define	ENAHW_RX_CDESC_LAST_SHIFT		27
#define	ENAHW_RX_CDESC_LAST_MASK		BIT(27)
#define	ENAHW_RX_CDESC_BUFFER_SHIFT		30
#define	ENAHW_RX_CDESC_BUFFER_MASK		BIT(30)

#define	ENAHW_RX_CDESC_L3_PROTO(desc)				\
	((desc)->erc_status & ENAHW_RX_CDESC_L3_PROTO_MASK)

#define	ENAHW_RX_CDESC_L3_CSUM_ERR(desc)			\
	((((desc)->erc_status & ENAHW_RX_CDESC_L3_CSUM_ERR_MASK) >>	\
	    ENAHW_RX_CDESC_L3_CSUM_ERR_SHIFT) != 0)

#define	ENAHW_RX_CDESC_L4_PROTO(desc)				\
	(((desc)->erc_status & ENAHW_RX_CDESC_L4_PROTO_MASK) >>	\
	    ENAHW_RX_CDESC_L4_PROTO_SHIFT)

#define	ENAHW_RX_CDESC_L4_CSUM_CHECKED(desc)			\
	((((desc)->erc_status & ENAHW_RX_CDESC_L4_CSUM_CHECKED_MASK) >> \
	    ENAHW_RX_CDESC_L4_CSUM_CHECKED_SHIFT) != 0)

#define	ENAHW_RX_CDESC_L4_CSUM_ERR(desc)			\
	((((desc)->erc_status & ENAHW_RX_CDESC_L4_CSUM_ERR_MASK) >>	\
	    ENAHW_RX_CDESC_L4_CSUM_ERR_SHIFT) != 0)

#define	ENAHW_RX_CDESC_PHASE(desc)				\
	(((desc)->erc_status & ENAHW_RX_CDESC_PHASE_MASK) >>	\
	    ENAHW_RX_CDESC_PHASE_SHIFT)

#define	ENAHW_RX_CDESC_FIRST(desc)				\
	((((desc)->erc_status & ENAHW_RX_CDESC_FIRST_MASK) >>	\
	    ENAHW_RX_CDESC_FIRST_SHIFT) == 1)

#define	ENAHW_RX_CDESC_LAST(desc)				\
	((((desc)->erc_status & ENAHW_RX_CDESC_LAST_MASK) >>	\
	    ENAHW_RX_CDESC_LAST_SHIFT) == 1)
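
/*
 * Illustrative sketch (not part of the driver): consuming a single Rx
 * completion with the accessors above. The cq, head, and
 * expected_phase variables, and the frame-assembly details, are
 * hypothetical.
 *
 *	enahw_rx_cdesc_t *cdesc = &cq[head];
 *
 *	if (ENAHW_RX_CDESC_PHASE(cdesc) != expected_phase)
 *		return;
 *
 *	if (ENAHW_RX_CDESC_FIRST(cdesc) && ENAHW_RX_CDESC_LAST(cdesc)) {
 *		uint16_t len = cdesc->erc_length;
 *		uint16_t req_id = cdesc->erc_req_id;
 *		boolean_t l4_ok =
 *		    ENAHW_RX_CDESC_L4_CSUM_CHECKED(cdesc) &&
 *		    !ENAHW_RX_CDESC_L4_CSUM_ERR(cdesc);
 *		...
 *	}
 *
 * Remember that the parsing bits (L3/L4 protocol and checksum flags)
 * are only valid on the descriptor with LAST set.
 */
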
/*
 * Controls for the interrupt register mapped to each Rx/Tx CQ.
 */
#define	ENAHW_REG_INTR_RX_DELAY_MASK	GENMASK(14, 0)
#define	ENAHW_REG_INTR_TX_DELAY_SHIFT	15
#define	ENAHW_REG_INTR_TX_DELAY_MASK	GENMASK(29, 15)
#define	ENAHW_REG_INTR_UNMASK_SHIFT	30
#define	ENAHW_REG_INTR_UNMASK_MASK	BIT(30)

#define	ENAHW_REG_INTR_UNMASK(val)	\
	((val) |= ENAHW_REG_INTR_UNMASK_MASK)

#define	ENAHW_REG_INTR_MASK(val)	\
	((val) &= ~ENAHW_REG_INTR_UNMASK_MASK)

#endif	/* _ENA_HW_H */