1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * Copyright 2024 Oxide Computer Company 14 */ 15 16 /* 17 * This file declares all constants and structures dealing with the 18 * physical ENA device. It is based on the ena_com code of the public 19 * Linux and FreeBSD drivers. While this file is based on the common 20 * code it doesn't share the same type names. Where it is useful, a 21 * "common" reference is added to include the name of the type as 22 * defined in the common code. 23 * 24 * The Linux driver defines ena_admin_aq_entry as the top-level type 25 * for admin command descriptors. From this type you can access the 26 * common bits shared by every descriptor (ena_admin_aq_common_desc) 27 * as well as the control buffer (ena_admin_ctrl_buff_info) which is 28 * present for _some_ commands. Other than that, this top-level type 29 * treats the rest of the data as an opaque array of unsigned 32-bit 30 * integers. Then, for each individual command, the Linux driver 31 * defines a dedicated type, each of which contains the following: 32 * 33 * 1. The common descriptor: ena_admin_aq_common_desc. 34 * 35 * 2. The optional control buffer desc: ena_admin_ctrl_buff_info. 36 * 37 * 3. The command-specific data. 38 * 39 * 4. Optional padding to make sure all commands are 64 bytes in size. 40 * 41 * Furthermore, there may be additional common types for commands which 42 * are made up of several sub-commands, e.g. the get/set feature 43 * commands. 44 * 45 * Finally, when a command is passed to the common function for 46 * executing commands (ena_com_execute_admin_command()), it is cast as 47 * a pointer to the top-level type: ena_admin_aq_entry. 48 * 49 * This works for the Linux driver just fine, but it causes lots of 50 * repetition in the structure definitions and also means there is no 51 * easy way to determine all valid commands. This ENA driver has 52 * turned the Linux approach inside out -- the top-level type is a 53 * union of all possible commands: enahw_cmd_desc_t. Each command may 54 * then be further sub-typed via unions to represent its sub-commands. 55 * This same treatment was given to the response descriptor: 56 * enahw_resp_desc_t. 57 * 58 * What is the point of knowing all this? Well, when referencing the 59 * common type in the comment above the enahw_ type, you need to keep 60 * in mind that the Linux/common type will include all the common 61 * descriptor bits, whereas these types do not. 62 * 63 * The common code DOES NOT pack any of these structures, and thus 64 * neither do we. That means these structures all rely on natural 65 * compiler alignment, just as the common code does. In ena.c you will 66 * find CTASSERTs for many of these structures, to verify they are of 67 * the expected size. 68 */ 69 70 #ifndef _ENA_HW_H 71 #define _ENA_HW_H 72 73 #include <sys/ddi.h> 74 #include <sys/sunddi.h> 75 #include <sys/types.h> 76 #include <sys/debug.h> 77 #include <sys/ethernet.h> 78 79 /* 80 * The common code sets the upper limit of I/O queues to 128. In this 81 * case a "queue" is a SQ+CQ pair that forms a logical queue or ring 82 * for sending or receiving packets.
Thus, at maximum, we may expect 83 * 128 Tx rings, and 128 Rx rings; though, practically speaking, the 84 * number of rings will often be limited by number of CPUs or 85 * available interrupts. 86 * 87 * common: ENA_MAX_NUM_IO_QUEUES 88 */ 89 #define ENAHW_MAX_NUM_IO_QUEUES 128 90 91 /* 92 * Generate a 32-bit bitmask where the bits between high (inclusive) 93 * and low (inclusive) are set to 1. 94 */ 95 #define GENMASK(h, l) (((~0U) - (1U << (l)) + 1) & (~0U >> (32 - 1 - (h)))) 96 97 /* 98 * Generate a 64-bit bitmask where bit b is set to 1. 99 */ 100 #define BIT(b) (1UL << (b)) 101 102 #define ENAHW_DMA_ADMINQ_ALIGNMENT 8 103 104 #define ENAHW_ADMIN_CQ_DESC_BUF_ALIGNMENT 8 105 #define ENAHW_ADMIN_SQ_DESC_BUF_ALIGNMENT 8 106 #define ENAHW_AENQ_DESC_BUF_ALIGNMENT 8 107 #define ENAHW_HOST_INFO_ALIGNMENT 8 108 #define ENAHW_HOST_INFO_ALLOC_SZ 4096 109 #define ENAHW_IO_CQ_DESC_BUF_ALIGNMENT 4096 110 #define ENAHW_IO_SQ_DESC_BUF_ALIGNMENT 8 111 112 /* 113 * BAR0 register offsets. 114 * 115 * Any register not defined in the common code was marked as a gap, 116 * using the hex address of the register as suffix to make it clear 117 * where the gaps are. 118 */ 119 #define ENAHW_REG_VERSION 0x0 120 #define ENAHW_REG_CONTROLLER_VERSION 0x4 121 #define ENAHW_REG_CAPS 0x8 122 #define ENAHW_REG_CAPS_EXT 0xc 123 #define ENAHW_REG_ASQ_BASE_LO 0x10 124 #define ENAHW_REG_ASQ_BASE_HI 0x14 125 #define ENAHW_REG_ASQ_CAPS 0x18 126 #define ENAHW_REG_GAP_1C 0x1c 127 #define ENAHW_REG_ACQ_BASE_LO 0x20 128 #define ENAHW_REG_ACQ_BASE_HI 0x24 129 #define ENAHW_REG_ACQ_CAPS 0x28 130 #define ENAHW_REG_ASQ_DB 0x2c 131 #define ENAHW_REG_ACQ_TAIL 0x30 132 #define ENAHW_REG_AENQ_CAPS 0x34 133 #define ENAHW_REG_AENQ_BASE_LO 0x38 134 #define ENAHW_REG_AENQ_BASE_HI 0x3c 135 #define ENAHW_REG_AENQ_HEAD_DB 0x40 136 #define ENAHW_REG_AENQ_TAIL 0x44 137 #define ENAHW_REG_GAP_48 0x48 138 #define ENAHW_REG_INTERRUPT_MASK 0x4c 139 #define ENAHW_REG_GAP_50 0x50 140 #define ENAHW_REG_DEV_CTL 0x54 141 #define ENAHW_REG_DEV_STS 0x58 142 #define ENAHW_REG_MMIO_REG_READ 0x5c 143 #define ENAHW_REG_MMIO_RESP_LO 0x60 144 #define ENAHW_REG_MMIO_RESP_HI 0x64 145 #define ENAHW_REG_RSS_IND_ENTRY_UPDATE 0x68 146 #define ENAHW_NUM_REGS ((ENAHW_REG_RSS_IND_ENTRY_UPDATE / 4) + 1) 147 148 /* 149 * Device Version (Register 0x0) 150 */ 151 #define ENAHW_DEV_MINOR_VSN_MASK 0xff 152 #define ENAHW_DEV_MAJOR_VSN_SHIFT 8 153 #define ENAHW_DEV_MAJOR_VSN_MASK 0xff00 154 155 #define ENAHW_DEV_MAJOR_VSN(vsn) \ 156 (((vsn) & ENAHW_DEV_MAJOR_VSN_MASK) >> ENAHW_DEV_MAJOR_VSN_SHIFT) 157 #define ENAHW_DEV_MINOR_VSN(vsn) \ 158 ((vsn) & ENAHW_DEV_MINOR_VSN_MASK) 159 160 /* 161 * Controller Version (Register 0x4) 162 */ 163 #define ENAHW_CTRL_SUBMINOR_VSN_MASK 0xff 164 #define ENAHW_CTRL_MINOR_VSN_SHIFT 8 165 #define ENAHW_CTRL_MINOR_VSN_MASK 0xff00 166 #define ENAHW_CTRL_MAJOR_VSN_SHIFT 16 167 #define ENAHW_CTRL_MAJOR_VSN_MASK 0xff0000 168 #define ENAHW_CTRL_IMPL_ID_SHIFT 24 169 #define ENAHW_CTRL_IMPL_ID_MASK 0xff000000 170 171 #define ENAHW_CTRL_MAJOR_VSN(vsn) \ 172 (((vsn) & ENAHW_CTRL_MAJOR_VSN_MASK) >> ENAHW_CTRL_MAJOR_VSN_SHIFT) 173 #define ENAHW_CTRL_MINOR_VSN(vsn) \ 174 (((vsn) & ENAHW_CTRL_MINOR_VSN_MASK) >> ENAHW_CTRL_MINOR_VSN_SHIFT) 175 #define ENAHW_CTRL_SUBMINOR_VSN(vsn) \ 176 ((vsn) & ENAHW_CTRL_SUBMINOR_VSN_MASK) 177 #define ENAHW_CTRL_IMPL_ID(vsn) \ 178 (((vsn) & ENAHW_CTRL_IMPL_ID_MASK) >> ENAHW_CTRL_IMPL_ID_SHIFT) 179 180 /* 181 * Device Caps (Register 0x8) 182 */ 183 #define ENAHW_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 184 #define 
ENAHW_CAPS_RESET_TIMEOUT_SHIFT 1 185 #define ENAHW_CAPS_RESET_TIMEOUT_MASK 0x3e 186 #define ENAHW_CAPS_RESET_TIMEOUT(v) \ 187 (((v) & ENAHW_CAPS_RESET_TIMEOUT_MASK) >> \ 188 ENAHW_CAPS_RESET_TIMEOUT_SHIFT) 189 #define ENAHW_CAPS_DMA_ADDR_WIDTH_SHIFT 8 190 #define ENAHW_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 191 #define ENAHW_CAPS_DMA_ADDR_WIDTH(v) \ 192 (((v) & ENAHW_CAPS_DMA_ADDR_WIDTH_MASK) >> \ 193 ENAHW_CAPS_DMA_ADDR_WIDTH_SHIFT) 194 #define ENAHW_CAPS_ADMIN_CMD_TIMEOUT_SHIFT 16 195 #define ENAHW_CAPS_ADMIN_CMD_TIMEOUT_MASK 0xf0000 196 #define ENAHW_CAPS_ADMIN_CMD_TIMEOUT(v) \ 197 (((v) & ENAHW_CAPS_ADMIN_CMD_TIMEOUT_MASK) >> \ 198 ENAHW_CAPS_ADMIN_CMD_TIMEOUT_SHIFT) 199 200 enum enahw_reset_reason_types { 201 ENAHW_RESET_NORMAL = 0, 202 ENAHW_RESET_KEEP_ALIVE_TO = 1, 203 ENAHW_RESET_ADMIN_TO = 2, 204 ENAHW_RESET_MISS_TX_CMPL = 3, 205 ENAHW_RESET_INV_RX_REQ_ID = 4, 206 ENAHW_RESET_INV_TX_REQ_ID = 5, 207 ENAHW_RESET_TOO_MANY_RX_DESCS = 6, 208 ENAHW_RESET_INIT_ERR = 7, 209 ENAHW_RESET_DRIVER_INVALID_STATE = 8, 210 ENAHW_RESET_OS_TRIGGER = 9, 211 ENAHW_RESET_OS_NETDEV_WD = 10, 212 ENAHW_RESET_SHUTDOWN = 11, 213 ENAHW_RESET_USER_TRIGGER = 12, 214 ENAHW_RESET_GENERIC = 13, 215 ENAHW_RESET_MISS_INTERRUPT = 14, 216 ENAHW_RESET_LAST, 217 }; 218 219 /* 220 * Admin Submission Queue Caps (Register 0x18) 221 */ 222 #define ENAHW_ASQ_CAPS_DEPTH_MASK 0xffff 223 #define ENAHW_ASQ_CAPS_ENTRY_SIZE_SHIFT 16 224 #define ENAHW_ASQ_CAPS_ENTRY_SIZE_MASK 0xffff0000 225 226 #define ENAHW_ASQ_CAPS_DEPTH(x) ((x) & ENAHW_ASQ_CAPS_DEPTH_MASK) 227 228 #define ENAHW_ASQ_CAPS_ENTRY_SIZE(x) \ 229 (((x) << ENAHW_ASQ_CAPS_ENTRY_SIZE_SHIFT) & \ 230 ENAHW_ASQ_CAPS_ENTRY_SIZE_MASK) 231 232 /* 233 * Admin Completion Queue Caps (Register 0x28) 234 */ 235 #define ENAHW_ACQ_CAPS_DEPTH_MASK 0xffff 236 #define ENAHW_ACQ_CAPS_ENTRY_SIZE_SHIFT 16 237 #define ENAHW_ACQ_CAPS_ENTRY_SIZE_MASK 0xffff0000 238 239 #define ENAHW_ACQ_CAPS_DEPTH(x) ((x) & ENAHW_ACQ_CAPS_DEPTH_MASK) 240 241 #define ENAHW_ACQ_CAPS_ENTRY_SIZE(x) \ 242 (((x) << ENAHW_ACQ_CAPS_ENTRY_SIZE_SHIFT) & \ 243 ENAHW_ACQ_CAPS_ENTRY_SIZE_MASK) 244 245 /* 246 * Asynchronous Event Notification Queue Caps (Register 0x34) 247 */ 248 #define ENAHW_AENQ_CAPS_DEPTH_MASK 0xffff 249 #define ENAHW_AENQ_CAPS_ENTRY_SIZE_SHIFT 16 250 #define ENAHW_AENQ_CAPS_ENTRY_SIZE_MASK 0xffff0000 251 252 #define ENAHW_AENQ_CAPS_DEPTH(x) ((x) & ENAHW_AENQ_CAPS_DEPTH_MASK) 253 254 #define ENAHW_AENQ_CAPS_ENTRY_SIZE(x) \ 255 (((x) << ENAHW_AENQ_CAPS_ENTRY_SIZE_SHIFT) & \ 256 ENAHW_AENQ_CAPS_ENTRY_SIZE_MASK) 257 258 /* 259 * Interrupt Mask (Register 0x4c) 260 */ 261 #define ENAHW_INTR_UNMASK 0x0 262 #define ENAHW_INTR_MASK 0x1 263 264 /* 265 * Device Control (Register 0x54) 266 */ 267 #define ENAHW_DEV_CTL_DEV_RESET_MASK 0x1 268 #define ENAHW_DEV_CTL_AQ_RESTART_SHIFT 1 269 #define ENAHW_DEV_CTL_AQ_RESTART_MASK 0x2 270 #define ENAHW_DEV_CTL_QUIESCENT_SHIFT 2 271 #define ENAHW_DEV_CTL_QUIESCENT_MASK 0x4 272 #define ENAHW_DEV_CTL_IO_RESUME_SHIFT 3 273 #define ENAHW_DEV_CTL_IO_RESUME_MASK 0x8 274 #define ENAHW_DEV_CTL_RESET_REASON_SHIFT 28 275 #define ENAHW_DEV_CTL_RESET_REASON_MASK 0xf0000000 276 277 /* 278 * Device Status (Register 0x58) 279 */ 280 #define ENAHW_DEV_STS_READY_MASK 0x1 281 #define ENAHW_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 282 #define ENAHW_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 283 #define ENAHW_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 284 #define ENAHW_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 285 #define ENAHW_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 286 #define ENAHW_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 
287 #define ENAHW_DEV_STS_RESET_FINISHED_SHIFT 4 288 #define ENAHW_DEV_STS_RESET_FINISHED_MASK 0x10 289 #define ENAHW_DEV_STS_FATAL_ERROR_SHIFT 5 290 #define ENAHW_DEV_STS_FATAL_ERROR_MASK 0x20 291 #define ENAHW_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 292 #define ENAHW_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 293 #define ENAHW_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 294 #define ENAHW_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 295 296 /* common: ena_admin_aenq_common_desc */ 297 typedef struct enahw_aenq_desc { 298 uint16_t ead_group; 299 uint16_t ead_syndrome; 300 uint8_t ead_flags; 301 uint8_t ead_rsvd1[3]; 302 uint32_t ead_ts_low; 303 uint32_t ead_ts_high; 304 305 union { 306 uint32_t raw[12]; 307 308 struct { 309 uint32_t flags; 310 } link_change; 311 312 struct { 313 uint32_t rx_drops_low; 314 uint32_t rx_drops_high; 315 uint32_t tx_drops_low; 316 uint32_t tx_drops_high; 317 } keep_alive; 318 } ead_payload; 319 } enahw_aenq_desc_t; 320 321 #define ENAHW_AENQ_DESC_PHASE_MASK BIT(0) 322 323 #define ENAHW_AENQ_DESC_PHASE(desc) \ 324 ((desc)->ead_flags & ENAHW_AENQ_DESC_PHASE_MASK) 325 326 #define ENAHW_AENQ_LINK_CHANGE_LINK_STATUS_MASK BIT(0) 327 328 /* 329 * Asynchronous Event Notification Queue groups. 330 * 331 * Note: These values represent the bit position of each feature as 332 * returned by ENAHW_FEAT_AENQ_CONFIG. We encode them this way so that 333 * they can double as an index into the AENQ handlers array. 334 * 335 * common: ena_admin_aenq_group 336 */ 337 typedef enum enahw_aenq_groups { 338 ENAHW_AENQ_GROUP_LINK_CHANGE = 0, 339 ENAHW_AENQ_GROUP_FATAL_ERROR = 1, 340 ENAHW_AENQ_GROUP_WARNING = 2, 341 ENAHW_AENQ_GROUP_NOTIFICATION = 3, 342 ENAHW_AENQ_GROUP_KEEP_ALIVE = 4, 343 ENAHW_AENQ_GROUP_REFRESH_CAPABILITIES = 5, 344 ENAHW_AENQ_GROUPS_ARR_NUM = 6, 345 } enahw_aenq_groups_t; 346 347 /* 348 * The reason for ENAHW_AENQ_GROUP_NOTIFICATION. 349 * 350 * common: ena_admin_aenq_notification_syndrome 351 */ 352 typedef enum enahw_aenq_syndrome { 353 ENAHW_AENQ_SYNDROME_UPDATE_HINTS = 2, 354 } enahw_aenq_syndrome_t; 355 356 /* 357 * ENA devices use a 48-bit memory space. 358 * 359 * common: ena_common_mem_addr 360 */ 361 typedef struct enahw_addr { 362 uint32_t ea_low; 363 uint16_t ea_high; 364 uint16_t ea_rsvd; /* must be zero */ 365 } enahw_addr_t; 366 367 /* common: ena_admin_ctrl_buff_info */ 368 struct enahw_ctrl_buff { 369 uint32_t ecb_length; 370 enahw_addr_t ecb_addr; 371 }; 372 373 /* common: ena_admin_get_set_feature_common_desc */ 374 struct enahw_feat_common { 375 /* 376 * 1:0 Select which value you want. 377 * 378 * 0x1 = Current value. 379 * 0x3 = Default value. 380 * 381 * Note: Linux seems to set this to 0 to get the value, 382 * not sure if that's a bug or just another way to get the 383 * current value. 384 * 385 * 7:3 Reserved. 386 */ 387 uint8_t efc_flags; 388 389 /* An id from enahw_feature_id_t. */ 390 uint8_t efc_id; 391 392 /* 393 * Each feature is versioned, allowing upgrades to the feature 394 * set without breaking backwards compatibility. The driver 395 * uses this field to specify which version it supports 396 * (starting from zero). Linux doesn't document this very well 397 * and sets this value to 0 for most features. We define a set 398 * of macros, underneath the enahw_feature_id_t type, clearly 399 * documenting the version we support for each feature.
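 *
 * To make the flow concrete, here is a rough sketch (not a verbatim
 * snippet from ena.c) of how a get-feature request for the AENQ
 * configuration could be assembled using the types and macros defined
 * later in this file:
 *
 *     enahw_cmd_desc_t cmd;
 *
 *     bzero(&cmd, sizeof (cmd));
 *     cmd.ecd_opcode = ENAHW_CMD_GET_FEATURE;
 *     cmd.ecd_cmd.ecd_get_feat.ecgf_comm.efc_id = ENAHW_FEAT_AENQ_CONFIG;
 *     cmd.ecd_cmd.ecd_get_feat.ecgf_comm.efc_version =
 *         ENAHW_FEAT_AENQ_CONFIG_VER;
 *     ENAHW_GET_FEAT_FLAGS_GET_CURR_VAL(&cmd.ecd_cmd.ecd_get_feat);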
400 */ 401 uint8_t efc_version; 402 uint8_t efc_rsvd; 403 }; 404 405 /* common: ena_admin_get_feat_cmd */ 406 typedef struct enahw_cmd_get_feat { 407 struct enahw_ctrl_buff ecgf_ctrl_buf; 408 struct enahw_feat_common ecgf_comm; 409 uint32_t egcf_unused[11]; 410 } enahw_cmd_get_feat_t; 411 412 /* 413 * N.B. Linux sets efc_flags to 0 (via memset) when reading the 414 * current value, but the comments say it should be 0x1. We follow the 415 * comments. 416 */ 417 #define ENAHW_GET_FEAT_FLAGS_GET_CURR_VAL(desc) \ 418 ((desc)->ecgf_comm.efc_flags) |= 0x1 419 #define ENAHW_GET_FEAT_FLAGS_GET_DEF_VAL(desc) \ 420 ((desc)->ecgf_comm.efc_flags) |= 0x3 421 422 /* 423 * Set the MTU of the device. This value does not include the L2 424 * headers or trailers, only the payload. 425 * 426 * common: ena_admin_set_feature_mtu_desc 427 */ 428 typedef struct enahw_feat_mtu { 429 uint32_t efm_mtu; 430 } enahw_feat_mtu_t; 431 432 /* common: ena_admin_set_feature_host_attr_desc */ 433 typedef struct enahw_feat_host_attr { 434 enahw_addr_t efha_os_addr; 435 enahw_addr_t efha_debug_addr; 436 uint32_t efha_debug_sz; 437 } enahw_feat_host_attr_t; 438 439 /* 440 * ENAHW_FEAT_AENQ_CONFIG 441 * 442 * common: ena_admin_feature_aenq_desc 443 */ 444 typedef struct enahw_feat_aenq { 445 /* Bitmask of AENQ groups this device supports. */ 446 uint32_t efa_supported_groups; 447 448 /* Bitmask of AENQ groups currently enabled. */ 449 uint32_t efa_enabled_groups; 450 } enahw_feat_aenq_t; 451 452 /* common: ena_admin_set_feat_cmd */ 453 typedef struct enahw_cmd_set_feat { 454 struct enahw_ctrl_buff ecsf_ctrl_buf; 455 struct enahw_feat_common ecsf_comm; 456 457 union { 458 uint32_t ecsf_raw[11]; 459 enahw_feat_host_attr_t ecsf_host_attr; 460 enahw_feat_mtu_t ecsf_mtu; 461 enahw_feat_aenq_t ecsf_aenq; 462 } ecsf_feat; 463 } enahw_cmd_set_feat_t; 464 465 /* 466 * Used to populate the host information buffer which the Nitro 467 * hypervisor supposedly uses for display, debugging, and possibly 468 * other purposes. 
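 *
 * As a loose sketch of how this is wired up (the real sequence lives
 * in ena.c, and host_info_pa below is a stand-in for the DMA buffer's
 * physical address): the driver allocates ENAHW_HOST_INFO_ALLOC_SZ
 * bytes of DMA memory, fills in this structure, and then points the
 * device at it with a set-feature command:
 *
 *     enahw_cmd_desc_t cmd;
 *     enahw_cmd_set_feat_t *sf = &cmd.ecd_cmd.ecd_set_feat;
 *
 *     bzero(&cmd, sizeof (cmd));
 *     cmd.ecd_opcode = ENAHW_CMD_SET_FEATURE;
 *     sf->ecsf_comm.efc_id = ENAHW_FEAT_HOST_ATTR_CONFIG;
 *     sf->ecsf_comm.efc_version = ENAHW_FEAT_HOST_ATTR_CONFIG_VER;
 *     sf->ecsf_feat.ecsf_host_attr.efha_os_addr.ea_low =
 *         host_info_pa & 0xffffffff;
 *     sf->ecsf_feat.ecsf_host_attr.efha_os_addr.ea_high =
 *         (host_info_pa >> 32) & 0xffff;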
469 * 470 * common: ena_admin_host_info 471 */ 472 typedef struct enahw_host_info { 473 uint32_t ehi_os_type; 474 uint8_t ehi_os_dist_str[128]; 475 uint32_t ehi_os_dist; 476 uint8_t ehi_kernel_ver_str[32]; 477 uint32_t ehi_kernel_ver; 478 uint32_t ehi_driver_ver; 479 uint32_t ehi_supported_net_features[2]; 480 uint16_t ehi_ena_spec_version; 481 uint16_t ehi_bdf; 482 uint16_t ehi_num_cpus; 483 uint16_t ehi_rsvd; 484 uint32_t ehi_driver_supported_features; 485 } enahw_host_info_t; 486 487 #define ENAHW_HOST_INFO_MAJOR_MASK GENMASK(7, 0) 488 #define ENAHW_HOST_INFO_MINOR_SHIFT 8 489 #define ENAHW_HOST_INFO_MINOR_MASK GENMASK(15, 8) 490 #define ENAHW_HOST_INFO_SUB_MINOR_SHIFT 16 491 #define ENAHW_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) 492 #define ENAHW_HOST_INFO_SPEC_MAJOR_SHIFT 8 493 #define ENAHW_HOST_INFO_MODULE_TYPE_SHIFT 24 494 #define ENAHW_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) 495 #define ENAHW_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) 496 #define ENAHW_HOST_INFO_DEVICE_SHIFT 3 497 #define ENAHW_HOST_INFO_DEVICE_MASK GENMASK(7, 3) 498 #define ENAHW_HOST_INFO_BUS_SHIFT 8 499 #define ENAHW_HOST_INFO_BUS_MASK GENMASK(15, 8) 500 #define ENAHW_HOST_INFO_RX_OFFSET_SHIFT 1 501 #define ENAHW_HOST_INFO_RX_OFFSET_MASK BIT(1) 502 #define ENAHW_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2 503 #define ENAHW_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) 504 #define ENAHW_HOST_INFO_RX_BUF_MIRRORING_SHIFT 3 505 #define ENAHW_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3) 506 #define ENAHW_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4 507 #define ENAHW_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4) 508 509 /* common: ena_admin_os_type */ 510 enum enahw_os_type { 511 ENAHW_OS_LINUX = 1, 512 ENAHW_OS_WIN = 2, 513 ENAHW_OS_DPDK = 3, 514 ENAHW_OS_FREEBSD = 4, 515 ENAHW_OS_IPXE = 5, 516 ENAHW_OS_ESXI = 6, 517 ENAHW_OS_MACOS = 7, 518 ENAHW_OS_GROUPS_NUM = 7, 519 }; 520 521 /* 522 * Create I/O Completion Queue 523 * 524 * A completion queue is where the device writes responses to I/O 525 * requests. The admin completion queue must be created before such a 526 * command can be issued, see ena_admin_cq_init(). 527 * 528 * common: ena_admin_aq_create_cq_cmd 529 */ 530 typedef struct enahw_cmd_create_cq { 531 /* 532 * 7-6 reserved 533 * 534 * 5 interrupt mode: when set the device sends an interrupt 535 * for each completion, otherwise the driver must poll 536 * the queue. 537 * 538 * 4-0 reserved 539 */ 540 uint8_t ecq_caps_1; 541 542 /* 543 * 7-5 reserved 544 * 545 * 4-0 CQ entry size (in words): the size of a single CQ entry 546 * in multiples of 32-bit words. 547 * 548 * NOTE: According to the common code the "valid" values 549 * are 4 or 8 -- this is incorrect. The valid values are 550 * 2 and 4. The common code does have an "extended" Rx 551 * completion descriptor, ena_eth_io_rx_cdesc_ext, that 552 * is 32 bytes and thus would use a value of 8, but it is 553 * not used by the Linux or FreeBSD drivers, so we do not 554 * bother with it. 555 * 556 * Type Bytes Value 557 * enahw_tx_cdesc_t 8 2 558 * enahw_rx_cdesc_t 16 4 559 */ 560 uint8_t ecq_caps_2; 561 562 /* The number of CQ entries, must be a power of 2. */ 563 uint16_t ecq_num_descs; 564 565 /* The MSI-X vector assigned to this CQ. */ 566 uint32_t ecq_msix_vector; 567 568 /* 569 * The CQ's physical base address. The CQ memory must be 570 * physically contiguous. 
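 *
 * For illustration, creating a CQ for 16-byte Rx completion
 * descriptors might be encoded roughly as follows (a sketch only;
 * num_descs, vector, and cq_pa are stand-in variables, not names from
 * the driver):
 *
 *     enahw_cmd_desc_t cmd;
 *     enahw_cmd_create_cq_t *ccq = &cmd.ecd_cmd.ecd_create_cq;
 *
 *     bzero(&cmd, sizeof (cmd));
 *     cmd.ecd_opcode = ENAHW_CMD_CREATE_CQ;
 *     ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLE(ccq);
 *     ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS(ccq,
 *         sizeof (enahw_rx_cdesc_t) / sizeof (uint32_t));
 *     ccq->ecq_num_descs = num_descs;
 *     ccq->ecq_msix_vector = vector;
 *     ccq->ecq_addr.ea_low = cq_pa & 0xffffffff;
 *     ccq->ecq_addr.ea_high = (cq_pa >> 32) & 0xffff;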
571 */ 572 enahw_addr_t ecq_addr; 573 } enahw_cmd_create_cq_t; 574 575 #define ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLED_SHIFT 5 576 #define ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLED_MASK (BIT(5)) 577 #define ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS_MASK (GENMASK(4, 0)) 578 579 #define ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLE(cmd) \ 580 ((cmd)->ecq_caps_1 |= ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLED_MASK) 581 582 #define ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS(cmd, val) \ 583 (((cmd)->ecq_caps_2) |= \ 584 ((val) & ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS_MASK)) 585 586 /* 587 * Destroy Completion Queue 588 * 589 * common: ena_admin_aq_destroy_cq_cmd 590 */ 591 typedef struct enahw_cmd_destroy_cq { 592 uint16_t edcq_idx; 593 uint16_t edcq_rsvd; 594 } enahw_cmd_destroy_cq_t; 595 596 /* 597 * common: ena_admin_aq_create_sq_cmd 598 */ 599 typedef struct enahw_cmd_create_sq { 600 /* 601 * 7-5 direction: 0x1 = Tx, 0x2 = Rx 602 * 4-0 reserved 603 */ 604 uint8_t ecsq_dir; 605 uint8_t ecsq_rsvd1; 606 607 /* 608 * 7 reserved 609 * 610 * 6-4 completion policy: How are completion events generated. 611 * 612 * See enahw_completion_policy_type_t for a description of 613 * the various values. 614 * 615 * 3-0 placement policy: Where the descriptor ring and 616 * headers reside. 617 * 618 * See enahw_placement_policy_t for a description of the 619 * various values. 620 */ 621 uint8_t ecsq_caps_2; 622 623 /* 624 * 7-1 reserved 625 * 626 * 0 physically contiguous: When set indicates the descriptor 627 * ring memory is physically contiguous. 628 */ 629 uint8_t ecsq_caps_3; 630 631 /* 632 * The index of the associated Completion Queue (CQ). The CQ 633 * must be created before the SQ. 634 */ 635 uint16_t ecsq_cq_idx; 636 637 /* The number of descriptors in this SQ. */ 638 uint16_t ecsq_num_descs; 639 640 /* 641 * The base physical address of the SQ. This should not be set 642 * for LLQ. Must be page aligned. 643 */ 644 enahw_addr_t ecsq_base; 645 646 /* 647 * The physical address of the head write-back pointer. Valid 648 * only when the completion policy is set to one of the head 649 * write-back modes (0x2 or 0x3). Must be cacheline size 650 * aligned. 651 */ 652 enahw_addr_t ecsq_head_wb; 653 uint32_t ecsq_rsvdw2; 654 uint32_t ecsq_rsvdw3; 655 } enahw_cmd_create_sq_t; 656 657 typedef enum enahw_sq_direction { 658 ENAHW_SQ_DIRECTION_TX = 1, 659 ENAHW_SQ_DIRECTION_RX = 2, 660 } enahw_sq_direction_t; 661 662 typedef enum enahw_placement_policy { 663 /* Descriptors and headers are in host memory. */ 664 ENAHW_PLACEMENT_POLICY_HOST = 1, 665 666 /* 667 * Descriptors and headers are in device memory (a.k.a Low 668 * Latency Queue). 669 */ 670 ENAHW_PLACEMENT_POLICY_DEV = 3, 671 } enahw_placement_policy_t; 672 673 /* 674 * DESC: Write a CQ entry for each SQ descriptor. 675 * 676 * DESC_ON_DEMAND: Write a CQ entry when requested by the SQ descriptor. 677 * 678 * HEAD_ON_DEMAND: Update head pointer when requested by the SQ 679 * descriptor. 680 * 681 * HEAD: Update head pointer for each SQ descriptor. 
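 *
 * As a rough illustration of how these policies come together with the
 * create SQ macros below when building a host-memory Tx SQ (a sketch;
 * cq_idx, num_descs, and sq_pa are stand-ins, not driver variable
 * names):
 *
 *     enahw_cmd_desc_t cmd;
 *     enahw_cmd_create_sq_t *csq = &cmd.ecd_cmd.ecd_create_sq;
 *
 *     bzero(&cmd, sizeof (cmd));
 *     cmd.ecd_opcode = ENAHW_CMD_CREATE_SQ;
 *     ENAHW_CMD_CREATE_SQ_DIR(csq, ENAHW_SQ_DIRECTION_TX);
 *     ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY(csq, ENAHW_PLACEMENT_POLICY_HOST);
 *     ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY(csq, ENAHW_COMPLETION_POLICY_DESC);
 *     ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG(csq);
 *     csq->ecsq_cq_idx = cq_idx;
 *     csq->ecsq_num_descs = num_descs;
 *     csq->ecsq_base.ea_low = sq_pa & 0xffffffff;
 *     csq->ecsq_base.ea_high = (sq_pa >> 32) & 0xffff;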
682 * 683 */ 684 typedef enum enahw_completion_policy_type { 685 ENAHW_COMPLETION_POLICY_DESC = 0, 686 ENAHW_COMPLETION_POLICY_DESC_ON_DEMAND = 1, 687 ENAHW_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, 688 ENAHW_COMPLETION_POLICY_HEAD = 3, 689 } enahw_completion_policy_type_t; 690 691 #define ENAHW_CMD_CREATE_SQ_DIR_SHIFT 5 692 #define ENAHW_CMD_CREATE_SQ_DIR_MASK GENMASK(7, 5) 693 #define ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY_MASK GENMASK(3, 0) 694 #define ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY_SHIFT 4 695 #define ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY_MASK GENMASK(6, 4) 696 #define ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG_MASK BIT(0) 697 698 #define ENAHW_CMD_CREATE_SQ_DIR(cmd, val) \ 699 (((cmd)->ecsq_dir) |= (((val) << ENAHW_CMD_CREATE_SQ_DIR_SHIFT) & \ 700 ENAHW_CMD_CREATE_SQ_DIR_MASK)) 701 702 #define ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY(cmd, val) \ 703 (((cmd)->ecsq_caps_2) |= \ 704 ((val) & ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY_MASK)) 705 706 #define ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY(cmd, val) \ 707 (((cmd)->ecsq_caps_2) |= \ 708 (((val) << ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY_SHIFT) & \ 709 ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY_MASK)) 710 711 #define ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG(cmd) \ 712 ((cmd)->ecsq_caps_3 |= ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG_MASK) 713 714 /* common: ena_admin_sq */ 715 typedef struct enahw_cmd_destroy_sq { 716 uint16_t edsq_idx; 717 uint8_t edsq_dir; /* Tx/Rx */ 718 uint8_t edsq_rsvd; 719 } enahw_cmd_destroy_sq_t; 720 721 #define ENAHW_CMD_DESTROY_SQ_DIR_SHIFT 5 722 #define ENAHW_CMD_DESTROY_SQ_DIR_MASK GENMASK(7, 5) 723 724 #define ENAHW_CMD_DESTROY_SQ_DIR(cmd, val) \ 725 (((cmd)->edsq_dir) |= (((val) << ENAHW_CMD_DESTROY_SQ_DIR_SHIFT) & \ 726 ENAHW_CMD_DESTROY_SQ_DIR_MASK)) 727 728 /* common: ena_admin_aq_get_stats_cmd */ 729 typedef struct enahw_cmd_get_stats { 730 struct enahw_ctrl_buff ecgs_ctrl_buf; 731 uint8_t ecgs_type; 732 uint8_t ecgs_scope; 733 uint16_t ecgs_rsvd; 734 uint16_t ecgs_queue_idx; 735 736 /* 737 * The device ID to query stats from. The sentinel 738 * value 0xFFFF indicates a query of the current device. 739 * According to the common docs, a "privileged device" may 740 * query stats for other ENA devices. However, the definition 741 * of this "privileged device" is not expanded upon. 742 */ 743 uint16_t ecgs_device_id; 744 } enahw_cmd_get_stats_t; 745 746 /* Query the stats for my device. */ 747 #define ENAHW_CMD_GET_STATS_MY_DEVICE_ID 0xFFFF 748 749 /* 750 * BASIC: Returns enahw_resp_basic_stats. 751 * 752 * EXTENDED: According to the Linux documentation, this returns a buffer in 753 * "string format" with additional statistics per queue and per device ID. 754 * 755 * ENI: According to the Linux documentation, it returns "extra HW 756 * stats for a specific network interfaces".
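 *
 * By way of example, a basic-stats query for the local device might be
 * put together like this (illustrative only, not the literal driver
 * code):
 *
 *     enahw_cmd_desc_t cmd;
 *     enahw_cmd_get_stats_t *gs = &cmd.ecd_cmd.ecd_get_stats;
 *
 *     bzero(&cmd, sizeof (cmd));
 *     cmd.ecd_opcode = ENAHW_CMD_GET_STATS;
 *     gs->ecgs_type = ENAHW_GET_STATS_TYPE_BASIC;
 *     gs->ecgs_scope = ENAHW_GET_STATS_SCOPE_ETH;
 *     gs->ecgs_device_id = ENAHW_CMD_GET_STATS_MY_DEVICE_ID;
 *
 * The results come back in the response descriptor's
 * erd_resp.erd_basic_stats field (enahw_resp_basic_stats_t).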
757 * 758 * common: ena_admin_get_stats_type 759 */ 760 typedef enum enahw_get_stats_type { 761 ENAHW_GET_STATS_TYPE_BASIC = 0, 762 ENAHW_GET_STATS_TYPE_EXTENDED = 1, 763 ENAHW_GET_STATS_TYPE_ENI = 2, 764 } enahw_get_stats_type_t; 765 766 /* common: ena_admin_get_stats_scope */ 767 typedef enum enahw_get_stats_scope { 768 ENAHW_GET_STATS_SCOPE_QUEUE = 0, 769 ENAHW_GET_STATS_SCOPE_ETH = 1, 770 } enahw_get_stats_scope_t; 771 772 /* common: ena_admin_aq_entry */ 773 typedef struct enahw_cmd_desc { 774 uint16_t ecd_cmd_id; 775 uint8_t ecd_opcode; 776 uint8_t ecd_flags; 777 778 union { 779 uint32_t ecd_raw[15]; 780 enahw_cmd_get_feat_t ecd_get_feat; 781 enahw_cmd_set_feat_t ecd_set_feat; 782 enahw_cmd_create_cq_t ecd_create_cq; 783 enahw_cmd_destroy_cq_t ecd_destroy_cq; 784 enahw_cmd_create_sq_t ecd_create_sq; 785 enahw_cmd_destroy_sq_t ecd_destroy_sq; 786 enahw_cmd_get_stats_t ecd_get_stats; 787 } ecd_cmd; 788 789 } enahw_cmd_desc_t; 790 791 /* 792 * top level commands that may be sent to the Admin Queue. 793 * 794 * common: ena_admin_aq_opcode 795 */ 796 typedef enum ena_cmd_opcode { 797 ENAHW_CMD_NONE = 0, 798 ENAHW_CMD_CREATE_SQ = 1, 799 ENAHW_CMD_DESTROY_SQ = 2, 800 ENAHW_CMD_CREATE_CQ = 3, 801 ENAHW_CMD_DESTROY_CQ = 4, 802 ENAHW_CMD_GET_FEATURE = 8, 803 ENAHW_CMD_SET_FEATURE = 9, 804 ENAHW_CMD_GET_STATS = 11, 805 } enahw_cmd_opcode_t; 806 807 /* common: ENA_ADMIN_AQ_COMMON_DESC */ 808 #define ENAHW_CMD_ID_MASK GENMASK(11, 0) 809 #define ENAHW_CMD_PHASE_MASK BIT(0) 810 811 #define ENAHW_CMD_ID(desc, id) \ 812 (((desc)->ecd_cmd_id) |= ((id) & ENAHW_CMD_ID_MASK)) 813 814 /* 815 * Subcommands for ENA_ADMIN_{GET,SET}_FEATURE. 816 * 817 * common: ena_admin_aq_feature_id 818 */ 819 typedef enum enahw_feature_id { 820 ENAHW_FEAT_DEVICE_ATTRIBUTES = 1, 821 ENAHW_FEAT_MAX_QUEUES_NUM = 2, 822 ENAHW_FEAT_HW_HINTS = 3, 823 ENAHW_FEAT_LLQ = 4, 824 ENAHW_FEAT_EXTRA_PROPERTIES_STRINGS = 5, 825 ENAHW_FEAT_EXTRA_PROPERTIES_FLAGS = 6, 826 ENAHW_FEAT_MAX_QUEUES_EXT = 7, 827 ENAHW_FEAT_RSS_HASH_FUNCTION = 10, 828 ENAHW_FEAT_STATELESS_OFFLOAD_CONFIG = 11, 829 ENAHW_FEAT_RSS_INDIRECTION_TABLE_CONFIG = 12, 830 ENAHW_FEAT_MTU = 14, 831 ENAHW_FEAT_RSS_HASH_INPUT = 18, 832 ENAHW_FEAT_INTERRUPT_MODERATION = 20, 833 ENAHW_FEAT_AENQ_CONFIG = 26, 834 ENAHW_FEAT_LINK_CONFIG = 27, 835 ENAHW_FEAT_HOST_ATTR_CONFIG = 28, 836 ENAHW_FEAT_NUM = 32, 837 } enahw_feature_id_t; 838 839 /* 840 * Device capabilities. 841 * 842 * common: ena_admin_aq_caps_id 843 */ 844 typedef enum enahw_capability_id { 845 ENAHW_CAP_ENI_STATS = 0, 846 ENAHW_CAP_ENA_SRD_INFO = 1, 847 ENAHW_CAP_CUSTOMER_METRICS = 2, 848 ENAHW_CAP_EXTENDED_RESET = 3, 849 ENAHW_CAP_CDESC_MBZ = 4, 850 } enahw_capability_id_t; 851 852 /* 853 * The following macros define the maximum version we support for each 854 * feature. These are the feature versions we use to communicate with 855 * the feature command. Linux has these values spread throughout the 856 * code at the various callsites of ena_com_get_feature(). We choose 857 * to centralize our feature versions to make it easier to audit. 
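 *
 * Each macro pairs with the corresponding enahw_feature_id_t value;
 * e.g. a ENAHW_FEAT_MAX_QUEUES_EXT request is issued with efc_version
 * set to ENAHW_FEAT_MAX_QUEUES_EXT_VER, typically only after checking
 * the supported-features bitmap returned in the device attributes,
 * along these lines (a sketch; dev_attr is a stand-in pointer to a
 * enahw_feat_dev_attr_t):
 *
 *     if ((dev_attr->efda_supported_features &
 *         (1U << ENAHW_FEAT_MAX_QUEUES_EXT)) != 0) {
 *             (issue the versioned get-feature command)
 *     }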
858 */ 859 #define ENAHW_FEAT_DEVICE_ATTRIBUTES_VER 0 860 #define ENAHW_FEAT_MAX_QUEUES_NUM_VER 0 861 #define ENAHW_FEAT_HW_HINTS_VER 0 862 #define ENAHW_FEAT_LLQ_VER 0 863 #define ENAHW_FEAT_EXTRA_PROPERTIES_STRINGS_VER 0 864 #define ENAHW_FEAT_EXTRA_PROPERTIES_FLAGS_VER 0 865 #define ENAHW_FEAT_MAX_QUEUES_EXT_VER 1 866 #define ENAHW_FEAT_RSS_HASH_FUNCTION_VER 0 867 #define ENAHW_FEAT_STATELESS_OFFLOAD_CONFIG_VER 0 868 #define ENAHW_FEAT_RSS_INDIRECTION_TABLE_CONFIG_VER 0 869 #define ENAHW_FEAT_MTU_VER 0 870 #define ENAHW_FEAT_RSS_HASH_INPUT_VER 0 871 #define ENAHW_FEAT_INTERRUPT_MODERATION_VER 0 872 #define ENAHW_FEAT_AENQ_CONFIG_VER 0 873 #define ENAHW_FEAT_LINK_CONFIG_VER 0 874 #define ENAHW_FEAT_HOST_ATTR_CONFIG_VER 0 875 876 /* common: ena_admin_link_types */ 877 typedef enum enahw_link_speeds { 878 ENAHW_LINK_SPEED_1G = 0x1, 879 ENAHW_LINK_SPEED_2_HALF_G = 0x2, 880 ENAHW_LINK_SPEED_5G = 0x4, 881 ENAHW_LINK_SPEED_10G = 0x8, 882 ENAHW_LINK_SPEED_25G = 0x10, 883 ENAHW_LINK_SPEED_40G = 0x20, 884 ENAHW_LINK_SPEED_50G = 0x40, 885 ENAHW_LINK_SPEED_100G = 0x80, 886 ENAHW_LINK_SPEED_200G = 0x100, 887 ENAHW_LINK_SPEED_400G = 0x200, 888 } enahw_link_speeds_t; 889 890 /* 891 * Response to ENAHW_FEAT_HW_HINTS. 892 * 893 * Hints from the device to the driver about what values to use for 894 * various communications between the two. A value of 0 indicates 895 * there is no hint and the driver should provide its own default. All 896 * timeout values are in milliseconds. 897 * 898 * common: ena_admin_ena_hw_hints 899 */ 900 typedef struct enahw_device_hints { 901 /* 902 * The amount of time the driver should wait for an MMIO read 903 * reply before giving up and returning an error. 904 */ 905 uint16_t edh_mmio_read_timeout; 906 907 /* 908 * If the driver has not seen an AENQ keep alive in this 909 * timeframe, then consider the device hung and perform a 910 * reset. 911 */ 912 uint16_t edh_keep_alive_timeout; 913 914 /* 915 * The time period in which we expect a Tx to report 916 * completion, otherwise it is considered "missed". Initiate a 917 * device reset when the number of missed completions is 918 * greater than the threshold. 919 */ 920 uint16_t edh_tx_comp_timeout; 921 uint16_t edh_missed_tx_reset_threshold; 922 923 /* 924 * The time period in which we expect an admin command to 925 * report completion. 926 */ 927 uint16_t edh_admin_comp_timeout; 928 929 /* 930 * Used by Linux to set the netdevice 'watchdog_timeo' value. 931 * This value is used by the networking stack to determine 932 * when a pending transmission has stalled. This is similar to 933 * the keep alive timeout, except it views progress from 934 * the perspective of the network stack itself. This difference 935 * is subtle but important: the device could be in a state 936 * where it has a functioning keep alive heartbeat, but has a 937 * stuck Tx queue impeding forward progress of the networking 938 * stack (which in many cases results in a scenario 939 * indistinguishable from a complete host hang). 940 * 941 * The mac layer does not currently provide such 942 * functionality, though it could and should be extended to 943 * support such a feature. 944 */ 945 uint16_t edh_net_wd_timeout; 946 947 /* 948 * The maximum number of cookies/segments allowed in a DMA 949 * scatter-gather list. 950 */ 951 uint16_t edh_max_tx_sgl; 952 uint16_t edh_max_rx_sgl; 953 954 uint16_t reserved[8]; 955 } enahw_device_hints_t; 956 957 /* 958 * Response to ENAHW_FEAT_DEVICE_ATTRIBUTES.
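 *
 * The payload arrives in the response's
 * erd_resp.erd_get_feat.ergf_dev_attr field. As a small example of how
 * it might be consumed (a sketch; resp and max_mtu are stand-ins, not
 * driver names), the factory MAC address and maximum MTU can be pulled
 * straight out of it:
 *
 *     enahw_feat_dev_attr_t *attr =
 *         &resp.erd_resp.erd_get_feat.ergf_dev_attr;
 *     uint8_t mac[ETHERADDRL];
 *
 *     bcopy(attr->efda_mac_addr, mac, ETHERADDRL);
 *     max_mtu = attr->efda_max_mtu;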
959 * 960 * common: ena_admin_device_attr_feature_desc 961 */ 962 typedef struct enahw_feat_dev_attr { 963 uint32_t efda_impl_id; 964 uint32_t efda_device_version; 965 966 /* 967 * Bitmap representing supported get/set feature subcommands 968 * (enahw_feature_id). 969 */ 970 uint32_t efda_supported_features; 971 972 /* 973 * Bitmap representing device capabilities. 974 * (enahw_capability_id) 975 */ 976 uint32_t efda_capabilities; 977 978 /* Number of bits used for physical/virtual address. */ 979 uint32_t efda_phys_addr_width; 980 uint32_t efda_virt_addr_with; 981 982 /* The unicast MAC address in network byte order. */ 983 uint8_t efda_mac_addr[6]; 984 uint8_t efda_rsvd2[2]; 985 uint32_t efda_max_mtu; 986 } enahw_feat_dev_attr_t; 987 988 /* 989 * Response to ENAHW_FEAT_MAX_QUEUES_NUM. 990 * 991 * common: ena_admin_queue_feature_desc 992 */ 993 typedef struct enahw_feat_max_queue { 994 uint32_t efmq_max_sq_num; 995 uint32_t efmq_max_sq_depth; 996 uint32_t efmq_max_cq_num; 997 uint32_t efmq_max_cq_depth; 998 uint32_t efmq_max_legacy_llq_num; 999 uint32_t efmq_max_legacy_llq_depth; 1000 uint32_t efmq_max_header_size; 1001 1002 /* 1003 * The maximum number of descriptors a single Tx packet may 1004 * span. This includes the meta descriptor. 1005 */ 1006 uint16_t efmq_max_per_packet_tx_descs; 1007 1008 /* 1009 * The maximum number of descriptors a single Rx packet may span. 1010 */ 1011 uint16_t efmq_max_per_packet_rx_descs; 1012 } enahw_feat_max_queue_t; 1013 1014 /* 1015 * Response to ENAHW_FEAT_MAX_QUEUES_EXT. 1016 * 1017 * common: ena_admin_queue_ext_feature_desc 1018 */ 1019 typedef struct enahw_feat_max_queue_ext { 1020 uint8_t efmqe_version; 1021 uint8_t efmqe_rsvd[3]; 1022 1023 uint32_t efmqe_max_tx_sq_num; 1024 uint32_t efmqe_max_tx_cq_num; 1025 uint32_t efmqe_max_rx_sq_num; 1026 uint32_t efmqe_max_rx_cq_num; 1027 uint32_t efmqe_max_tx_sq_depth; 1028 uint32_t efmqe_max_tx_cq_depth; 1029 uint32_t efmqe_max_rx_sq_depth; 1030 uint32_t efmqe_max_rx_cq_depth; 1031 uint32_t efmqe_max_tx_header_size; 1032 1033 /* 1034 * The maximum number of descriptors a single Tx packet may 1035 * span. This includes the meta descriptor. 1036 */ 1037 uint16_t efmqe_max_per_packet_tx_descs; 1038 1039 /* 1040 * The maximum number of descriptors a single Rx packet may span. 1041 */ 1042 uint16_t efmqe_max_per_packet_rx_descs; 1043 } enahw_feat_max_queue_ext_t; 1044 1045 /* 1046 * Response to ENA_ADMIN_LINK_CONFIG. 1047 * 1048 * common: ena_admin_get_feature_link_desc 1049 */ 1050 typedef struct enahw_feat_link_conf { 1051 /* Link speed in Mbit/s. */ 1052 uint32_t eflc_speed; 1053 1054 /* Bit field of enahw_link_speeds_t. */ 1055 uint32_t eflc_supported; 1056 1057 /* 1058 * 31-2: reserved 1059 * 1: duplex - Full Duplex 1060 * 0: autoneg 1061 */ 1062 uint32_t eflc_flags; 1063 } enahw_feat_link_conf_t; 1064 1065 #define ENAHW_FEAT_LINK_CONF_AUTONEG_MASK BIT(0) 1066 #define ENAHW_FEAT_LINK_CONF_DUPLEX_SHIFT 1 1067 #define ENAHW_FEAT_LINK_CONF_DUPLEX_MASK BIT(1) 1068 1069 #define ENAHW_FEAT_LINK_CONF_AUTONEG(f) \ 1070 ((f)->eflc_flags & ENAHW_FEAT_LINK_CONF_AUTONEG_MASK) 1071 1072 #define ENAHW_FEAT_LINK_CONF_FULL_DUPLEX(f) \ 1073 ((((f)->eflc_flags & ENAHW_FEAT_LINK_CONF_DUPLEX_MASK) >> \ 1074 ENAHW_FEAT_LINK_CONF_DUPLEX_SHIFT) == 1) 1075 1076 /* 1077 * Response to ENAHW_FEAT_STATELESS_OFFLOAD_CONFIG. 
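 *
 * The ENAHW_FEAT_OFFLOAD_* macros below reduce these bit fields to
 * simple predicates; for instance, a driver deciding whether to
 * advertise Tx IPv4 header checksum offload might do something like
 * the following (a sketch; resp and off are stand-ins):
 *
 *     enahw_feat_offload_t *off =
 *         &resp.erd_resp.erd_get_feat.ergf_offload;
 *
 *     if (ENAHW_FEAT_OFFLOAD_TX_L3_IPV4_CSUM(off)) {
 *             (advertise IPv4 header checksum offload to mac)
 *     }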
1078 * 1079 * common: ena_admin_feature_offload_desc 1080 */ 1081 typedef struct enahw_feat_offload { 1082 /* 1083 * 0 : Tx IPv4 Header Checksum 1084 * 1 : Tx L4/IPv4 Partial Checksum 1085 * 1086 * The L4 checksum field should be initialized with pseudo 1087 * header checksum. 1088 * 1089 * 2 : Tx L4/IPv4 Checksum Full 1090 * 3 : Tx L4/IPv6 Partial Checksum 1091 * 1092 * The L4 checksum field should be initialized with pseudo 1093 * header checksum. 1094 * 1095 * 4 : Tx L4/IPv6 Checksum Full 1096 * 5 : TCP/IPv4 LSO (aka TSO) 1097 * 6 : TCP/IPv6 LSO (aka TSO) 1098 * 7 : LSO ECN 1099 */ 1100 uint32_t efo_tx; 1101 1102 /* 1103 * Receive side supported stateless offload. 1104 * 1105 * 0 : Rx IPv4 Header Checksum 1106 * 1 : Rx TCP/UDP + IPv4 Full Checksum 1107 * 2 : Rx TCP/UDP + IPv6 Full Checksum 1108 * 3 : Rx hash calculation 1109 */ 1110 uint32_t efo_rx_supported; 1111 1112 /* Linux seems to only check rx_supported. */ 1113 uint32_t efo_rx_enabled; 1114 } enahw_feat_offload_t; 1115 1116 /* Feature Offloads */ 1117 #define ENAHW_FEAT_OFFLOAD_TX_L3_IPV4_CSUM_MASK BIT(0) 1118 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART_SHIFT 1 1119 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART_MASK BIT(1) 1120 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL_SHIFT 2 1121 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL_MASK BIT(2) 1122 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_PART_SHIFT 3 1123 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_PART_MASK BIT(3) 1124 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_FULL_SHIFT 4 1125 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) 1126 #define ENAHW_FEAT_OFFLOAD_TSO_IPV4_SHIFT 5 1127 #define ENAHW_FEAT_OFFLOAD_TSO_IPV4_MASK BIT(5) 1128 #define ENAHW_FEAT_OFFLOAD_TSO_IPV6_SHIFT 6 1129 #define ENAHW_FEAT_OFFLOAD_TSO_IPV6_MASK BIT(6) 1130 #define ENAHW_FEAT_OFFLOAD_TSO_ECN_SHIFT 7 1131 #define ENAHW_FEAT_OFFLOAD_TSO_ECN_MASK BIT(7) 1132 #define ENAHW_FEAT_OFFLOAD_RX_L3_IPV4_CSUM_MASK BIT(0) 1133 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM_SHIFT 1 1134 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM_MASK BIT(1) 1135 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM_SHIFT 2 1136 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM_MASK BIT(2) 1137 #define ENAHW_FEAT_OFFLOAD_RX_HASH_SHIFT 3 1138 #define ENAHW_FEAT_OFFLOAD_RX_HASH_MASK BIT(3) 1139 1140 #define ENAHW_FEAT_OFFLOAD_TX_L3_IPV4_CSUM(f) \ 1141 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L3_IPV4_CSUM_MASK) != 0) 1142 1143 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART(f) \ 1144 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_PART_MASK) != 0) 1145 1146 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL(f) \ 1147 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L4_IPV4_CSUM_FULL_MASK) != 0) 1148 1149 #define ENAHW_FEAT_OFFLOAD_TSO_IPV4(f) \ 1150 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TSO_IPV4_MASK) != 0) 1151 1152 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_PART(f) \ 1153 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_PART_MASK) != 0) 1154 1155 #define ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_FULL(f) \ 1156 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TX_L4_IPV6_CSUM_FULL_MASK) != 0) 1157 1158 #define ENAHW_FEAT_OFFLOAD_TSO_IPV6(f) \ 1159 (((f)->efo_tx & ENAHW_FEAT_OFFLOAD_TSO_IPV6_MASK) != 0) 1160 1161 #define ENAHW_FEAT_OFFLOAD_RX_L3_IPV4_CSUM(f) \ 1162 (((f)->efo_rx_supported & ENAHW_FEAT_OFFLOAD_RX_L3_IPV4_CSUM_MASK) != 0) 1163 1164 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM(f) \ 1165 (((f)->efo_rx_supported & ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM_MASK) != 0) 1166 1167 #define ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM(f) \ 1168 (((f)->efo_rx_supported & 
ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM_MASK) != 0) 1169 1170 typedef union enahw_resp_get_feat { 1171 uint32_t ergf_raw[14]; 1172 enahw_feat_dev_attr_t ergf_dev_attr; 1173 enahw_feat_max_queue_t ergf_max_queue; 1174 enahw_feat_max_queue_ext_t ergf_max_queue_ext; 1175 enahw_feat_aenq_t ergf_aenq; 1176 enahw_feat_link_conf_t ergf_link_conf; 1177 enahw_feat_offload_t ergf_offload; 1178 } enahw_resp_get_feat_u; 1179 1180 /* 1181 * common: ena_admin_acq_create_cq_resp_desc 1182 */ 1183 typedef struct enahw_resp_create_cq { 1184 /* 1185 * The hardware's index for this queue. 1186 */ 1187 uint16_t ercq_idx; 1188 1189 /* 1190 * Apparently the number of descriptors granted may be 1191 * different than that requested. 1192 */ 1193 uint16_t ercq_actual_num_descs; 1194 uint32_t ercq_numa_node_reg_offset; 1195 /* CQ doorbell register - no longer supported by any ENA adapter */ 1196 uint32_t ercq_head_db_reg_offset; 1197 uint32_t ercq_interrupt_mask_reg_offset; /* stop intr */ 1198 } enahw_resp_create_cq_t; 1199 1200 /* common: ena_admin_acq_create_sq_resp_desc */ 1201 typedef struct enahw_resp_create_sq { 1202 uint16_t ersq_idx; 1203 uint16_t ersq_rsvdw1; 1204 uint32_t ersq_db_reg_offset; 1205 uint32_t ersq_llq_descs_reg_offset; 1206 uint32_t ersq_llq_headers_reg_offset; 1207 } enahw_resp_create_sq_t; 1208 1209 /* common: ena_admin_basic_stats */ 1210 typedef struct enahw_resp_basic_stats { 1211 uint32_t erbs_tx_bytes_low; 1212 uint32_t erbs_tx_bytes_high; 1213 uint32_t erbs_tx_pkts_low; 1214 uint32_t erbs_tx_pkts_high; 1215 uint32_t erbs_rx_bytes_low; 1216 uint32_t erbs_rx_bytes_high; 1217 uint32_t erbs_rx_pkts_low; 1218 uint32_t erbs_rx_pkts_high; 1219 uint32_t erbs_rx_drops_low; 1220 uint32_t erbs_rx_drops_high; 1221 uint32_t erbs_tx_drops_low; 1222 uint32_t erbs_tx_drops_high; 1223 } enahw_resp_basic_stats_t; 1224 1225 /* common: ena_admin_eni_stats */ 1226 typedef struct enahw_resp_eni_stats { 1227 /* 1228 * The number of inbound packets dropped due to aggregate 1229 * inbound bandwidth allowance being exceeded. 1230 */ 1231 uint64_t eres_bw_in_exceeded; 1232 1233 /* 1234 * The number of outbound packets dropped due to aggregated outbound 1235 * bandwidth allowance being exceeded. 1236 */ 1237 uint64_t eres_bw_out_exceeded; 1238 1239 /* 1240 * The number of packets dropped due to the Packets Per Second 1241 * allowance being exceeded. 1242 */ 1243 uint64_t eres_pps_exceeded; 1244 1245 /* 1246 * The number of packets dropped due to connection tracking 1247 * allowance being exceeded and leading to failure in 1248 * establishment of new connections. 1249 */ 1250 uint64_t eres_conns_exceeded; 1251 1252 /* 1253 * The number of packets dropped due to linklocal packet rate 1254 * allowance being exceeded. 1255 */ 1256 uint64_t eres_linklocal_exceeded; 1257 } enahw_resp_eni_stats_t; 1258 1259 /* 1260 * common: ena_admin_acq_entry 1261 */ 1262 typedef struct enahw_resp_desc { 1263 /* The index of the completed command. */ 1264 uint16_t erd_cmd_id; 1265 1266 /* The status of the command (enahw_resp_status_t). */ 1267 uint8_t erd_status; 1268 1269 /* 1270 * 7-1 Reserved 1271 * 0 Phase 1272 */ 1273 uint8_t erd_flags; 1274 1275 /* Extended status. */ 1276 uint16_t erd_ext_status; 1277 1278 /* 1279 * The AQ entry (enahw_cmd_desc) index which has been consumed 1280 * by the device and can be reused. However, this field is not 1281 * used in the other drivers, and it seems to be redundant 1282 * with the erd_idx field. 
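 *
 * For completeness: a response is typically recognized by comparing
 * the phase bit in erd_flags against the ACQ's current phase and then
 * matching it back to its command via erd_cmd_id, roughly as follows
 * (a sketch; resp and acq_phase are stand-ins):
 *
 *     if ((resp->erd_flags & ENAHW_RESP_PHASE_MASK) == acq_phase) {
 *             uint16_t id = ENAHW_RESP_CMD_ID(resp);
 *             (complete the command identified by id)
 *     }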
1283 */ 1284 uint16_t erd_sq_head_idx; 1285 1286 union { 1287 uint32_t raw[14]; 1288 enahw_resp_get_feat_u erd_get_feat; 1289 enahw_resp_create_cq_t erd_create_cq; 1290 /* destroy_cq: No command-specific response. */ 1291 enahw_resp_create_sq_t erd_create_sq; 1292 /* destroy_sq: No command-specific response. */ 1293 enahw_resp_basic_stats_t erd_basic_stats; 1294 enahw_resp_eni_stats_t erd_eni_stats; 1295 } erd_resp; 1296 } enahw_resp_desc_t; 1297 1298 /* common: ENA_ADMIN_ACQ_COMMON_DESC */ 1299 #define ENAHW_RESP_CMD_ID_MASK GENMASK(11, 0) 1300 #define ENAHW_RESP_PHASE_MASK 0x1 1301 1302 #define ENAHW_RESP_CMD_ID(desc) \ 1303 (((desc)->erd_cmd_id) & ENAHW_RESP_CMD_ID_MASK) 1304 1305 /* 1306 * The response status of an Admin Queue command. 1307 * 1308 * common: ena_admin_aq_completion_status 1309 */ 1310 typedef enum enahw_resp_status { 1311 ENAHW_RESP_SUCCESS = 0, 1312 ENAHW_RESP_RESOURCE_ALLOCATION_FAILURE = 1, 1313 ENAHW_RESP_BAD_OPCODE = 2, 1314 ENAHW_RESP_UNSUPPORTED_OPCODE = 3, 1315 ENAHW_RESP_MALFORMED_REQUEST = 4, 1316 /* 1317 * At this place in the common code it mentions that there is 1318 * "additional status" in the response descriptor's 1319 * erd_ext_status field. As the common code never actually 1320 * uses this field it's hard to know the exact meaning of the 1321 * comment. My best guess is the illegal parameter error 1322 * stores additional context in the erd_ext_status field. But 1323 * how to interpret that additional context is anyone's guess. 1324 */ 1325 ENAHW_RESP_ILLEGAL_PARAMETER = 5, 1326 ENAHW_RESP_UNKNOWN_ERROR = 6, 1327 ENAHW_RESP_RESOURCE_BUSY = 7, 1328 } enahw_resp_status_t; 1329 1330 /* 1331 * I/O macros and structures. 1332 * ------------------------- 1333 */ 1334 1335 /* 1336 * The device's L3 and L4 protocol numbers. These are specific to the 1337 * ENA device and not to be confused with IANA protocol numbers. 1338 * 1339 * common: ena_eth_io_l3_proto_index 1340 */ 1341 typedef enum enahw_io_l3_proto { 1342 ENAHW_IO_L3_PROTO_UNKNOWN = 0, 1343 ENAHW_IO_L3_PROTO_IPV4 = 8, 1344 ENAHW_IO_L3_PROTO_IPV6 = 11, 1345 ENAHW_IO_L3_PROTO_FCOE = 21, 1346 ENAHW_IO_L3_PROTO_ROCE = 22, 1347 } enahw_io_l3_proto_t; 1348 1349 /* common: ena_eth_io_l4_proto_index */ 1350 typedef enum enahw_io_l4_proto { 1351 ENAHW_IO_L4_PROTO_UNKNOWN = 0, 1352 ENAHW_IO_L4_PROTO_TCP = 12, 1353 ENAHW_IO_L4_PROTO_UDP = 13, 1354 ENAHW_IO_L4_PROTO_ROUTEABLE_ROCE = 23, 1355 } enahw_io_l4_proto_t; 1356 1357 /* common: ena_eth_io_tx_desc */ 1358 typedef struct enahw_tx_data_desc { 1359 /* 1360 * 15-0 Buffer Length (LENGTH) 1361 * 1362 * The buffer length in bytes. This should NOT include the 1363 * Ethernet FCS bytes. 1364 * 1365 * 21-16 Request ID High Bits [15-10] (REQ_ID_HI) 1366 * 22 Reserved Zero 1367 * 23 Metadata Flag always zero (META_DESC) 1368 * 1369 * This flag indicates if the descriptor is a metadata 1370 * descriptor or not. In this case we are defining the Tx 1371 * descriptor, so it's always zero. 1372 * 1373 * 24 Phase bit (PHASE) 1374 * 25 Reserved Zero 1375 * 26 First Descriptor Bit (FIRST) 1376 * 1377 * Indicates this is the first descriptor for the frame. 1378 * 1379 * 27 Last Descriptor Bit (LAST) 1380 * 1381 * Indicates this is the last descriptor for the frame. 1382 * 1383 * 28 Completion Request Bit (COMP_REQ) 1384 * 1385 * Indicates if completion should be posted after the 1386 * frame is transmitted. This bit is only valid on the 1387 * first descriptor. 
1388 * 1389 * 31-29 Reserved Zero 1390 */ 1391 uint32_t etd_len_ctrl; 1392 1393 /* 1394 * 3-0 L3 Protocol Number (L3_PROTO_IDX) 1395 * 1396 * The L3 protocol type, one of enahw_io_l3_proto_t. This 1397 * field is required when L3_CSUM_EN or TSO_EN is set. 1398 * 1399 * 4 Don't Fragment Bit (DF) 1400 * 1401 * The value of IPv4 DF. This value must copy the value 1402 * found in the packet's IPv4 header. 1403 * 1404 * 6-5 Reserved Zero 1405 * 7 TSO Bit (TSO_EN) 1406 * 1407 * Enable TCP Segment Offload. 1408 * 1409 * 12-8 L4 Protocol Number (L4_PROTO_IDX) 1410 * 1411 * The L4 protocol type, one of enahw_io_l4_proto_t. This 1412 * field is required when L4_CSUM_EN or TSO_EN are 1413 * set. 1414 * 1415 * 13 L3 Checksum Offload (L3_CSUM_EN) 1416 * 1417 * Enable IPv4 header checksum offload. 1418 * 1419 * 14 L4 Checksum Offload (L4_CSUM_EN) 1420 * 1421 * Enable TCP/UDP checksum offload. 1422 * 1423 * 15 Ethernet FCS Disable (ETHERNET_FCS_DIS) 1424 * 1425 * Disable the device's Ethernet Frame Check sequence. 1426 * 1427 * 16 Reserved Zero 1428 * 17 L4 Partial Checksum Present (L4_CSUM_PARTIAL) 1429 * 1430 * When set it indicates the host has already provided 1431 * the pseudo-header checksum. Otherwise, it is up to the 1432 * device to calculate it. 1433 * 1434 * When set and using TSO the host stack must remember 1435 * not to include the TCP segment length in the supplied 1436 * pseudo-header. 1437 * 1438 * The host stack should provide the pseudo-header 1439 * checksum when using IPv6 with Routing Headers. 1440 * 1441 * 21-18 Reserved Zero 1442 * 31-22 Request ID Low [9-0] (REQ_ID_LO) 1443 */ 1444 uint32_t etd_meta_ctrl; 1445 1446 /* The low 32 bits of the buffer address. */ 1447 uint32_t etd_buff_addr_lo; 1448 1449 /* 1450 * address high and header size 1451 * 1452 * 15-0 Buffer Address High [47-32] (ADDR_HI) 1453 * 1454 * The upper 15 bits of the buffer address. 1455 * 1456 * 23-16 Reserved Zero 1457 * 31-24 Header Length (HEADER_LENGTH) 1458 * 1459 * This field has dubious documentation in the 1460 * common/Linux driver code, even contradicting itself in 1461 * the same sentence. Here's what it says, verbatim: 1462 * 1463 * > Header length. For Low Latency Queues, this fields 1464 * > indicates the number of bytes written to the 1465 * > headers' memory. For normal queues, if packet is TCP 1466 * > or UDP, and longer than max_header_size, then this 1467 * > field should be set to the sum of L4 header offset 1468 * > and L4 header size(without options), otherwise, this 1469 * > field should be set to 0. For both modes, this field 1470 * > must not exceed the max_header_size. max_header_size 1471 * > value is reported by the Max Queues Feature 1472 * > descriptor 1473 * 1474 * Here's what one _might_ ascertain from the above. 1475 * 1476 * 1. This field should always be set in the case of 1477 * LLQs/device placement. 1478 * 1479 * 2. This field must _never_ exceed the max header size 1480 * as reported by feature detection. In our code this 1481 * would be efmq_max_header_size for older ENA devices 1482 * and efmqe_max_tx_header_size for newer ones. One 1483 * empirical data point from a t3.small (with newer 1484 * device) is a max Tx header size of 128 bytes. 1485 * 1486 * 3. If the packet is TCP or UDP, and the packet (or the 1487 * headers?) is longer than the max header size, then 1488 * this field should be set to the total header size 1489 * with the exception of TCP header options. 1490 * Otherwise, if the packet is not TCP or UDP, or if 1491 * the packet (or header length?) 
_does not_ exceed 1492 * the max header size, then set this value to 0. 1493 * 1494 * One might think, based on (3), that when the header 1495 * size exceeds the max this field needs to be set, but 1496 * that contradicts (2), which dictates that the total 1497 * header size can never exceed the max. Sure enough, the 1498 * Linux code drops all packets with headers that exceed 1499 * the max. So in that case it would mean that "and 1500 * longer than max_header_size" is referring to the total 1501 * packet length. So for most workloads, the TCP/UDP 1502 * packets should have this field set, to indicate their 1503 * header length. This matches with Linux, which seems to 1504 * set header length regardless of IP protocol. 1505 * 1506 * However, the FreeBSD code tells a different story. In 1507 * its non-LLQ Tx path it has the following comment, 1508 * verbatim: 1509 * 1510 * > header_len is just a hint for the device. Because 1511 * > FreeBSD is not giving us information about packet 1512 * > header length and it is not guaranteed that all 1513 * > packet headers will be in the 1st mbuf, setting 1514 * > header_len to 0 is making the device ignore this 1515 * > value and resolve header on it's own. 1516 * 1517 * According to this we can just set the value to zero 1518 * and let the device figure it out. This maps better to 1519 * illumos, where we also allow the header to potentially 1520 * span multiple mblks (though we do have access to the 1521 * header sizes via mac_ether_offload_info_t). 1522 * 1523 * The upshot: for now we take advantage of the device's 1524 * ability to determine the header length on its own, at 1525 * the potential cost of some performance (not measured). 1526 */ 1527 uint32_t etd_buff_addr_hi_hdr_sz; 1528 } enahw_tx_data_desc_t; 1529 1530 #define ENAHW_TX_DESC_LENGTH_MASK GENMASK(15, 0) 1531 #define ENAHW_TX_DESC_REQ_ID_HI_SHIFT 16 1532 #define ENAHW_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) 1533 #define ENAHW_TX_DESC_META_DESC_SHIFT 23 1534 #define ENAHW_TX_DESC_META_DESC_MASK BIT(23) 1535 #define ENAHW_TX_DESC_PHASE_SHIFT 24 1536 #define ENAHW_TX_DESC_PHASE_MASK BIT(24) 1537 #define ENAHW_TX_DESC_FIRST_SHIFT 26 1538 #define ENAHW_TX_DESC_FIRST_MASK BIT(26) 1539 #define ENAHW_TX_DESC_LAST_SHIFT 27 1540 #define ENAHW_TX_DESC_LAST_MASK BIT(27) 1541 #define ENAHW_TX_DESC_COMP_REQ_SHIFT 28 1542 #define ENAHW_TX_DESC_COMP_REQ_MASK BIT(28) 1543 #define ENAHW_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) 1544 #define ENAHW_TX_DESC_DF_SHIFT 4 1545 #define ENAHW_TX_DESC_DF_MASK BIT(4) 1546 #define ENAHW_TX_DESC_TSO_EN_SHIFT 7 1547 #define ENAHW_TX_DESC_TSO_EN_MASK BIT(7) 1548 #define ENAHW_TX_DESC_L4_PROTO_IDX_SHIFT 8 1549 #define ENAHW_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) 1550 #define ENAHW_TX_DESC_L3_CSUM_EN_SHIFT 13 1551 #define ENAHW_TX_DESC_L3_CSUM_EN_MASK BIT(13) 1552 #define ENAHW_TX_DESC_L4_CSUM_EN_SHIFT 14 1553 #define ENAHW_TX_DESC_L4_CSUM_EN_MASK BIT(14) 1554 #define ENAHW_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 1555 #define ENAHW_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) 1556 #define ENAHW_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 1557 #define ENAHW_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) 1558 #define ENAHW_TX_DESC_REQ_ID_LO_SHIFT 22 1559 #define ENAHW_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) 1560 #define ENAHW_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) 1561 #define ENAHW_TX_DESC_HEADER_LENGTH_SHIFT 24 1562 #define ENAHW_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) 1563 1564 #define ENAHW_TX_DESC_LENGTH(desc, len) \ 1565 (((desc)->etd_len_ctrl) |= ((len) & ENAHW_TX_DESC_LENGTH_MASK)) 1566 1567
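/*
 * Taken together, the macros above and below build up a Tx data
 * descriptor. A single-buffer, non-offloaded send might be encoded
 * roughly as follows -- an illustration only, with ring, tail, len,
 * req_id, phase, and pa standing in for driver state; the header
 * length is left at zero to let the device resolve the headers itself,
 * per the note above:
 *
 *     enahw_tx_data_desc_t *desc = &ring[tail];
 *
 *     bzero(desc, sizeof (*desc));
 *     ENAHW_TX_DESC_LENGTH(desc, len);
 *     ENAHW_TX_DESC_REQID_HI(desc, req_id);
 *     ENAHW_TX_DESC_REQID_LO(desc, req_id);
 *     ENAHW_TX_DESC_PHASE(desc, phase);
 *     ENAHW_TX_DESC_FIRST_ON(desc);
 *     ENAHW_TX_DESC_LAST_ON(desc);
 *     ENAHW_TX_DESC_COMP_REQ_ON(desc);
 *     ENAHW_TX_DESC_ADDR_LO(desc, pa & 0xffffffff);
 *     ENAHW_TX_DESC_ADDR_HI(desc, pa);
 *     ENAHW_TX_DESC_HEADER_LENGTH(desc, 0);
 */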
#define ENAHW_TX_DESC_FIRST_ON(desc) \ 1568 (((desc)->etd_len_ctrl) |= ENAHW_TX_DESC_FIRST_MASK) 1569 1570 #define ENAHW_TX_DESC_FIRST_OFF(desc) \ 1571 (((desc)->etd_len_ctrl) &= ~ENAHW_TX_DESC_FIRST_MASK) 1572 1573 #define ENAHW_TX_DESC_REQID_HI(desc, reqid) \ 1574 (((desc)->etd_len_ctrl) |= \ 1575 ((((reqid) >> 10) << ENAHW_TX_DESC_REQ_ID_HI_SHIFT) & \ 1576 ENAHW_TX_DESC_REQ_ID_HI_MASK)) 1577 1578 #define ENAHW_TX_DESC_REQID_LO(desc, reqid) \ 1579 (((desc)->etd_meta_ctrl) |= \ 1580 (((reqid) << ENAHW_TX_DESC_REQ_ID_LO_SHIFT) & \ 1581 ENAHW_TX_DESC_REQ_ID_LO_MASK)) 1582 1583 #define ENAHW_TX_DESC_PHASE(desc, phase) \ 1584 (((desc)->etd_len_ctrl) |= (((phase) << ENAHW_TX_DESC_PHASE_SHIFT) & \ 1585 ENAHW_TX_DESC_PHASE_MASK)) 1586 1587 #define ENAHW_TX_DESC_LAST_ON(desc) \ 1588 (((desc)->etd_len_ctrl) |= ENAHW_TX_DESC_LAST_MASK) 1589 1590 #define ENAHW_TX_DESC_LAST_OFF(desc) \ 1591 (((desc)->etd_len_ctrl) &= ~ENAHW_TX_DESC_LAST_MASK) 1592 1593 #define ENAHW_TX_DESC_COMP_REQ_ON(desc) \ 1594 (((desc)->etd_len_ctrl) |= ENAHW_TX_DESC_COMP_REQ_MASK) 1595 1596 #define ENAHW_TX_DESC_COMP_REQ_OFF(desc) \ 1597 (((desc)->etd_len_ctrl) &= ~ENAHW_TX_DESC_COMP_REQ_MASK) 1598 1599 #define ENAHW_TX_DESC_META_DESC_ON(desc) \ 1600 (((desc)->etd_len_ctrl) |= ENAHW_TX_DESC_META_DESC_MASK) 1601 1602 #define ENAHW_TX_DESC_META_DESC_OFF(desc) \ 1603 (((desc)->etd_len_ctrl) &= ~ENAHW_TX_DESC_META_DESC_MASK) 1604 1605 #define ENAHW_TX_DESC_ADDR_LO(desc, addr) \ 1606 (((desc)->etd_buff_addr_lo) = (addr)) 1607 1608 #define ENAHW_TX_DESC_ADDR_HI(desc, addr) \ 1609 (((desc)->etd_buff_addr_hi_hdr_sz) |= \ 1610 (((addr) >> 32) & ENAHW_TX_DESC_ADDR_HI_MASK)) 1611 1612 #define ENAHW_TX_DESC_HEADER_LENGTH(desc, len) \ 1613 (((desc)->etd_buff_addr_hi_hdr_sz) |= \ 1614 (((len) << ENAHW_TX_DESC_HEADER_LENGTH_SHIFT) & \ 1615 ENAHW_TX_DESC_HEADER_LENGTH_MASK)) 1616 1617 #define ENAHW_TX_DESC_DF_ON(desc) \ 1618 ((desc)->etd_meta_ctrl |= ENAHW_TX_DESC_DF_MASK) 1619 1620 #define ENAHW_TX_DESC_TSO_OFF(desc) \ 1621 (((desc)->etd_meta_ctrl) &= ~ENAHW_TX_DESC_TSO_EN_MASK) 1622 1623 #define ENAHW_TX_DESC_L3_CSUM_OFF(desc) \ 1624 (((desc)->etd_meta_ctrl) &= ~ENAHW_TX_DESC_L3_CSUM_EN_MASK) 1625 1626 #define ENAHW_TX_DESC_L4_CSUM_OFF(desc) \ 1627 (((desc)->etd_meta_ctrl) &= ~ENAHW_TX_DESC_L4_CSUM_EN_MASK) 1628 1629 #define ENAHW_TX_DESC_L4_CSUM_PARTIAL_ON(desc) \ 1630 (((desc)->etd_meta_ctrl) |= ENAHW_TX_DESC_L4_CSUM_PARTIAL_MASK) 1631 1632 /* common: ena_eth_io_tx_meta_desc */ 1633 typedef struct enahw_tx_meta_desc { 1634 /* 1635 * 9-0 Request ID Low [9-0] (REQ_ID_LO) 1636 * 13-10 Reserved Zero 1637 * 14 Extended Metadata Valid (EXT_VALID) 1638 * 1639 * When set this descriptor contains valid extended 1640 * metadata. The extended metadata includes the L3/L4 1641 * length and offset fields as well as the MSS bits. This 1642 * is needed for TSO. 1643 * 1644 * 15 Reserved Zero 1645 * 19-16 MSS High Bits (MSS_HI) 1646 * 20 Meta Type (ETH_META_TYPE) 1647 * 1648 * If enabled this is an extended metadata descriptor. 1649 * This seems redundant with EXT_VALID. 1650 * 1651 * 21 Meta Store (META_STORE) 1652 * 1653 * Store the extended metadata in the queue cache.
1654 * 1655 * 22 Reserved Zero 1656 * 23 Metadata Flag (META_DESC) -- always one 1657 * 24 Phase (PHASE) 1658 * 25 Reserved Zero 1659 * 26 First Descriptor Bit (FIRST) 1660 * 27 Last Descriptor Bit (LAST) 1661 * 28 Completion Request Bit (COMP_REQ) 1662 * 31-29 Reserved Zero 1663 */ 1664 uint32_t etmd_len_ctrl; 1665 1666 /* 1667 * 5-0 Request ID High Bits [15-10] (REQ_ID_HI) 1668 * 31-6 Reserved Zero 1669 */ 1670 uint32_t etmd_word1; 1671 1672 /* 1673 * 7-0 L3 Header Length (L3_HDR_LEN) 1674 * 15:8 L3 Header Offset (L3_HDR_OFF) 1675 * 21:16 L4 Header Length in Words (L4_HDR_LEN_IN_WORDS) 1676 * 1677 * Specifies the L4 header length in words. The device 1678 * assumes the L4 header follows directly after the L3 1679 * header and that the L4 offset is equal to L3_HDR_OFF + 1680 * L3_HDR_LEN. 1681 * 1682 * 31-22 MSS Low Bits (MSS_LO) 1683 */ 1684 uint32_t etmd_word2; 1685 uint32_t etmd_reserved; 1686 } enahw_tx_meta_desc_t; 1687 1688 /* common: N/A */ 1689 typedef union enahw_tx_desc { 1690 enahw_tx_data_desc_t etd_data; 1691 enahw_tx_meta_desc_t etd_meta; 1692 } enahw_tx_desc_t; 1693 1694 /* common: ena_eth_io_tx_cdesc */ 1695 typedef struct enahw_tx_cdesc { 1696 /* 1697 * 15-0 Request ID Bits 1698 * 16 Reserved Zero 1699 */ 1700 uint16_t etc_req_id; 1701 1702 /* 1703 * Presumably the status of the Tx, though the Linux driver 1704 * never checks this field. 1705 */ 1706 uint8_t etc_status; 1707 1708 /* 1709 * 0 Phase 1710 * 7-1 Reserved Zero 1711 */ 1712 uint8_t etc_flags; 1713 1714 /* 1715 * This isn't documented or used in the Linux driver, but 1716 * these probably store the submission queue ID and the 1717 * submission queue head index. 1718 */ 1719 uint16_t etc_sub_qid; 1720 uint16_t etc_sq_head_idx; 1721 } enahw_tx_cdesc_t; 1722 1723 #define ENAHW_TX_CDESC_PHASE_SHIFT 0 1724 #define ENAHW_TX_CDESC_PHASE_MASK BIT(0) 1725 1726 #define ENAHW_TX_CDESC_GET_PHASE(cdesc) \ 1727 ((cdesc)->etc_flags & ENAHW_TX_CDESC_PHASE_MASK) 1728 1729 /* common: ena_eth_io_rx_desc */ 1730 typedef struct enahw_rx_desc { 1731 /* 1732 * The length of the buffer provided by the host, in bytes. 1733 * Use the value of 0 to indicate 64K. 1734 */ 1735 uint16_t erd_length; 1736 uint8_t erd_reserved1; 1737 1738 /* 1739 * 0 Phase (PHASE) 1740 * 1 Reserved Zero 1741 * 2 First (FIRST) 1742 * 1743 * Indicates this is the first descriptor for the frame. 1744 * 1745 * 3 Last (LAST) 1746 * 1747 * Indicates this is the last descriptor for the frame. 1748 * 1749 * 4 Completion Request (COMP_REQ) 1750 * 1751 * Indicates that a completion request should be generated 1752 * for this descriptor. 1753 * 1754 * 7-5 Reserved Zero 1755 */ 1756 uint8_t erd_ctrl; 1757 1758 /* 1759 * 15-0 Request ID 1760 * 16 Reserved 0 1761 */ 1762 uint16_t erd_req_id; 1763 uint16_t erd_reserved2; 1764 1765 /* The physical address of the buffer provided by the host. 
/* common: ena_eth_io_rx_desc */
typedef struct enahw_rx_desc {
	/*
	 * The length of the buffer provided by the host, in bytes.
	 * Use the value of 0 to indicate 64K.
	 */
	uint16_t erd_length;
	uint8_t erd_reserved1;

	/*
	 * 0	Phase (PHASE)
	 * 1	Reserved Zero
	 * 2	First (FIRST)
	 *
	 *	Indicates this is the first descriptor for the frame.
	 *
	 * 3	Last (LAST)
	 *
	 *	Indicates this is the last descriptor for the frame.
	 *
	 * 4	Completion Request (COMP_REQ)
	 *
	 *	Indicates that a completion request should be generated
	 *	for this descriptor.
	 *
	 * 7-5	Reserved Zero
	 */
	uint8_t erd_ctrl;

	/*
	 * 15-0	Request ID
	 * 16	Reserved 0
	 */
	uint16_t erd_req_id;
	uint16_t erd_reserved2;

	/* The physical address of the buffer provided by the host. */
	uint32_t erd_buff_addr_lo;
	uint16_t erd_buff_addr_hi;
	uint16_t erd_reserved3;
} enahw_rx_desc_t;

#define	ENAHW_RX_DESC_PHASE_MASK	BIT(0)
#define	ENAHW_RX_DESC_FIRST_SHIFT	2
#define	ENAHW_RX_DESC_FIRST_MASK	BIT(2)
#define	ENAHW_RX_DESC_LAST_SHIFT	3
#define	ENAHW_RX_DESC_LAST_MASK		BIT(3)
#define	ENAHW_RX_DESC_COMP_REQ_SHIFT	4
#define	ENAHW_RX_DESC_COMP_REQ_MASK	BIT(4)

#define	ENAHW_RX_DESC_CLEAR_CTRL(desc)	((desc)->erd_ctrl = 0)
#define	ENAHW_RX_DESC_SET_PHASE(desc, val) \
	((desc)->erd_ctrl |= ((val) & ENAHW_RX_DESC_PHASE_MASK))

#define	ENAHW_RX_DESC_SET_FIRST(desc) \
	((desc)->erd_ctrl |= ENAHW_RX_DESC_FIRST_MASK)

#define	ENAHW_RX_DESC_SET_LAST(desc) \
	((desc)->erd_ctrl |= ENAHW_RX_DESC_LAST_MASK)

#define	ENAHW_RX_DESC_SET_COMP_REQ(desc) \
	((desc)->erd_ctrl |= ENAHW_RX_DESC_COMP_REQ_MASK)
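/*
 * Illustrative sketch only -- not part of the common code or of this
 * driver. It shows one way an Rx descriptor might be prepared when
 * handing a receive buffer to the device. The request ID, buffer
 * physical address, length, and phase are hypothetical inputs supplied
 * by an equally hypothetical caller.
 */
static inline void
enahw_rx_desc_example_refill(enahw_rx_desc_t *desc, uint16_t req_id,
    uint64_t buf_pa, uint16_t len, uint8_t phase)
{
	desc->erd_length = len;		/* 0 means 64K */
	desc->erd_req_id = req_id;
	desc->erd_buff_addr_lo = (uint32_t)(buf_pa & 0xffffffff);
	desc->erd_buff_addr_hi = (uint16_t)(buf_pa >> 32);

	/* Each host buffer is offered as a complete first+last frame. */
	ENAHW_RX_DESC_CLEAR_CTRL(desc);
	ENAHW_RX_DESC_SET_PHASE(desc, phase);
	ENAHW_RX_DESC_SET_FIRST(desc);
	ENAHW_RX_DESC_SET_LAST(desc);
	ENAHW_RX_DESC_SET_COMP_REQ(desc);
}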
1864 */ 1865 uint8_t erc_offset; 1866 uint8_t erc_reserved; 1867 } enahw_rx_cdesc_t; 1868 1869 #define ENAHW_RX_CDESC_L3_PROTO_MASK GENMASK(4, 0) 1870 #define ENAHW_RX_CDESC_SRC_VLAN_CNT_SHIFT 5 1871 #define ENAHW_RX_CDESC_SRC_VLAN_CNT_MASK GENMASK(6, 5) 1872 #define ENAHW_RX_CDESC_L4_PROTO_SHIFT 8 1873 #define ENAHW_RX_CDESC_L4_PROTO_MASK GENMASK(12, 8) 1874 #define ENAHW_RX_CDESC_L3_CSUM_ERR_SHIFT 13 1875 #define ENAHW_RX_CDESC_L3_CSUM_ERR_MASK BIT(13) 1876 #define ENAHW_RX_CDESC_L4_CSUM_ERR_SHIFT 14 1877 #define ENAHW_RX_CDESC_L4_CSUM_ERR_MASK BIT(14) 1878 #define ENAHW_RX_CDESC_IPV4_FRAG_SHIFT 15 1879 #define ENAHW_RX_CDESC_IPV4_FRAG_MASK BIT(15) 1880 #define ENAHW_RX_CDESC_L4_CSUM_CHECKED_SHIFT 16 1881 #define ENAHW_RX_CDESC_L4_CSUM_CHECKED_MASK BIT(16) 1882 #define ENAHW_RX_CDESC_PHASE_SHIFT 24 1883 #define ENAHW_RX_CDESC_PHASE_MASK BIT(24) 1884 #define ENAHW_RX_CDESC_L3_CSUM2_SHIFT 25 1885 #define ENAHW_RX_CDESC_L3_CSUM2_MASK BIT(25) 1886 #define ENAHW_RX_CDESC_FIRST_SHIFT 26 1887 #define ENAHW_RX_CDESC_FIRST_MASK BIT(26) 1888 #define ENAHW_RX_CDESC_LAST_SHIFT 27 1889 #define ENAHW_RX_CDESC_LAST_MASK BIT(27) 1890 #define ENAHW_RX_CDESC_BUFFER_SHIFT 30 1891 #define ENAHW_RX_CDESC_BUFFER_MASK BIT(30) 1892 1893 #define ENAHW_RX_CDESC_L3_PROTO(desc) \ 1894 ((desc)->erc_status & ENAHW_RX_CDESC_L3_PROTO_MASK) 1895 1896 #define ENAHW_RX_CDESC_L3_CSUM_ERR(desc) \ 1897 ((((desc)->erc_status & ENAHW_RX_CDESC_L3_CSUM_ERR_MASK) >> \ 1898 ENAHW_RX_CDESC_L3_CSUM_ERR_SHIFT) != 0) 1899 1900 #define ENAHW_RX_CDESC_L4_PROTO(desc) \ 1901 (((desc)->erc_status & ENAHW_RX_CDESC_L4_PROTO_MASK) >> \ 1902 ENAHW_RX_CDESC_L4_PROTO_SHIFT) 1903 1904 #define ENAHW_RX_CDESC_L4_CSUM_CHECKED(desc) \ 1905 ((((desc)->erc_status & ENAHW_RX_CDESC_L4_CSUM_CHECKED_MASK) >> \ 1906 ENAHW_RX_CDESC_L4_CSUM_CHECKED_SHIFT) != 0) 1907 1908 #define ENAHW_RX_CDESC_L4_CSUM_ERR(desc) \ 1909 ((((desc)->erc_status & ENAHW_RX_CDESC_L4_CSUM_ERR_MASK) >> \ 1910 ENAHW_RX_CDESC_L4_CSUM_ERR_SHIFT) != 0) 1911 1912 #define ENAHW_RX_CDESC_PHASE(desc) \ 1913 (((desc)->erc_status & ENAHW_RX_CDESC_PHASE_MASK) >> \ 1914 ENAHW_RX_CDESC_PHASE_SHIFT) 1915 1916 #define ENAHW_RX_CDESC_FIRST(desc) \ 1917 ((((desc)->erc_status & ENAHW_RX_CDESC_FIRST_MASK) >> \ 1918 ENAHW_RX_CDESC_FIRST_SHIFT) == 1) 1919 1920 #define ENAHW_RX_CDESC_LAST(desc) \ 1921 ((((desc)->erc_status & ENAHW_RX_CDESC_LAST_MASK) >> \ 1922 ENAHW_RX_CDESC_LAST_SHIFT) == 1) 1923 1924 /* 1925 * Controls for the interrupt register mapped to each Rx/Tx CQ. 1926 */ 1927 #define ENAHW_REG_INTR_RX_DELAY_MASK GENMASK(14, 0) 1928 #define ENAHW_REG_INTR_TX_DELAY_SHIFT 15 1929 #define ENAHW_REG_INTR_TX_DELAY_MASK GENMASK(29, 15) 1930 #define ENAHW_REG_INTR_UNMASK_SHIFT 30 1931 #define ENAHW_REG_INTR_UNMASK_MASK BIT(30) 1932 1933 #define ENAHW_REG_INTR_UNMASK(val) \ 1934 ((val) |= ENAHW_REG_INTR_UNMASK_MASK) 1935 1936 #define ENAHW_REG_INTR_MASK(val) \ 1937 ((val) &= ~ENAHW_REG_INTR_UNMASK_MASK) 1938 1939 #endif /* _ENA_HW_H */ 1940