/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2021 Oxide Computer Company
 */

#ifndef	_ENA_H
#define	_ENA_H

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/list.h>
#include <sys/time.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/cpuvar.h>
#include <sys/pci.h>
#include <sys/sysmacros.h>
#include <sys/mac.h>
#include <sys/mac_ether.h>
#include <sys/mac_provider.h>
#include <sys/pattr.h>
#include <sys/strsun.h>
#include <sys/ethernet.h>
#include <sys/vlan.h>
#include <sys/utsname.h>
#include "ena_hw.h"

/*
 * AWS ENA Ethernet Driver
 */

#ifdef __cplusplus
extern "C" {
#endif

#define	ENA_MODULE_NAME	"ena"

/*
 * The minimum supported ENA device controller version.
 */
#define	ENA_CTRL_MAJOR_VSN_MIN		0
#define	ENA_CTRL_MINOR_VSN_MIN		0
#define	ENA_CTRL_SUBMINOR_VSN_MIN	1

#define	ENA_MODULE_VER_MAJOR	1
#define	ENA_MODULE_VER_MINOR	0
#define	ENA_MODULE_VER_SUBMINOR	0

/*
 * The Linux driver doesn't document what the specification version
 * number controls or the contract around version changes. The best we
 * can do is use the same version that they use and port version
 * changes as they come (the last one was in 2018).
 *
 * common: ENA_COMMON_SPEC_VERSION_{MAJOR,MINOR}
 */
#define	ENA_SPEC_VERSION_MAJOR	2
#define	ENA_SPEC_VERSION_MINOR	0


/* This represents BAR 0, i.e. regs[1] (regs[0] is PCI config space). */
#define	ENA_REG_NUMBER	1

/*
 * A sentinel value passed as argument to ena_ring_rx() to indicate
 * the Rx ring is being read in interrupt mode, not polling mode.
 */
#define	ENA_INTERRUPT_MODE	-1

#define	ENA_RX_BUF_IPHDR_ALIGNMENT	2
#define	ENA_ADMINQ_DEPTH		32
#define	ENA_AENQ_NUM_DESCS		32

/* Convert milliseconds to nanoseconds. */
#define	ENA_MS_TO_NS(ms)	((ms) * 1000000ul)

/*
 * The default amount of time we will wait for an admin command to
 * complete, specified in nanoseconds. In this case, 500 milliseconds.
 */
#define	ENA_ADMIN_CMD_DEF_TIMEOUT	MSEC2NSEC(500)

/*
 * Property macros.
 */
#define	ENA_PROP_RXQ_NUM_DESCS	"rx_queue_num_descs"
#define	ENA_PROP_RXQ_NUM_DESCS_MIN	64

#define	ENA_PROP_TXQ_NUM_DESCS	"tx_queue_num_descs"
#define	ENA_PROP_TXQ_NUM_DESCS_MIN	64

#define	ENA_PROP_RXQ_INTR_LIMIT	"rx_queue_intr_limit"
#define	ENA_PROP_RXQ_INTR_LIMIT_MIN	16
#define	ENA_PROP_RXQ_INTR_LIMIT_MAX	4096
#define	ENA_PROP_RXQ_INTR_LIMIT_DEF	256

#define	ENA_DMA_BIT_MASK(x)	((1ULL << (x)) - 1ULL)
#define	ENA_DMA_VERIFY_ADDR(ena, phys_addr)				\
	VERIFY3U(ENA_DMA_BIT_MASK((ena)->ena_dma_width) & (phys_addr), \
	    ==, (phys_addr))
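
/*
 * Illustrative sketch (not part of the driver): with a hypothetical
 * DMA width of 48 bits, the mask covers bits 0-47, so any physical
 * address with a higher bit set trips the VERIFY3U():
 *
 *	ENA_DMA_BIT_MASK(48) == 0xFFFFFFFFFFFFULL
 *	ENA_DMA_VERIFY_ADDR(ena, paddr);  // panics if paddr sets any
 *					  // bit at or above ena_dma_width
 */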

typedef struct ena_dma_conf {
	size_t		edc_size;
	uint64_t	edc_align;
	int		edc_sgl;
	uchar_t		edc_endian;
	boolean_t	edc_stream;
} ena_dma_conf_t;

typedef struct ena_dma_buf {
	caddr_t			edb_va;
	size_t			edb_len;
	/*
	 * The length given by the DMA engine, kept around for
	 * debugging purposes.
	 */
	size_t			edb_real_len;
	size_t			edb_used_len;
	ddi_acc_handle_t	edb_acc_hdl;
	ddi_dma_handle_t	edb_dma_hdl;
	const ddi_dma_cookie_t	*edb_cookie;
} ena_dma_buf_t;

/*
 * We always sync the entire range, and therefore expect success.
 */
#ifdef DEBUG
#define	ENA_DMA_SYNC(buf, flag)					\
	ASSERT0(ddi_dma_sync((buf).edb_dma_hdl, 0, 0, (flag)))
#else  /* DEBUG */
#define	ENA_DMA_SYNC(buf, flag)					\
	((void)ddi_dma_sync((buf).edb_dma_hdl, 0, 0, (flag)))
#endif
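
/*
 * For example (a sketch, not code from ena.c), an Rx path might sync
 * the CQ descriptors for the kernel before reading them, and sync the
 * SQ descriptors for the device after writing them:
 *
 *	ENA_DMA_SYNC(rxq->er_cq_dma, DDI_DMA_SYNC_FORKERNEL);
 *	(read rxq->er_cq_descs)
 *	ENA_DMA_SYNC(rxq->er_sq_dma, DDI_DMA_SYNC_FORDEV);
 */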

typedef struct ena_aenq_grpstr {
	enahw_aenq_groups_t	eag_type;
	const char		*eag_str;
} ena_aenq_grpstr_t;

typedef struct ena_aenq_synstr {
	enahw_aenq_syndrome_t	eas_type;
	const char		*eas_str;
} ena_aenq_synstr_t;

typedef void (*ena_aenq_hdlr_t)(void *data, enahw_aenq_desc_t *desc);

typedef struct ena_aenq {
	enahw_aenq_desc_t	*eaenq_descs;
	ena_dma_buf_t		eaenq_dma;
	ena_aenq_hdlr_t		eaenq_hdlrs[ENAHW_AENQ_GROUPS_ARR_NUM];
	uint16_t		eaenq_num_descs;
	uint16_t		eaenq_head;
	uint8_t			eaenq_phase;
} ena_aenq_t;

typedef struct ena_admin_sq {
	enahw_cmd_desc_t	*eas_entries;
	ena_dma_buf_t		eas_dma;
	uint32_t		*eas_dbaddr;
	uint16_t		eas_tail;
	uint8_t			eas_phase;
} ena_admin_sq_t;

typedef struct ena_admin_cq {
	enahw_resp_desc_t	*eac_entries;
	ena_dma_buf_t		eac_dma;
	uint16_t		eac_head;
	uint8_t			eac_phase;
} ena_admin_cq_t;

/*
 * The command context is used to track outstanding requests and match
 * them to device responses.
 */
typedef struct ena_cmd_ctx {
	list_node_t		ectx_node;

	/*
	 * The index into ea_cmd_ctxs where this ctx lives. Used as
	 * the command ID value in the command descriptor. This allows
	 * us to match a response to its associated context.
	 */
	uint16_t		ectx_id;

	/* Is the command pending? */
	boolean_t		ectx_pending;

	/* The type of command associated with this context. */
	enahw_cmd_opcode_t	ectx_cmd_opcode;

	/*
	 * The location to copy the full response to. This is
	 * specified by the caller of the command during
	 * submission.
	 */
	enahw_resp_desc_t	*ectx_resp;
} ena_cmd_ctx_t;
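
/*
 * A minimal sketch of the matching scheme implied by ectx_id (the
 * response descriptor's command-ID field name below is assumed, not
 * taken from ena_hw.h): the completion path uses the ID echoed by the
 * device to index back into ea_cmd_ctxs.
 *
 *	uint16_t id = resp->erd_cmd_id;	// hypothetical field name
 *	ena_cmd_ctx_t *ctx = &ena->ena_aq.ea_cmd_ctxs[id];
 *	*ctx->ectx_resp = *resp;	// hand back the full response
 *	ctx->ectx_pending = B_FALSE;
 */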

/*
 * The admin queue, the queue through which commands are sent to the
 * device.
 *
 * WO: Write Once (at initialization)
 *
 * In general, only a single lock needs to be held in order to access
 * the different parts of the admin queue:
 *
 *  sq_lock: Any data dealing with submitting admin commands, which
 *  includes acquiring a command context.
 *
 *  cq_lock: Any data dealing with reading command responses.
 *
 *  stat_lock: For accessing statistics.
 *
 * In some cases, the stat_lock may be held in tandem with either the
 * SQ or CQ lock. In that case, the SQ/CQ lock is always entered
 * first.
 */
typedef struct ena_adminq {
	kmutex_t		ea_sq_lock;	/* WO */
	kmutex_t		ea_cq_lock;	/* WO */
	kmutex_t		ea_stat_lock;	/* WO */

	hrtime_t		ea_cmd_timeout_ns; /* WO */

	uint16_t		ea_qlen;	/* WO */
	boolean_t		ea_poll_mode;	/* WO */

	ena_cmd_ctx_t		*ea_cmd_ctxs;	  /* WO */
	list_t			ea_cmd_ctxs_free; /* ea_sq_lock */
	uint16_t		ea_pending_cmds; /* ea_sq_lock */
	ena_admin_sq_t		ea_sq; /* ea_sq_lock */
	ena_admin_cq_t		ea_cq; /* ea_cq_lock */

	/* ea_stat_lock */
	struct ena_adminq_stats {
		uint64_t cmds_fail;
		uint64_t cmds_submitted;
		uint64_t cmds_success;
		uint64_t queue_full;
	} ea_stats;
} ena_adminq_t;
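
/*
 * A sketch of the lock ordering described above (illustrative only):
 * a submission path that also updates statistics enters the SQ lock
 * first, then the stat lock.
 *
 *	mutex_enter(&ena->ena_aq.ea_sq_lock);
 *	(submit command, consume a free context)
 *	mutex_enter(&ena->ena_aq.ea_stat_lock);
 *	ena->ena_aq.ea_stats.cmds_submitted++;
 *	mutex_exit(&ena->ena_aq.ea_stat_lock);
 *	mutex_exit(&ena->ena_aq.ea_sq_lock);
 */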

typedef enum ena_attach_seq {
	ENA_ATTACH_PCI = 1,	 /* PCI config space */
	ENA_ATTACH_REGS,	 /* BAR mapping */
	ENA_ATTACH_DEV_INIT,	 /* ENA device initialization */
	ENA_ATTACH_READ_CONF,	 /* Read driver conf file */
	ENA_ATTACH_DEV_CFG,	 /* Set any needed device config */
	ENA_ATTACH_INTR_ALLOC,	 /* interrupt handles allocated */
	ENA_ATTACH_INTR_HDLRS,	 /* intr handlers set */
	ENA_ATTACH_TXQS_ALLOC,	 /* Tx Queues allocated */
	ENA_ATTACH_RXQS_ALLOC,	 /* Rx Queues allocated */
	ENA_ATTACH_MAC_REGISTER, /* registered with mac */
	ENA_ATTACH_INTRS_ENABLE, /* interrupts are enabled */
	ENA_ATTACH_END
} ena_attach_seq_t;

#define	ENA_ATTACH_SEQ_FIRST	(ENA_ATTACH_PCI)
#define	ENA_ATTACH_NUM_ENTRIES	(ENA_ATTACH_END - 1)

struct ena;
typedef boolean_t (*ena_attach_fn_t)(struct ena *);
typedef void (*ena_cleanup_fn_t)(struct ena *);

typedef struct ena_attach_desc {
	ena_attach_seq_t ead_seq;
	const char *ead_name;
	ena_attach_fn_t ead_attach_fn;
	boolean_t ead_attach_hard_fail;
	ena_cleanup_fn_t ead_cleanup_fn;
} ena_attach_desc_t;
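
/*
 * A sketch of how attach might walk a table of these descriptors
 * (illustrative; the real loop and table live in ena.c, and the table
 * name here is hypothetical):
 *
 *	for (uint_t i = 0; i < ENA_ATTACH_NUM_ENTRIES; i++) {
 *		ena_attach_desc_t *d = &ena_attach_tbl[i];
 *
 *		if (!d->ead_attach_fn(ena) && d->ead_attach_hard_fail)
 *			return (B_FALSE);  // caller unwinds via the
 *					   // ead_cleanup_fn callbacks
 *
 *		ena->ena_attach_seq = d->ead_seq;
 *	}
 */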

typedef enum {
	ENA_TCB_NONE,
	ENA_TCB_COPY
} ena_tcb_type_t;

/*
 * The TCB is used to track information relating to the Tx of a
 * packet. At the moment we support copy only.
 */
typedef struct ena_tx_control_block {
	mblk_t		*etcb_mp;
	ena_tcb_type_t	etcb_type;
	ena_dma_buf_t	etcb_dma;
} ena_tx_control_block_t;

typedef enum ena_txq_state {
	ENA_TXQ_STATE_NONE		= 0,
	ENA_TXQ_STATE_HOST_ALLOC	= 1 << 0,
	ENA_TXQ_STATE_CQ_CREATED	= 1 << 1,
	ENA_TXQ_STATE_SQ_CREATED	= 1 << 2,
	ENA_TXQ_STATE_READY		= 1 << 3, /* TxQ ready and waiting */
	ENA_TXQ_STATE_RUNNING		= 1 << 4, /* intrs enabled */
} ena_txq_state_t;

typedef struct ena_txq_stat {
	/* Number of times mac_ether_offload_info() has failed. */
	kstat_named_t	ets_hck_meoifail;

	/*
	 * Total number of times the ring was blocked due to
	 * insufficient descriptors, or unblocked due to recycling
	 * descriptors.
	 */
	kstat_named_t	ets_blocked;
	kstat_named_t	ets_unblocked;

	/* The total number of descriptors that have been recycled. */
	kstat_named_t	ets_recycled;

	/*
	 * Number of bytes and packets that have been _submitted_ to
	 * the device.
	 */
	kstat_named_t	ets_bytes;
	kstat_named_t	ets_packets;
} ena_txq_stat_t;

/*
 * A transmit queue, made up of a Submission Queue (SQ) and Completion
 * Queue (CQ) to form a logical descriptor ring for sending packets.
 *
 * Write Once (WO)
 *
 *   This value is written once, before the datapath is activated, in
 *   a function which is controlled by mac(9E). Some values may be
 *   written earlier, during ena attach, like et_ena and
 *   et_sq_num_descs.
 *
 * Tx Mutex (TM) -- et_lock
 *
 *   This value is protected by the Tx queue's mutex. Some values may
 *   be initialized in a WO path, but also continually updated as part
 *   of normal datapath operation, such as et_sq_avail_descs. These
 *   values need mutex protection.
 */
typedef struct ena_txq {
	kmutex_t		et_lock; /* WO */

	struct ena		*et_ena; /* WO */
	uint_t			et_txqs_idx; /* WO */
	mac_ring_handle_t	et_mrh;	 /* WO */
	uint64_t		et_m_gen_num; /* TM */
	ena_txq_state_t		et_state; /* WO */
	uint16_t		et_intr_vector; /* WO */

	enahw_tx_desc_t		*et_sq_descs; /* TM */
	ena_dma_buf_t		et_sq_dma;    /* WO */

	/* Is the Tx queue currently in a blocked state? */
	boolean_t		et_blocked; /* TM */

	/*
	 * The number of descriptors owned by this ring. This value
	 * never changes after initialization.
	 */
	uint16_t		et_sq_num_descs;   /* WO */

	/*
	 * The number of descriptors currently available for Tx
	 * submission. When this value reaches zero the ring must
	 * block until the device notifies us of freed descriptors.
	 */
	uint16_t		et_sq_avail_descs; /* TM */

	/*
	 * The current tail index of the queue (the first free
	 * descriptor for host Tx submission). After initialization,
	 * this value only increments, relying on unsigned wrap
	 * around. The ENA device seems to expect this behavior,
	 * performing its own modulo on the value for the purposes of
	 * indexing, much like the driver code needs to do in order to
	 * access the proper TCB entry.
	 */
	uint16_t		et_sq_tail_idx;  /* TM */

	/*
	 * The phase is used to know which CQ descriptors may be
	 * reclaimed. This is explained further in ena.c.
	 */
	uint16_t		et_sq_phase; /* TM */
	uint16_t		et_sq_hw_idx; /* WO */

	/*
	 * The "doorbell" address is how the host indicates to the
	 * device which descriptors are ready for Tx processing.
	 */
	uint32_t		*et_sq_db_addr; /* WO */

	/*
	 * The TCBs track host Tx information, like a pointer to the
	 * mblk being submitted. Currently we maintain a 1:1 mapping
	 * of SQ descriptors to TCBs as Tx is copy only.
	 */
	ena_tx_control_block_t	*et_tcbs;    /* TM */

	enahw_tx_cdesc_t	*et_cq_descs; /* TM */
	ena_dma_buf_t		et_cq_dma;    /* WO */
	uint16_t		et_cq_num_descs; /* WO */
	uint16_t		et_cq_head_idx; /* TM */
	uint16_t		et_cq_phase;	/* TM */
	uint16_t		et_cq_hw_idx;	/* WO */

	/*
	 * This address is used to control the CQ interrupts.
	 */
	uint32_t		*et_cq_unmask_addr; /* WO */
	uint32_t		*et_cq_head_db_addr; /* WO (currently unused) */
	uint32_t		*et_cq_numa_addr;    /* WO (currently unused) */

	/*
	 * This mutex protects the Tx queue stats. This mutex may be
	 * entered while et_lock is held, but et_lock is not required
	 * to access/modify the stats. However, if both locks are
	 * held, then et_lock must be entered first.
	 */
	kmutex_t		et_stat_lock;
	ena_txq_stat_t		et_stat;
	kstat_t			*et_kstat;
} ena_txq_t;
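
/*
 * A sketch of the unsigned-wrap indexing described for et_sq_tail_idx
 * (illustrative only): the tail only ever increments, and both the
 * device and the driver reduce it modulo the ring size when indexing.
 *
 *	uint16_t idx = txq->et_sq_tail_idx % txq->et_sq_num_descs;
 *	ena_tx_control_block_t *tcb = &txq->et_tcbs[idx];
 *	(fill in txq->et_sq_descs[idx] and the TCB, then...)
 *	txq->et_sq_tail_idx++;	// relies on uint16_t wraparound
 */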

typedef enum ena_rxq_state {
	ENA_RXQ_STATE_NONE		= 0,
	ENA_RXQ_STATE_HOST_ALLOC	= 1 << 0,
	ENA_RXQ_STATE_CQ_CREATED	= 1 << 1,
	ENA_RXQ_STATE_SQ_CREATED	= 1 << 2,
	ENA_RXQ_STATE_READY		= 1 << 3, /* RxQ ready and waiting */
	ENA_RXQ_STATE_RUNNING		= 1 << 4, /* intrs enabled */
} ena_rxq_state_t;

typedef struct ena_rx_ctrl_block {
	ena_dma_buf_t	ercb_dma;
	uint8_t		ercb_offset;
	uint16_t	ercb_length;
} ena_rx_ctrl_block_t;

typedef enum {
	ENA_RXQ_MODE_POLLING	= 1,
	ENA_RXQ_MODE_INTR	= 2,
} ena_rxq_mode_t;

typedef struct ena_rxq_stat {
	/* The total number of packets/bytes received on this queue. */
	kstat_named_t	ers_packets;
	kstat_named_t	ers_bytes;

	/*
	 * At this time we expect all incoming frames to fit in a
	 * single buffer/descriptor. In the rare event that the
	 * device doesn't cooperate, this stat is incremented.
	 */
	kstat_named_t	ers_multi_desc;

	/*
	 * The total number of times we failed to allocate a new mblk
	 * for an incoming frame.
	 */
	kstat_named_t	ers_allocb_fail;

	/*
	 * The total number of times the Rx interrupt handler reached
	 * its maximum limit for the number of packets to process in a
	 * single interrupt. If you see this number increase
	 * continuously at a steady rate, then it may be an indication
	 * that the driver is not entering polling mode.
	 */
	kstat_named_t	ers_intr_limit;

	/*
	 * The total number of times the device detected an incorrect
	 * IPv4 header checksum.
	 */
	kstat_named_t	ers_hck_ipv4_err;

	/*
	 * The total number of times the device detected an incorrect
	 * L4/ULP checksum.
	 */
	kstat_named_t	ers_hck_l4_err;
} ena_rxq_stat_t;

/*
 * A receive queue, made up of a Submission Queue (SQ) and Completion
 * Queue (CQ) to form a logical descriptor ring for receiving packets.
 *
 * Write Once (WO)
 *
 *   This value is written once, before the datapath is activated, in
 *   a function which is controlled by mac(9E).
 *
 * Rx Mutex (RM) -- er_lock
 *
 *   This value is protected by the Rx queue's mutex. Some values may
 *   be initialized in a WO path, but also continually updated as part
 *   of normal datapath operation, such as er_sq_avail_descs. These
 *   values need mutex protection.
 */
typedef struct ena_rxq {
	kmutex_t		er_lock;

	struct ena		*er_ena; /* WO */
	uint_t			er_rxqs_idx; /* WO */
	mac_ring_handle_t	er_mrh;	 /* WO */
	uint64_t		er_m_gen_num; /* WO */
	ena_rxq_state_t		er_state; /* WO */
	uint16_t		er_intr_vector; /* WO */
	ena_rxq_mode_t		er_mode;	/* RM */
	uint16_t		er_intr_limit;	/* RM */

	enahw_rx_desc_t		*er_sq_descs; /* RM */
	ena_dma_buf_t		er_sq_dma;    /* WO */
	uint16_t		er_sq_num_descs;   /* WO */
	uint16_t		er_sq_avail_descs; /* RM */
	uint16_t		er_sq_tail_idx;  /* RM */
	uint16_t		er_sq_phase; /* RM */
	uint16_t		er_sq_hw_idx;	/* WO */
	uint32_t		*er_sq_db_addr; /* WO */

	enahw_rx_cdesc_t	*er_cq_descs; /* RM */
	ena_dma_buf_t		er_cq_dma;    /* WO */
	uint16_t		er_cq_num_descs; /* WO */
	uint16_t		er_cq_head_idx;	 /* RM */
	uint16_t		er_cq_phase;	 /* RM */
	uint16_t		er_cq_hw_idx;	 /* WO */
	uint32_t		*er_cq_unmask_addr; /* WO */
	uint32_t		*er_cq_head_db_addr; /* WO (currently unused) */
	uint32_t		*er_cq_numa_addr;    /* WO (currently unused) */

	ena_rx_ctrl_block_t	*er_rcbs; /* RM */

	kmutex_t		er_stat_lock;
	ena_rxq_stat_t		er_stat;
	kstat_t			*er_kstat;
} ena_rxq_t;

/* These are stats based on enahw_resp_basic_stats_t. */
typedef struct ena_basic_stat {
	kstat_named_t	ebs_tx_bytes;
	kstat_named_t	ebs_tx_pkts;
	kstat_named_t	ebs_tx_drops;

	kstat_named_t	ebs_rx_bytes;
	kstat_named_t	ebs_rx_pkts;
	kstat_named_t	ebs_rx_drops;
} ena_basic_stat_t;

/* These are stats based on enahw_resp_eni_stats_t. */
typedef struct ena_extended_stat {
	kstat_named_t	ees_bw_in_exceeded;
	kstat_named_t	ees_bw_out_exceeded;
	kstat_named_t	ees_pps_exceeded;
	kstat_named_t	ees_conns_exceeded;
	kstat_named_t	ees_linklocal_exceeded;
} ena_extended_stat_t;

/* These stats monitor which AENQ handlers have been called. */
typedef struct ena_aenq_stat {
	kstat_named_t	eaes_default;
	kstat_named_t	eaes_link_change;
} ena_aenq_stat_t;

#define	ENA_STATE_PRIMORDIAL	0x1u
#define	ENA_STATE_RUNNING	0x2u

/*
 * This structure contains the per-instance (PF or VF) state of the
 * device.
 */
typedef struct ena {
	dev_info_t		*ena_dip;
	int			ena_instance;

	/*
	 * Global lock, used to synchronize administration changes to
	 * the ena_t. This lock should not be held in the datapath.
	 */
	kmutex_t		ena_lock;
	ena_attach_seq_t	ena_attach_seq;

	/*
	 * We use atomic ops for ena_state so that datapath consumers
	 * do not need to enter ena_lock.
	 */
	uint32_t		ena_state;

	/*
	 * PCI config space and BAR handle.
	 */
	ddi_acc_handle_t	ena_pci_hdl;
	off_t			ena_reg_size;
	caddr_t			ena_reg_base;
	ddi_device_acc_attr_t	ena_reg_attr;
	ddi_acc_handle_t	ena_reg_hdl;

	/*
	 * Vendor information.
	 */
	uint16_t		ena_pci_vid;
	uint16_t		ena_pci_did;
	uint8_t			ena_pci_rev;
	uint16_t		ena_pci_svid;
	uint16_t		ena_pci_sdid;

	/*
	 * Device and controller versions.
	 */
	uint32_t		ena_dev_major_vsn;
	uint32_t		ena_dev_minor_vsn;
	uint32_t		ena_ctrl_major_vsn;
	uint32_t		ena_ctrl_minor_vsn;
	uint32_t		ena_ctrl_subminor_vsn;
	uint32_t		ena_ctrl_impl_id;

	/*
	 * Interrupts
	 */
	int			ena_num_intrs;
	ddi_intr_handle_t	*ena_intr_handles;
	size_t			ena_intr_handles_sz;
	int			ena_intr_caps;
	uint_t			ena_intr_pri;

	mac_handle_t		ena_mh;

	size_t			ena_page_sz;

	/*
	 * The MTU and data layer frame sizes.
	 */
	uint32_t		ena_mtu;
	uint32_t		ena_max_frame_hdr;
	uint32_t		ena_max_frame_total;

	/* The size (in bytes) of the Rx/Tx data buffers. */
	uint32_t		ena_tx_buf_sz;
	uint32_t		ena_rx_buf_sz;

	/*
	 * The maximum number of Scatter Gather List segments the
	 * device can address.
	 */
	uint8_t			ena_tx_sgl_max_sz;
	uint8_t			ena_rx_sgl_max_sz;

	/* The number of descriptors per Rx/Tx queue. */
	uint16_t		ena_rxq_num_descs;
	uint16_t		ena_txq_num_descs;

	/*
	 * The maximum number of frames which may be read per Rx
	 * interrupt.
	 */
	uint16_t		ena_rxq_intr_limit;

	/* The Rx/Tx data queues (rings). */
	ena_rxq_t		*ena_rxqs;
	uint16_t		ena_num_rxqs;
	ena_txq_t		*ena_txqs;
	uint16_t		ena_num_txqs;

	/* These statistics are device-wide. */
	kstat_t			*ena_device_basic_kstat;
	kstat_t			*ena_device_extended_kstat;

	/*
	 * This tracks AENQ-related stats; it is implicitly
	 * device-wide.
	 */
	ena_aenq_stat_t		ena_aenq_stat;
	kstat_t			*ena_aenq_kstat;

	/*
	 * The Admin Queue, through which all device commands are
	 * sent.
	 */
	ena_adminq_t		ena_aq;

	ena_aenq_t		ena_aenq;
	ena_dma_buf_t		ena_host_info;

	/*
	 * Hardware info
	 */
	uint32_t		ena_supported_features;
	uint8_t			ena_dma_width;
	boolean_t		ena_link_up;
	boolean_t		ena_link_autoneg;
	boolean_t		ena_link_full_duplex;
	link_duplex_t		ena_link_duplex;
	uint64_t		ena_link_speed_mbits;
	enahw_link_speeds_t	ena_link_speeds;
	link_state_t		ena_link_state;
	uint32_t		ena_aenq_supported_groups;
	uint32_t		ena_aenq_enabled_groups;

	uint32_t		ena_tx_max_sq_num;
	uint32_t		ena_tx_max_sq_num_descs;
	uint32_t		ena_tx_max_cq_num;
	uint32_t		ena_tx_max_cq_num_descs;
	uint16_t		ena_tx_max_desc_per_pkt;
	uint32_t		ena_tx_max_hdr_len;

	uint32_t		ena_rx_max_sq_num;
	uint32_t		ena_rx_max_sq_num_descs;
	uint32_t		ena_rx_max_cq_num;
	uint32_t		ena_rx_max_cq_num_descs;
	uint16_t		ena_rx_max_desc_per_pkt;

	/* This is calculated from the Rx/Tx queue nums. */
	uint16_t		ena_max_io_queues;

	/* Hardware Offloads */
	boolean_t		ena_tx_l3_ipv4_csum;

	boolean_t		ena_tx_l4_ipv4_part_csum;
	boolean_t		ena_tx_l4_ipv4_full_csum;
	boolean_t		ena_tx_l4_ipv4_lso;

	boolean_t		ena_tx_l4_ipv6_part_csum;
	boolean_t		ena_tx_l4_ipv6_full_csum;
	boolean_t		ena_tx_l4_ipv6_lso;

	boolean_t		ena_rx_l3_ipv4_csum;
	boolean_t		ena_rx_l4_ipv4_csum;
	boolean_t		ena_rx_l4_ipv6_csum;
	boolean_t		ena_rx_hash;

	uint32_t		ena_max_mtu;
	uint8_t			ena_mac_addr[ETHERADDRL];
} ena_t;
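
/*
 * A sketch of the lock-free ena_state usage noted above (assumed use
 * of the atomic(9F) routines; not necessarily the driver's exact
 * code): writers set bits atomically, and datapath readers test them
 * without entering ena_lock.
 *
 *	atomic_or_32(&ena->ena_state, ENA_STATE_RUNNING);
 *	...
 *	if ((ena->ena_state & ENA_STATE_RUNNING) == 0)
 *		return;	// datapath not active
 */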

/*
 * Logging functions.
 */
/*PRINTFLIKE2*/
extern void ena_err(const ena_t *, const char *, ...) __KPRINTFLIKE(2);
/*PRINTFLIKE2*/
extern void ena_dbg(const ena_t *, const char *, ...) __KPRINTFLIKE(2);

extern uint32_t ena_hw_bar_read32(const ena_t *, const uint16_t);
extern uint32_t ena_hw_abs_read32(const ena_t *, uint32_t *);
extern void ena_hw_bar_write32(const ena_t *, const uint16_t, const uint32_t);
extern void ena_hw_abs_write32(const ena_t *, uint32_t *, const uint32_t);

/*
 * Stats
 */
extern void ena_stat_device_basic_cleanup(ena_t *);
extern boolean_t ena_stat_device_basic_init(ena_t *);

extern void ena_stat_device_extended_cleanup(ena_t *);
extern boolean_t ena_stat_device_extended_init(ena_t *);

extern void ena_stat_aenq_cleanup(ena_t *);
extern boolean_t ena_stat_aenq_init(ena_t *);

extern void ena_stat_rxq_cleanup(ena_rxq_t *);
extern boolean_t ena_stat_rxq_init(ena_rxq_t *);
extern void ena_stat_txq_cleanup(ena_txq_t *);
extern boolean_t ena_stat_txq_init(ena_txq_t *);

/*
 * DMA
 */
extern boolean_t ena_dma_alloc(ena_t *, ena_dma_buf_t *, ena_dma_conf_t *,
    size_t);
extern void ena_dma_free(ena_dma_buf_t *);
extern void ena_set_dma_addr(const ena_t *, const uint64_t, enahw_addr_t *);
extern void ena_set_dma_addr_values(const ena_t *, const uint64_t, uint32_t *,
    uint16_t *);

/*
 * Interrupts
 */
extern boolean_t ena_intr_add_handlers(ena_t *);
extern void ena_intr_remove_handlers(ena_t *);
extern void ena_tx_intr_work(ena_txq_t *);
extern void ena_rx_intr_work(ena_rxq_t *);
extern void ena_aenq_work(ena_t *);
extern boolean_t ena_intrs_disable(ena_t *);
extern boolean_t ena_intrs_enable(ena_t *);

/*
 * MAC
 */
extern boolean_t ena_mac_register(ena_t *);
extern int ena_mac_unregister(ena_t *);
extern void ena_ring_tx_stop(mac_ring_driver_t);
extern int ena_ring_tx_start(mac_ring_driver_t, uint64_t);
extern mblk_t *ena_ring_tx(void *, mblk_t *);
extern void ena_ring_rx_stop(mac_ring_driver_t);
extern int ena_ring_rx_start(mac_ring_driver_t, uint64_t);
extern int ena_m_stat(void *, uint_t, uint64_t *);
extern mblk_t *ena_ring_rx_poll(void *, int);
extern int ena_ring_rx_stat(mac_ring_driver_t, uint_t, uint64_t *);
extern int ena_ring_tx_stat(mac_ring_driver_t, uint_t, uint64_t *);

/*
 * Admin API
 */
extern int ena_admin_submit_cmd(ena_t *, enahw_cmd_desc_t *,
    enahw_resp_desc_t *, ena_cmd_ctx_t **);
extern int ena_admin_poll_for_resp(ena_t *, ena_cmd_ctx_t *);
extern void ena_free_host_info(ena_t *);
extern boolean_t ena_init_host_info(ena_t *);
extern int ena_create_cq(ena_t *, uint16_t, uint64_t, boolean_t, uint32_t,
    uint16_t *, uint32_t **, uint32_t **, uint32_t **);
extern int ena_destroy_cq(ena_t *, uint16_t);
extern int ena_create_sq(ena_t *, uint16_t, uint64_t, boolean_t, uint16_t,
    uint16_t *, uint32_t **);
extern int ena_destroy_sq(ena_t *, uint16_t, boolean_t);
extern int ena_set_feature(ena_t *, enahw_cmd_desc_t *,
    enahw_resp_desc_t *, const enahw_feature_id_t, const uint8_t);
extern int ena_get_feature(ena_t *, enahw_resp_desc_t *,
    const enahw_feature_id_t, const uint8_t);
extern int ena_admin_get_basic_stats(ena_t *, enahw_resp_desc_t *);
extern int ena_admin_get_eni_stats(ena_t *, enahw_resp_desc_t *);
extern int enahw_resp_status_to_errno(ena_t *, enahw_resp_status_t);

/*
 * Rx/Tx allocations
 */
extern boolean_t ena_alloc_rxq(ena_rxq_t *);
extern void ena_cleanup_rxq(ena_rxq_t *);
extern boolean_t ena_alloc_txq(ena_txq_t *);
extern void ena_cleanup_txq(ena_txq_t *);

extern ena_aenq_grpstr_t ena_groups_str[];

#ifdef __cplusplus
}
#endif

#endif	/* _ENA_H */