/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2026 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef _EFA_ADMIN_CMDS_H_
#define _EFA_ADMIN_CMDS_H_

/* Version of the admin command ABI described by this header */
#define EFA_ADMIN_API_VERSION_MAJOR          0
#define EFA_ADMIN_API_VERSION_MINOR          1

/* EFA admin queue opcodes */
enum efa_admin_aq_opcode {
	EFA_ADMIN_CREATE_QP                         = 1,
	EFA_ADMIN_MODIFY_QP                         = 2,
	EFA_ADMIN_QUERY_QP                          = 3,
	EFA_ADMIN_DESTROY_QP                        = 4,
	EFA_ADMIN_CREATE_AH                         = 5,
	EFA_ADMIN_DESTROY_AH                        = 6,
	EFA_ADMIN_REG_MR                            = 7,
	EFA_ADMIN_DEREG_MR                          = 8,
	EFA_ADMIN_CREATE_CQ                         = 9,
	EFA_ADMIN_DESTROY_CQ                        = 10,
	EFA_ADMIN_GET_FEATURE                       = 11,
	EFA_ADMIN_SET_FEATURE                       = 12,
	EFA_ADMIN_GET_STATS                         = 13,
	EFA_ADMIN_ALLOC_PD                          = 14,
	EFA_ADMIN_DEALLOC_PD                        = 15,
	EFA_ADMIN_ALLOC_UAR                         = 16,
	EFA_ADMIN_DEALLOC_UAR                       = 17,
	EFA_ADMIN_CREATE_EQ                         = 18,
	EFA_ADMIN_DESTROY_EQ                        = 19,
	EFA_ADMIN_ALLOC_MR                          = 20,
	/* Must be kept equal to the highest opcode value above */
	EFA_ADMIN_MAX_OPCODE                        = 20,
};

/* Feature IDs used by the GET_FEATURE/SET_FEATURE admin commands */
enum efa_admin_aq_feature_id {
	EFA_ADMIN_DEVICE_ATTR                       = 1,
	EFA_ADMIN_AENQ_CONFIG                       = 2,
	EFA_ADMIN_NETWORK_ATTR                      = 3,
	EFA_ADMIN_QUEUE_ATTR_1                      = 4,
	EFA_ADMIN_HW_HINTS                          = 5,
	EFA_ADMIN_HOST_INFO                         = 6,
	EFA_ADMIN_EVENT_QUEUE_ATTR                  = 7,
	/* NOTE(review): value 8 is skipped here — confirm against device spec */
	EFA_ADMIN_QUEUE_ATTR_2                      = 9,
};

/* QP transport type */
enum efa_admin_qp_type {
	/* Unreliable Datagram */
	EFA_ADMIN_QP_TYPE_UD                        = 1,
	/* Scalable Reliable Datagram */
	EFA_ADMIN_QP_TYPE_SRD                       = 2,
};

/* QP state, as used by the modify/query QP commands */
enum efa_admin_qp_state {
	EFA_ADMIN_QP_STATE_RESET                    = 0,
	EFA_ADMIN_QP_STATE_INIT                     = 1,
	EFA_ADMIN_QP_STATE_RTR                      = 2,
	EFA_ADMIN_QP_STATE_RTS                      = 3,
	EFA_ADMIN_QP_STATE_SQD                      = 4,
	EFA_ADMIN_QP_STATE_SQE                      = 5,
	EFA_ADMIN_QP_STATE_ERR                      = 6,
};

/* Statistics category requested in an EFA_ADMIN_GET_STATS command */
enum efa_admin_get_stats_type {
	EFA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
	EFA_ADMIN_GET_STATS_TYPE_MESSAGES           = 1,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_READ          = 2,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE         = 3,
	EFA_ADMIN_GET_STATS_TYPE_NETWORK            = 4,
};

/* Scope of an EFA_ADMIN_GET_STATS command: whole device or a single queue */
enum efa_admin_get_stats_scope {
	EFA_ADMIN_GET_STATS_SCOPE_ALL               = 0,
	EFA_ADMIN_GET_STATS_SCOPE_QUEUE             = 1,
};

/*
 * QP allocation sizes, converted by fabric QueuePair (QP) create command
 * from QP capabilities.
 */
struct efa_admin_qp_alloc_size {
	/* Send descriptor ring size in bytes */
	u32 send_queue_ring_size;

	/* Max number of WQEs that can be outstanding on send queue. */
	u32 send_queue_depth;

	/*
	 * Recv descriptor ring size in bytes, sufficient for user-provided
	 * number of WQEs
	 */
	u32 recv_queue_ring_size;

	/* Max number of WQEs that can be outstanding on recv queue */
	u32 recv_queue_depth;
};

/* EFA_ADMIN_CREATE_QP command payload */
struct efa_admin_create_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain associated with this QP */
	u16 pd;

	/* QP type, as defined in enum efa_admin_qp_type */
	u8 qp_type;

	/*
	 * 0 : sq_virt - If set, SQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 1 : rq_virt - If set, RQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 2 : unsolicited_write_recv - If set, work requests
	 *    will not be consumed for incoming RDMA write with
	 *    immediate
	 * 7:3 : reserved - MBZ
	 */
	u8 flags;

	/*
	 * Send queue (SQ) ring base physical address. This field is not
	 * used if this is a Low Latency Queue(LLQ).
	 */
	u64 sq_base_addr;

	/* Receive queue (RQ) ring base address. */
	u64 rq_base_addr;

	/* Index of CQ to be associated with Send Queue completions */
	u32 send_cq_idx;

	/* Index of CQ to be associated with Recv Queue completions */
	u32 recv_cq_idx;

	/*
	 * Memory registration key for the SQ ring, used only when not in
	 * LLQ mode and base address is virtual
	 */
	u32 sq_l_key;

	/*
	 * Memory registration key for the RQ ring, used only when base
	 * address is virtual
	 */
	u32 rq_l_key;

	/* Requested QP allocation sizes */
	struct efa_admin_qp_alloc_size qp_alloc_size;

	/* UAR number */
	u16 uar;

	/* Requested service level for the QP, 0 is the default SL */
	u8 sl;

	/* MBZ */
	u8 reserved;

	/* MBZ */
	u32 reserved2;
};

/* EFA_ADMIN_CREATE_QP completion payload */
struct efa_admin_create_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * Opaque handle to be used for consequent admin operations on the
	 * QP
	 */
	u32 qp_handle;

	/*
	 * QP number in the given EFA virtual device. Least-significant bits (as
	 * needed according to max_qp) carry unique QP ID
	 */
	u16 qp_num;

	/* MBZ */
	u16 reserved;

	/* Index of sub-CQ for Send Queue completions */
	u16 send_sub_cq_idx;

	/* Index of sub-CQ for Receive Queue completions */
	u16 recv_sub_cq_idx;

	/* SQ doorbell address, as offset to PCIe DB BAR */
	u32 sq_db_offset;

	/* RQ doorbell address, as offset to PCIe DB BAR */
	u32 rq_db_offset;

	/*
	 * low latency send queue ring base address as an offset to PCIe
	 * MMIO LLQ_MEM BAR
	 */
	u32 llq_descriptors_offset;
};

/* EFA_ADMIN_MODIFY_QP command payload */
struct efa_admin_modify_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * Mask indicating which fields should be updated
	 * 0 : qp_state
	 * 1 : cur_qp_state
	 * 2 : qkey
	 * 3 : sq_psn
	 * 4 : sq_drained_async_notify
	 * 5 : rnr_retry
	 * 31:6 : reserved
	 */
	u32 modify_mask;

	/* QP handle returned by create_qp command */
	u32 qp_handle;

	/* QP state, as defined in enum efa_admin_qp_state */
	u32 qp_state;

	/* Override current QP state (before applying the transition) */
	u32 cur_qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Enable async notification when SQ is drained */
	u8 sq_drained_async_notify;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};

/* EFA_ADMIN_MODIFY_QP completion payload; no command-specific fields */
struct efa_admin_modify_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/* EFA_ADMIN_QUERY_QP command payload */
struct efa_admin_query_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

/* EFA_ADMIN_QUERY_QP completion payload */
struct efa_admin_query_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* QP state, as defined in enum efa_admin_qp_state */
	u32 qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Indicates that draining is in progress */
	u8 sq_draining;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};

/* EFA_ADMIN_DESTROY_QP command payload */
struct efa_admin_destroy_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

/* EFA_ADMIN_DESTROY_QP completion payload; no command-specific fields */
struct efa_admin_destroy_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Create Address Handle command parameters. Must not be called more than
 * once for the same destination
 */
struct efa_admin_create_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Destination address in network byte order */
	u8 dest_addr[16];

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

/* EFA_ADMIN_CREATE_AH completion payload */
struct efa_admin_create_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* MBZ */
	u16 reserved;
};

/* EFA_ADMIN_DESTROY_AH command payload */
struct efa_admin_destroy_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* PD number */
	u16 pd;
};

/* EFA_ADMIN_DESTROY_AH completion payload; no command-specific fields */
struct efa_admin_destroy_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Registration of MemoryRegion, required for QP working with Virtual
 * Addresses. In standard verbs semantics, region length is limited to 2GB
 * space, but EFA offers larger MR support for large memory space, to ease
 * on users working with very large datasets (i.e. full GPU memory mapping).
 */
struct efa_admin_reg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved16_w1;

	/* Physical Buffer List, each element is page-aligned. */
	union {
		/*
		 * Inline array of guest-physical page addresses of user
		 * memory pages (optimization for short region
		 * registrations)
		 */
		u64 inline_pbl_array[4];

		/* points to PBL (direct or indirect, chained if needed) */
		struct efa_admin_ctrl_buff_info pbl;
	} pbl;

	/* Memory region length, in bytes. */
	u64 mr_length;

	/*
	 * flags and page size
	 * 4:0 : phys_page_size_shift - page size is (1 <<
	 *    phys_page_size_shift). Page size is used for
	 *    building the Virtual to Physical address mapping
	 * 6:5 : reserved - MBZ
	 * 7 : mem_addr_phy_mode_en - Enable bit for physical
	 *    memory registration (no translation), can be used
	 *    only by privileged clients. If set, PBL must
	 *    contain a single entry.
	 */
	u8 flags;

	/*
	 * permissions
	 * 0 : local_write_enable - Local write permissions:
	 *    must be set for RQ buffers and buffers posted for
	 *    RDMA Read requests
	 * 1 : remote_write_enable - Remote write
	 *    permissions: must be set to enable RDMA write to
	 *    the region
	 * 2 : remote_read_enable - Remote read permissions:
	 *    must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
	 */
	u8 permissions;

	/* MBZ */
	u16 reserved16_w5;

	/* number of pages in PBL (redundant, could be calculated) */
	u32 page_num;

	/*
	 * IO Virtual Address associated with this MR. If
	 * mem_addr_phy_mode_en is set, contains the physical address of
	 * the region.
	 */
	u64 iova;
};

/* EFA_ADMIN_REG_MR completion payload */
struct efa_admin_reg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;

	/*
	 * Mask indicating which fields have valid values
	 * 0 : recv_ic_id
	 * 1 : rdma_read_ic_id
	 * 2 : rdma_recv_ic_id
	 */
	u8 validity;

	/*
	 * Physical interconnect used by the device to reach the MR for receive
	 * operation
	 */
	u8 recv_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * read operation
	 */
	u8 rdma_read_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * write receive
	 */
	u8 rdma_recv_ic_id;
};

/* EFA_ADMIN_DEREG_MR command payload */
struct efa_admin_dereg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* L_Key, memory region's l_key */
	u32 l_key;
};

/* EFA_ADMIN_DEREG_MR completion payload; no command-specific fields */
struct efa_admin_dereg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Allocation of MemoryRegion, required for QP working with Virtual
 * Addresses in kernel verbs semantics, ready for fast registration use.
 */
struct efa_admin_alloc_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved1;

	/* Maximum number of pages this MR supports. */
	u32 max_pages;
};

/* EFA_ADMIN_ALLOC_MR completion payload */
struct efa_admin_alloc_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;
};

/* EFA_ADMIN_CREATE_CQ command payload */
struct efa_admin_create_cq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * 4:0 : reserved5 - MBZ
	 * 5 : interrupt_mode_enabled - if set, cq operates
	 *    in interrupt mode (i.e. CQ events and EQ elements
	 *    are generated), otherwise - polling
	 * 6 : virt - If set, ring base address is virtual
	 *    (IOVA returned by MR registration)
	 * 7 : reserved6 - MBZ
	 */
	u8 cq_caps_1;

	/*
	 * 4:0 : cq_entry_size_words - size of CQ entry in
	 *    32-bit words, valid values: 4, 8.
	 * 5 : set_src_addr - If set, source address will be
	 *    filled on RX completions from unknown senders.
	 *    Requires 8 words CQ entry size.
	 * 7:6 : reserved7 - MBZ
	 */
	u8 cq_caps_2;

	/* Sub completion queue depth in # of entries. must be power of 2 */
	u16 sub_cq_depth;

	/* EQ number assigned to this cq */
	u16 eqn;

	/* MBZ */
	u16 reserved;

	/*
	 * CQ ring base address, virtual or physical depending on 'virt'
	 * flag
	 */
	struct efa_common_mem_addr cq_ba;

	/*
	 * Memory registration key for the ring, used only when base
	 * address is virtual
	 */
	u32 l_key;

	/*
	 * number of sub cqs - must be equal to sub_cqs_per_cq of queue
	 * attributes.
	 */
	u16 num_sub_cqs;

	/* UAR number */
	u16 uar;
};

/* EFA_ADMIN_CREATE_CQ completion payload */
struct efa_admin_create_cq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Index identifying the created CQ */
	u16 cq_idx;

	/* actual sub cq depth in number of entries */
	u16 sub_cq_actual_depth;

	/* CQ doorbell address, as offset to PCIe DB BAR */
	u32 db_offset;

	/*
	 * 0 : db_valid - If set, doorbell offset is valid.
	 *    Always set when interrupts are requested.
	 */
	u32 flags;
};

/* EFA_ADMIN_DESTROY_CQ command payload */
struct efa_admin_destroy_cq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Index of the CQ to destroy, as returned by create_cq */
	u16 cq_idx;

	/* MBZ */
	u16 reserved1;
};

/* EFA_ADMIN_DESTROY_CQ completion payload; no command-specific fields */
struct efa_admin_destroy_cq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * EFA AQ Get Statistics command. Extended statistics are placed in control
 * buffer pointed by AQ entry
 */
struct efa_admin_aq_get_stats_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	union {
		/* command specific inline data */
		u32 inline_data_w1[3];

		/* control buffer holding extended statistics */
		struct efa_admin_ctrl_buff_info control_buffer;
	} u;

	/* stats type as defined in enum efa_admin_get_stats_type */
	u8 type;

	/* stats scope defined in enum efa_admin_get_stats_scope */
	u8 scope;

	/* queue selector when scope is EFA_ADMIN_GET_STATS_SCOPE_QUEUE */
	u16 scope_modifier;
};

/* Counters returned for EFA_ADMIN_GET_STATS_TYPE_BASIC */
struct efa_admin_basic_stats {
	u64 tx_bytes;

	u64 tx_pkts;

	u64 rx_bytes;

	u64 rx_pkts;

	u64 rx_drops;

	u64 qkey_viol;
};

/* Counters returned for EFA_ADMIN_GET_STATS_TYPE_MESSAGES */
struct efa_admin_messages_stats {
	u64 send_bytes;

	u64 send_wrs;

	u64 recv_bytes;

	u64 recv_wrs;
};

/* Counters returned for EFA_ADMIN_GET_STATS_TYPE_RDMA_READ */
struct efa_admin_rdma_read_stats {
	u64 read_wrs;

	u64 read_bytes;

	u64 read_wr_err;

	u64 read_resp_bytes;
};

/* Counters returned for EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE */
struct efa_admin_rdma_write_stats {
	u64 write_wrs;

	u64 write_bytes;

	u64 write_wr_err;

	u64 write_recv_bytes;
};

/* Counters returned for EFA_ADMIN_GET_STATS_TYPE_NETWORK */
struct efa_admin_network_stats {
	u64 retrans_bytes;

	u64 retrans_pkts;

	u64 retrans_timeout_events;

	u64 unresponsive_remote_events;

	u64 impaired_remote_conn_events;
};

/* EFA_ADMIN_GET_STATS completion payload; union arm selected by cmd type */
struct efa_admin_acq_get_stats_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		struct efa_admin_basic_stats basic_stats;

		struct efa_admin_messages_stats messages_stats;

		struct efa_admin_rdma_read_stats rdma_read_stats;

		struct efa_admin_rdma_write_stats rdma_write_stats;

		struct efa_admin_network_stats network_stats;
	} u;
};

/* Header shared by GET_FEATURE and SET_FEATURE command payloads */
struct efa_admin_get_set_feature_common_desc {
	/* MBZ */
	u8 reserved0;

	/* as appears in efa_admin_aq_feature_id */
	u8 feature_id;

	/* MBZ */
	u16 reserved16;
};

/* Payload of GET_FEATURE(EFA_ADMIN_DEVICE_ATTR) */
struct efa_admin_feature_device_attr_desc {
	/* Bitmap of efa_admin_aq_feature_id */
	u64 supported_features;

	/* Bitmap of supported page sizes in MR registrations */
	u64 page_size_cap;

	/* Firmware version number */
	u32 fw_version;

	/* Admin command ABI version implemented by the device */
	u32 admin_api_version;

	/* Device hardware version */
	u32 device_version;

	/* Bar used for SQ and RQ doorbells */
	u16 db_bar;

	/* Indicates how many bits are used on physical address access */
	u8 phys_addr_width;

	/* Indicates how many bits are used on virtual address access */
	u8 virt_addr_width;

	/*
	 * 0 : rdma_read - If set, RDMA Read is supported on
	 *    TX queues
	 * 1 : rnr_retry - If set, RNR retry is supported on
	 *    modify QP command
	 * 2 : data_polling_128 - If set, 128 bytes data
	 *    polling is supported
	 * 3 : rdma_write - If set, RDMA Write is supported
	 *    on TX queues
	 * 4 : unsolicited_write_recv - If set, unsolicited
	 *    write with imm. receive is supported
	 * 31:5 : reserved - MBZ
	 */
	u32 device_caps;

	/* Max RDMA transfer size in bytes */
	u32 max_rdma_size;

	/* Unique global ID for an EFA device */
	u64 guid;

	/* The device maximum link speed in Gbit/sec */
	u16 max_link_speed_gbps;

	/* MBZ */
	u16 reserved0;

	/* MBZ */
	u32 reserved1;
};

/* Payload of GET_FEATURE(EFA_ADMIN_QUEUE_ATTR_1) */
struct efa_admin_feature_queue_attr_desc_1 {
	/* The maximum number of queue pairs supported */
	u32 max_qp;

	/* Maximum number of WQEs per Send Queue */
	u32 max_sq_depth;

	/*
	 * Maximum size of data that can be sent inline in a Send WQE
	 * (deprecated by
	 * efa_admin_feature_queue_attr_desc_2::inline_buf_size_ex on
	 * supporting devices)
	 */
	u32 inline_buf_size;

	/* Maximum number of buffer descriptors per Recv Queue */
	u32 max_rq_depth;

	/* The maximum number of completion queues supported per VF */
	u32 max_cq;

	/* Maximum number of CQEs per Completion Queue */
	u32 max_cq_depth;

	/* Number of sub-CQs to be created for each CQ */
	u16 sub_cqs_per_cq;

	/* Minimum number of WQEs per SQ */
	u16 min_sq_depth;

	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
	u16 max_wr_send_sges;

	/* Maximum number of SGEs allowed for a single recv WQE */
	u16 max_wr_recv_sges;

	/* The maximum number of memory regions supported */
	u32 max_mr;

	/* The maximum number of pages can be registered */
	u32 max_mr_pages;

	/* The maximum number of protection domains supported */
	u32 max_pd;

	/* The maximum number of address handles supported */
	u32 max_ah;

	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;

	/* Maximum number of SGEs for a single RDMA read/write WQE */
	u16 max_wr_rdma_sges;

	/*
	 * Maximum number of bytes that can be written to SQ between two
	 * consecutive doorbells (in units of 64B). Driver must ensure that only
	 * complete WQEs are written to queue before issuing a doorbell.
	 * Examples: max_tx_batch=16 and WQE size = 64B, means up to 16 WQEs can
	 * be written to SQ between two consecutive doorbells. max_tx_batch=11
	 * and WQE size = 128B, means up to 5 WQEs can be written to SQ between
	 * two consecutive doorbells. Zero means unlimited.
	 */
	u16 max_tx_batch;
};

/* Payload of GET_FEATURE(EFA_ADMIN_QUEUE_ATTR_2) */
struct efa_admin_feature_queue_attr_desc_2 {
	/* Maximum size of data that can be sent inline in a Send WQE */
	u16 inline_buf_size_ex;
};

/* Payload of GET_FEATURE(EFA_ADMIN_EVENT_QUEUE_ATTR) */
struct efa_admin_event_queue_attr_desc {
	/* The maximum number of event queues supported */
	u32 max_eq;

	/* Maximum number of EQEs per Event Queue */
	u32 max_eq_depth;

	/* Supported events bitmask */
	u32 event_bitmask;
};

/* Payload of GET/SET_FEATURE(EFA_ADMIN_AENQ_CONFIG) */
struct efa_admin_feature_aenq_desc {
	/* bitmask for AENQ groups the device can report */
	u32 supported_groups;

	/* bitmask for AENQ groups to report */
	u32 enabled_groups;
};

/* Payload of GET_FEATURE(EFA_ADMIN_NETWORK_ATTR) */
struct efa_admin_feature_network_attr_desc {
	/* Raw address data in network byte order */
	u8 addr[16];

	/* max packet payload size in bytes */
	u32 mtu;
};

/*
 * When hint value is 0, hints capabilities are not supported or driver
 * should use its own predefined value
 */
struct efa_admin_hw_hints {
	/* value in ms */
	u16 mmio_read_timeout;

	/* value in ms */
	u16 driver_watchdog_timeout;

	/* value in ms */
	u16 admin_completion_timeout;

	/* poll interval in ms */
	u16 poll_interval;
};

/* EFA_ADMIN_GET_FEATURE command payload */
struct efa_admin_get_feature_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* control buffer for features requiring out-of-band data */
	struct efa_admin_ctrl_buff_info control_buffer;

	/* selects the feature to query */
	struct efa_admin_get_set_feature_common_desc feature_common;

	/* feature-specific inline payload */
	u32 raw[11];
};

/* EFA_ADMIN_GET_FEATURE completion; union arm selected by feature_id */
struct efa_admin_get_feature_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];

		struct efa_admin_feature_device_attr_desc device_attr;

		struct efa_admin_feature_aenq_desc aenq;

		struct efa_admin_feature_network_attr_desc network_attr;

		struct efa_admin_feature_queue_attr_desc_1 queue_attr_1;

		struct efa_admin_feature_queue_attr_desc_2 queue_attr_2;

		struct efa_admin_event_queue_attr_desc event_queue_attr;

		struct efa_admin_hw_hints hw_hints;
	} u;
};

/* EFA_ADMIN_SET_FEATURE command payload */
struct efa_admin_set_feature_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* control buffer for features requiring out-of-band data */
	struct efa_admin_ctrl_buff_info control_buffer;

	/* selects the feature to set */
	struct efa_admin_get_set_feature_common_desc feature_common;

	union {
		u32 raw[11];

		/* AENQ configuration */
		struct efa_admin_feature_aenq_desc aenq;
	} u;
};

/* EFA_ADMIN_SET_FEATURE completion payload */
struct efa_admin_set_feature_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];
	} u;
};

/* EFA_ADMIN_ALLOC_PD command payload; no command-specific fields */
struct efa_admin_alloc_pd_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

/* EFA_ADMIN_ALLOC_PD completion payload */
struct efa_admin_alloc_pd_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

/* EFA_ADMIN_DEALLOC_PD command payload */
struct efa_admin_dealloc_pd_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

/* EFA_ADMIN_DEALLOC_PD completion payload; no command-specific fields */
struct efa_admin_dealloc_pd_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/* EFA_ADMIN_ALLOC_UAR command payload; no command-specific fields */
struct efa_admin_alloc_uar_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

/* EFA_ADMIN_ALLOC_UAR completion payload */
struct efa_admin_alloc_uar_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

/* EFA_ADMIN_DEALLOC_UAR command payload */
struct efa_admin_dealloc_uar_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

/* EFA_ADMIN_DEALLOC_UAR completion payload; no command-specific fields */
struct efa_admin_dealloc_uar_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/* EFA_ADMIN_CREATE_EQ command payload */
struct efa_admin_create_eq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* Size of the EQ in entries, must be power of 2 */
	u16 depth;

	/* MSI-X table entry index */
	u8 msix_vec;

	/*
	 * 4:0 : entry_size_words - size of EQ entry in
	 *    32-bit words
	 * 7:5 : reserved - MBZ
	 */
	u8 caps;

	/* EQ ring base address */
	struct efa_common_mem_addr ba;

	/*
	 * Enabled events on this EQ
	 * 0 : completion_events - Enable completion events
	 * 31:1 : reserved - MBZ
	 */
	u32 event_bitmask;

	/* MBZ */
	u32 reserved;
};

/* EFA_ADMIN_CREATE_EQ completion payload */
struct efa_admin_create_eq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

/* EFA_ADMIN_DESTROY_EQ command payload */
struct efa_admin_destroy_eq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

/* EFA_ADMIN_DESTROY_EQ completion payload; no command-specific fields */
struct efa_admin_destroy_eq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/* asynchronous event notification groups */
enum efa_admin_aenq_group {
	EFA_ADMIN_FATAL_ERROR                       = 1,
	EFA_ADMIN_WARNING                           = 2,
	EFA_ADMIN_NOTIFICATION                      = 3,
	EFA_ADMIN_KEEP_ALIVE                        = 4,
	/* Number of defined AENQ groups, not a group itself */
	EFA_ADMIN_AENQ_GROUPS_NUM                   = 5,
};

/* Device response to a read-less MMIO register read request */
struct efa_admin_mmio_req_read_less_resp {
	/* echoes the request identifier */
	u16 req_id;

	/* offset of the register that was read */
	u16 reg_off;

	/* value is valid when poll is cleared */
	u32 reg_val;
};

/* Operating system type reported in efa_admin_host_info::os_type */
enum efa_admin_os_type {
	EFA_ADMIN_OS_LINUX                          = 0,
};

/* Host/driver identification sent via SET_FEATURE(EFA_ADMIN_HOST_INFO) */
struct efa_admin_host_info {
	/* OS distribution string format */
	u8 os_dist_str[128];

	/* Defined in enum efa_admin_os_type */
	u32 os_type;

	/* Kernel version string format */
	u8 kernel_ver_str[32];

	/* Kernel version numeric format */
	u32 kernel_ver;

	/*
	 * 7:0 : driver_module_type
	 * 15:8 : driver_sub_minor
	 * 23:16 : driver_minor
	 * 31:24 : driver_major
	 */
	u32 driver_ver;

	/*
	 * Device's Bus, Device and Function
	 * 2:0 : function
	 * 7:3 : device
	 * 15:8 : bus
	 */
	u16 bdf;

	/*
	 * Spec version
	 * 7:0 : spec_minor
	 * 15:8 : spec_major
	 */
	u16 spec_ver;

	/*
	 * 0 : intree - Intree driver
	 * 1 : gdr - GPUDirect RDMA supported
	 * 31:2 : reserved2
	 */
	u32 flags;
};

/*
 * Bit-field access masks. Each group below mirrors the bit layout
 * documented on the corresponding struct field above.
 */

/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)
#define EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV_MASK BIT(2)

/* modify_qp_cmd */
#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK               BIT(0)
#define EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK           BIT(1)
#define EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK                   BIT(2)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN_MASK                 BIT(3)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY_MASK BIT(4)
#define EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY_MASK              BIT(5)

/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK       BIT(1)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)

/* reg_mr_resp */
#define EFA_ADMIN_REG_MR_RESP_RECV_IC_ID_MASK               BIT(0)
#define EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID_MASK          BIT(1)
#define EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID_MASK          BIT(2)

/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK           BIT(5)

/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK              BIT(0)

/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK   BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK   BIT(1)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK  BIT(3)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_UNSOLICITED_WRITE_RECV_MASK BIT(4)

/* create_eq_cmd */
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK       GENMASK(4, 0)
#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK      BIT(0)

/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK         GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK           GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK               GENMASK(23, 16)
#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK               GENMASK(31, 24)
#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK                   GENMASK(2, 0)
#define EFA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)
#define EFA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK                 GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK                 GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_INTREE_MASK                     BIT(0)
#define EFA_ADMIN_HOST_INFO_GDR_MASK                        BIT(1)

#endif /* _EFA_ADMIN_CMDS_H_ */