/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef _EFA_ADMIN_CMDS_H_
#define _EFA_ADMIN_CMDS_H_

#define EFA_ADMIN_API_VERSION_MAJOR          0
#define EFA_ADMIN_API_VERSION_MINOR          1

/* EFA admin queue opcodes */
enum efa_admin_aq_opcode {
	EFA_ADMIN_CREATE_QP                         = 1,
	EFA_ADMIN_MODIFY_QP                         = 2,
	EFA_ADMIN_QUERY_QP                          = 3,
	EFA_ADMIN_DESTROY_QP                        = 4,
	EFA_ADMIN_CREATE_AH                         = 5,
	EFA_ADMIN_DESTROY_AH                        = 6,
	EFA_ADMIN_REG_MR                            = 7,
	EFA_ADMIN_DEREG_MR                          = 8,
	EFA_ADMIN_CREATE_CQ                         = 9,
	EFA_ADMIN_DESTROY_CQ                        = 10,
	EFA_ADMIN_GET_FEATURE                       = 11,
	EFA_ADMIN_SET_FEATURE                       = 12,
	EFA_ADMIN_GET_STATS                         = 13,
	EFA_ADMIN_ALLOC_PD                          = 14,
	EFA_ADMIN_DEALLOC_PD                        = 15,
	EFA_ADMIN_ALLOC_UAR                         = 16,
	EFA_ADMIN_DEALLOC_UAR                       = 17,
	EFA_ADMIN_CREATE_EQ                         = 18,
	EFA_ADMIN_DESTROY_EQ                        = 19,
	EFA_ADMIN_ALLOC_MR                          = 20,
	EFA_ADMIN_MAX_OPCODE                        = 20,
};

enum efa_admin_aq_feature_id {
	EFA_ADMIN_DEVICE_ATTR                       = 1,
	EFA_ADMIN_AENQ_CONFIG                       = 2,
	EFA_ADMIN_NETWORK_ATTR                      = 3,
	EFA_ADMIN_QUEUE_ATTR                        = 4,
	EFA_ADMIN_HW_HINTS                          = 5,
	EFA_ADMIN_HOST_INFO                         = 6,
	EFA_ADMIN_EVENT_QUEUE_ATTR                  = 7,
};

/* QP transport type */
enum efa_admin_qp_type {
	/* Unreliable Datagram */
	EFA_ADMIN_QP_TYPE_UD                        = 1,
	/* Scalable Reliable Datagram */
	EFA_ADMIN_QP_TYPE_SRD                       = 2,
};

/* QP state */
enum efa_admin_qp_state {
	EFA_ADMIN_QP_STATE_RESET                    = 0,
	EFA_ADMIN_QP_STATE_INIT                     = 1,
	EFA_ADMIN_QP_STATE_RTR                      = 2,
	EFA_ADMIN_QP_STATE_RTS                      = 3,
	EFA_ADMIN_QP_STATE_SQD                      = 4,
	EFA_ADMIN_QP_STATE_SQE                      = 5,
	EFA_ADMIN_QP_STATE_ERR                      = 6,
};

enum efa_admin_get_stats_type {
	EFA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
	EFA_ADMIN_GET_STATS_TYPE_MESSAGES           = 1,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_READ          = 2,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE         = 3,
};

enum efa_admin_get_stats_scope {
	EFA_ADMIN_GET_STATS_SCOPE_ALL               = 0,
	EFA_ADMIN_GET_STATS_SCOPE_QUEUE             = 1,
};

/*
 * QP allocation sizes, derived by the fabric's QueuePair (QP) create
 * command from the QP capabilities.
 */
struct efa_admin_qp_alloc_size {
	/* Send descriptor ring size in bytes */
	u32 send_queue_ring_size;

	/* Max number of WQEs that can be outstanding on the send queue */
	u32 send_queue_depth;

	/*
	 * Recv descriptor ring size in bytes, sufficient for the
	 * user-provided number of WQEs
	 */
	u32 recv_queue_ring_size;

	/* Max number of WQEs that can be outstanding on the recv queue */
	u32 recv_queue_depth;
};

struct efa_admin_create_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain associated with this QP */
	u16 pd;

	/* QP type */
	u8 qp_type;

	/*
	 * 0 : sq_virt - If set, SQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 1 : rq_virt - If set, RQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 2 : unsolicited_write_recv - If set, work requests
	 *    will not be consumed for incoming RDMA write with
	 *    immediate
	 * 7:3 : reserved - MBZ
	 */
	u8 flags;

	/*
	 * Send queue (SQ) ring base physical address. This field is not
	 * used if this is a Low Latency Queue (LLQ).
	 */
	u64 sq_base_addr;

	/* Receive queue (RQ) ring base address. */
	u64 rq_base_addr;

	/* Index of CQ to be associated with Send Queue completions */
	u32 send_cq_idx;

	/* Index of CQ to be associated with Recv Queue completions */
	u32 recv_cq_idx;

	/*
	 * Memory registration key for the SQ ring, used only when not in
	 * LLQ mode and the base address is virtual
	 */
	u32 sq_l_key;

	/*
	 * Memory registration key for the RQ ring, used only when the base
	 * address is virtual
	 */
	u32 rq_l_key;

	/* Requested QP allocation sizes */
	struct efa_admin_qp_alloc_size qp_alloc_size;

	/* UAR number */
	u16 uar;

	/* Requested service level for the QP, 0 is the default SL */
	u8 sl;

	/* MBZ */
	u8 reserved;

	/* MBZ */
	u32 reserved2;
};
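
/*
 * Illustrative sketch (not part of the device ABI, not taken verbatim from
 * the driver): one way a caller could populate the 'flags' field above when
 * both ring base addresses are IOVAs, using the
 * EFA_ADMIN_CREATE_QP_CMD_*_MASK definitions at the end of this file and
 * FIELD_PREP() from <linux/bitfield.h>. The variable names are hypothetical.
 *
 *	struct efa_admin_create_qp_cmd cmd = {};
 *
 *	cmd.qp_type = EFA_ADMIN_QP_TYPE_SRD;
 *	cmd.flags |= FIELD_PREP(EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK, 1);
 *	cmd.flags |= FIELD_PREP(EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK, 1);
 */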

struct efa_admin_create_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * Opaque handle to be used for subsequent admin operations on the
	 * QP
	 */
	u32 qp_handle;

	/*
	 * QP number in the given EFA virtual device. Least-significant bits (as
	 * needed according to max_qp) carry the unique QP ID
	 */
	u16 qp_num;

	/* MBZ */
	u16 reserved;

	/* Index of sub-CQ for Send Queue completions */
	u16 send_sub_cq_idx;

	/* Index of sub-CQ for Receive Queue completions */
	u16 recv_sub_cq_idx;

	/* SQ doorbell address, as offset to PCIe DB BAR */
	u32 sq_db_offset;

	/* RQ doorbell address, as offset to PCIe DB BAR */
	u32 rq_db_offset;

	/*
	 * Low latency send queue ring base address, as an offset to the PCIe
	 * MMIO LLQ_MEM BAR
	 */
	u32 llq_descriptors_offset;
};

struct efa_admin_modify_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * Mask indicating which fields should be updated
	 * 0 : qp_state
	 * 1 : cur_qp_state
	 * 2 : qkey
	 * 3 : sq_psn
	 * 4 : sq_drained_async_notify
	 * 5 : rnr_retry
	 * 31:6 : reserved
	 */
	u32 modify_mask;

	/* QP handle returned by create_qp command */
	u32 qp_handle;

	/* QP state */
	u32 qp_state;

	/* Override current QP state (before applying the transition) */
	u32 cur_qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Enable async notification when SQ is drained */
	u8 sq_drained_async_notify;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};
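
/*
 * Illustrative sketch (assumption, not the driver's code): building the
 * 'modify_mask' for a transition that updates only qp_state and
 * cur_qp_state, using the EFA_ADMIN_MODIFY_QP_CMD_*_MASK bits defined at
 * the end of this file. qp_handle is assumed to come from a prior
 * efa_admin_create_qp_resp.
 *
 *	struct efa_admin_modify_qp_cmd cmd = {};
 *
 *	cmd.qp_handle = qp_handle;
 *	cmd.qp_state = EFA_ADMIN_QP_STATE_RTS;
 *	cmd.cur_qp_state = EFA_ADMIN_QP_STATE_RTR;
 *	cmd.modify_mask = EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK |
 *			  EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK;
 */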

struct efa_admin_modify_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_query_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

struct efa_admin_query_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* QP state */
	u32 qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Indicates that draining is in progress */
	u8 sq_draining;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};

struct efa_admin_destroy_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

struct efa_admin_destroy_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Create Address Handle command parameters. Must not be issued more than
 * once for the same destination address
 */
struct efa_admin_create_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Destination address in network byte order */
	u8 dest_addr[16];

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_create_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* PD number */
	u16 pd;
};

struct efa_admin_destroy_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Registration of a Memory Region, required for QPs working with virtual
 * addresses. In standard verbs semantics, region length is limited to 2GB,
 * but EFA supports larger MRs to ease work with very large datasets
 * (e.g. mapping of full GPU memory).
 */
struct efa_admin_reg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved16_w1;

	/* Physical Buffer List, each element is page-aligned. */
	union {
		/*
		 * Inline array of guest-physical page addresses of user
		 * memory pages (optimization for short region
		 * registrations)
		 */
		u64 inline_pbl_array[4];

		/* Points to PBL (direct or indirect, chained if needed) */
		struct efa_admin_ctrl_buff_info pbl;
	} pbl;

	/* Memory region length, in bytes. */
	u64 mr_length;

	/*
	 * flags and page size
	 * 4:0 : phys_page_size_shift - page size is (1 <<
	 *    phys_page_size_shift). Page size is used for
	 *    building the Virtual to Physical address mapping
	 * 6:5 : reserved - MBZ
	 * 7 : mem_addr_phy_mode_en - Enable bit for physical
	 *    memory registration (no translation), can be used
	 *    only by privileged clients. If set, PBL must
	 *    contain a single entry.
	 */
	u8 flags;

	/*
	 * permissions
	 * 0 : local_write_enable - Local write permissions:
	 *    must be set for RQ buffers and buffers posted for
	 *    RDMA Read requests
	 * 1 : remote_write_enable - Remote write
	 *    permissions: must be set to enable RDMA write to
	 *    the region
	 * 2 : remote_read_enable - Remote read permissions:
	 *    must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
	 */
	u8 permissions;

	/* MBZ */
	u16 reserved16_w5;

	/* number of pages in PBL (redundant, could be calculated) */
	u32 page_num;

	/*
	 * IO Virtual Address associated with this MR. If
	 * mem_addr_phy_mode_en is set, contains the physical address of
	 * the region.
	 */
	u64 iova;
};
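
/*
 * Illustrative sketch (assumption, for orientation only): encoding the
 * 'flags' and 'permissions' fields above for a 4KiB-page registration with
 * local-write and remote-read access, using the EFA_ADMIN_REG_MR_CMD_*_MASK
 * definitions at the end of this file and FIELD_PREP() from
 * <linux/bitfield.h>.
 *
 *	struct efa_admin_reg_mr_cmd cmd = {};
 *
 *	// page size = 1 << 12 = 4KiB
 *	cmd.flags |= FIELD_PREP(EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK, 12);
 *	cmd.permissions |= EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK |
 *			   EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK;
 */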

struct efa_admin_reg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;

	/*
	 * Mask indicating which fields have valid values
	 * 0 : recv_ic_id
	 * 1 : rdma_read_ic_id
	 * 2 : rdma_recv_ic_id
	 */
	u8 validity;

	/*
	 * Physical interconnect used by the device to reach the MR for receive
	 * operation
	 */
	u8 recv_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * read operation
	 */
	u8 rdma_read_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * write receive
	 */
	u8 rdma_recv_ic_id;
};
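
/*
 * Illustrative sketch (assumption): an ic_id field in the response above is
 * only meaningful when the corresponding bit in 'validity' is set, e.g.:
 *
 *	if (resp.validity & EFA_ADMIN_REG_MR_RESP_RECV_IC_ID_MASK)
 *		recv_ic_id = resp.recv_ic_id;
 */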

struct efa_admin_dereg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* L_Key, memory region's l_key */
	u32 l_key;
};

struct efa_admin_dereg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * Allocation of a Memory Region, required for QPs working with virtual
 * addresses under kernel verbs semantics; the MR is ready for
 * fast-registration use.
 */
struct efa_admin_alloc_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved1;

	/* Maximum number of pages this MR supports. */
	u32 max_pages;
};

struct efa_admin_alloc_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;
};

struct efa_admin_create_cq_cmd {
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * 4:0 : reserved5 - MBZ
	 * 5 : interrupt_mode_enabled - if set, cq operates
	 *    in interrupt mode (i.e. CQ events and EQ elements
	 *    are generated), otherwise - polling
	 * 6 : virt - If set, ring base address is virtual
	 *    (IOVA returned by MR registration)
	 * 7 : reserved6 - MBZ
	 */
	u8 cq_caps_1;

	/*
	 * 4:0 : cq_entry_size_words - size of CQ entry in
	 *    32-bit words, valid values: 4, 8.
	 * 5 : set_src_addr - If set, source address will be
	 *    filled on RX completions from unknown senders.
	 *    Requires 8 words CQ entry size.
	 * 7:6 : reserved7 - MBZ
	 */
	u8 cq_caps_2;

	/* Sub completion queue depth in # of entries. Must be a power of 2 */
	u16 sub_cq_depth;

	/* EQ number assigned to this CQ */
	u16 eqn;

	/* MBZ */
	u16 reserved;

	/*
	 * CQ ring base address, virtual or physical depending on 'virt'
	 * flag
	 */
	struct efa_common_mem_addr cq_ba;

	/*
	 * Memory registration key for the ring, used only when base
	 * address is virtual
	 */
	u32 l_key;

	/*
	 * Number of sub-CQs - must be equal to sub_cqs_per_cq of the queue
	 * attributes.
	 */
	u16 num_sub_cqs;

	/* UAR number */
	u16 uar;
};
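
/*
 * Illustrative sketch (assumption, not the driver's code): setting the CQ
 * capability bytes above for a virtually addressed, interrupt-driven CQ
 * with 4-word (16-byte) entries, using the EFA_ADMIN_CREATE_CQ_CMD_*_MASK
 * definitions at the end of this file and FIELD_PREP() from
 * <linux/bitfield.h>.
 *
 *	struct efa_admin_create_cq_cmd cmd = {};
 *
 *	cmd.cq_caps_1 |= FIELD_PREP(EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK, 1);
 *	cmd.cq_caps_1 |= FIELD_PREP(EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK, 1);
 *	cmd.cq_caps_2 |= FIELD_PREP(EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK, 4);
 */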

struct efa_admin_create_cq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	u16 cq_idx;

	/* Actual sub-CQ depth in number of entries */
	u16 sub_cq_actual_depth;

	/* CQ doorbell address, as offset to PCIe DB BAR */
	u32 db_offset;

	/*
	 * 0 : db_valid - If set, doorbell offset is valid.
	 *    Always set when interrupts are requested.
	 */
	u32 flags;
};

struct efa_admin_destroy_cq_cmd {
	struct efa_admin_aq_common_desc aq_common_desc;

	u16 cq_idx;

	/* MBZ */
	u16 reserved1;
};

struct efa_admin_destroy_cq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

/*
 * EFA AQ Get Statistics command. Extended statistics are placed in the
 * control buffer pointed to by the AQ entry
 */
struct efa_admin_aq_get_stats_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	union {
		/* command specific inline data */
		u32 inline_data_w1[3];

		struct efa_admin_ctrl_buff_info control_buffer;
	} u;

	/* stats type as defined in enum efa_admin_get_stats_type */
	u8 type;

	/* stats scope as defined in enum efa_admin_get_stats_scope */
	u8 scope;

	u16 scope_modifier;
};
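
/*
 * Illustrative sketch (assumption): requesting RDMA read statistics for all
 * queues, using the stats enums defined earlier in this file.
 *
 *	struct efa_admin_aq_get_stats_cmd cmd = {};
 *
 *	cmd.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
 *	cmd.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
 */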

struct efa_admin_basic_stats {
	u64 tx_bytes;

	u64 tx_pkts;

	u64 rx_bytes;

	u64 rx_pkts;

	u64 rx_drops;

	u64 qkey_viol;
};

struct efa_admin_messages_stats {
	u64 send_bytes;

	u64 send_wrs;

	u64 recv_bytes;

	u64 recv_wrs;
};

struct efa_admin_rdma_read_stats {
	u64 read_wrs;

	u64 read_bytes;

	u64 read_wr_err;

	u64 read_resp_bytes;
};

struct efa_admin_rdma_write_stats {
	u64 write_wrs;

	u64 write_bytes;

	u64 write_wr_err;

	u64 write_recv_bytes;
};

struct efa_admin_acq_get_stats_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		struct efa_admin_basic_stats basic_stats;

		struct efa_admin_messages_stats messages_stats;

		struct efa_admin_rdma_read_stats rdma_read_stats;

		struct efa_admin_rdma_write_stats rdma_write_stats;
	} u;
};

struct efa_admin_get_set_feature_common_desc {
	/* MBZ */
	u8 reserved0;

	/* as appears in efa_admin_aq_feature_id */
	u8 feature_id;

	/* MBZ */
	u16 reserved16;
};

struct efa_admin_feature_device_attr_desc {
	/* Bitmap of efa_admin_aq_feature_id */
	u64 supported_features;

	/* Bitmap of supported page sizes in MR registrations */
	u64 page_size_cap;

	u32 fw_version;

	u32 admin_api_version;

	u32 device_version;

	/* BAR used for SQ and RQ doorbells */
	u16 db_bar;

	/* Indicates how many bits are used for physical address access */
	u8 phys_addr_width;

	/* Indicates how many bits are used for virtual address access */
	u8 virt_addr_width;

	/*
	 * 0 : rdma_read - If set, RDMA Read is supported on
	 *    TX queues
	 * 1 : rnr_retry - If set, RNR retry is supported on
	 *    modify QP command
	 * 2 : data_polling_128 - If set, 128 bytes data
	 *    polling is supported
	 * 3 : rdma_write - If set, RDMA Write is supported
	 *    on TX queues
	 * 4 : unsolicited_write_recv - If set, unsolicited
	 *    write with imm. receive is supported
	 * 31:5 : reserved - MBZ
	 */
	u32 device_caps;

	/* Max RDMA transfer size in bytes */
	u32 max_rdma_size;

	/* Unique global ID for an EFA device */
	u64 guid;

	/* The device maximum link speed in Gbit/sec */
	u16 max_link_speed_gbps;

	/* MBZ */
	u16 reserved0;

	/* MBZ */
	u32 reserved1;
};
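
/*
 * Illustrative sketch (assumption): 'device_caps' is tested bit by bit with
 * the EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_*_MASK definitions at the end of
 * this file, e.g. to check for RDMA read support:
 *
 *	if (device_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK)
 *		rdma_read_supported = true;
 */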

struct efa_admin_feature_queue_attr_desc {
	/* The maximum number of queue pairs supported */
	u32 max_qp;

	/* Maximum number of WQEs per Send Queue */
	u32 max_sq_depth;

	/* Maximum size of data that can be sent inline in a Send WQE */
	u32 inline_buf_size;

	/* Maximum number of buffer descriptors per Recv Queue */
	u32 max_rq_depth;

	/* The maximum number of completion queues supported per VF */
	u32 max_cq;

	/* Maximum number of CQEs per Completion Queue */
	u32 max_cq_depth;

	/* Number of sub-CQs to be created for each CQ */
	u16 sub_cqs_per_cq;

	/* Minimum number of WQEs per SQ */
	u16 min_sq_depth;

	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
	u16 max_wr_send_sges;

	/* Maximum number of SGEs allowed for a single recv WQE */
	u16 max_wr_recv_sges;

	/* The maximum number of memory regions supported */
	u32 max_mr;

	/* The maximum number of pages that can be registered */
	u32 max_mr_pages;

	/* The maximum number of protection domains supported */
	u32 max_pd;

	/* The maximum number of address handles supported */
	u32 max_ah;

	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;

	/* Maximum number of SGEs for a single RDMA read/write WQE */
	u16 max_wr_rdma_sges;

	/*
	 * Maximum number of bytes that can be written to the SQ between two
	 * consecutive doorbells (in units of 64B). The driver must ensure that
	 * only complete WQEs are written to the queue before issuing a
	 * doorbell. For example, max_tx_batch=16 with a WQE size of 64B means
	 * up to 16 WQEs can be written to the SQ between two consecutive
	 * doorbells; max_tx_batch=11 with a WQE size of 128B means up to 5
	 * WQEs. Zero means unlimited.
	 */
	u16 max_tx_batch;
};
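
/*
 * Illustrative sketch (assumption): deriving the number of complete WQEs
 * that may be posted between two doorbells from max_tx_batch (units of
 * 64B), matching the worked examples in the comment above. wqe_size is a
 * hypothetical per-queue WQE size in bytes.
 *
 *	u16 wqes_per_doorbell = 0;
 *
 *	// max_tx_batch == 0 means unlimited
 *	if (queue_attr.max_tx_batch)
 *		wqes_per_doorbell = (queue_attr.max_tx_batch * 64) / wqe_size;
 */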

struct efa_admin_event_queue_attr_desc {
	/* The maximum number of event queues supported */
	u32 max_eq;

	/* Maximum number of EQEs per Event Queue */
	u32 max_eq_depth;

	/* Supported events bitmask */
	u32 event_bitmask;
};

struct efa_admin_feature_aenq_desc {
	/* bitmask for AENQ groups the device can report */
	u32 supported_groups;

	/* bitmask for AENQ groups to report */
	u32 enabled_groups;
};

struct efa_admin_feature_network_attr_desc {
	/* Raw address data in network byte order */
	u8 addr[16];

	/* max packet payload size in bytes */
	u32 mtu;
};

/*
 * When a hint value is 0, the hint is not supported and the driver should
 * use its own predefined value
 */
struct efa_admin_hw_hints {
	/* value in ms */
	u16 mmio_read_timeout;

	/* value in ms */
	u16 driver_watchdog_timeout;

	/* value in ms */
	u16 admin_completion_timeout;

	/* poll interval in ms */
	u16 poll_interval;
};

struct efa_admin_get_feature_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	struct efa_admin_ctrl_buff_info control_buffer;

	struct efa_admin_get_set_feature_common_desc feature_common;

	u32 raw[11];
};

struct efa_admin_get_feature_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];

		struct efa_admin_feature_device_attr_desc device_attr;

		struct efa_admin_feature_aenq_desc aenq;

		struct efa_admin_feature_network_attr_desc network_attr;

		struct efa_admin_feature_queue_attr_desc queue_attr;

		struct efa_admin_event_queue_attr_desc event_queue_attr;

		struct efa_admin_hw_hints hw_hints;
	} u;
};

struct efa_admin_set_feature_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	struct efa_admin_ctrl_buff_info control_buffer;

	struct efa_admin_get_set_feature_common_desc feature_common;

	union {
		u32 raw[11];

		/* AENQ configuration */
		struct efa_admin_feature_aenq_desc aenq;
	} u;
};

struct efa_admin_set_feature_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];
	} u;
};

struct efa_admin_alloc_pd_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

struct efa_admin_alloc_pd_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_pd_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_pd_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_alloc_uar_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

struct efa_admin_alloc_uar_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_uar_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_dealloc_uar_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

struct efa_admin_create_eq_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* Size of the EQ in entries, must be a power of 2 */
	u16 depth;

	/* MSI-X table entry index */
	u8 msix_vec;

	/*
	 * 4:0 : entry_size_words - size of EQ entry in
	 *    32-bit words
	 * 7:5 : reserved - MBZ
	 */
	u8 caps;

	/* EQ ring base address */
	struct efa_common_mem_addr ba;

	/*
	 * Enabled events on this EQ
	 * 0 : completion_events - Enable completion events
	 * 31:1 : reserved - MBZ
	 */
	u32 event_bitmask;

	/* MBZ */
	u32 reserved;
};
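
/*
 * Illustrative sketch (assumption; the entry size value is made up for the
 * example): filling the EQ entry size and enabling completion events, using
 * the EFA_ADMIN_CREATE_EQ_CMD_*_MASK definitions at the end of this file
 * and FIELD_PREP() from <linux/bitfield.h>.
 *
 *	struct efa_admin_create_eq_cmd cmd = {};
 *
 *	cmd.caps |= FIELD_PREP(EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK, 4);
 *	cmd.event_bitmask |= EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK;
 */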

struct efa_admin_create_eq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_eq_cmd {
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

struct efa_admin_destroy_eq_resp {
	struct efa_admin_acq_common_desc acq_common_desc;
};

/* asynchronous event notification groups */
enum efa_admin_aenq_group {
	EFA_ADMIN_FATAL_ERROR                       = 1,
	EFA_ADMIN_WARNING                           = 2,
	EFA_ADMIN_NOTIFICATION                      = 3,
	EFA_ADMIN_KEEP_ALIVE                        = 4,
	EFA_ADMIN_AENQ_GROUPS_NUM                   = 5,
};

struct efa_admin_mmio_req_read_less_resp {
	u16 req_id;

	u16 reg_off;

	/* value is valid when poll is cleared */
	u32 reg_val;
};

enum efa_admin_os_type {
	EFA_ADMIN_OS_LINUX                          = 0,
};

struct efa_admin_host_info {
	/* OS distribution string format */
	u8 os_dist_str[128];

	/* Defined in enum efa_admin_os_type */
	u32 os_type;

	/* Kernel version string format */
	u8 kernel_ver_str[32];

	/* Kernel version numeric format */
	u32 kernel_ver;

	/*
	 * 7:0 : driver_module_type
	 * 15:8 : driver_sub_minor
	 * 23:16 : driver_minor
	 * 31:24 : driver_major
	 */
	u32 driver_ver;

	/*
	 * Device's Bus, Device and Function
	 * 2:0 : function
	 * 7:3 : device
	 * 15:8 : bus
	 */
	u16 bdf;

	/*
	 * Spec version
	 * 7:0 : spec_minor
	 * 15:8 : spec_major
	 */
	u16 spec_ver;

	/*
	 * 0 : intree - Intree driver
	 * 1 : gdr - GPUDirect RDMA supported
	 * 31:2 : reserved2
	 */
	u32 flags;
};
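
/*
 * Illustrative sketch (assumption): packing the driver version and BDF
 * fields above with FIELD_PREP() from <linux/bitfield.h> and the
 * EFA_ADMIN_HOST_INFO_*_MASK definitions below. The version numbers are
 * made up, and bus/dev/fn are hypothetical PCI bus, device and function
 * numbers.
 *
 *	struct efa_admin_host_info hinf = {};
 *
 *	hinf.driver_ver = FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK, 2) |
 *			  FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK, 13) |
 *			  FIELD_PREP(EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK, 0);
 *	hinf.bdf = FIELD_PREP(EFA_ADMIN_HOST_INFO_BUS_MASK, bus) |
 *		   FIELD_PREP(EFA_ADMIN_HOST_INFO_DEVICE_MASK, dev) |
 *		   FIELD_PREP(EFA_ADMIN_HOST_INFO_FUNCTION_MASK, fn);
 */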

/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)
#define EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV_MASK BIT(2)

/* modify_qp_cmd */
#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK               BIT(0)
#define EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK           BIT(1)
#define EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK                   BIT(2)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN_MASK                 BIT(3)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY_MASK BIT(4)
#define EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY_MASK              BIT(5)

/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK       BIT(1)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)

/* reg_mr_resp */
#define EFA_ADMIN_REG_MR_RESP_RECV_IC_ID_MASK               BIT(0)
#define EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID_MASK          BIT(1)
#define EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID_MASK          BIT(2)

/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK           BIT(5)

/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK              BIT(0)

/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK   BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK   BIT(1)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK  BIT(3)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_UNSOLICITED_WRITE_RECV_MASK BIT(4)

/* create_eq_cmd */
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK       GENMASK(4, 0)
#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK      BIT(0)

/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK         GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK           GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK               GENMASK(23, 16)
#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK               GENMASK(31, 24)
#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK                   GENMASK(2, 0)
#define EFA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)
#define EFA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK                 GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK                 GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_INTREE_MASK                     BIT(0)
#define EFA_ADMIN_HOST_INFO_GDR_MASK                        BIT(1)

#endif /* _EFA_ADMIN_CMDS_H_ */