/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * Source: drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
 * (revision 06a130e42a5bfc84795464bff023bff4c16f58c5)
 */
5 
#ifndef _EFA_ADMIN_CMDS_H_
#define _EFA_ADMIN_CMDS_H_

/* Version of the admin command ABI described by this header */
#define EFA_ADMIN_API_VERSION_MAJOR          0
#define EFA_ADMIN_API_VERSION_MINOR          1
11 
/* EFA admin queue opcodes */
enum efa_admin_aq_opcode {
	EFA_ADMIN_CREATE_QP                         = 1,
	EFA_ADMIN_MODIFY_QP                         = 2,
	EFA_ADMIN_QUERY_QP                          = 3,
	EFA_ADMIN_DESTROY_QP                        = 4,
	EFA_ADMIN_CREATE_AH                         = 5,
	EFA_ADMIN_DESTROY_AH                        = 6,
	EFA_ADMIN_REG_MR                            = 7,
	EFA_ADMIN_DEREG_MR                          = 8,
	EFA_ADMIN_CREATE_CQ                         = 9,
	EFA_ADMIN_DESTROY_CQ                        = 10,
	EFA_ADMIN_GET_FEATURE                       = 11,
	EFA_ADMIN_SET_FEATURE                       = 12,
	EFA_ADMIN_GET_STATS                         = 13,
	EFA_ADMIN_ALLOC_PD                          = 14,
	EFA_ADMIN_DEALLOC_PD                        = 15,
	EFA_ADMIN_ALLOC_UAR                         = 16,
	EFA_ADMIN_DEALLOC_UAR                       = 17,
	EFA_ADMIN_CREATE_EQ                         = 18,
	EFA_ADMIN_DESTROY_EQ                        = 19,
	/* Must track the highest opcode above */
	EFA_ADMIN_MAX_OPCODE                        = 19,
};
35 
/* Feature IDs used by the get_feature/set_feature admin commands */
enum efa_admin_aq_feature_id {
	EFA_ADMIN_DEVICE_ATTR                       = 1,
	EFA_ADMIN_AENQ_CONFIG                       = 2,
	EFA_ADMIN_NETWORK_ATTR                      = 3,
	EFA_ADMIN_QUEUE_ATTR                        = 4,
	EFA_ADMIN_HW_HINTS                          = 5,
	EFA_ADMIN_HOST_INFO                         = 6,
	EFA_ADMIN_EVENT_QUEUE_ATTR                  = 7,
};
45 
/* QP transport type */
enum efa_admin_qp_type {
	/* Unreliable Datagram */
	EFA_ADMIN_QP_TYPE_UD                        = 1,
	/* Scalable Reliable Datagram */
	EFA_ADMIN_QP_TYPE_SRD                       = 2,
};
53 
/* QP state, mirrors the standard verbs QP state machine */
enum efa_admin_qp_state {
	EFA_ADMIN_QP_STATE_RESET                    = 0,
	EFA_ADMIN_QP_STATE_INIT                     = 1,
	EFA_ADMIN_QP_STATE_RTR                      = 2,
	EFA_ADMIN_QP_STATE_RTS                      = 3,
	EFA_ADMIN_QP_STATE_SQD                      = 4,
	EFA_ADMIN_QP_STATE_SQE                      = 5,
	EFA_ADMIN_QP_STATE_ERR                      = 6,
};
64 
/* Statistics category requested by the get_stats command */
enum efa_admin_get_stats_type {
	EFA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
	EFA_ADMIN_GET_STATS_TYPE_MESSAGES           = 1,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_READ          = 2,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE         = 3,
};
71 
/* Scope of a get_stats query: whole device or a single queue */
enum efa_admin_get_stats_scope {
	EFA_ADMIN_GET_STATS_SCOPE_ALL               = 0,
	EFA_ADMIN_GET_STATS_SCOPE_QUEUE             = 1,
};
76 
/*
 * QP allocation sizes, converted by fabric QueuePair (QP) create command
 * from QP capabilities.
 */
struct efa_admin_qp_alloc_size {
	/* Send descriptor ring size in bytes */
	u32 send_queue_ring_size;

	/* Max number of WQEs that can be outstanding on send queue. */
	u32 send_queue_depth;

	/*
	 * Recv descriptor ring size in bytes, sufficient for user-provided
	 * number of WQEs
	 */
	u32 recv_queue_ring_size;

	/* Max number of WQEs that can be outstanding on recv queue */
	u32 recv_queue_depth;
};
97 
/* Create QP admin command (EFA_ADMIN_CREATE_QP) */
struct efa_admin_create_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain associated with this QP */
	u16 pd;

	/* QP type, as in enum efa_admin_qp_type */
	u8 qp_type;

	/*
	 * 0 : sq_virt - If set, SQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 1 : rq_virt - If set, RQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 2 : unsolicited_write_recv - If set, work requests
	 *    will not be consumed for incoming RDMA write with
	 *    immediate
	 * 7:3 : reserved - MBZ
	 */
	u8 flags;

	/*
	 * Send queue (SQ) ring base physical address. This field is not
	 * used if this is a Low Latency Queue(LLQ).
	 */
	u64 sq_base_addr;

	/* Receive queue (RQ) ring base address. */
	u64 rq_base_addr;

	/* Index of CQ to be associated with Send Queue completions */
	u32 send_cq_idx;

	/* Index of CQ to be associated with Recv Queue completions */
	u32 recv_cq_idx;

	/*
	 * Memory registration key for the SQ ring, used only when not in
	 * LLQ mode and base address is virtual
	 */
	u32 sq_l_key;

	/*
	 * Memory registration key for the RQ ring, used only when base
	 * address is virtual
	 */
	u32 rq_l_key;

	/* Requested QP allocation sizes */
	struct efa_admin_qp_alloc_size qp_alloc_size;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;

	/* MBZ */
	u32 reserved2;
};
159 
/* Completion descriptor for the create QP command */
struct efa_admin_create_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * Opaque handle to be used for consequent admin operations on the
	 * QP
	 */
	u32 qp_handle;

	/*
	 * QP number in the given EFA virtual device. Least-significant bits (as
	 * needed according to max_qp) carry unique QP ID
	 */
	u16 qp_num;

	/* MBZ */
	u16 reserved;

	/* Index of sub-CQ for Send Queue completions */
	u16 send_sub_cq_idx;

	/* Index of sub-CQ for Receive Queue completions */
	u16 recv_sub_cq_idx;

	/* SQ doorbell address, as offset to PCIe DB BAR */
	u32 sq_db_offset;

	/* RQ doorbell address, as offset to PCIe DB BAR */
	u32 rq_db_offset;

	/*
	 * low latency send queue ring base address as an offset to PCIe
	 * MMIO LLQ_MEM BAR
	 */
	u32 llq_descriptors_offset;
};
197 
/* Modify QP admin command (EFA_ADMIN_MODIFY_QP) */
struct efa_admin_modify_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * Mask indicating which fields should be updated
	 * 0 : qp_state
	 * 1 : cur_qp_state
	 * 2 : qkey
	 * 3 : sq_psn
	 * 4 : sq_drained_async_notify
	 * 5 : rnr_retry
	 * 31:6 : reserved
	 */
	u32 modify_mask;

	/* QP handle returned by create_qp command */
	u32 qp_handle;

	/* QP state, as in enum efa_admin_qp_state */
	u32 qp_state;

	/* Override current QP state (before applying the transition) */
	u32 cur_qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Enable async notification when SQ is drained */
	u8 sq_drained_async_notify;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};
238 
/* Completion descriptor for the modify QP command; carries no payload */
struct efa_admin_modify_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
243 
/* Query QP admin command (EFA_ADMIN_QUERY_QP) */
struct efa_admin_query_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};
251 
/* Completion descriptor for the query QP command */
struct efa_admin_query_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* QP state, as in enum efa_admin_qp_state */
	u32 qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Indicates that draining is in progress */
	u8 sq_draining;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};
274 
/* Destroy QP admin command (EFA_ADMIN_DESTROY_QP) */
struct efa_admin_destroy_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};
282 
/* Completion descriptor for the destroy QP command; carries no payload */
struct efa_admin_destroy_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
287 
/*
 * Create Address Handle command parameters. Must not be called more than
 * once for the same destination
 */
struct efa_admin_create_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Destination address in network byte order */
	u8 dest_addr[16];

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};
305 
/* Completion descriptor for the create AH command */
struct efa_admin_create_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* MBZ */
	u16 reserved;
};
316 
/* Destroy AH admin command (EFA_ADMIN_DESTROY_AH) */
struct efa_admin_destroy_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* PD number */
	u16 pd;
};
327 
/* Completion descriptor for the destroy AH command; carries no payload */
struct efa_admin_destroy_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
332 
/*
 * Registration of MemoryRegion, required for QP working with Virtual
 * Addresses. In standard verbs semantics, region length is limited to 2GB
 * space, but EFA offers larger MR support for large memory space, to ease
 * on users working with very large datasets (i.e. full GPU memory mapping).
 */
struct efa_admin_reg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved16_w1;

	/* Physical Buffer List, each element is page-aligned. */
	union {
		/*
		 * Inline array of guest-physical page addresses of user
		 * memory pages (optimization for short region
		 * registrations)
		 */
		u64 inline_pbl_array[4];

		/* points to PBL (direct or indirect, chained if needed) */
		struct efa_admin_ctrl_buff_info pbl;
	} pbl;

	/* Memory region length, in bytes. */
	u64 mr_length;

	/*
	 * flags and page size
	 * 4:0 : phys_page_size_shift - page size is (1 <<
	 *    phys_page_size_shift). Page size is used for
	 *    building the Virtual to Physical address mapping
	 * 6:5 : reserved - MBZ
	 * 7 : mem_addr_phy_mode_en - Enable bit for physical
	 *    memory registration (no translation), can be used
	 *    only by privileged clients. If set, PBL must
	 *    contain a single entry.
	 */
	u8 flags;

	/*
	 * permissions
	 * 0 : local_write_enable - Local write permissions:
	 *    must be set for RQ buffers and buffers posted for
	 *    RDMA Read requests
	 * 1 : remote_write_enable - Remote write
	 *    permissions: must be set to enable RDMA write to
	 *    the region
	 * 2 : remote_read_enable - Remote read permissions:
	 *    must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
	 */
	u8 permissions;

	/* MBZ */
	u16 reserved16_w5;

	/* number of pages in PBL (redundant, could be calculated) */
	u32 page_num;

	/*
	 * IO Virtual Address associated with this MR. If
	 * mem_addr_phy_mode_en is set, contains the physical address of
	 * the region.
	 */
	u64 iova;
};
405 
/* Completion descriptor for the register MR command */
struct efa_admin_reg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;

	/*
	 * Mask indicating which fields have valid values
	 * 0 : recv_ic_id
	 * 1 : rdma_read_ic_id
	 * 2 : rdma_recv_ic_id
	 */
	u8 validity;

	/*
	 * Physical interconnect used by the device to reach the MR for receive
	 * operation
	 */
	u8 recv_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * read operation
	 */
	u8 rdma_read_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * write receive
	 */
	u8 rdma_recv_ic_id;
};
448 
/* Deregister MR admin command (EFA_ADMIN_DEREG_MR) */
struct efa_admin_dereg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* L_Key, memory region's l_key */
	u32 l_key;
};
456 
/* Completion descriptor for the deregister MR command; carries no payload */
struct efa_admin_dereg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
461 
/* Create CQ admin command (EFA_ADMIN_CREATE_CQ) */
struct efa_admin_create_cq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * 4:0 : reserved5 - MBZ
	 * 5 : interrupt_mode_enabled - if set, cq operates
	 *    in interrupt mode (i.e. CQ events and EQ elements
	 *    are generated), otherwise - polling
	 * 6 : virt - If set, ring base address is virtual
	 *    (IOVA returned by MR registration)
	 * 7 : reserved6 - MBZ
	 */
	u8 cq_caps_1;

	/*
	 * 4:0 : cq_entry_size_words - size of CQ entry in
	 *    32-bit words, valid values: 4, 8.
	 * 5 : set_src_addr - If set, source address will be
	 *    filled on RX completions from unknown senders.
	 *    Requires 8 words CQ entry size.
	 * 7:6 : reserved7 - MBZ
	 */
	u8 cq_caps_2;

	/* completion queue depth in # of entries. must be power of 2 */
	u16 cq_depth;

	/* EQ number assigned to this cq */
	u16 eqn;

	/* MBZ */
	u16 reserved;

	/*
	 * CQ ring base address, virtual or physical depending on 'virt'
	 * flag
	 */
	struct efa_common_mem_addr cq_ba;

	/*
	 * Memory registration key for the ring, used only when base
	 * address is virtual
	 */
	u32 l_key;

	/*
	 * number of sub cqs - must be equal to sub_cqs_per_cq of queue
	 * attributes.
	 */
	u16 num_sub_cqs;

	/* UAR number */
	u16 uar;
};
516 
/* Completion descriptor for the create CQ command */
struct efa_admin_create_cq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* CQ index, used to reference this CQ in subsequent commands */
	u16 cq_idx;

	/* actual cq depth in number of entries */
	u16 cq_actual_depth;

	/* CQ doorbell address, as offset to PCIe DB BAR */
	u32 db_offset;

	/*
	 * 0 : db_valid - If set, doorbell offset is valid.
	 *    Always set when interrupts are requested.
	 */
	u32 flags;
};
534 
/* Destroy CQ admin command (EFA_ADMIN_DESTROY_CQ) */
struct efa_admin_destroy_cq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* CQ index returned by the create_cq command */
	u16 cq_idx;

	/* MBZ */
	u16 reserved1;
};
543 
/* Completion descriptor for the destroy CQ command; carries no payload */
struct efa_admin_destroy_cq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
547 
/*
 * EFA AQ Get Statistics command. Extended statistics are placed in control
 * buffer pointed by AQ entry
 */
struct efa_admin_aq_get_stats_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	union {
		/* command specific inline data */
		u32 inline_data_w1[3];

		/* control buffer for extended statistics */
		struct efa_admin_ctrl_buff_info control_buffer;
	} u;

	/* stats type as defined in enum efa_admin_get_stats_type */
	u8 type;

	/* stats scope defined in enum efa_admin_get_stats_scope */
	u8 scope;

	/* queue selector when scope is EFA_ADMIN_GET_STATS_SCOPE_QUEUE */
	u16 scope_modifier;
};
570 
/* Basic traffic counters (EFA_ADMIN_GET_STATS_TYPE_BASIC) */
struct efa_admin_basic_stats {
	/* transmitted bytes */
	u64 tx_bytes;

	/* transmitted packets */
	u64 tx_pkts;

	/* received bytes */
	u64 rx_bytes;

	/* received packets */
	u64 rx_pkts;

	/* dropped received packets */
	u64 rx_drops;
};
582 
/* Send/recv message counters (EFA_ADMIN_GET_STATS_TYPE_MESSAGES) */
struct efa_admin_messages_stats {
	/* bytes sent */
	u64 send_bytes;

	/* send work requests posted */
	u64 send_wrs;

	/* bytes received */
	u64 recv_bytes;

	/* receive work requests consumed */
	u64 recv_wrs;
};
592 
/* RDMA read counters (EFA_ADMIN_GET_STATS_TYPE_RDMA_READ) */
struct efa_admin_rdma_read_stats {
	/* RDMA read work requests posted */
	u64 read_wrs;

	/* bytes requested by RDMA reads */
	u64 read_bytes;

	/* RDMA read work request errors */
	u64 read_wr_err;

	/* bytes received in RDMA read responses */
	u64 read_resp_bytes;
};
602 
/* RDMA write counters (EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE) */
struct efa_admin_rdma_write_stats {
	/* RDMA write work requests posted */
	u64 write_wrs;

	/* bytes sent by RDMA writes */
	u64 write_bytes;

	/* RDMA write work request errors */
	u64 write_wr_err;

	/* bytes received by incoming RDMA writes */
	u64 write_recv_bytes;
};
612 
/* Completion descriptor for get_stats; union member selected by cmd type */
struct efa_admin_acq_get_stats_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		struct efa_admin_basic_stats basic_stats;

		struct efa_admin_messages_stats messages_stats;

		struct efa_admin_rdma_read_stats rdma_read_stats;

		struct efa_admin_rdma_write_stats rdma_write_stats;
	} u;
};
626 
/* Common header shared by the get_feature and set_feature commands */
struct efa_admin_get_set_feature_common_desc {
	/* MBZ */
	u8 reserved0;

	/* as appears in efa_admin_aq_feature_id */
	u8 feature_id;

	/* MBZ */
	u16 reserved16;
};
637 
/* Device attributes (EFA_ADMIN_DEVICE_ATTR feature payload) */
struct efa_admin_feature_device_attr_desc {
	/* Bitmap of efa_admin_aq_feature_id */
	u64 supported_features;

	/* Bitmap of supported page sizes in MR registrations */
	u64 page_size_cap;

	/* firmware version */
	u32 fw_version;

	/* admin command ABI version implemented by the device */
	u32 admin_api_version;

	/* device version */
	u32 device_version;

	/* Bar used for SQ and RQ doorbells */
	u16 db_bar;

	/* Indicates how many bits are used on physical address access */
	u8 phys_addr_width;

	/* Indicates how many bits are used on virtual address access */
	u8 virt_addr_width;

	/*
	 * 0 : rdma_read - If set, RDMA Read is supported on
	 *    TX queues
	 * 1 : rnr_retry - If set, RNR retry is supported on
	 *    modify QP command
	 * 2 : data_polling_128 - If set, 128 bytes data
	 *    polling is supported
	 * 3 : rdma_write - If set, RDMA Write is supported
	 *    on TX queues
	 * 4 : unsolicited_write_recv - If set, unsolicited
	 *    write with imm. receive is supported
	 * 31:5 : reserved - MBZ
	 */
	u32 device_caps;

	/* Max RDMA transfer size in bytes */
	u32 max_rdma_size;

	/* Unique global ID for an EFA device */
	u64 guid;
};
681 
/* Queue attributes (EFA_ADMIN_QUEUE_ATTR feature payload) */
struct efa_admin_feature_queue_attr_desc {
	/* The maximum number of queue pairs supported */
	u32 max_qp;

	/* Maximum number of WQEs per Send Queue */
	u32 max_sq_depth;

	/* Maximum size of data that can be sent inline in a Send WQE */
	u32 inline_buf_size;

	/* Maximum number of buffer descriptors per Recv Queue */
	u32 max_rq_depth;

	/* The maximum number of completion queues supported per VF */
	u32 max_cq;

	/* Maximum number of CQEs per Completion Queue */
	u32 max_cq_depth;

	/* Number of sub-CQs to be created for each CQ */
	u16 sub_cqs_per_cq;

	/* Minimum number of WQEs per SQ */
	u16 min_sq_depth;

	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
	u16 max_wr_send_sges;

	/* Maximum number of SGEs allowed for a single recv WQE */
	u16 max_wr_recv_sges;

	/* The maximum number of memory regions supported */
	u32 max_mr;

	/* The maximum number of pages can be registered */
	u32 max_mr_pages;

	/* The maximum number of protection domains supported */
	u32 max_pd;

	/* The maximum number of address handles supported */
	u32 max_ah;

	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;

	/* Maximum number of SGEs for a single RDMA read/write WQE */
	u16 max_wr_rdma_sges;

	/*
	 * Maximum number of bytes that can be written to SQ between two
	 * consecutive doorbells (in units of 64B). Driver must ensure that only
	 * complete WQEs are written to queue before issuing a doorbell.
	 * Examples: max_tx_batch=16 and WQE size = 64B, means up to 16 WQEs can
	 * be written to SQ between two consecutive doorbells. max_tx_batch=11
	 * and WQE size = 128B, means up to 5 WQEs can be written to SQ between
	 * two consecutive doorbells. Zero means unlimited.
	 */
	u16 max_tx_batch;
};
742 
/* Event queue attributes (EFA_ADMIN_EVENT_QUEUE_ATTR feature payload) */
struct efa_admin_event_queue_attr_desc {
	/* The maximum number of event queues supported */
	u32 max_eq;

	/* Maximum number of EQEs per Event Queue */
	u32 max_eq_depth;

	/* Supported events bitmask */
	u32 event_bitmask;
};
753 
/* AENQ configuration (EFA_ADMIN_AENQ_CONFIG feature payload) */
struct efa_admin_feature_aenq_desc {
	/* bitmask for AENQ groups the device can report */
	u32 supported_groups;

	/* bitmask for AENQ groups to report */
	u32 enabled_groups;
};
761 
/* Network attributes (EFA_ADMIN_NETWORK_ATTR feature payload) */
struct efa_admin_feature_network_attr_desc {
	/* Raw address data in network byte order */
	u8 addr[16];

	/* max packet payload size in bytes */
	u32 mtu;
};
769 
/*
 * Device timing hints (EFA_ADMIN_HW_HINTS feature payload).
 * When hint value is 0, hints capabilities are not supported or driver
 * should use its own predefined value
 */
struct efa_admin_hw_hints {
	/* value in ms */
	u16 mmio_read_timeout;

	/* value in ms */
	u16 driver_watchdog_timeout;

	/* value in ms */
	u16 admin_completion_timeout;

	/* poll interval in ms */
	u16 poll_interval;
};
787 
/* Get feature admin command (EFA_ADMIN_GET_FEATURE) */
struct efa_admin_get_feature_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* control buffer for features whose payload exceeds inline space */
	struct efa_admin_ctrl_buff_info control_buffer;

	/* feature selector */
	struct efa_admin_get_set_feature_common_desc feature_common;

	/* raw inline payload words */
	u32 raw[11];
};
797 
/* Completion for get_feature; union member selected by requested feature */
struct efa_admin_get_feature_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];

		struct efa_admin_feature_device_attr_desc device_attr;

		struct efa_admin_feature_aenq_desc aenq;

		struct efa_admin_feature_network_attr_desc network_attr;

		struct efa_admin_feature_queue_attr_desc queue_attr;

		struct efa_admin_event_queue_attr_desc event_queue_attr;

		struct efa_admin_hw_hints hw_hints;
	} u;
};
817 
/* Set feature admin command (EFA_ADMIN_SET_FEATURE) */
struct efa_admin_set_feature_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* control buffer for features whose payload exceeds inline space */
	struct efa_admin_ctrl_buff_info control_buffer;

	/* feature selector */
	struct efa_admin_get_set_feature_common_desc feature_common;

	union {
		u32 raw[11];

		/* AENQ configuration */
		struct efa_admin_feature_aenq_desc aenq;
	} u;
};
832 
/* Completion descriptor for the set feature command */
struct efa_admin_set_feature_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];
	} u;
};
840 
/* Allocate PD admin command (EFA_ADMIN_ALLOC_PD); no parameters */
struct efa_admin_alloc_pd_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
};
844 
/* Completion descriptor for the allocate PD command */
struct efa_admin_alloc_pd_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};
854 
/* Deallocate PD admin command (EFA_ADMIN_DEALLOC_PD) */
struct efa_admin_dealloc_pd_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};
864 
/* Completion descriptor for the deallocate PD command; carries no payload */
struct efa_admin_dealloc_pd_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
868 
/* Allocate UAR admin command (EFA_ADMIN_ALLOC_UAR); no parameters */
struct efa_admin_alloc_uar_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
};
872 
/* Completion descriptor for the allocate UAR command */
struct efa_admin_alloc_uar_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};
882 
/* Deallocate UAR admin command (EFA_ADMIN_DEALLOC_UAR) */
struct efa_admin_dealloc_uar_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};
892 
/* Completion descriptor for the deallocate UAR command; carries no payload */
struct efa_admin_dealloc_uar_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
896 
/* Create EQ admin command (EFA_ADMIN_CREATE_EQ) */
struct efa_admin_create_eq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* Size of the EQ in entries, must be power of 2 */
	u16 depth;

	/* MSI-X table entry index */
	u8 msix_vec;

	/*
	 * 4:0 : entry_size_words - size of EQ entry in
	 *    32-bit words
	 * 7:5 : reserved - MBZ
	 */
	u8 caps;

	/* EQ ring base address */
	struct efa_common_mem_addr ba;

	/*
	 * Enabled events on this EQ
	 * 0 : completion_events - Enable completion events
	 * 31:1 : reserved - MBZ
	 */
	u32 event_bitmask;

	/* MBZ */
	u32 reserved;
};
926 
/* Completion descriptor for the create EQ command */
struct efa_admin_create_eq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};
936 
/* Destroy EQ admin command (EFA_ADMIN_DESTROY_EQ) */
struct efa_admin_destroy_eq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};
946 
/* Completion descriptor for the destroy EQ command; carries no payload */
struct efa_admin_destroy_eq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
950 
/* asynchronous event notification groups */
enum efa_admin_aenq_group {
	EFA_ADMIN_FATAL_ERROR                       = 1,
	EFA_ADMIN_WARNING                           = 2,
	EFA_ADMIN_NOTIFICATION                      = 3,
	EFA_ADMIN_KEEP_ALIVE                        = 4,
	/* Number of AENQ groups defined above */
	EFA_ADMIN_AENQ_GROUPS_NUM                   = 5,
};
959 
/* Response buffer for an MMIO register read-less request */
struct efa_admin_mmio_req_read_less_resp {
	/* request id of the read request this response answers */
	u16 req_id;

	/* offset of the register that was read */
	u16 reg_off;

	/* value is valid when poll is cleared */
	u32 reg_val;
};
968 
/* Host operating system type, reported via host_info */
enum efa_admin_os_type {
	EFA_ADMIN_OS_LINUX                          = 0,
};
972 
/* Host/driver identification info (EFA_ADMIN_HOST_INFO feature payload) */
struct efa_admin_host_info {
	/* OS distribution string format */
	u8 os_dist_str[128];

	/* Defined in enum efa_admin_os_type */
	u32 os_type;

	/* Kernel version string format */
	u8 kernel_ver_str[32];

	/* Kernel version numeric format */
	u32 kernel_ver;

	/*
	 * 7:0 : driver_module_type
	 * 15:8 : driver_sub_minor
	 * 23:16 : driver_minor
	 * 31:24 : driver_major
	 */
	u32 driver_ver;

	/*
	 * Device's Bus, Device and Function
	 * 2:0 : function
	 * 7:3 : device
	 * 15:8 : bus
	 */
	u16 bdf;

	/*
	 * Spec version
	 * 7:0 : spec_minor
	 * 15:8 : spec_major
	 */
	u16 spec_ver;

	/*
	 * 0 : intree - Intree driver
	 * 1 : gdr - GPUDirect RDMA supported
	 * 31:2 : reserved2
	 */
	u32 flags;
};
1016 
/*
 * Bitfield masks for the flag/caps fields defined above; each section is
 * named after the struct whose field the masks apply to.
 */

/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)
#define EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV_MASK BIT(2)

/* modify_qp_cmd */
#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK               BIT(0)
#define EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK           BIT(1)
#define EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK                   BIT(2)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN_MASK                 BIT(3)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY_MASK BIT(4)
#define EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY_MASK              BIT(5)

/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK       BIT(1)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)

/* reg_mr_resp */
#define EFA_ADMIN_REG_MR_RESP_RECV_IC_ID_MASK               BIT(0)
#define EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID_MASK          BIT(1)
#define EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID_MASK          BIT(2)

/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK           BIT(5)

/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK              BIT(0)

/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK   BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK   BIT(1)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK  BIT(3)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_UNSOLICITED_WRITE_RECV_MASK BIT(4)

/* create_eq_cmd */
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK       GENMASK(4, 0)
#define EFA_ADMIN_CREATE_EQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK      BIT(0)

/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK         GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK           GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK               GENMASK(23, 16)
#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK               GENMASK(31, 24)
#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK                   GENMASK(2, 0)
#define EFA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)
#define EFA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK                 GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK                 GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_INTREE_MASK                     BIT(0)
#define EFA_ADMIN_HOST_INFO_GDR_MASK                        BIT(1)

#endif /* _EFA_ADMIN_CMDS_H_ */