/* xref: /linux/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h (revision 6e7fd890f1d6ac83805409e9c346240de2705584) */
1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
2 /*
3  * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
4  */
5 
#ifndef _EFA_ADMIN_CMDS_H_
#define _EFA_ADMIN_CMDS_H_

/* Admin API version implemented by these definitions */
#define EFA_ADMIN_API_VERSION_MAJOR          0
#define EFA_ADMIN_API_VERSION_MINOR          1
11 
/* EFA admin queue opcodes */
enum efa_admin_aq_opcode {
	EFA_ADMIN_CREATE_QP                         = 1,
	EFA_ADMIN_MODIFY_QP                         = 2,
	EFA_ADMIN_QUERY_QP                          = 3,
	EFA_ADMIN_DESTROY_QP                        = 4,
	EFA_ADMIN_CREATE_AH                         = 5,
	EFA_ADMIN_DESTROY_AH                        = 6,
	EFA_ADMIN_REG_MR                            = 7,
	EFA_ADMIN_DEREG_MR                          = 8,
	EFA_ADMIN_CREATE_CQ                         = 9,
	EFA_ADMIN_DESTROY_CQ                        = 10,
	EFA_ADMIN_GET_FEATURE                       = 11,
	EFA_ADMIN_SET_FEATURE                       = 12,
	EFA_ADMIN_GET_STATS                         = 13,
	EFA_ADMIN_ALLOC_PD                          = 14,
	EFA_ADMIN_DEALLOC_PD                        = 15,
	EFA_ADMIN_ALLOC_UAR                         = 16,
	EFA_ADMIN_DEALLOC_UAR                       = 17,
	EFA_ADMIN_CREATE_EQ                         = 18,
	EFA_ADMIN_DESTROY_EQ                        = 19,
	/* Tracks the highest defined opcode; equals the last entry above */
	EFA_ADMIN_MAX_OPCODE                        = 19,
};
35 
/* Feature identifiers used by get_feature/set_feature commands */
enum efa_admin_aq_feature_id {
	EFA_ADMIN_DEVICE_ATTR                       = 1,
	EFA_ADMIN_AENQ_CONFIG                       = 2,
	EFA_ADMIN_NETWORK_ATTR                      = 3,
	EFA_ADMIN_QUEUE_ATTR                        = 4,
	EFA_ADMIN_HW_HINTS                          = 5,
	EFA_ADMIN_HOST_INFO                         = 6,
	EFA_ADMIN_EVENT_QUEUE_ATTR                  = 7,
};
45 
/* QP transport type */
enum efa_admin_qp_type {
	/* Unreliable Datagram */
	EFA_ADMIN_QP_TYPE_UD                        = 1,
	/* Scalable Reliable Datagram */
	EFA_ADMIN_QP_TYPE_SRD                       = 2,
};
53 
/* QP state, following the standard verbs QP state machine */
enum efa_admin_qp_state {
	EFA_ADMIN_QP_STATE_RESET                    = 0,
	EFA_ADMIN_QP_STATE_INIT                     = 1,
	EFA_ADMIN_QP_STATE_RTR                      = 2,
	EFA_ADMIN_QP_STATE_RTS                      = 3,
	EFA_ADMIN_QP_STATE_SQD                      = 4,
	EFA_ADMIN_QP_STATE_SQE                      = 5,
	EFA_ADMIN_QP_STATE_ERR                      = 6,
};
64 
/* Statistics categories selectable in the get_stats command */
enum efa_admin_get_stats_type {
	EFA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
	EFA_ADMIN_GET_STATS_TYPE_MESSAGES           = 1,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_READ          = 2,
	EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE         = 3,
};
71 
/* Scope of a get_stats query: device-wide or a single queue */
enum efa_admin_get_stats_scope {
	EFA_ADMIN_GET_STATS_SCOPE_ALL               = 0,
	EFA_ADMIN_GET_STATS_SCOPE_QUEUE             = 1,
};
76 
/*
 * QP allocation sizes, converted by fabric QueuePair (QP) create command
 * from QP capabilities.
 */
struct efa_admin_qp_alloc_size {
	/* Send descriptor ring size in bytes */
	u32 send_queue_ring_size;

	/* Max number of WQEs that can be outstanding on send queue. */
	u32 send_queue_depth;

	/*
	 * Recv descriptor ring size in bytes, sufficient for user-provided
	 * number of WQEs
	 */
	u32 recv_queue_ring_size;

	/* Max number of WQEs that can be outstanding on recv queue */
	u32 recv_queue_depth;
};
97 
/* Create QP command (EFA_ADMIN_CREATE_QP) */
struct efa_admin_create_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain associated with this QP */
	u16 pd;

	/* QP type, as defined in enum efa_admin_qp_type */
	u8 qp_type;

	/*
	 * 0 : sq_virt - If set, SQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 1 : rq_virt - If set, RQ ring base address is
	 *    virtual (IOVA returned by MR registration)
	 * 2 : unsolicited_write_recv - If set, work requests
	 *    will not be consumed for incoming RDMA write with
	 *    immediate
	 * 7:3 : reserved - MBZ
	 */
	u8 flags;

	/*
	 * Send queue (SQ) ring base physical address. This field is not
	 * used if this is a Low Latency Queue(LLQ).
	 */
	u64 sq_base_addr;

	/* Receive queue (RQ) ring base address. */
	u64 rq_base_addr;

	/* Index of CQ to be associated with Send Queue completions */
	u32 send_cq_idx;

	/* Index of CQ to be associated with Recv Queue completions */
	u32 recv_cq_idx;

	/*
	 * Memory registration key for the SQ ring, used only when not in
	 * LLQ mode and base address is virtual
	 */
	u32 sq_l_key;

	/*
	 * Memory registration key for the RQ ring, used only when base
	 * address is virtual
	 */
	u32 rq_l_key;

	/* Requested QP allocation sizes */
	struct efa_admin_qp_alloc_size qp_alloc_size;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;

	/* MBZ */
	u32 reserved2;
};
159 
/* Create QP response */
struct efa_admin_create_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * Opaque handle to be used for consequent admin operations on the
	 * QP
	 */
	u32 qp_handle;

	/*
	 * QP number in the given EFA virtual device. Least-significant bits (as
	 * needed according to max_qp) carry unique QP ID
	 */
	u16 qp_num;

	/* MBZ */
	u16 reserved;

	/* Index of sub-CQ for Send Queue completions */
	u16 send_sub_cq_idx;

	/* Index of sub-CQ for Receive Queue completions */
	u16 recv_sub_cq_idx;

	/* SQ doorbell address, as offset to PCIe DB BAR */
	u32 sq_db_offset;

	/* RQ doorbell address, as offset to PCIe DB BAR */
	u32 rq_db_offset;

	/*
	 * low latency send queue ring base address as an offset to PCIe
	 * MMIO LLQ_MEM BAR
	 */
	u32 llq_descriptors_offset;
};
197 
/* Modify QP command (EFA_ADMIN_MODIFY_QP) */
struct efa_admin_modify_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * Mask indicating which fields should be updated
	 * 0 : qp_state
	 * 1 : cur_qp_state
	 * 2 : qkey
	 * 3 : sq_psn
	 * 4 : sq_drained_async_notify
	 * 5 : rnr_retry
	 * 31:6 : reserved
	 */
	u32 modify_mask;

	/* QP handle returned by create_qp command */
	u32 qp_handle;

	/* QP state, as defined in enum efa_admin_qp_state */
	u32 qp_state;

	/* Override current QP state (before applying the transition) */
	u32 cur_qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Enable async notification when SQ is drained */
	u8 sq_drained_async_notify;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};
238 
/* Modify QP response: completion status only */
struct efa_admin_modify_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
243 
/* Query QP command (EFA_ADMIN_QUERY_QP) */
struct efa_admin_query_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};
251 
/* Query QP response */
struct efa_admin_query_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* QP state, as defined in enum efa_admin_qp_state */
	u32 qp_state;

	/* QKey */
	u32 qkey;

	/* SQ PSN */
	u32 sq_psn;

	/* Indicates that draining is in progress */
	u8 sq_draining;

	/* Number of RNR retries (valid only for SRD QPs) */
	u8 rnr_retry;

	/* MBZ */
	u16 reserved2;
};
274 
/* Destroy QP command (EFA_ADMIN_DESTROY_QP) */
struct efa_admin_destroy_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* QP handle returned by create_qp command */
	u32 qp_handle;
};

/* Destroy QP response: completion status only */
struct efa_admin_destroy_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
287 
/*
 * Create Address Handle command parameters. Must not be called more than
 * once for the same destination
 */
struct efa_admin_create_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Destination address in network byte order */
	u8 dest_addr[16];

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

/* Create Address Handle response */
struct efa_admin_create_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* MBZ */
	u16 reserved;
};
316 
/* Destroy Address Handle command (EFA_ADMIN_DESTROY_AH) */
struct efa_admin_destroy_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Target interface address handle (opaque) */
	u16 ah;

	/* PD number */
	u16 pd;
};

/* Destroy Address Handle response: completion status only */
struct efa_admin_destroy_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
332 
/*
 * Registration of MemoryRegion, required for QP working with Virtual
 * Addresses. In standard verbs semantics, region length is limited to 2GB
 * space, but EFA offers larger MR support for large memory space, to ease
 * on users working with very large datasets (i.e. full GPU memory mapping).
 */
struct efa_admin_reg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Protection Domain */
	u16 pd;

	/* MBZ */
	u16 reserved16_w1;

	/* Physical Buffer List, each element is page-aligned. */
	union {
		/*
		 * Inline array of guest-physical page addresses of user
		 * memory pages (optimization for short region
		 * registrations)
		 */
		u64 inline_pbl_array[4];

		/* points to PBL (direct or indirect, chained if needed) */
		struct efa_admin_ctrl_buff_info pbl;
	} pbl;

	/* Memory region length, in bytes. */
	u64 mr_length;

	/*
	 * flags and page size
	 * 4:0 : phys_page_size_shift - page size is (1 <<
	 *    phys_page_size_shift). Page size is used for
	 *    building the Virtual to Physical address mapping
	 * 6:5 : reserved - MBZ
	 * 7 : mem_addr_phy_mode_en - Enable bit for physical
	 *    memory registration (no translation), can be used
	 *    only by privileged clients. If set, PBL must
	 *    contain a single entry.
	 */
	u8 flags;

	/*
	 * permissions
	 * 0 : local_write_enable - Local write permissions:
	 *    must be set for RQ buffers and buffers posted for
	 *    RDMA Read requests
	 * 1 : remote_write_enable - Remote write
	 *    permissions: must be set to enable RDMA write to
	 *    the region
	 * 2 : remote_read_enable - Remote read permissions:
	 *    must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
	 */
	u8 permissions;

	/* MBZ */
	u16 reserved16_w5;

	/* number of pages in PBL (redundant, could be calculated) */
	u32 page_num;

	/*
	 * IO Virtual Address associated with this MR. If
	 * mem_addr_phy_mode_en is set, contains the physical address of
	 * the region.
	 */
	u64 iova;
};
405 
/* Register MR response */
struct efa_admin_reg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;

	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;

	/*
	 * Mask indicating which fields have valid values
	 * 0 : recv_ic_id
	 * 1 : rdma_read_ic_id
	 * 2 : rdma_recv_ic_id
	 */
	u8 validity;

	/*
	 * Physical interconnect used by the device to reach the MR for receive
	 * operation
	 */
	u8 recv_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * read operation
	 */
	u8 rdma_read_ic_id;

	/*
	 * Physical interconnect used by the device to reach the MR for RDMA
	 * write receive
	 */
	u8 rdma_recv_ic_id;
};
448 
/* Deregister MR command (EFA_ADMIN_DEREG_MR) */
struct efa_admin_dereg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* L_Key, memory region's l_key */
	u32 l_key;
};

/* Deregister MR response: completion status only */
struct efa_admin_dereg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
461 
/* Create CQ command (EFA_ADMIN_CREATE_CQ) */
struct efa_admin_create_cq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/*
	 * 4:0 : reserved5 - MBZ
	 * 5 : interrupt_mode_enabled - if set, cq operates
	 *    in interrupt mode (i.e. CQ events and EQ elements
	 *    are generated), otherwise - polling
	 * 6 : virt - If set, ring base address is virtual
	 *    (IOVA returned by MR registration)
	 * 7 : reserved6 - MBZ
	 */
	u8 cq_caps_1;

	/*
	 * 4:0 : cq_entry_size_words - size of CQ entry in
	 *    32-bit words, valid values: 4, 8.
	 * 5 : set_src_addr - If set, source address will be
	 *    filled on RX completions from unknown senders.
	 *    Requires 8 words CQ entry size.
	 * 7:6 : reserved7 - MBZ
	 */
	u8 cq_caps_2;

	/* completion queue depth in # of entries. must be power of 2 */
	u16 cq_depth;

	/* EQ number assigned to this cq */
	u16 eqn;

	/* MBZ */
	u16 reserved;

	/*
	 * CQ ring base address, virtual or physical depending on 'virt'
	 * flag
	 */
	struct efa_common_mem_addr cq_ba;

	/*
	 * Memory registration key for the ring, used only when base
	 * address is virtual
	 */
	u32 l_key;

	/*
	 * number of sub cqs - must be equal to sub_cqs_per_cq of queue
	 * attributes.
	 */
	u16 num_sub_cqs;

	/* UAR number */
	u16 uar;
};
516 
/* Create CQ response */
struct efa_admin_create_cq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* Index identifying the created CQ */
	u16 cq_idx;

	/* actual cq depth in number of entries */
	u16 cq_actual_depth;

	/* CQ doorbell address, as offset to PCIe DB BAR */
	u32 db_offset;

	/*
	 * 0 : db_valid - If set, doorbell offset is valid.
	 *    Always set when interrupts are requested.
	 */
	u32 flags;
};
534 
/* Destroy CQ command (EFA_ADMIN_DESTROY_CQ) */
struct efa_admin_destroy_cq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;

	/* Index of the CQ to destroy, as returned by create_cq */
	u16 cq_idx;

	/* MBZ */
	u16 reserved1;
};

/* Destroy CQ response: completion status only */
struct efa_admin_destroy_cq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
547 
/*
 * EFA AQ Get Statistics command. Extended statistics are placed in control
 * buffer pointed by AQ entry
 */
struct efa_admin_aq_get_stats_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	union {
		/* command specific inline data */
		u32 inline_data_w1[3];

		/* points to a buffer for extended statistics */
		struct efa_admin_ctrl_buff_info control_buffer;
	} u;

	/* stats type as defined in enum efa_admin_get_stats_type */
	u8 type;

	/* stats scope defined in enum efa_admin_get_stats_scope */
	u8 scope;

	/* queue selector when scope is EFA_ADMIN_GET_STATS_SCOPE_QUEUE */
	u16 scope_modifier;
};
570 
/* Basic traffic counters (EFA_ADMIN_GET_STATS_TYPE_BASIC) */
struct efa_admin_basic_stats {
	/* Bytes transmitted */
	u64 tx_bytes;

	/* Packets transmitted */
	u64 tx_pkts;

	/* Bytes received */
	u64 rx_bytes;

	/* Packets received */
	u64 rx_pkts;

	/* Received packets dropped */
	u64 rx_drops;
};

/* Message counters (EFA_ADMIN_GET_STATS_TYPE_MESSAGES) */
struct efa_admin_messages_stats {
	/* Bytes sent */
	u64 send_bytes;

	/* Send work requests */
	u64 send_wrs;

	/* Bytes received */
	u64 recv_bytes;

	/* Receive work requests */
	u64 recv_wrs;
};

/* RDMA read counters (EFA_ADMIN_GET_STATS_TYPE_RDMA_READ) */
struct efa_admin_rdma_read_stats {
	/* RDMA read work requests */
	u64 read_wrs;

	/* Bytes requested by RDMA reads */
	u64 read_bytes;

	/* RDMA read work request errors */
	u64 read_wr_err;

	/* Bytes received in RDMA read responses */
	u64 read_resp_bytes;
};

/* RDMA write counters (EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE) */
struct efa_admin_rdma_write_stats {
	/* RDMA write work requests */
	u64 write_wrs;

	/* Bytes sent by RDMA writes */
	u64 write_bytes;

	/* RDMA write work request errors */
	u64 write_wr_err;

	/* Bytes received by incoming RDMA writes */
	u64 write_recv_bytes;
};
612 
/* Get statistics response; active union member is selected by cmd 'type' */
struct efa_admin_acq_get_stats_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		struct efa_admin_basic_stats basic_stats;

		struct efa_admin_messages_stats messages_stats;

		struct efa_admin_rdma_read_stats rdma_read_stats;

		struct efa_admin_rdma_write_stats rdma_write_stats;
	} u;
};
626 
/* Descriptor shared by get_feature and set_feature commands */
struct efa_admin_get_set_feature_common_desc {
	/* MBZ */
	u8 reserved0;

	/* as appears in efa_admin_aq_feature_id */
	u8 feature_id;

	/* MBZ */
	u16 reserved16;
};
637 
/* Device attributes (feature EFA_ADMIN_DEVICE_ATTR) */
struct efa_admin_feature_device_attr_desc {
	/* Bitmap of efa_admin_aq_feature_id */
	u64 supported_features;

	/* Bitmap of supported page sizes in MR registrations */
	u64 page_size_cap;

	/* Device firmware version */
	u32 fw_version;

	/* Admin API version supported by the device */
	u32 admin_api_version;

	/* Device hardware version */
	u32 device_version;

	/* Bar used for SQ and RQ doorbells */
	u16 db_bar;

	/* Indicates how many bits are used on physical address access */
	u8 phys_addr_width;

	/* Indicates how many bits are used on virtual address access */
	u8 virt_addr_width;

	/*
	 * 0 : rdma_read - If set, RDMA Read is supported on
	 *    TX queues
	 * 1 : rnr_retry - If set, RNR retry is supported on
	 *    modify QP command
	 * 2 : data_polling_128 - If set, 128 bytes data
	 *    polling is supported
	 * 3 : rdma_write - If set, RDMA Write is supported
	 *    on TX queues
	 * 4 : unsolicited_write_recv - If set, unsolicited
	 *    write with imm. receive is supported
	 * 31:5 : reserved - MBZ
	 */
	u32 device_caps;

	/* Max RDMA transfer size in bytes */
	u32 max_rdma_size;
};
678 
/* Queue attributes (feature EFA_ADMIN_QUEUE_ATTR) */
struct efa_admin_feature_queue_attr_desc {
	/* The maximum number of queue pairs supported */
	u32 max_qp;

	/* Maximum number of WQEs per Send Queue */
	u32 max_sq_depth;

	/* Maximum size of data that can be sent inline in a Send WQE */
	u32 inline_buf_size;

	/* Maximum number of buffer descriptors per Recv Queue */
	u32 max_rq_depth;

	/* The maximum number of completion queues supported per VF */
	u32 max_cq;

	/* Maximum number of CQEs per Completion Queue */
	u32 max_cq_depth;

	/* Number of sub-CQs to be created for each CQ */
	u16 sub_cqs_per_cq;

	/* Minimum number of WQEs per SQ */
	u16 min_sq_depth;

	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
	u16 max_wr_send_sges;

	/* Maximum number of SGEs allowed for a single recv WQE */
	u16 max_wr_recv_sges;

	/* The maximum number of memory regions supported */
	u32 max_mr;

	/* The maximum number of pages can be registered */
	u32 max_mr_pages;

	/* The maximum number of protection domains supported */
	u32 max_pd;

	/* The maximum number of address handles supported */
	u32 max_ah;

	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;

	/* Maximum number of SGEs for a single RDMA read/write WQE */
	u16 max_wr_rdma_sges;

	/*
	 * Maximum number of bytes that can be written to SQ between two
	 * consecutive doorbells (in units of 64B). Driver must ensure that only
	 * complete WQEs are written to queue before issuing a doorbell.
	 * Examples: max_tx_batch=16 and WQE size = 64B, means up to 16 WQEs can
	 * be written to SQ between two consecutive doorbells. max_tx_batch=11
	 * and WQE size = 128B, means up to 5 WQEs can be written to SQ between
	 * two consecutive doorbells. Zero means unlimited.
	 */
	u16 max_tx_batch;
};
739 
/* Event queue attributes (feature EFA_ADMIN_EVENT_QUEUE_ATTR) */
struct efa_admin_event_queue_attr_desc {
	/* The maximum number of event queues supported */
	u32 max_eq;

	/* Maximum number of EQEs per Event Queue */
	u32 max_eq_depth;

	/* Supported events bitmask */
	u32 event_bitmask;
};

/* AENQ configuration (feature EFA_ADMIN_AENQ_CONFIG) */
struct efa_admin_feature_aenq_desc {
	/* bitmask for AENQ groups the device can report */
	u32 supported_groups;

	/* bitmask for AENQ groups to report */
	u32 enabled_groups;
};

/* Network attributes (feature EFA_ADMIN_NETWORK_ATTR) */
struct efa_admin_feature_network_attr_desc {
	/* Raw address data in network byte order */
	u8 addr[16];

	/* max packet payload size in bytes */
	u32 mtu;
};
766 
/*
 * Device-provided timing hints (feature EFA_ADMIN_HW_HINTS).
 * When hint value is 0, hints capabilities are not supported or driver
 * should use its own predefined value
 */
struct efa_admin_hw_hints {
	/* value in ms */
	u16 mmio_read_timeout;

	/* value in ms */
	u16 driver_watchdog_timeout;

	/* value in ms */
	u16 admin_completion_timeout;

	/* poll interval in ms */
	u16 poll_interval;
};
784 
/* Get feature command (EFA_ADMIN_GET_FEATURE) */
struct efa_admin_get_feature_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* Optional control buffer for out-of-band feature data */
	struct efa_admin_ctrl_buff_info control_buffer;

	/* Selects the feature to query (see efa_admin_aq_feature_id) */
	struct efa_admin_get_set_feature_common_desc feature_common;

	/* Raw command payload */
	u32 raw[11];
};

/* Get feature response; active union member depends on feature_id */
struct efa_admin_get_feature_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];

		struct efa_admin_feature_device_attr_desc device_attr;

		struct efa_admin_feature_aenq_desc aenq;

		struct efa_admin_feature_network_attr_desc network_attr;

		struct efa_admin_feature_queue_attr_desc queue_attr;

		struct efa_admin_event_queue_attr_desc event_queue_attr;

		struct efa_admin_hw_hints hw_hints;
	} u;
};
814 
/* Set feature command (EFA_ADMIN_SET_FEATURE) */
struct efa_admin_set_feature_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* Optional control buffer for out-of-band feature data */
	struct efa_admin_ctrl_buff_info control_buffer;

	/* Selects the feature to set (see efa_admin_aq_feature_id) */
	struct efa_admin_get_set_feature_common_desc feature_common;

	union {
		u32 raw[11];

		/* AENQ configuration */
		struct efa_admin_feature_aenq_desc aenq;
	} u;
};

/* Set feature response */
struct efa_admin_set_feature_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	union {
		u32 raw[14];
	} u;
};
837 
/* Allocate Protection Domain command (EFA_ADMIN_ALLOC_PD) */
struct efa_admin_alloc_pd_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

/* Allocate Protection Domain response */
struct efa_admin_alloc_pd_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

/* Deallocate Protection Domain command (EFA_ADMIN_DEALLOC_PD) */
struct efa_admin_dealloc_pd_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* PD number */
	u16 pd;

	/* MBZ */
	u16 reserved;
};

/* Deallocate Protection Domain response: completion status only */
struct efa_admin_dealloc_pd_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
865 
/* Allocate User Access Region command (EFA_ADMIN_ALLOC_UAR) */
struct efa_admin_alloc_uar_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
};

/* Allocate User Access Region response */
struct efa_admin_alloc_uar_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

/* Deallocate User Access Region command (EFA_ADMIN_DEALLOC_UAR) */
struct efa_admin_dealloc_uar_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* UAR number */
	u16 uar;

	/* MBZ */
	u16 reserved;
};

/* Deallocate User Access Region response: completion status only */
struct efa_admin_dealloc_uar_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
893 
/* Create Event Queue command (EFA_ADMIN_CREATE_EQ) */
struct efa_admin_create_eq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* Size of the EQ in entries, must be power of 2 */
	u16 depth;

	/* MSI-X table entry index */
	u8 msix_vec;

	/*
	 * 4:0 : entry_size_words - size of EQ entry in
	 *    32-bit words
	 * 7:5 : reserved - MBZ
	 */
	u8 caps;

	/* EQ ring base address */
	struct efa_common_mem_addr ba;

	/*
	 * Enabled events on this EQ
	 * 0 : completion_events - Enable completion events
	 * 31:1 : reserved - MBZ
	 */
	u32 event_bitmask;

	/* MBZ */
	u32 reserved;
};

/* Create Event Queue response */
struct efa_admin_create_eq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

/* Destroy Event Queue command (EFA_ADMIN_DESTROY_EQ) */
struct efa_admin_destroy_eq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;

	/* EQ number */
	u16 eqn;

	/* MBZ */
	u16 reserved;
};

/* Destroy Event Queue response: completion status only */
struct efa_admin_destroy_eq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
947 
/* asynchronous event notification groups */
enum efa_admin_aenq_group {
	EFA_ADMIN_FATAL_ERROR                       = 1,
	EFA_ADMIN_WARNING                           = 2,
	EFA_ADMIN_NOTIFICATION                      = 3,
	EFA_ADMIN_KEEP_ALIVE                        = 4,
	/* Number of defined AENQ groups */
	EFA_ADMIN_AENQ_GROUPS_NUM                   = 5,
};

/* Response layout for the read-less MMIO register read mechanism */
struct efa_admin_mmio_req_read_less_resp {
	/* Request identifier echoed by the device */
	u16 req_id;

	/* Offset of the register that was read */
	u16 reg_off;

	/* value is valid when poll is cleared */
	u32 reg_val;
};

/* Host operating system type, reported via host_info */
enum efa_admin_os_type {
	EFA_ADMIN_OS_LINUX                          = 0,
};
969 
/* Host/driver identification reported to the device (feature EFA_ADMIN_HOST_INFO) */
struct efa_admin_host_info {
	/* OS distribution string format */
	u8 os_dist_str[128];

	/* Defined in enum efa_admin_os_type */
	u32 os_type;

	/* Kernel version string format */
	u8 kernel_ver_str[32];

	/* Kernel version numeric format */
	u32 kernel_ver;

	/*
	 * 7:0 : driver_module_type
	 * 15:8 : driver_sub_minor
	 * 23:16 : driver_minor
	 * 31:24 : driver_major
	 */
	u32 driver_ver;

	/*
	 * Device's Bus, Device and Function
	 * 2:0 : function
	 * 7:3 : device
	 * 15:8 : bus
	 */
	u16 bdf;

	/*
	 * Spec version
	 * 7:0 : spec_minor
	 * 15:8 : spec_major
	 */
	u16 spec_ver;

	/*
	 * 0 : intree - Intree driver
	 * 1 : gdr - GPUDirect RDMA supported
	 * 31:2 : reserved2
	 */
	u32 flags;
};
1013 
/*
 * Bitfield masks for the packed u8/u16/u32 fields above; each group
 * matches the bit layout documented on the corresponding struct field.
 */

/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)
#define EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV_MASK BIT(2)

/* modify_qp_cmd */
#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK               BIT(0)
#define EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK           BIT(1)
#define EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK                   BIT(2)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN_MASK                 BIT(3)
#define EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY_MASK BIT(4)
#define EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY_MASK              BIT(5)

/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK       BIT(1)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)

/* reg_mr_resp */
#define EFA_ADMIN_REG_MR_RESP_RECV_IC_ID_MASK               BIT(0)
#define EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID_MASK          BIT(1)
#define EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID_MASK          BIT(2)

/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK           BIT(5)

/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK              BIT(0)

/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK   BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK   BIT(1)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK  BIT(3)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_UNSOLICITED_WRITE_RECV_MASK BIT(4)

/* create_eq_cmd */
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK       GENMASK(4, 0)
#define EFA_ADMIN_CREATE_EQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK      BIT(0)

/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK         GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK           GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK               GENMASK(23, 16)
#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK               GENMASK(31, 24)
#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK                   GENMASK(2, 0)
#define EFA_ADMIN_HOST_INFO_DEVICE_MASK                     GENMASK(7, 3)
#define EFA_ADMIN_HOST_INFO_BUS_MASK                        GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK                 GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK                 GENMASK(15, 8)
#define EFA_ADMIN_HOST_INFO_INTREE_MASK                     BIT(0)
#define EFA_ADMIN_HOST_INFO_GDR_MASK                        BIT(1)
1072 
1073 #endif /* _EFA_ADMIN_CMDS_H_ */
1074