1 /*-
2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3 *
4 * Copyright (c) 2015 - 2023 Intel Corporation
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenFabrics.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #ifndef IRDMA_TYPE_H
36 #define IRDMA_TYPE_H
37
38 #include "osdep.h"
39
40 #include "irdma.h"
41 #include "irdma_user.h"
42 #include "irdma_hmc.h"
43 #include "irdma_uda.h"
44 #include "irdma_ws.h"
45 #include "irdma_pble.h"
46
/*
 * Bit flags selecting which debug trace categories are enabled
 * (OR-ed together into irdma_sc_dev.debug_mask).
 * NOTE(review): bits 0x00080000 - 0x00400000 are not defined here;
 * presumably reserved for future categories -- confirm before reusing.
 */
enum irdma_debug_flag {
	IRDMA_DEBUG_NONE = 0x00000000,
	IRDMA_DEBUG_ERR = 0x00000001,
	IRDMA_DEBUG_INIT = 0x00000002,
	IRDMA_DEBUG_DEV = 0x00000004,
	IRDMA_DEBUG_CM = 0x00000008,
	IRDMA_DEBUG_VERBS = 0x00000010,
	IRDMA_DEBUG_PUDA = 0x00000020,
	IRDMA_DEBUG_ILQ = 0x00000040,
	IRDMA_DEBUG_IEQ = 0x00000080,
	IRDMA_DEBUG_QP = 0x00000100,
	IRDMA_DEBUG_CQ = 0x00000200,
	IRDMA_DEBUG_MR = 0x00000400,
	IRDMA_DEBUG_PBLE = 0x00000800,
	IRDMA_DEBUG_WQE = 0x00001000,
	IRDMA_DEBUG_AEQ = 0x00002000,
	IRDMA_DEBUG_CQP = 0x00004000,
	IRDMA_DEBUG_HMC = 0x00008000,
	IRDMA_DEBUG_USER = 0x00010000,
	IRDMA_DEBUG_VIRT = 0x00020000,
	IRDMA_DEBUG_DCB = 0x00040000,
	IRDMA_DEBUG_CQE = 0x00800000,
	IRDMA_DEBUG_CLNT = 0x01000000,
	IRDMA_DEBUG_WS = 0x02000000,
	IRDMA_DEBUG_STATS = 0x04000000,
	IRDMA_DEBUG_ALL = 0xFFFFFFFF,
};
74
/* Sentinel marking a reserved/unmapped offset. */
#define RSVD_OFFSET 0xFFFFFFFF

/* Encoded host page sizes (values are HW encodings, not byte counts). */
enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M,
	IRDMA_PAGE_SIZE_1G,
};

/* Flags for irdma_terminate_hdr.hdrct: which headers/length are present. */
enum irdma_hdrct_flags {
	DDP_LEN_FLAG = 0x80,
	DDP_HDR_FLAG = 0x40,
	RDMA_HDR_FLAG = 0x20,
};

/* Protocol layer that originated a TERMINATE message. */
enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP = 1,
	LAYER_MPA = 2,
};

/*
 * Error-type field of a TERMINATE message. Values overlap because they
 * are qualified by the originating layer (RDMAP vs DDP).
 */
enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP = 2,
	DDP_CATASTROPHIC = 0,
	DDP_TAGGED_BUF = 1,
	DDP_UNTAGGED_BUF = 2,
	DDP_LLP = 3,
};
103
/* RDMAP-layer terminate error codes (see RFC 5040 error semantics). */
enum irdma_term_rdma_errors {
	RDMAP_INV_STAG = 0x00,
	RDMAP_INV_BOUNDS = 0x01,
	RDMAP_ACCESS = 0x02,
	RDMAP_UNASSOC_STAG = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_RDMAP_VER = 0x05,
	RDMAP_UNEXPECTED_OP = 0x06,
	RDMAP_CATASTROPHIC_LOCAL = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff,
};

/*
 * DDP-layer terminate error codes (see RFC 5041). Tagged and untagged
 * code spaces overlap; they are qualified by irdma_term_error_types.
 */
enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL = 0x00,
	DDP_TAGGED_INV_STAG = 0x00,
	DDP_TAGGED_BOUNDS = 0x01,
	DDP_TAGGED_UNASSOC_STAG = 0x02,
	DDP_TAGGED_TO_WRAP = 0x03,
	DDP_TAGGED_INV_DDP_VER = 0x04,
	DDP_UNTAGGED_INV_QN = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
	DDP_UNTAGGED_INV_MO = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG = 0x05,
	DDP_UNTAGGED_INV_DDP_VER = 0x06,
};

/* MPA-layer terminate error codes (see RFC 5044). */
enum irdma_term_mpa_errors {
	MPA_CLOSED = 0x01,
	MPA_CRC = 0x02,
	MPA_MARKER = 0x03,
	MPA_REQ_RSP = 0x04,
};
139
/*
 * Indices into the gathered HW statistics array.
 * Ordering is part of the stats-buffer layout contract with HW/FW:
 * keep gen1 entries contiguous, and keep IRDMA_HW_STAT_INDEX_MAX_GEN_1
 * equal to the first gen2 index when adding new counters.
 */
enum irdma_hw_stats_index {
	/* gen1 - 32-bit */
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
	IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
	/* gen1 - 64-bit */
	IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26,
	IRDMA_HW_STAT_INDEX_TCPTXSEG = 27,
	IRDMA_HW_STAT_INDEX_RDMARXRDS = 28,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29,
	IRDMA_HW_STAT_INDEX_RDMARXWRS = 30,
	IRDMA_HW_STAT_INDEX_RDMATXRDS = 31,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32,
	IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
	IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
	IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
	IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */

	/* gen2 - 64-bit */
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,

	/* gen2 - 32-bit */
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
	IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
};
196
/* Minimum number of feature entries expected from FW. */
#define IRDMA_MIN_FEATURES 2

/* Indices into irdma_sc_dev.feature_info[], filled from FW feature query. */
enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};

/* Work-scheduler node arbitration policy (see irdma_ws_node_info.prio_type). */
enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR = 1,
	IRDMA_PRIO_STRICT = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};

/* Function type that owns a VSI: virtual function, VM, or physical function. */
enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE,
	IRDMA_PF_TYPE,
};

/* HMC resource-partitioning profiles selectable at CQP init. */
enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL = 3,
};

/* Quad-hash entry types used for connection/packet steering. */
enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};

/* Operations on the quad-hash table. */
enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};

/* Policy for who (HW or FW) handles TCP SYN/RST, secure or not. */
enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};

/* Queue class discriminator (QP SQ/RQ vs control QP). */
enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP,
};
250
struct irdma_sc_dev;
struct irdma_vsi_pestat;

/* DCQCN congestion-control tunables handed to FW via the CQP. */
struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};
265
/*
 * Parameters for irdma_sc_cqp_init(): CQP SQ memory (VA/PA), host context,
 * scratch array, sizes/versions and HMC/protocol policy. Mirrors the
 * corresponding fields of struct irdma_sc_cqp.
 */
struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;
	u64 sq_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	bool en_rem_endpoint_trk:1;
	enum irdma_protocol_used protocol_used;
};

/*
 * On-the-wire TERMINATE message header. layer_etype packs the layer
 * (irdma_term_layers) and error type; hdrct holds irdma_hdrct_flags.
 */
struct irdma_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;
	u8 rsvd;
};
295
/* Raw HW queue elements: fixed-size little-endian quadword buffers. */

/* One CQP send-queue WQE. */
struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

/* One asynchronous event queue entry. */
struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

/* One completion event queue entry. */
struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

/* CQP context image exchanged with HW. */
struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

/* CQ shadow (doorbell shadow) area. */
struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};

/* Accumulated per-device stats, indexed by enum irdma_hw_stats_index. */
struct irdma_dev_hw_stats {
	u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Raw stats snapshot as gathered from HW (same layout as above). */
struct irdma_gather_stats {
	u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
};

/* Describes where one counter lives in the gathered stats buffer. */
struct irdma_hw_stat_map {
	u16 byteoff;	/* byte offset into the gather buffer */
	u8 bitoff;	/* bit offset within that word */
	u64 bitmask;	/* mask of valid counter bits */
};
329
/* DMA buffer and addressing info for a HW statistics gather operation. */
struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u16 hmc_fcn_index;
	u16 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	/* current and previous snapshot VAs, used to compute deltas */
	void *gather_stats_va;
	void *last_gather_stats_va;
};

/* Per-VSI statistics state: accumulated values plus gather bookkeeping. */
struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct OS_TIMER stats_timer;	/* periodic stats-gather timer */
	struct irdma_sc_vsi *vsi;
	spinlock_t lock; /* rdma stats lock */
};

/* MMIO mapping handles and HMC info for one device. */
struct irdma_hw {
	u8 IOMEM *hw_addr;
	u8 IOMEM *priv_hw_addr;
	void *dev_context;
	struct irdma_hmc_info hmc;
};
355
/*
 * Partial-FPDU reassembly state for the IEQ path, plus error/throughput
 * counters. rxlist holds buffers awaiting in-order processing.
 */
struct irdma_pfpdu {
	struct list_head rxlist;
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8 marker_len;
	/* counters */
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};

/* Protection domain control object. */
struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;	/* user/kernel ABI version negotiated at alloc */
};

/* One CQP WQE quantum (same size as irdma_cqp_sq_wqe). */
struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};
389
/*
 * Control QP runtime state: SQ ring, host context, op accounting and the
 * policy knobs copied from irdma_cqp_init_info.
 */
struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;
	u64 host_ctx_pa;
	void *back_cqp;		/* back-pointer to the OS-level cqp wrapper */
	struct irdma_sc_dev *dev;
	int (*process_cqp_sds)(struct irdma_sc_dev *dev,
			       struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	/* posted vs completed CQP op counts, used for progress checking */
	u64 requested_ops;
	atomic64_t completed_ops;
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;	/* current valid-bit phase of the SQ ring */
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	bool en_rem_endpoint_trk:1;
	enum irdma_protocol_used protocol_used;
};
423
/* Asynchronous event queue runtime state. */
struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;		/* current valid-bit phase */
	bool virtual_map:1;	/* queue memory addressed via PBLEs */
};

/* Completion event queue runtime state. */
struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;		/* current valid-bit phase */
	struct irdma_sc_vsi *vsi;
	/* CQs registered to this CEQ */
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_size;
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};
460
/* Completion queue control object; embeds the user-kernel (uk) CQ state. */
struct irdma_sc_cq {
	struct irdma_cq_uk cq_uk;
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	void *pbl_list;
	void *back_cq;		/* back-pointer to the OS-level cq wrapper */
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u8 cq_type;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	bool ceqe_mask:1;
	bool virtual_map:1;
	bool check_overflow:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
};
481
/*
 * Queue pair control object; embeds the user-kernel (uk) QP state and
 * carries the DMA addresses, flush/terminate state and QoS association.
 */
struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;	/* IEQ partial-FPDU state for this QP */
	u32 ieq_qp;
	u8 *q2_buf;
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;	/* linkage on the QoS-level qplist */
};
525
/* Identifies a statistics instance for CQP stats commands. */
struct irdma_stats_inst_info {
	u16 hmc_fn_id;
	u16 stats_idx;
	bool use_hmc_fcn_index:1;
};

/* User-priority map update (CQP "manage UP map" command parameters). */
struct irdma_up_info {
	u8 map[8];		/* per-priority mapping table */
	u8 cnp_up_override;
	u16 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};

#define IRDMA_MAX_WS_NODES	0x3FF
#define IRDMA_WS_NODE_INVALID	0xFFFF

/* Work-scheduler node attributes for CQP WS-node add/modify/delete. */
struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;	/* enum irdma_sched_prio_type */
	u8 tc;
	u8 weight;
};
554
/* Miscellaneous FPM/HMC sizing values parsed from the FW query buffer. */
struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};

/* Default relative bandwidth for WS leaf/parent nodes. */
#define IRDMA_LEAF_DEFAULT_REL_BW	64
#define IRDMA_PARENT_DEFAULT_REL_BW	1

/* Per-user-priority QoS level: its QP list and scheduler attributes. */
struct irdma_qos {
	struct list_head qplist;	/* QPs attached to this QoS level */
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid:1;
};

/* Cached flow-control/PFC configuration check result per priority. */
struct irdma_config_check {
	bool config_ok:1;
	bool lfc_set:1;
	bool pfc_set:1;
	u8 traffic_class;
	u16 qs_handle;
};
587
#define IRDMA_INVALID_STATS_IDX 0xff
/*
 * Virtual station interface (VSI) control object: PUDA ILQ/IEQ resources,
 * per-priority QoS state, stats instance and qset registration callbacks.
 */
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;		/* back-pointer to the OS-level vsi wrapper */
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;
	u32 exception_lan_q;
	u16 mtu;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_inst_alloc:1;
	bool tc_change_pending:1;
	bool mtu_change_pending:1;
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	/* LAN-driver callbacks to (un)register a qset for a WS tc node */
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	struct irdma_config_check cfg_check[IRDMA_MAX_USER_PRIORITY];
	bool tc_print_warning[IRDMA_MAX_TRAFFIC_CLASS];
	u8 qos_rel_bw;
	u8 qos_prio_type;
	u16 stats_idx;	/* IRDMA_INVALID_STATS_IDX when not allocated */
	u8 dscp_map[IRDMA_DSCP_NUM_VAL];
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	bool dscp_mode:1;
};
620
/*
 * Per-device control object: doorbell/register mappings, FPM buffers,
 * the control queues (CQP/CCQ/AEQ/CEQs), HMC state and the WS tree.
 */
struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	/* MMIO doorbell pointers */
	u32 IOMEM *wqe_alloc_db;
	u32 IOMEM *cq_arm_db;
	u32 IOMEM *aeq_alloc_db;
	u32 IOMEM *cqp_db;
	u32 IOMEM *cq_ack_db;
	u32 IOMEM *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr;   /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	/* HW-generation-specific register masks/shifts */
	u64 hw_masks[IRDMA_MAX_MASKS];
	u8 hw_shifts[IRDMA_MAX_SHIFTS];
	const struct irdma_hw_stat_map *hw_stats_map;
	u64 feature_info[IRDMA_MAX_FEATURES];	/* indexed by irdma_feature_type */
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u32 debug_mask;		/* OR of enum irdma_debug_flag bits */
	u16 num_vfs;
	u16 hmc_fn_id;
	bool ceq_valid:1;
	u8 pci_rev;
	/* work-scheduler tree manipulation hooks */
	int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};
661
/* Parameters for a CQ modify/resize CQP command. */
struct irdma_modify_cq_info {
	u64 cq_pa;
	struct irdma_cqe *cq_base;
	u32 cq_size;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool check_overflow:1;
	bool cq_resize:1;
};

/* Parameters for the QP create CQP command. */
struct irdma_create_qp_info {
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool mac_valid:1;
	bool force_lpb:1;	/* force loopback */
	u8 next_iwarp_state;
};

/* Parameters for the QP modify CQP command (iWARP state transitions). */
struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;		/* length of terminate message, if any */
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};
705
/* Decoded CCQ completion: which CQP op finished and its result codes. */
struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;		/* caller cookie posted with the CQP WQE */
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error:1;
};

/* Per-traffic-class QoS attributes received from the LAN driver. */
struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};

/* L2/DCB parameters pushed from the LAN driver (MTU, TC map, qsets). */
struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];	/* user priority -> TC map */
	u8 dscp_map[IRDMA_DSCP_NUM_VAL];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
	bool dscp_mode:1;
};
738
/* Parameters for VSI initialization (see struct irdma_sc_vsi). */
struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

/* Parameters for attaching a stats instance to a VSI. */
struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_stats_inst:1;
};

/* Parameters for irdma_sc_dev initialization: FPM buffers and BAR0. */
struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void IOMEM *bar0;
	u16 max_vfs;
	u16 hmc_fn_id;
	u32 debug_mask;		/* OR of enum irdma_debug_flag bits */
};
769
/*
 * Parameters for CEQ init (see struct irdma_sc_ceq).
 * NOTE(review): ceqe_base is declared u64 * here but the runtime struct
 * uses struct irdma_ceqe * -- presumably equivalent raw-quadword views;
 * confirm before changing either type.
 */
struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
};

/* Parameters for AEQ init (see struct irdma_sc_aeq). */
struct irdma_aeq_init_info {
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map:1;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};

/* Parameters for CCQ (CQP completion queue) init. */
struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};
819
/* UDP (RoCEv2) transport context written into the QP host context. */
struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];	/* IPv6-sized; IPv4 uses one element */
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	/* PSN tracking */
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};
843
/* RoCE protocol context written into the QP host context. */
struct irdma_roce_offload_info {
	u16 p_key;
	u32 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;	/* outbound RDMA read depth */
	u16 ird_size;	/* inbound RDMA read depth */
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETHER_ADDR_LEN];
	u8 rtomin;
};

/* iWARP protocol context written into the QP host context. */
struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u32 err_rq_idx;
	u32 pd_id;
	u16 ord_size;	/* outbound RDMA read depth */
	u16 ird_size;	/* inbound RDMA read depth */
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETHER_ADDR_LEN];
	u8 rtomin;
};
904
/* TCP connection context written into the QP host context (iWARP). */
struct irdma_tcp_offload_info {
	bool ipv4:1;
	bool no_nagle:1;
	bool insert_vlan_tag:1;
	bool time_stamp:1;
	bool drop_ooo_seg:1;	/* drop out-of-order segments */
	bool avoid_stretch_ack:1;
	bool wscale:1;
	bool ignore_tcp_opt:1;
	bool ignore_tcp_uns_opt:1;
	u8 cwnd_inc_limit;
	u8 dup_ack_thresh;
	u8 ttl;
	u8 src_mac_addr_idx;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];	/* IPv6-sized; IPv4 uses one element */
	u32 snd_mss;
	u16 syn_rst_handling;	/* enum irdma_syn_rst_handling */
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 tcp_state;
	u8 snd_wscale;
	u8 rcv_wscale;
	u32 time_stamp_recent;
	u32 time_stamp_age;
	/* standard TCP sequence-space state */
	u32 snd_nxt;
	u32 snd_wnd;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 snd_max;
	u32 snd_una;
	u32 srtt;
	u32 rtt_var;
	u32 ss_thresh;
	u32 cwnd;
	u32 snd_wl1;
	u32 snd_wl2;
	u32 max_snd_window;
	u8 rexmit_thresh;
	u32 local_ipaddr[4];
};
953
/*
 * Aggregates the transport and protocol contexts used to build a QP's
 * host context. Exactly one member of each union is valid, selected by
 * the protocol in use (TCP+iWARP vs UDP+RoCE).
 */
struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u16 stats_idx;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};

/* Decoded asynchronous event queue entry. */
struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;	/* QP or CQ id, depending on qp/cq flags below */
	u32 wqe_idx;
	u16 ae_id;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	/* This flag is used to determine if we should pass the rq tail
	 * in the QP context for FW/HW. It is set when ae_src is rq for GEN1/GEN2
	 * And additionally set for inbound atomic, read and write for GEN3
	 */
	bool err_rq_idx_valid:1;
	u8 q2_data_written;
	u8 ae_src;
};
996
/* Parameters for the allocate-STag CQP command (MR without pages yet). */
struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	bool all_memory:1;	/* zero-based/full-address-space registration */
	u16 hmc_fcn_index;
};

/* Parameters for the memory-window allocate CQP command. */
struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;		/* type-1 wide window */
	bool mw1_bind_dont_vldt_key:1;
};

/* Parameters for registering a non-shared memory region (MR). */
struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u16 hmc_fcn_index;
	bool use_pf_rid:1;
	bool all_memory:1;
};
1038
/* Parameters for a fast-register WQE posted on a QP's SQ. */
struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;		/* first byte offset within the first page */
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool push_wqe:1;
	bool use_hmc_fcn_index:1;
	u16 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
};

/* Parameters for the deallocate-STag CQP command. */
struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;		/* STag belongs to an MR (vs a window) */
	bool dealloc_pbl:1;
};

/* Parameters for registering a shared MR derived from a parent STag. */
struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};
1080
/* Parameters for irdma_sc_qp_init(): queue memory and TPH options. */
struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};

/* Parameters for irdma_sc_cq_init(). */
struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};

/* Parameters for the upload-context CQP command (dump a QP context). */
struct irdma_upload_context_info {
	u64 buf_pa;
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};
1126
/* One local MAC table entry and its index. */
struct irdma_local_mac_entry_info {
	u8 mac_addr[6];
	u16 entry_idx;
};

/* Parameters for the add-ARP-cache-entry CQP command. */
struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETHER_ADDR_LEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent:1;
};

/* Parameters for the APBVT (accelerated port) add/remove CQP command. */
struct irdma_apbvt_info {
	u16 port;
	bool add:1;	/* true = add entry, false = remove */
};

/* Parameters for a quad-hash table add/modify/delete operation. */
struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETHER_ADDR_LEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];		/* IPv6-sized; IPv4 uses one element */
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};

/* Parameters for the manage-push-page CQP command. */
struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;	/* nonzero = free the page instead of allocating */
	u8 push_page_type;
};
1166
/* Parameters for the flush-WQEs CQP command (per-queue error codes). */
struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;		/* flush the send queue */
	bool rq:1;		/* flush the receive queue */
	bool userflushcode:1;	/* use the caller-supplied codes above */
	bool generate_ae:1;
};

/* Parameters for the generate-AE CQP command. */
struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

/* Progress snapshot used to detect a stalled CQP (see irdma_check_cqp_progress). */
struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;	/* completed-op count at last check */
	u32 count;		/* consecutive checks with no progress */
};

/* OS-provided interrupt configuration/enable/disable hooks. */
struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};
1197
/* CCQ (CQP completion queue) control */
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
			bool check_overflow, bool post_sq);
int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
			      struct irdma_ccq_cqe_info *info);
int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
		      struct irdma_ccq_init_info *info);

/* CEQ (completion event queue) control */
int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
		      struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

/* AEQ (asynchronous event queue) control */
int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
		      struct irdma_aeq_init_info *info);
int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
			   struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

/* PD, CQP, QP and CQ control */
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
		      struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
				  struct irdma_ccq_cqe_info *cmpl_info);
int irdma_sc_qp_create(struct irdma_sc_qp *qp,
		       struct irdma_create_qp_info *info, u64 scratch,
		       bool post_sq);
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
			   struct irdma_qp_flush_info *info, u64 scratch,
			   bool post_sq);
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
		       struct irdma_modify_qp_info *info, u64 scratch,
		       bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
					u16 hmc_fn_id, bool post_sq,
					bool poll_registers);

void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
/*
 * cqp_info - argument bundle for one Control QP (CQP) command.
 *
 * Each member of the anonymous union carries the inputs for a single CQP
 * command type; which member is valid is determined by the opcode stored
 * next to this struct (see struct cqp_cmds_info).  Most variants hold the
 * target object pointer, an info struct with the command parameters, and
 * a caller-supplied 'scratch' cookie associated with the WQE.
 */
struct cqp_info {
	union {
		/* QP lifecycle commands */
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		/* CQ lifecycle commands */
		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		/* Memory registration / STag management */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		/* ARP cache maintenance */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		/* Local MAC table maintenance */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		/* Push page management */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		/* QP context upload / HMC function management */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		/* Event queue lifecycle */
		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		/* QP flush and asynchronous event generation */
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		/* FPM (function private memory) query/commit */
		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u16 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u16 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		/* APBVT and quad-hash table entries */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		/* Segment descriptor update */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		/* QP suspend/resume */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		/* Address handle lifecycle */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		/* Multicast group lifecycle */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		/* Statistics instance management and gathering */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_inst_info info;
			u64 scratch;
		} stats_manage;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		/* Work scheduler node management */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		/* User-priority map update */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		/* Query RDMA features/capabilities into a DMA buffer */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;
	} u;
};
1505
/*
 * cqp_cmds_info - one queued CQP command: list linkage, opcode,
 * post flag, and the command's input arguments.
 */
struct cqp_cmds_info {
	struct list_head cqp_cmd_entry;	/* linkage on the pending command list */
	u8 cqp_cmd;			/* CQP command opcode; selects the valid cqp_info union member */
	u8 post_sq;			/* nonzero: post to the CQP SQ when queued */
	struct cqp_info in;		/* input arguments for this command */
};
1512
/* Get the next CQP SQ WQE and report its ring index via @wqe_idx. */
__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
					   u32 *wqe_idx);
1515
1516 /**
1517 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1518 * @cqp: struct for cqp hw
1519 * @scratch: private data for CQP WQE
1520 */
irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp * cqp,u64 scratch)1521 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
1522 {
1523 u32 wqe_idx;
1524
1525 return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1526 }
1527 #endif /* IRDMA_TYPE_H */
1528