1 /*
2 * Copyright (c) 2018-2019 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #ifndef __ECORE_RDMA_H__
29 #define __ECORE_RDMA_H__
30
31 #include "ecore_status.h"
32 #include "ecore.h"
33 #include "ecore_hsi_common.h"
34 #include "ecore_proto_if.h"
35 #include "ecore_rdma_api.h"
36 #include "ecore_dev_api.h"
37 #include "ecore_roce.h"
38 #include "ecore_iwarp.h"
39
40 /* Constants */
41
/* HW/FW RoCE Limitations (internal. For external see ecore_rdma_api.h) */
#define ECORE_RDMA_MAX_FMR (RDMA_MAX_TIDS) /* 2^17 - 1 */
#define ECORE_RDMA_MAX_P_KEY (1)
#define ECORE_RDMA_MAX_WQE (0x7FFF) /* 2^15 - 1 */
#define ECORE_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF) /* 2^15 - 1 */
#define ECORE_RDMA_PAGE_SIZE_CAPS (0xFFFFF000) /* TODO: > 4k?! */
#define ECORE_RDMA_ACK_DELAY (15) /* 131 milliseconds */
#define ECORE_RDMA_MAX_MR_SIZE (0x10000000000ULL) /* 2^40 */
#define ECORE_RDMA_MAX_CQS (RDMA_MAX_CQS) /* 64k */
#define ECORE_RDMA_MAX_MRS (RDMA_MAX_TIDS) /* 2^17 - 1 */
/* Add 1 for header element */
#define ECORE_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define ECORE_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
#define ECORE_RDMA_SRQ_WQE_ELEM_SIZE (16)
#define ECORE_RDMA_MAX_SRQS (32 * 1024) /* 32k */

/* Configurable */
/* Max CQE is derived from u16/32 size, halved and decremented by 1 to handle
 * wrap properly and then decremented by 1 again. The latter decrement comes
 * from a requirement to create a chain that is bigger than what the user
 * requested by one:
 * The CQE size is 32 bytes but the FW writes in chunks of 64
 * bytes, for performance purposes. Allocating an extra entry and telling the
 * FW we have less prevents overwriting the first entry in case of a wrap i.e.
 * when the FW writes the last entry and the application hasn't read the first
 * one.
 */
#define ECORE_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
#define ECORE_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)

#define ECORE_RDMA_MAX_XRC_SRQS (RDMA_MAX_XRC_SRQS)

/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
 * SRQs is much smaller so there's no need to have that many domains.
 */
#define ECORE_RDMA_MAX_XRCDS (OSAL_ROUNDUP_POW_OF_TWO(RDMA_MAX_XRC_SRQS))

/* True when the hwfn's RDMA protocol is iWARP / RoCE, respectively. */
#define IS_IWARP(_p_hwfn) (_p_hwfn->p_rdma_info->proto == PROTOCOLID_IWARP)
#define IS_ROCE(_p_hwfn) (_p_hwfn->p_rdma_info->proto == PROTOCOLID_ROCE)
81
/* State of a CQ's toggle bit (tracked in ecore_rdma_info.toggle_bits).
 * NOTE(review): exact toggle semantics (flip on ring wrap?) are not visible
 * in this header — confirm against the CQ handling code.
 */
enum ecore_rdma_toggle_bit {
	ECORE_RDMA_TOGGLE_BIT_CLEAR = 0,
	ECORE_RDMA_TOGGLE_BIT_SET = 1
};
86
/* @@@TBD Currently we support only affiliated events
 * enum ecore_rdma_unaffiliated_event_code {
 *	ECORE_RDMA_PORT_ACTIVE, // Link Up
 *	ECORE_RDMA_PORT_CHANGED, // SGID table has changed
 *	ECORE_RDMA_LOCAL_CATASTROPHIC_ERR, // Fatal device error
 *	ECORE_RDMA_PORT_ERR, // Link down
 * };
 */
95
#define QEDR_MAX_BMAP_NAME (10)
/* Simple ID allocator: a bitmap of max_count bits plus a short name used
 * in diagnostics (see ecore_rdma_bmap_alloc/ecore_rdma_bmap_alloc_id below).
 */
struct ecore_bmap {
	u32 max_count;                 /* number of valid bits in 'bitmap' */
	unsigned long *bitmap;         /* the bit array itself */
	char name[QEDR_MAX_BMAP_NAME]; /* label for debug/error prints */
};
102
/* Per-hwfn RDMA bookkeeping: resource-ID bitmaps, device/port attribute
 * structures, statistics, and the active protocol (RoCE vs. iWARP — see the
 * IS_ROCE()/IS_IWARP() macros above).
 */
struct ecore_rdma_info {
	osal_spinlock_t lock; /* NOTE(review): presumably guards the bitmaps
			       * below — confirm in the implementation.
			       */

	struct ecore_bmap cq_map;       /* CQ IDs */
	struct ecore_bmap pd_map;       /* protection domain IDs */
	struct ecore_bmap xrcd_map;     /* XRC domain IDs */
	struct ecore_bmap tid_map;      /* TIDs (MR handles) */
	struct ecore_bmap srq_map;      /* SRQ IDs */
	struct ecore_bmap xrc_srq_map;  /* XRC SRQ IDs */
	struct ecore_bmap qp_map;       /* QP IDs */
	struct ecore_bmap tcp_cid_map;  /* TCP connection IDs (iWARP) */
	struct ecore_bmap cid_map;      /* connection IDs */
	struct ecore_bmap dpi_map;      /* doorbell page indices */
	struct ecore_bmap toggle_bits;  /* per-CQ toggle-bit state */
	struct ecore_rdma_events events; /* affiliated event callbacks */
	struct ecore_rdma_device *dev;  /* device attributes/capabilities */
	struct ecore_rdma_port *port;   /* port state */
	u32 last_tid;
	u8 num_cnqs;
	struct rdma_sent_stats rdma_sent_pstats;
	struct rdma_rcv_stats rdma_rcv_tstats;
	u32 num_qps;
	u32 num_mrs;
	u32 num_srqs;
	u16 srq_id_offset;
	u16 queue_zone_base;
	u16 max_queue_zones;

	struct ecore_rdma_glob_cfg glob_cfg;

	enum protocol_type proto; /* PROTOCOLID_ROCE or PROTOCOLID_IWARP */
	struct ecore_roce_info roce;
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_info iwarp;
#endif
	bool active;
	int ref_cnt;
};
141
/* CQ producer values tracked separately for the requester (req) and
 * responder (resp) flows of a QP.
 */
struct cq_prod {
	u32 req;
	u32 resp;
};
146
/* Driver-side Queue Pair state: identifiers, negotiated attributes,
 * addressing info, and the DMA resources backing the send/receive queues.
 */
struct ecore_rdma_qp {
	struct regpair qp_handle;
	struct regpair qp_handle_async;
	u32 qpid; /* iwarp: may differ from icid */
	u16 icid;
	u16 qp_idx;
	enum ecore_roce_qp_state cur_state;
	enum ecore_rdma_qp_type qp_type;
#ifdef CONFIG_ECORE_IWARP
	enum ecore_iwarp_qp_state iwarp_state;
#endif
	bool use_srq;
	bool signal_all;
	bool fmr_and_reserved_lkey;

	/* access/feature enables for traffic arriving at this QP */
	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;

	u16 pd; /* Protection domain */
	u16 pkey; /* Primary P_key index */
	u32 dest_qp;
	u16 mtu;
	u16 srq_id;
	u8 traffic_class_tos; /* IPv6/GRH traffic class; IPv4 TOS */
	u8 hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u16 dpi;
	u32 flow_label; /* ignored in IPv4 */
	u16 vlan_id;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
	enum roce_mode roce_mode;
	u16 udp_src_port; /* RoCEv2 only */
	u8 stats_queue;

	/* requester */
	u8 max_rd_atomic_req;
	u32 sq_psn;
	u16 sq_cq_id; /* The cq to be associated with the send queue */
	u16 sq_num_pages;
	dma_addr_t sq_pbl_ptr;
	void *orq; /* NOTE(review): presumably the outstanding-read queue
		    * buffer for requester reads — confirm in roce/iwarp code.
		    */
	dma_addr_t orq_phys_addr;
	u8 orq_num_pages;
	bool req_offloaded;
	bool has_req;

	/* responder */
	u8 max_rd_atomic_resp;
	u32 rq_psn;
	u16 rq_cq_id; /* The cq to be associated with the receive queue */
	u16 rq_num_pages;
	dma_addr_t rq_pbl_ptr;
	void *irq; /* NOTE(review): presumably the incoming-read queue buffer
		    * for responder reads (not an interrupt) — confirm.
		    */
	dma_addr_t irq_phys_addr;
	u8 irq_num_pages;
	bool resp_offloaded;
	bool has_resp;
	struct cq_prod cq_prod;

	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];

	void *shared_queue;
	dma_addr_t shared_queue_phys_addr;
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_ep *ep; /* iWARP connection endpoint */
#endif

	u16 xrcd_id; /* XRC domain of this QP (XRC QP types only) */
};
224
ecore_rdma_is_xrc_qp(struct ecore_rdma_qp * qp)225 static OSAL_INLINE bool ecore_rdma_is_xrc_qp(struct ecore_rdma_qp *qp)
226 {
227 if ((qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_TGT) ||
228 (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_INI))
229 return 1;
230
231 return 0;
232 }
233
/* Allocate / free the per-hwfn RDMA info structure. */
enum _ecore_status_t ecore_rdma_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_rdma_info_free(struct ecore_hwfn *p_hwfn);

/* Allocate a bitmap of max_count bits; 'name' is kept for diagnostics. */
enum _ecore_status_t
ecore_rdma_bmap_alloc(struct ecore_hwfn *p_hwfn,
		      struct ecore_bmap *bmap,
		      u32 max_count,
		      char *name);

/* Free a bitmap.  NOTE(review): 'check' presumably enables a leaked-ID
 * sanity check on teardown — confirm in the implementation.
 */
void
ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
		     struct ecore_bmap *bmap,
		     bool check);

/* Find a free ID in the bitmap, mark it used and return it via *id_num. */
enum _ecore_status_t
ecore_rdma_bmap_alloc_id(struct ecore_hwfn *p_hwfn,
			 struct ecore_bmap *bmap,
			 u32 *id_num);

/* Mark a specific ID as in use. */
void
ecore_bmap_set_id(struct ecore_hwfn *p_hwfn,
		  struct ecore_bmap *bmap,
		  u32 id_num);

/* Return a previously allocated ID to the bitmap. */
void
ecore_bmap_release_id(struct ecore_hwfn *p_hwfn,
		      struct ecore_bmap *bmap,
		      u32 id_num);

/* Non-zero when the given ID is currently marked as in use. */
int
ecore_bmap_test_id(struct ecore_hwfn *p_hwfn,
		   struct ecore_bmap *bmap,
		   u32 id_num);

/* Convert an ecore MAC byte array into the FW's u16-word representation. */
void
ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac);

/* True when any RDMA QPs are currently allocated on this hwfn. */
bool
ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn);

/* Map a relative SRQ (or XRC SRQ) ID to the FW-global SRQ ID. */
u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc);
275
276 #endif /*__ECORE_RDMA_H__*/
277