1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #ifndef MLX4_H
35 #define MLX4_H
36
37 #include <infiniband/endian.h>
38 #include <stddef.h>
39
40 #include <infiniband/driver.h>
41 #include <infiniband/udma_barrier.h>
42 #include <infiniband/verbs.h>
43
/* Maximum number of physical ports on a single mlx4 HCA
 * (sizes port_query_cache in struct mlx4_context). */
#define MLX4_PORTS_NUM 2

/* Prefix for driver diagnostic messages. */
#define PFX "mlx4: "
47
enum {
	/* Offset applied when encoding the IB static rate into
	 * mlx4_av.stat_rate — NOTE(review): confirm against the mlx4 PRM. */
	MLX4_STAT_RATE_OFFSET = 5
};
51
/*
 * Geometry of the per-context QP lookup table (mlx4_context.qp_table):
 * a two-level table; these constants size the top level.
 */
enum {
	MLX4_QP_TABLE_BITS = 8,
	MLX4_QP_TABLE_SIZE = 1 << MLX4_QP_TABLE_BITS,
	MLX4_QP_TABLE_MASK = MLX4_QP_TABLE_SIZE - 1
};
57
58 #define MLX4_REMOTE_SRQN_FLAGS(wr) htobe32(wr->qp_type.xrc.remote_srqn << 8)
59
/* Geometry of the XRC SRQ lookup table (struct mlx4_xsrq_table). */
enum {
	MLX4_XSRQ_TABLE_BITS = 8,
	MLX4_XSRQ_TABLE_SIZE = 1 << MLX4_XSRQ_TABLE_BITS,
	MLX4_XSRQ_TABLE_MASK = MLX4_XSRQ_TABLE_SIZE - 1
};
65
/*
 * Two-level table mapping an XRC SRQ number to its mlx4_srq
 * (see mlx4_store_xsrq/mlx4_find_xsrq/mlx4_clear_xsrq).
 */
struct mlx4_xsrq_table {
	struct {
		struct mlx4_srq **table;	/* second level; presumably allocated lazily — see mlx4_store_xsrq */
		int refcnt;			/* live entries in this bucket */
	} xsrq_table[MLX4_XSRQ_TABLE_SIZE];

	pthread_mutex_t mutex;	/* serializes table updates */
	int num_xsrq;		/* total table capacity (set by mlx4_init_xsrq_table) */
	int shift;		/* srqn bits selecting the top-level bucket */
	int mask;		/* mask yielding the second-level index */
};
77
enum {
	/* Flag bit used in QP numbers that refer to XRC targets. */
	MLX4_XRC_QPN_BIT = (1 << 23)
};
81
/* Classes of doorbell records handed out by mlx4_alloc_db(). */
enum mlx4_db_type {
	MLX4_DB_TYPE_CQ,	/* completion-queue doorbells */
	MLX4_DB_TYPE_RQ,	/* receive-queue doorbells */
	MLX4_NUM_DB_TYPE	/* count of classes; not a real type */
};
87
/*
 * Hardware opcodes.  The first group is placed in send WQEs;
 * MLX4_RECV_OPCODE_* values are reported in receive completions, and
 * MLX4_CQE_OPCODE_* mark special CQEs (errors, CQ resize events).
 */
enum {
	MLX4_OPCODE_NOP = 0x00,
	MLX4_OPCODE_SEND_INVAL = 0x01,
	MLX4_OPCODE_RDMA_WRITE = 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM = 0x09,
	MLX4_OPCODE_SEND = 0x0a,
	MLX4_OPCODE_SEND_IMM = 0x0b,
	MLX4_OPCODE_LSO = 0x0e,
	MLX4_OPCODE_RDMA_READ = 0x10,
	MLX4_OPCODE_ATOMIC_CS = 0x11,
	MLX4_OPCODE_ATOMIC_FA = 0x12,
	MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15,
	MLX4_OPCODE_BIND_MW = 0x18,
	MLX4_OPCODE_FMR = 0x19,
	MLX4_OPCODE_LOCAL_INVAL = 0x1b,
	MLX4_OPCODE_CONFIG_CMD = 0x1f,

	/* Opcodes found in receive-side completions. */
	MLX4_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
	MLX4_RECV_OPCODE_SEND = 0x01,
	MLX4_RECV_OPCODE_SEND_IMM = 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL = 0x03,

	/* Special CQE opcodes. */
	MLX4_CQE_OPCODE_ERROR = 0x1e,
	MLX4_CQE_OPCODE_RESIZE = 0x16,
};
114
/* Per-device state wrapping the libibverbs device object. */
struct mlx4_device {
	struct verbs_device verbs_dev;	/* embedded verbs device (see to_mdev) */
	int page_size;			/* system page size, cached */
	int abi_version;		/* kernel/user ABI version negotiated at open */
};
120
121 struct mlx4_db_page;
122
/*
 * Per-process device context: doorbell mappings, QP/XRC-SRQ lookup
 * tables, doorbell-record lists and cached device limits.
 */
struct mlx4_context {
	struct ibv_context ibv_ctx;	/* embedded verbs context (see to_mctx) */

	void *uar;			/* mapped User Access Region (doorbell page) */
	pthread_spinlock_t uar_lock;	/* serializes UAR doorbell writes */

	void *bf_page;			/* BlueFlame page; NOTE(review): confirm NULL when unsupported */
	int bf_buf_size;		/* usable BlueFlame buffer size */
	int bf_offset;			/* current offset within bf_page */
	pthread_spinlock_t bf_lock;	/* protects bf_offset */

	/* Two-level QPN -> mlx4_qp lookup table (see mlx4_find_qp). */
	struct {
		struct mlx4_qp **table;
		int refcnt;		/* live entries in this bucket */
	} qp_table[MLX4_QP_TABLE_SIZE];
	pthread_mutex_t qp_table_mutex;	/* protects qp_table updates */
	int num_qps;			/* table capacity */
	int qp_table_shift;		/* qpn bits selecting the bucket */
	int qp_table_mask;		/* mask yielding the second-level index */
	int max_qp_wr;			/* device limit: WRs per QP */
	int max_sge;			/* device limit: SGEs per WR */

	struct mlx4_db_page *db_list[MLX4_NUM_DB_TYPE];	/* doorbell-record pages per type */
	pthread_mutex_t db_list_mutex;	/* protects db_list */
	int cqe_size;			/* CQE size used by this context */
	struct mlx4_xsrq_table xsrq_table;
	/* Cached per-port attributes filled on first successful query. */
	struct {
		uint8_t valid;		/* nonzero once the entry is populated */
		uint8_t link_layer;	/* IB or Ethernet */
		enum ibv_port_cap_flags caps;
	} port_query_cache[MLX4_PORTS_NUM];
	/* Free-running HCA core clock, used by mlx4_query_rt_values. */
	struct {
		uint64_t offset;	/* offset of the clock register in the mapping */
		uint8_t offset_valid;
	} core_clock;
	void *hca_core_clock;		/* mapped clock register, if available */
};
160
/* A driver-owned memory region (allocated via mlx4_alloc_buf). */
struct mlx4_buf {
	void *buf;	/* start address */
	size_t length;	/* size in bytes */
};
165
/* Protection domain wrapper. */
struct mlx4_pd {
	struct ibv_pd ibv_pd;	/* embedded verbs PD (see to_mpd) */
	uint32_t pdn;		/* PD number assigned by the kernel driver */
};
170
/* Bits for mlx4_cq.flags. */
enum {
	MLX4_CQ_FLAGS_RX_CSUM_VALID = 1 << 0,	/* device validates rx checksums */
	MLX4_CQ_FLAGS_EXTENDED = 1 << 1,	/* created via mlx4_create_cq_ex */
	MLX4_CQ_FLAGS_SINGLE_THREADED = 1 << 2,	/* caller guarantees single-threaded access; skip locking */
};
176
/* Completion queue: CQE ring plus consumer-side bookkeeping. */
struct mlx4_cq {
	struct ibv_cq_ex ibv_cq;	/* embedded verbs CQ (see to_mcq) */
	struct mlx4_buf buf;		/* CQE ring buffer */
	struct mlx4_buf resize_buf;	/* staging buffer during mlx4_resize_cq */
	pthread_spinlock_t lock;	/* poll/arm serialization (unless SINGLE_THREADED) */
	uint32_t cqn;			/* CQ number */
	uint32_t cons_index;		/* consumer index; low 24 bits published to hw */
	uint32_t *set_ci_db;		/* consumer-index doorbell record */
	uint32_t *arm_db;		/* arm doorbell record */
	int arm_sn;			/* arm sequence number */
	int cqe_size;			/* bytes per CQE */
	struct mlx4_qp *cur_qp;		/* QP of the CQE being processed (extended poll) */
	struct mlx4_cqe *cqe;		/* CQE cursor for the extended poll API */
	uint32_t flags;			/* MLX4_CQ_FLAGS_* */
};
192
/* Shared receive queue. */
struct mlx4_srq {
	struct verbs_srq verbs_srq;	/* embedded verbs SRQ (see to_msrq) */
	struct mlx4_buf buf;		/* WQE ring buffer */
	pthread_spinlock_t lock;	/* serializes post_recv */
	uint64_t *wrid;			/* user wr_id per WQE slot */
	uint32_t srqn;			/* SRQ number */
	int max;			/* ring size in WQEs */
	int max_gs;			/* max scatter entries per WQE */
	int wqe_shift;			/* log2 of WQE stride in bytes */
	int head;			/* head of the free-WQE list */
	int tail;			/* tail of the free-WQE list */
	uint32_t *db;			/* doorbell record */
	uint16_t counter;		/* WQE counter written to the doorbell record */
	uint8_t ext_srq;		/* nonzero for XRC/extended SRQs */
};
208
/* One work queue (send or receive side) of a QP. */
struct mlx4_wq {
	uint64_t *wrid;			/* user wr_id per posted WQE */
	pthread_spinlock_t lock;	/* serializes posting to this queue */
	int wqe_cnt;			/* ring size in WQEs */
	int max_post;			/* max WQEs callers may keep outstanding */
	unsigned head;			/* producer index */
	unsigned tail;			/* consumer index */
	int max_gs;			/* max scatter/gather entries per WQE */
	int wqe_shift;			/* log2 of WQE stride in bytes */
	int offset;			/* byte offset of this queue within qp->buf */
};
220
/* Queue pair: one buffer holding both send and receive rings. */
struct mlx4_qp {
	struct verbs_qp verbs_qp;	/* embedded verbs QP (see to_mqp) */
	struct mlx4_buf buf;		/* combined SQ+RQ buffer */
	int max_inline_data;		/* max inline payload per send WQE */
	int buf_size;			/* total size of buf in bytes */

	uint32_t doorbell_qpn;		/* QPN preformatted for doorbell writes */
	uint32_t sq_signal_bits;	/* set when all send WQEs are signaled */
	int sq_spare_wqes;		/* extra WQEs reserved for internal use */
	struct mlx4_wq sq;		/* send queue state */

	uint32_t *db;			/* RQ doorbell record */
	struct mlx4_wq rq;		/* receive queue state */

	uint8_t link_layer;		/* port link layer (IB or Ethernet) */
	uint32_t qp_cap_cache;		/* cached capability flags, e.g. checksum offload */
};
238
/*
 * UD address vector in the layout consumed by the hardware.
 * NOTE(review): field packing/endianness follow the mlx4 PRM — do not
 * reorder or resize members.
 */
struct mlx4_av {
	uint32_t port_pd;		/* port number and PD, packed */
	uint8_t reserved1;
	uint8_t g_slid;			/* GRH-present flag and source LID bits */
	uint16_t dlid;			/* destination LID */
	uint8_t reserved2;
	uint8_t gid_index;		/* source GID table index */
	uint8_t stat_rate;		/* static rate (see MLX4_STAT_RATE_OFFSET) */
	uint8_t hop_limit;		/* GRH hop limit */
	uint32_t sl_tclass_flowlabel;	/* SL, traffic class and flow label, packed */
	uint8_t dgid[16];		/* destination GID */
};
251
/* Address handle: verbs handle plus the hardware AV and L2 info. */
struct mlx4_ah {
	struct ibv_ah ibv_ah;	/* embedded verbs AH (see to_mah) */
	struct mlx4_av av;	/* hardware-format address vector */
	uint16_t vlan;		/* VLAN tag for RoCE destinations */
	uint8_t mac[6];		/* destination MAC for RoCE destinations */
};
258
/* Checksum-offload capability/status bits (see mlx4_qp.qp_cap_cache). */
enum {
	MLX4_CSUM_SUPPORT_UD_OVER_IB = (1 << 0),	/* tx/rx csum for UD over IB */
	MLX4_CSUM_SUPPORT_RAW_OVER_ETH = (1 << 1),	/* tx/rx csum for raw Ethernet QPs */
	/* Only report rx checksum when the validation is valid */
	MLX4_RX_CSUM_VALID = (1 << 16),
};
265
/* Bits of mlx4_cqe.status relevant to rx checksum validation. */
enum mlx4_cqe_status {
	MLX4_CQE_STATUS_TCP_UDP_CSUM_OK = (1 << 2),
	MLX4_CQE_STATUS_IPV4_PKT = (1 << 22),
	MLX4_CQE_STATUS_IP_HDR_CSUM_OK = (1 << 28),
	/* Full "IPv4 packet with all checksums good" combination. */
	MLX4_CQE_STATUS_IPV4_CSUM_OK = MLX4_CQE_STATUS_IPV4_PKT |
				       MLX4_CQE_STATUS_IP_HDR_CSUM_OK |
				       MLX4_CQE_STATUS_TCP_UDP_CSUM_OK
};
274
/*
 * Completion queue entry as written by the hardware.
 * NOTE(review): multi-byte fields are device (big-endian) byte order and
 * the layout follows the mlx4 PRM — do not reorder or resize members.
 */
struct mlx4_cqe {
	uint32_t vlan_my_qpn;		/* QPN plus VLAN-related flags, packed */
	uint32_t immed_rss_invalid;	/* immediate data / invalidated rkey / RSS info */
	uint32_t g_mlpath_rqpn;		/* GRH bit, ML path and remote QPN, packed */
	union {
		struct {
			uint16_t sl_vid;	/* service level / VLAN id */
			uint16_t rlid;		/* remote LID */
		};
		uint32_t ts_47_16;	/* timestamp bits 47..16 (timestamping CQEs) */
	};
	uint32_t status;		/* MLX4_CQE_STATUS_* bits */
	uint32_t byte_cnt;		/* bytes transferred */
	uint16_t wqe_index;		/* index of the completed WQE */
	uint16_t checksum;		/* rx checksum value */
	uint8_t reserved3;
	uint8_t ts_15_8;		/* timestamp bits 15..8 */
	uint8_t ts_7_0;			/* timestamp bits 7..0 */
	uint8_t owner_sr_opcode;	/* ownership bit, send/recv flag and opcode */
};
295
/* Round val up to the nearest multiple of align (a power of two). */
static inline unsigned long align(unsigned long val, unsigned long align)
{
	unsigned long mask = align - 1;

	return (val + mask) & ~mask;
}
/* Round a requested queue size up to a hardware-friendly size
 * (see the definition for the exact policy). */
int align_queue_size(int req);
301
/*
 * Recover the mlx4_<type> structure that embeds the verbs object
 * pointed to by "ib<xxx>" as its "ibv_<xxx>" member.
 * Uses (char *) arithmetic: pointer arithmetic on void * is a GNU
 * extension and not valid standard C.
 */
#define to_mxxx(xxx, type)						\
	((struct mlx4_##type *)						\
	 ((char *) ib##xxx - offsetof(struct mlx4_##type, ibv_##xxx)))
305
static inline struct mlx4_device *to_mdev(struct ibv_device *ibdev)
{
	/* ibv_device is the first field of verbs_device
	 * (see try_driver() in libibverbs), so a container_of keyed on
	 * verbs_dev is also valid for the ibv_device pointer.
	 */
	return container_of(ibdev, struct mlx4_device, verbs_dev);
}
313
to_mctx(struct ibv_context * ibctx)314 static inline struct mlx4_context *to_mctx(struct ibv_context *ibctx)
315 {
316 return to_mxxx(ctx, context);
317 }
318
to_mpd(struct ibv_pd * ibpd)319 static inline struct mlx4_pd *to_mpd(struct ibv_pd *ibpd)
320 {
321 return to_mxxx(pd, pd);
322 }
323
to_mcq(struct ibv_cq * ibcq)324 static inline struct mlx4_cq *to_mcq(struct ibv_cq *ibcq)
325 {
326 return to_mxxx(cq, cq);
327 }
328
static inline struct mlx4_srq *to_msrq(struct ibv_srq *ibsrq)
{
	/* ibv_srq is embedded in verbs_srq, which is embedded in
	 * mlx4_srq: unwrap both container levels. */
	return container_of(container_of(ibsrq, struct verbs_srq, srq),
			    struct mlx4_srq, verbs_srq);
}
334
static inline struct mlx4_qp *to_mqp(struct ibv_qp *ibqp)
{
	/* ibv_qp is embedded in verbs_qp, which is embedded in mlx4_qp:
	 * unwrap both container levels. */
	return container_of(container_of(ibqp, struct verbs_qp, qp),
			    struct mlx4_qp, verbs_qp);
}
340
to_mah(struct ibv_ah * ibah)341 static inline struct mlx4_ah *to_mah(struct ibv_ah *ibah)
342 {
343 return to_mxxx(ah, ah);
344 }
345
static inline void mlx4_update_cons_index(struct mlx4_cq *cq)
{
	/* Publish the low 24 bits of the consumer index to the CQ
	 * doorbell record (big-endian) so the HCA can reuse polled CQEs.
	 * NOTE(review): any required memory barrier is the caller's
	 * responsibility — confirm against the poll path. */
	*cq->set_ci_db = htobe32(cq->cons_index & 0xffffff);
}
350
/* Buffer and doorbell-record allocation helpers. */
int mlx4_alloc_buf(struct mlx4_buf *buf, size_t size, int page_size);
void mlx4_free_buf(struct mlx4_buf *buf);

uint32_t *mlx4_alloc_db(struct mlx4_context *context, enum mlx4_db_type type);
void mlx4_free_db(struct mlx4_context *context, enum mlx4_db_type type, uint32_t *db);

/* Device, port and real-time value queries. */
int mlx4_query_device(struct ibv_context *context,
		      struct ibv_device_attr *attr);
int mlx4_query_device_ex(struct ibv_context *context,
			 const struct ibv_query_device_ex_input *input,
			 struct ibv_device_attr_ex *attr,
			 size_t attr_size);
int mlx4_query_port(struct ibv_context *context, uint8_t port,
		    struct ibv_port_attr *attr);
int mlx4_query_rt_values(struct ibv_context *context,
			 struct ibv_values_ex *values);

/* Protection domains and XRC domains. */
struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context);
int mlx4_free_pd(struct ibv_pd *pd);
struct ibv_xrcd *mlx4_open_xrcd(struct ibv_context *context,
				struct ibv_xrcd_init_attr *attr);
int mlx4_close_xrcd(struct ibv_xrcd *xrcd);

/* Memory regions. */
struct ibv_mr *mlx4_reg_mr(struct ibv_pd *pd, void *addr,
			   size_t length, int access);
int mlx4_rereg_mr(struct ibv_mr *mr, int flags, struct ibv_pd *pd,
		  void *addr, size_t length, int access);
int mlx4_dereg_mr(struct ibv_mr *mr);

/* Memory windows. */
struct ibv_mw *mlx4_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type);
int mlx4_dealloc_mw(struct ibv_mw *mw);
int mlx4_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
		 struct ibv_mw_bind *mw_bind);

/* Completion queues. */
struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
			      struct ibv_comp_channel *channel,
			      int comp_vector);
struct ibv_cq_ex *mlx4_create_cq_ex(struct ibv_context *context,
				    struct ibv_cq_init_attr_ex *cq_attr);
void mlx4_cq_fill_pfns(struct mlx4_cq *cq, const struct ibv_cq_init_attr_ex *cq_attr);
int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent,
		      int entry_size);
int mlx4_resize_cq(struct ibv_cq *cq, int cqe);
int mlx4_destroy_cq(struct ibv_cq *cq);
int mlx4_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
int mlx4_arm_cq(struct ibv_cq *cq, int solicited);
void mlx4_cq_event(struct ibv_cq *cq);
/* __mlx4_cq_clean: caller already holds the CQ lock; mlx4_cq_clean locks. */
void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq);
void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq);
int mlx4_get_outstanding_cqes(struct mlx4_cq *cq);
void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int new_cqe);

/* Shared receive queues, including the XRC SRQ lookup table. */
struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
				struct ibv_srq_init_attr *attr);
struct ibv_srq *mlx4_create_srq_ex(struct ibv_context *context,
				   struct ibv_srq_init_attr_ex *attr_ex);
struct ibv_srq *mlx4_create_xrc_srq(struct ibv_context *context,
				    struct ibv_srq_init_attr_ex *attr_ex);
int mlx4_modify_srq(struct ibv_srq *srq,
		    struct ibv_srq_attr *attr,
		    int mask);
int mlx4_query_srq(struct ibv_srq *srq,
		   struct ibv_srq_attr *attr);
int mlx4_destroy_srq(struct ibv_srq *srq);
int mlx4_destroy_xrc_srq(struct ibv_srq *srq);
int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
		       struct mlx4_srq *srq);
void mlx4_cleanup_xsrq_table(struct mlx4_xsrq_table *xsrq_table);
int mlx4_init_xsrq_table(struct mlx4_xsrq_table *xsrq_table, int size);
struct mlx4_srq *mlx4_find_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn);
int mlx4_store_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn,
		    struct mlx4_srq *srq);
void mlx4_clear_xsrq(struct mlx4_xsrq_table *xsrq_table, uint32_t srqn);
void mlx4_free_srq_wqe(struct mlx4_srq *srq, int ind);
int mlx4_post_srq_recv(struct ibv_srq *ibsrq,
		       struct ibv_recv_wr *wr,
		       struct ibv_recv_wr **bad_wr);

/* Queue pairs, including the QPN lookup table. */
struct ibv_qp *mlx4_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr);
struct ibv_qp *mlx4_create_qp_ex(struct ibv_context *context,
				 struct ibv_qp_init_attr_ex *attr);
struct ibv_qp *mlx4_open_qp(struct ibv_context *context, struct ibv_qp_open_attr *attr);
int mlx4_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		  int attr_mask,
		  struct ibv_qp_init_attr *init_attr);
int mlx4_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		   int attr_mask);
int mlx4_destroy_qp(struct ibv_qp *qp);
void mlx4_init_qp_indices(struct mlx4_qp *qp);
void mlx4_qp_init_sq_ownership(struct mlx4_qp *qp);
int mlx4_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
		   struct ibv_send_wr **bad_wr);
int mlx4_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
		   struct ibv_recv_wr **bad_wr);
void mlx4_calc_sq_wqe_size(struct ibv_qp_cap *cap, enum ibv_qp_type type,
			   struct mlx4_qp *qp);
int mlx4_alloc_qp_buf(struct ibv_context *context, struct ibv_qp_cap *cap,
		      enum ibv_qp_type type, struct mlx4_qp *qp);
void mlx4_set_sq_sizes(struct mlx4_qp *qp, struct ibv_qp_cap *cap,
		       enum ibv_qp_type type);
struct mlx4_qp *mlx4_find_qp(struct mlx4_context *ctx, uint32_t qpn);
int mlx4_store_qp(struct mlx4_context *ctx, uint32_t qpn, struct mlx4_qp *qp);
void mlx4_clear_qp(struct mlx4_context *ctx, uint32_t qpn);

/* Address handles. */
struct ibv_ah *mlx4_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);
int mlx4_destroy_ah(struct ibv_ah *ah);
int mlx4_alloc_av(struct mlx4_pd *pd, struct ibv_ah_attr *attr,
		  struct mlx4_ah *ah);
void mlx4_free_av(struct mlx4_ah *ah);
458
459 #endif /* MLX4_H */
460