/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.h 1349 2004-12-16 21:09:43Z roland $
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)

struct mthca_buf_list {
	void *buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};

union mthca_buf {
	struct mthca_buf_list direct;
	struct mthca_buf_list *page_list;
};

struct mthca_uar {
	unsigned long pfn;
	int           index;
};

struct mthca_user_db_table;

struct mthca_ucontext {
	struct ib_ucontext          ibucontext;
	struct mthca_uar            uar;
	struct mthca_user_db_table *db_tab;
};

struct mthca_mtt;

struct mthca_mr {
	struct ib_mr      ibmr;
	struct mthca_mtt *mtt;
};

struct mthca_fmr {
	struct ib_fmr      ibmr;
	struct ib_fmr_attr attr;
	struct mthca_mtt  *mtt;
	int                maps;
	union {
		struct {
			struct mthca_mpt_entry __iomem *mpt;
			u64 __iomem *mtts;
		} tavor;
		struct {
			struct mthca_mpt_entry *mpt;
			__be64 *mtts;
		} arbel;
	} mem;
};

struct mthca_pd {
	struct ib_pd    ibpd;
	u32             pd_num;
	atomic_t        sqp_count;
	struct mthca_mr ntmr;
	int             privileged;
};

struct mthca_eq {
	struct mthca_dev      *dev;
	int                    eqn;
	u32                    eqn_mask;
	u32                    cons_index;
	u16                    msi_x_vector;
	u16                    msi_x_entry;
	int                    have_irq;
	int                    nent;
	struct mthca_buf_list *page_list;
	struct mthca_mr        mr;
};

struct mthca_av;

enum mthca_ah_type {
	MTHCA_AH_ON_HCA,
	MTHCA_AH_PCI_POOL,
	MTHCA_AH_KMALLOC
};

struct mthca_ah {
	struct ib_ah       ibah;
	enum mthca_ah_type type;
	u32                key;
	struct mthca_av   *av;
	dma_addr_t         avdma;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the send cq locked first.  No other
 * nesting should be done.
 *
 * Each struct mthca_cq/qp also has an atomic_t ref count.  The
 * pointer from the cq/qp_table to the struct counts as one reference.
 * This reference is also good for access through the consumer API, so
 * modifying the CQ/QP etc. doesn't need to take another reference.
 * Access because of a completion being polled does need a reference.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table, remove pointer, unlock cq/qp_table
 * - decrement ref count
 * - wait_event until ref count is zero
 *
 * (An illustrative sketch of these two sequences follows this comment.)
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when the
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */
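
/*
 * Illustrative sketch only, not a definitive implementation: roughly
 * how the completion-event access and destroy sequences described
 * above might look in code for a CQ.  The helpers mthca_array_get()
 * and mthca_array_clear(), and the dev->cq_table/dev->limits fields,
 * are assumed here for the sake of the example; see mthca_cq.c for
 * the driver's real event and destroy paths.
 *
 *	// Access because of a completion event:
 *	spin_lock(&dev->cq_table.lock);
 *	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
 *	if (cq)
 *		atomic_inc(&cq->refcount);	// take a reference while holding the table lock
 *	spin_unlock(&dev->cq_table.lock);
 *	if (!cq)
 *		return;
 *
 *	spin_lock(&cq->lock);
 *	// ... poll completions / handle the event ...
 *	spin_unlock(&cq->lock);
 *
 *	if (atomic_dec_and_test(&cq->refcount))	// drop reference; wake waiters at zero
 *		wake_up(&cq->wait);
 *
 *	// Destroying a CQ:
 *	spin_lock_irq(&dev->cq_table.lock);
 *	mthca_array_clear(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
 *	spin_unlock_irq(&dev->cq_table.lock);
 *
 *	atomic_dec(&cq->refcount);		// drop the table's reference
 *	wait_event(cq->wait, !atomic_read(&cq->refcount));
 */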

struct mthca_cq {
	struct ib_cq           ibcq;
	spinlock_t             lock;
	atomic_t               refcount;
	int                    cqn;
	u32                    cons_index;
	int                    is_direct;
	int                    is_kernel;

	/* Next fields are Arbel only */
	int                    set_ci_db_index;
	__be32                *set_ci_db;
	int                    arm_db_index;
	__be32                *arm_db;
	int                    arm_sn;

	union mthca_buf        queue;
	struct mthca_mr        mr;
	wait_queue_head_t      wait;
};

struct mthca_srq {
	struct ib_srq		ibsrq;
	spinlock_t		lock;
	atomic_t		refcount;
	int			srqn;
	int			max;
	int			max_gs;
	int			wqe_shift;
	int			first_free;
	int			last_free;
	u16			counter;  /* Arbel only */
	int			db_index; /* Arbel only */
	__be32		       *db;       /* Arbel only */
	void		       *last;

	int			is_direct;
	u64		       *wrid;
	union mthca_buf		queue;
	struct mthca_mr		mr;

	wait_queue_head_t	wait;
};

struct mthca_wq {
	spinlock_t lock;
	int        max;
	unsigned   next_ind;
	unsigned   last_comp;
	unsigned   head;
	unsigned   tail;
	void      *last;
	int        max_gs;
	int        wqe_shift;

	int        db_index;	/* Arbel only */
	__be32    *db;
};

struct mthca_qp {
	struct ib_qp           ibqp;
	atomic_t               refcount;
	u32                    qpn;
	int                    is_direct;
	u8                     transport;
	u8                     state;
	u8                     atomic_rd_en;
	u8                     resp_depth;

	struct mthca_mr        mr;

	struct mthca_wq        rq;
	struct mthca_wq        sq;
	enum ib_sig_type       sq_policy;
	int                    send_wqe_offset;

	u64                   *wrid;
	union mthca_buf	       queue;

	wait_queue_head_t      wait;
};

struct mthca_sqp {
	struct mthca_qp qp;
	int             port;
	int             pkey_index;
	u32             qkey;
	u32             send_psn;
	struct ib_ud_header ud_header;
	int             header_buf_size;
	void           *header_buf;
	dma_addr_t      header_dma;
};

static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}

static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
{
	return container_of(ibmr, struct mthca_fmr, ibmr);
}

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mthca_srq, ibsrq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mthca_qp, ibqp);
}

static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
{
	return container_of(qp, struct mthca_sqp, qp);
}

#endif /* MTHCA_PROVIDER_H */