/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34*33ec1ccbSHans Petter Selasky
#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

#include <linux/wait.h>
42*33ec1ccbSHans Petter Selasky
/*
 * Access-permission flags for a memory protection table (MPT) entry.
 * Bit positions match the hardware MPT entry layout -- do not change
 * the values.  (NOTE(review): exact register layout should be verified
 * against the firmware/command interface that consumes these.)
 */
#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)
48*33ec1ccbSHans Petter Selasky
/*
 * One DMA-mapped buffer fragment: the kernel virtual address plus the
 * DMA unmap cookie declared via DEFINE_DMA_UNMAP_ADDR() (compiles to
 * nothing on platforms that don't need the address kept for unmap).
 */
struct mthca_buf_list {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};
53*33ec1ccbSHans Petter Selasky
/*
 * Backing storage for a queue: either a single physically contiguous
 * buffer ("direct") or an array of fragments ("page_list").  Which
 * member is valid is recorded by the owning structure's is_direct flag
 * (see mthca_cq_buf, mthca_srq, mthca_qp below).
 */
union mthca_buf {
	struct mthca_buf_list direct;
	struct mthca_buf_list *page_list;
};
58*33ec1ccbSHans Petter Selasky
/*
 * UAR (User Access Region): pfn is the page frame number to map into a
 * process, index its slot in the device's UAR table -- presumably;
 * verify against the allocator that fills these in.
 */
struct mthca_uar {
	unsigned long pfn;
	int index;
};
63*33ec1ccbSHans Petter Selasky
struct mthca_user_db_table;

/*
 * Per-process (userspace verbs) context: the UAR mapped to the process
 * and its doorbell-record table.
 */
struct mthca_ucontext {
	struct ib_ucontext ibucontext;
	struct mthca_uar uar;
	struct mthca_user_db_table *db_tab;
	int reg_mr_warned;	/* one-shot warning latch for reg_mr -- TODO confirm exact use */
};
72*33ec1ccbSHans Petter Selasky
struct mthca_mtt;

/*
 * Memory region: wraps the core ib_mr.  umem is the pinned user memory
 * (presumably NULL for kernel-owned MRs -- note several structs below
 * embed mthca_mr for internal buffers); mtt is the HCA's translation
 * table for the region.
 */
struct mthca_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct mthca_mtt *mtt;
};
80*33ec1ccbSHans Petter Selasky
/*
 * Fast memory region.  'maps' counts map operations since the last
 * unmap (presumably bounded by attr.max_maps -- confirm in the FMR
 * code).  The 'mem' union holds per-HCA-family pointers to the MPT and
 * MTT entries: Tavor writes them through iomem, Arbel ("memfree")
 * through host memory with an associated DMA handle.
 */
struct mthca_fmr {
	struct ib_fmr ibmr;
	struct ib_fmr_attr attr;
	struct mthca_mtt *mtt;
	int maps;
	union {
		struct {
			struct mthca_mpt_entry __iomem *mpt;
			u64 __iomem *mtts;
		} tavor;
		struct {
			struct mthca_mpt_entry *mpt;
			__be64 *mtts;
			dma_addr_t dma_handle;
		} arbel;
	} mem;
};
98*33ec1ccbSHans Petter Selasky
/*
 * Protection domain.  sqp_count tracks special QPs in this PD (hence
 * atomic_t); ntmr is an internal MR owned by the PD -- presumably a
 * "no translation" MR covering kernel memory; confirm at the alloc
 * site.  privileged distinguishes kernel PDs from user PDs.
 */
struct mthca_pd {
	struct ib_pd ibpd;
	u32 pd_num;
	atomic_t sqp_count;
	struct mthca_mr ntmr;
	int privileged;
};
106*33ec1ccbSHans Petter Selasky
/*
 * Event queue: 'nent' entries spread over 'page_list' fragments and
 * registered with the HCA through 'mr'.  cons_index is the software
 * consumer pointer; msi_x_vector/msi_x_entry and have_irq describe the
 * interrupt bound to this EQ, with irq_name as its /proc label.
 */
struct mthca_eq {
	struct mthca_dev *dev;
	int eqn;
	u32 eqn_mask;
	u32 cons_index;
	u16 msi_x_vector;
	u16 msi_x_entry;
	int have_irq;
	int nent;
	struct mthca_buf_list *page_list;
	struct mthca_mr mr;
	char irq_name[IB_DEVICE_NAME_MAX];
};
120*33ec1ccbSHans Petter Selasky
struct mthca_av;

/* Where an address handle's AV (address vector) storage lives. */
enum mthca_ah_type {
	MTHCA_AH_ON_HCA,	/* in HCA-resident memory */
	MTHCA_AH_PCI_POOL,	/* from a DMA pool */
	MTHCA_AH_KMALLOC	/* plain kmalloc fallback */
};
128*33ec1ccbSHans Petter Selasky
/*
 * Address handle: 'type' says how 'av' was allocated (see
 * enum mthca_ah_type); avdma is its DMA address when the AV lives in
 * DMA-able host memory.
 */
struct mthca_ah {
	struct ib_ah ibah;
	enum mthca_ah_type type;
	u32 key;
	struct mthca_av *av;
	dma_addr_t avdma;
};
136*33ec1ccbSHans Petter Selasky
/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the cq with the lower cqn locked first.
 * No other nesting should be done.
 *
 * Each struct mthca_cq/qp also has a ref count, protected by the
 * corresponding table lock.  The pointer from the cq/qp_table to the
 * struct counts as one reference.  This reference also is good for
 * access through the consumer API, so modifying the CQ/QP etc doesn't
 * need to take another reference.  Access to a QP because of a
 * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table
 * - remove pointer and decrement ref count
 * - unlock cq/qp_table lock
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when a
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.  Similarly, the consumer is responsible
 * for ensuring that no CQ resize operations are pending when a CQ
 * is destroyed.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */
186*33ec1ccbSHans Petter Selasky
/*
 * CQE storage for a completion queue: the queue memory, the MR that
 * registers it with the HCA, and whether 'queue' uses its direct or
 * page_list member.
 */
struct mthca_cq_buf {
	union mthca_buf queue;
	struct mthca_mr mr;
	int is_direct;
};
192*33ec1ccbSHans Petter Selasky
/*
 * In-flight CQ resize: the replacement buffer, its CQE count, and a
 * small state machine tracking the swap (allocated -> ready for the
 * HCA -> swapped into place).
 */
struct mthca_cq_resize {
	struct mthca_cq_buf buf;
	int cqe;
	enum {
		CQ_RESIZE_ALLOC,
		CQ_RESIZE_READY,
		CQ_RESIZE_SWAPPED
	} state;
};
202*33ec1ccbSHans Petter Selasky
/*
 * Completion queue.  See the locking-scheme comment above for how
 * 'lock', 'refcount', 'wait', and 'mutex' interact with the global
 * cq_table.  resize_buf is non-NULL only while a resize is in flight.
 * is_kernel distinguishes kernel CQs (driver owns 'buf') from user
 * CQs (buffer lives in userspace).
 */
struct mthca_cq {
	struct ib_cq ibcq;
	spinlock_t lock;
	int refcount;
	int cqn;
	u32 cons_index;		/* software consumer pointer */
	struct mthca_cq_buf buf;
	struct mthca_cq_resize *resize_buf;
	int is_kernel;

	/* Next fields are Arbel only: doorbell records and arm state */
	int set_ci_db_index;
	__be32 *set_ci_db;
	int arm_db_index;
	__be32 *arm_db;
	int arm_sn;

	wait_queue_head_t wait;	/* destroy sleeps here until refcount hits zero */
	struct mutex mutex;
};
223*33ec1ccbSHans Petter Selasky
/*
 * Shared receive queue.  refcount/wait follow the same destroy
 * protocol as CQ/QP (see comment above).  first_free/last_free chain
 * the free WQEs; 'last' points at the most recently posted WQE.
 * 'wrid' maps WQE index -> consumer work-request ID for completions.
 */
struct mthca_srq {
	struct ib_srq ibsrq;
	spinlock_t lock;
	int refcount;
	int srqn;
	int max;		/* number of WQEs */
	int max_gs;		/* max scatter entries per WQE */
	int wqe_shift;		/* log2 of WQE stride */
	int first_free;
	int last_free;
	u16 counter;		/* Arbel only */
	int db_index;		/* Arbel only */
	__be32 *db;		/* Arbel only */
	void *last;

	int is_direct;		/* selects queue.direct vs queue.page_list */
	u64 *wrid;
	union mthca_buf queue;
	struct mthca_mr mr;

	wait_queue_head_t wait;
	struct mutex mutex;
};
247*33ec1ccbSHans Petter Selasky
/*
 * One work queue (send or receive) of a QP.  head/tail and
 * next_ind/last_comp are WQE indices maintained under 'lock';
 * 'last' points at the most recently built WQE.
 */
struct mthca_wq {
	spinlock_t lock;
	int max;		/* number of WQEs */
	unsigned next_ind;
	unsigned last_comp;
	unsigned head;
	unsigned tail;
	void *last;
	int max_gs;		/* max sge entries per WQE */
	int wqe_shift;		/* log2 of WQE stride */

	int db_index;		/* Arbel only */
	__be32 *db;
};
262*33ec1ccbSHans Petter Selasky
/*
 * Queue pair.  refcount/wait follow the destroy protocol described in
 * the locking comment above; 'mutex' serializes modify operations.
 * rq/sq share a single allocation ('queue') registered via 'mr', with
 * send WQEs starting at send_wqe_offset.  'wrid' maps WQE index ->
 * consumer work-request ID across both queues.
 */
struct mthca_qp {
	struct ib_qp ibqp;
	int refcount;
	u32 qpn;
	int is_direct;		/* selects queue.direct vs queue.page_list */
	u8 port;		/* for SQP and memfree use only */
	u8 alt_port;		/* for memfree use only */
	u8 transport;
	u8 state;		/* cached QP state */
	u8 atomic_rd_en;	/* responder atomic/RDMA-read enables -- TODO confirm bit meaning */
	u8 resp_depth;

	struct mthca_mr mr;

	struct mthca_wq rq;
	struct mthca_wq sq;
	enum ib_sig_type sq_policy;	/* signal all WQEs vs. per-WR */
	int send_wqe_offset;
	int max_inline_data;

	u64 *wrid;
	union mthca_buf queue;

	wait_queue_head_t wait;
	struct mutex mutex;
};
289*33ec1ccbSHans Petter Selasky
/*
 * Special QP (QP0/QP1): extends mthca_qp with MAD header state.
 * header_buf is a DMA-coherent staging buffer (header_dma its bus
 * address) used when building the UD header for each send.
 */
struct mthca_sqp {
	struct mthca_qp qp;
	int pkey_index;
	u32 qkey;
	u32 send_psn;
	struct ib_ud_header ud_header;
	int header_buf_size;
	void *header_buf;
	dma_addr_t header_dma;
};
300*33ec1ccbSHans Petter Selasky
/* Convert a core ib_ucontext pointer to its containing mthca_ucontext. */
static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}
305*33ec1ccbSHans Petter Selasky
/* Convert a core ib_fmr pointer to its containing mthca_fmr. */
static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
{
	return container_of(ibmr, struct mthca_fmr, ibmr);
}
310*33ec1ccbSHans Petter Selasky
/* Convert a core ib_mr pointer to its containing mthca_mr. */
static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mthca_mr, ibmr);
}
315*33ec1ccbSHans Petter Selasky
/* Convert a core ib_pd pointer to its containing mthca_pd. */
static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mthca_pd, ibpd);
}
320*33ec1ccbSHans Petter Selasky
/* Convert a core ib_ah pointer to its containing mthca_ah. */
static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mthca_ah, ibah);
}
325*33ec1ccbSHans Petter Selasky
/* Convert a core ib_cq pointer to its containing mthca_cq. */
static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mthca_cq, ibcq);
}
330*33ec1ccbSHans Petter Selasky
/* Convert a core ib_srq pointer to its containing mthca_srq. */
static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mthca_srq, ibsrq);
}
335*33ec1ccbSHans Petter Selasky
/* Convert a core ib_qp pointer to its containing mthca_qp. */
static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mthca_qp, ibqp);
}
340*33ec1ccbSHans Petter Selasky
/* Convert a mthca_qp pointer to its containing mthca_sqp; only valid
 * for QPs that were actually allocated as special QPs. */
static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
{
	return container_of(qp, struct mthca_sqp, qp);
}
345*33ec1ccbSHans Petter Selasky
#endif /* MTHCA_PROVIDER_H */