/* xref: /freebsd/sys/dev/mthca/mthca_srq.c (revision 33ec1ccbae880855a4aa9e221ba8512da70e541e) */
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
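
/*
 * SRQ buffers up to MTHCA_MAX_DIRECT_SRQ_SIZE are allocated as a
 * single contiguous ("direct") buffer; larger queues fall back to a
 * list of pages (see mthca_buf_alloc() and get_wqe() below).
 */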

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits are the descriptor size, in 16-byte units */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};
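
/*
 * Both structures mirror the hardware's SRQ context layout (Tavor and
 * Arbel/memfree HCAs respectively); multi-byte fields are stored
 * big-endian, as the __be types indicate.
 */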

static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}
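
/*
 * Worked example (illustrative values): with a 64-byte WQE stride
 * (wqe_shift == 6) and 4 KB pages, WQE 70 lives at byte offset
 * 70 << 6 == 4480, i.e. in page_list[1] at offset 384 within that
 * page.  In the direct case the same offset is used from the start
 * of the single contiguous buffer.
 */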

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	/*
	 * Put max in a temporary variable to work around gcc bug
	 * triggered by ilog2() on sparc64.
	 */
	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;
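
	/*
	 * One extra WQE is allocated here; the value reported back to
	 * the consumer (here and in mthca_query_srq()) is srq->max - 1.
	 * Memfree HCAs additionally require a power-of-two queue size,
	 * since the context stores only its log.
	 */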

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);
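
	/*
	 * Illustrative example (assuming 16-byte next and data
	 * segments): max_sge == 4 gives a descriptor of
	 * 16 + 4 * 16 == 80 bytes, rounded up to a 128-byte stride,
	 * so wqe_shift == 7.  ds is a power of two here, so the
	 * ilog2() above is exact.
	 */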

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

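	/*
	 * Error unwinding: each label below undoes one allocation made
	 * above, in reverse order of acquisition.
	 */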
err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	/* Report the mthca_array_set() failure, not the cleanup result. */
	err = -ENOMEM;

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret = 0;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

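		/*
		 * Arming the SRQ causes the HCA to raise an
		 * IB_EVENT_SRQ_LIMIT_REACHED async event once the
		 * number of posted WQEs drops below srq_limit.
		 */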
		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
		mutex_unlock(&srq->mutex);
	}

	return ret;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;
	struct mthca_next_seg *last_free;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
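		/*
		 * The Tavor receive doorbell encodes the request count
		 * in its low byte, so flush the batch before nreq can
		 * overflow that field.
		 */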
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
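		/*
		 * Arbel (memfree) HCAs poll this doorbell record in
		 * host memory, so no MMIO doorbell write is needed
		 * here, unlike the Tavor path above.
		 */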
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor (although
	 * they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is less than or
	 * equal to the actual max WQE descriptor size, rather than
	 * return the max_sg value given by the firmware (which is
	 * based on WQE sizes as multiples of 16, not powers of 2).
	 *
	 * If the SRQ implementation is changed for Tavor to be based
	 * on multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
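	/*
	 * Illustrative example (assuming 16-byte next and data
	 * segments): with max_desc_sz == 1000, the largest power of 2
	 * not exceeding it is 512, leaving (512 - 16) / 16 == 31
	 * scatter entries; fls(1000) - 1 == 9 computes that exponent.
	 */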
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}