/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

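/*
 * SRQ context layouts handed to the firmware in the SW2HW_SRQ
 * mailbox: one format for Tavor, one for the mem-free (Arbel) HCAs.
 */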
struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

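/*
 * Return the kernel virtual address of WQE number n, whether the queue
 * was allocated as one direct buffer or as a list of pages.
 */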
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

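/*
 * Build a Tavor SRQ context: WQE base and descriptor size, PD number,
 * buffer LKey, and the UAR to use for doorbells.
 */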
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

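/*
 * Build a mem-free (Arbel) SRQ context: log2 queue size and SRQ number,
 * buffer LKey, doorbell record index, log2 WQE stride plus UAR page,
 * and the async EQ / PD word.
 */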
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize;

	memset(context, 0, sizeof *context);
	logsize = ilog2(srq->max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

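	/*
	 * Userspace SRQs have their WQE buffer allocated and registered by
	 * the consumer, so there is nothing for the kernel to set up here.
	 */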
	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

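/*
 * Create an SRQ: reserve an SRQ number (plus ICM and doorbell resources
 * on mem-free HCAs), allocate the WQE buffer, build the hardware context
 * in a mailbox and pass ownership to the device with SW2HW_SRQ, then
 * publish the SRQ in the SRQ table.
 */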
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

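	/*
	 * Allocate one more WQE than the consumer asked for: the last
	 * entry on the free list is never posted (it terminates the
	 * list), so srq->max - 1 usable entries remain.  Mem-free HCAs
	 * also require the queue size to be a power of two.
	 */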
	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

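/*
 * Destroy an SRQ: take it back from the hardware with HW2SW_SRQ, drop
 * it from the SRQ table, wait for any event handlers still holding a
 * reference, then release the buffer, doorbell record and SRQ number.
 */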
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret = 0;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
		mutex_unlock(&srq->mutex);
	}

	return ret;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

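/*
 * Dispatch an asynchronous event (such as an SRQ limit event) to the
 * consumer's event handler.  The reference count keeps the SRQ from
 * being destroyed while the handler is running.
 */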
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
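 *
 * The freed WQE is linked back onto the tail of the SRQ's free list so
 * that it can be posted again.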
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;
	struct mthca_next_seg *last_free;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
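		/*
		 * The WQE count in the receive doorbell is narrow (it
		 * shares a word with the SRQ number), so ring the
		 * doorbell in batches rather than once at the end.
		 */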
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

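/*
 * Post receive WQEs on a mem-free (Arbel) SRQ.  The hardware reads the
 * producer count from a doorbell record in host memory, so no MMIO
 * doorbell write is needed here.
 */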
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
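	 *
	 * For example, if the firmware reported a 1008-byte maximum
	 * descriptor, the largest power-of-two descriptor that fits is
	 * 512 bytes, which with 16-byte next and data segments leaves
	 * (512 - 16) / 16 = 31 scatter entries rather than 62.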
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}