133ec1ccbSHans Petter Selasky /*
233ec1ccbSHans Petter Selasky * Copyright (c) 2005 Cisco Systems. All rights reserved.
333ec1ccbSHans Petter Selasky *
433ec1ccbSHans Petter Selasky * This software is available to you under a choice of one of two
533ec1ccbSHans Petter Selasky * licenses. You may choose to be licensed under the terms of the GNU
633ec1ccbSHans Petter Selasky * General Public License (GPL) Version 2, available from the file
733ec1ccbSHans Petter Selasky * COPYING in the main directory of this source tree, or the
833ec1ccbSHans Petter Selasky * OpenIB.org BSD license below:
933ec1ccbSHans Petter Selasky *
1033ec1ccbSHans Petter Selasky * Redistribution and use in source and binary forms, with or
1133ec1ccbSHans Petter Selasky * without modification, are permitted provided that the following
1233ec1ccbSHans Petter Selasky * conditions are met:
1333ec1ccbSHans Petter Selasky *
1433ec1ccbSHans Petter Selasky * - Redistributions of source code must retain the above
1533ec1ccbSHans Petter Selasky * copyright notice, this list of conditions and the following
1633ec1ccbSHans Petter Selasky * disclaimer.
1733ec1ccbSHans Petter Selasky *
1833ec1ccbSHans Petter Selasky * - Redistributions in binary form must reproduce the above
1933ec1ccbSHans Petter Selasky * copyright notice, this list of conditions and the following
2033ec1ccbSHans Petter Selasky * disclaimer in the documentation and/or other materials
2133ec1ccbSHans Petter Selasky * provided with the distribution.
2233ec1ccbSHans Petter Selasky *
2333ec1ccbSHans Petter Selasky * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2433ec1ccbSHans Petter Selasky * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2533ec1ccbSHans Petter Selasky * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2633ec1ccbSHans Petter Selasky * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2733ec1ccbSHans Petter Selasky * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2833ec1ccbSHans Petter Selasky * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2933ec1ccbSHans Petter Selasky * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
3033ec1ccbSHans Petter Selasky * SOFTWARE.
3133ec1ccbSHans Petter Selasky */
3233ec1ccbSHans Petter Selasky
3333ec1ccbSHans Petter Selasky #include <linux/slab.h>
3433ec1ccbSHans Petter Selasky #include <linux/string.h>
3533ec1ccbSHans Petter Selasky #include <linux/sched.h>
3633ec1ccbSHans Petter Selasky
3733ec1ccbSHans Petter Selasky #include <asm/io.h>
3833ec1ccbSHans Petter Selasky
39*b633e08cSHans Petter Selasky #include <rdma/uverbs_ioctl.h>
40*b633e08cSHans Petter Selasky
4133ec1ccbSHans Petter Selasky #include "mthca_dev.h"
4233ec1ccbSHans Petter Selasky #include "mthca_cmd.h"
4333ec1ccbSHans Petter Selasky #include "mthca_memfree.h"
4433ec1ccbSHans Petter Selasky #include "mthca_wqe.h"
4533ec1ccbSHans Petter Selasky
enum {
	/*
	 * Largest SRQ WQE buffer allocated as one physically contiguous
	 * chunk; bigger queues fall back to a page list (see the
	 * is_direct flag set by mthca_buf_alloc()).
	 */
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
4933ec1ccbSHans Petter Selasky
/*
 * SRQ hardware context as consumed by Tavor-mode firmware
 * (passed to SW2HW_SRQ / returned by QUERY_SRQ).  All fields are
 * big-endian as required by the device.
 */
struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;	/* SRQ state + protection domain number */
	__be32 lkey;		/* L_Key of the WQE buffer memory region */
	__be32 uar;		/* doorbell UAR index (user or driver UAR) */
	__be16 limit_watermark;	/* limit value read back by mthca_query_srq() */
	__be16 wqe_cnt;
	u32 reserved[2];
};
5933ec1ccbSHans Petter Selasky
/*
 * SRQ hardware context as consumed by Arbel/memfree-mode firmware.
 * All fields are big-endian as required by the device.
 */
struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;	/* log2 queue size in bits 31:24, SRQN below */
	__be32 lkey;			/* L_Key of the WQE buffer memory region */
	__be32 db_index;		/* doorbell record index */
	__be32 logstride_usrpage;	/* log2 WQE stride in bits 31:29, UAR index below */
	__be64 wqe_base;
	__be32 eq_pd;			/* async EQ number in bits 31:24, PD below */
	__be16 limit_watermark;		/* limit value read back by mthca_query_srq() */
	__be16 wqe_cnt;
	u16 reserved1;
	__be16 wqe_counter;
	u32 reserved2[3];
};
7333ec1ccbSHans Petter Selasky
get_wqe(struct mthca_srq * srq,int n)7433ec1ccbSHans Petter Selasky static void *get_wqe(struct mthca_srq *srq, int n)
7533ec1ccbSHans Petter Selasky {
7633ec1ccbSHans Petter Selasky if (srq->is_direct)
7733ec1ccbSHans Petter Selasky return srq->queue.direct.buf + (n << srq->wqe_shift);
7833ec1ccbSHans Petter Selasky else
7933ec1ccbSHans Petter Selasky return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
8033ec1ccbSHans Petter Selasky ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
8133ec1ccbSHans Petter Selasky }
8233ec1ccbSHans Petter Selasky
/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 *
 * The stored value is the index of the next free WQE, or -1 for the
 * end of the free list (see mthca_alloc_srq_buf() and
 * mthca_free_srq_wqe()).
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
9633ec1ccbSHans Petter Selasky
/*
 * Fill in the Tavor-style SRQ hardware context that will be passed to
 * firmware.  Userspace SRQs get the doorbell UAR from the caller's
 * ucontext; kernel SRQs use the driver's own UAR.
 */
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context,
					 struct ib_udata *udata)
{
	struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);

	memset(context, 0, sizeof *context);

	/* Descriptor size is encoded in the low 6 bits of wqe_base_ds. */
	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd = cpu_to_be32(pd->pd_num);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);

	if (udata)
		context->uar = cpu_to_be32(ucontext->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}
11733ec1ccbSHans Petter Selasky
/*
 * Fill in the Arbel (memfree) SRQ hardware context that will be
 * passed to firmware.  Userspace SRQs get the doorbell UAR from the
 * caller's ucontext; kernel SRQs use the driver's own UAR.
 */
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context,
					 struct ib_udata *udata)
{
	struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	int logsize;

	memset(context, 0, sizeof *context);
	/* srq->max is a power of two here (rounded up in mthca_alloc_srq). */
	logsize = ilog2(srq->max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	/* log2 of the WQE stride in bits 31:29, UAR index in the low bits. */
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (udata)
		context->logstride_usrpage |= cpu_to_be32(ucontext->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}
14033ec1ccbSHans Petter Selasky
/*
 * Release the kernel-allocated SRQ WQE buffer and the wrid array.
 * Only valid for kernel SRQs; userspace SRQs never allocate these
 * (see mthca_alloc_srq_buf()).
 */
static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}
14733ec1ccbSHans Petter Selasky
mthca_alloc_srq_buf(struct mthca_dev * dev,struct mthca_pd * pd,struct mthca_srq * srq,struct ib_udata * udata)14833ec1ccbSHans Petter Selasky static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
149*b633e08cSHans Petter Selasky struct mthca_srq *srq, struct ib_udata *udata)
15033ec1ccbSHans Petter Selasky {
15133ec1ccbSHans Petter Selasky struct mthca_data_seg *scatter;
15233ec1ccbSHans Petter Selasky void *wqe;
15333ec1ccbSHans Petter Selasky int err;
15433ec1ccbSHans Petter Selasky int i;
15533ec1ccbSHans Petter Selasky
156*b633e08cSHans Petter Selasky if (udata)
15733ec1ccbSHans Petter Selasky return 0;
15833ec1ccbSHans Petter Selasky
15933ec1ccbSHans Petter Selasky srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
16033ec1ccbSHans Petter Selasky if (!srq->wrid)
16133ec1ccbSHans Petter Selasky return -ENOMEM;
16233ec1ccbSHans Petter Selasky
16333ec1ccbSHans Petter Selasky err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
16433ec1ccbSHans Petter Selasky MTHCA_MAX_DIRECT_SRQ_SIZE,
16533ec1ccbSHans Petter Selasky &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
16633ec1ccbSHans Petter Selasky if (err) {
16733ec1ccbSHans Petter Selasky kfree(srq->wrid);
16833ec1ccbSHans Petter Selasky return err;
16933ec1ccbSHans Petter Selasky }
17033ec1ccbSHans Petter Selasky
17133ec1ccbSHans Petter Selasky /*
17233ec1ccbSHans Petter Selasky * Now initialize the SRQ buffer so that all of the WQEs are
17333ec1ccbSHans Petter Selasky * linked into the list of free WQEs. In addition, set the
17433ec1ccbSHans Petter Selasky * scatter list L_Keys to the sentry value of 0x100.
17533ec1ccbSHans Petter Selasky */
17633ec1ccbSHans Petter Selasky for (i = 0; i < srq->max; ++i) {
17733ec1ccbSHans Petter Selasky struct mthca_next_seg *next;
17833ec1ccbSHans Petter Selasky
17933ec1ccbSHans Petter Selasky next = wqe = get_wqe(srq, i);
18033ec1ccbSHans Petter Selasky
18133ec1ccbSHans Petter Selasky if (i < srq->max - 1) {
18233ec1ccbSHans Petter Selasky *wqe_to_link(wqe) = i + 1;
18333ec1ccbSHans Petter Selasky next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
18433ec1ccbSHans Petter Selasky } else {
18533ec1ccbSHans Petter Selasky *wqe_to_link(wqe) = -1;
18633ec1ccbSHans Petter Selasky next->nda_op = 0;
18733ec1ccbSHans Petter Selasky }
18833ec1ccbSHans Petter Selasky
18933ec1ccbSHans Petter Selasky for (scatter = wqe + sizeof (struct mthca_next_seg);
19033ec1ccbSHans Petter Selasky (void *) scatter < wqe + (1 << srq->wqe_shift);
19133ec1ccbSHans Petter Selasky ++scatter)
19233ec1ccbSHans Petter Selasky scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
19333ec1ccbSHans Petter Selasky }
19433ec1ccbSHans Petter Selasky
19533ec1ccbSHans Petter Selasky srq->last = get_wqe(srq, srq->max - 1);
19633ec1ccbSHans Petter Selasky
19733ec1ccbSHans Petter Selasky return 0;
19833ec1ccbSHans Petter Selasky }
19933ec1ccbSHans Petter Selasky
mthca_alloc_srq(struct mthca_dev * dev,struct mthca_pd * pd,struct ib_srq_attr * attr,struct mthca_srq * srq,struct ib_udata * udata)20033ec1ccbSHans Petter Selasky int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
201*b633e08cSHans Petter Selasky struct ib_srq_attr *attr, struct mthca_srq *srq,
202*b633e08cSHans Petter Selasky struct ib_udata *udata)
20333ec1ccbSHans Petter Selasky {
20433ec1ccbSHans Petter Selasky struct mthca_mailbox *mailbox;
20533ec1ccbSHans Petter Selasky int ds;
20633ec1ccbSHans Petter Selasky int err;
20733ec1ccbSHans Petter Selasky
20833ec1ccbSHans Petter Selasky /* Sanity check SRQ size before proceeding */
20933ec1ccbSHans Petter Selasky if (attr->max_wr > dev->limits.max_srq_wqes ||
21033ec1ccbSHans Petter Selasky attr->max_sge > dev->limits.max_srq_sge)
21133ec1ccbSHans Petter Selasky return -EINVAL;
21233ec1ccbSHans Petter Selasky
21333ec1ccbSHans Petter Selasky srq->max = attr->max_wr;
21433ec1ccbSHans Petter Selasky srq->max_gs = attr->max_sge;
21533ec1ccbSHans Petter Selasky srq->counter = 0;
21633ec1ccbSHans Petter Selasky
21733ec1ccbSHans Petter Selasky if (mthca_is_memfree(dev))
21833ec1ccbSHans Petter Selasky srq->max = roundup_pow_of_two(srq->max + 1);
21933ec1ccbSHans Petter Selasky else
22033ec1ccbSHans Petter Selasky srq->max = srq->max + 1;
22133ec1ccbSHans Petter Selasky
22233ec1ccbSHans Petter Selasky ds = max(64UL,
22333ec1ccbSHans Petter Selasky roundup_pow_of_two(sizeof (struct mthca_next_seg) +
22433ec1ccbSHans Petter Selasky srq->max_gs * sizeof (struct mthca_data_seg)));
22533ec1ccbSHans Petter Selasky
22633ec1ccbSHans Petter Selasky if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
22733ec1ccbSHans Petter Selasky return -EINVAL;
22833ec1ccbSHans Petter Selasky
22933ec1ccbSHans Petter Selasky srq->wqe_shift = ilog2(ds);
23033ec1ccbSHans Petter Selasky
23133ec1ccbSHans Petter Selasky srq->srqn = mthca_alloc(&dev->srq_table.alloc);
23233ec1ccbSHans Petter Selasky if (srq->srqn == -1)
23333ec1ccbSHans Petter Selasky return -ENOMEM;
23433ec1ccbSHans Petter Selasky
23533ec1ccbSHans Petter Selasky if (mthca_is_memfree(dev)) {
23633ec1ccbSHans Petter Selasky err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
23733ec1ccbSHans Petter Selasky if (err)
23833ec1ccbSHans Petter Selasky goto err_out;
23933ec1ccbSHans Petter Selasky
240*b633e08cSHans Petter Selasky if (!udata) {
24133ec1ccbSHans Petter Selasky srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
24233ec1ccbSHans Petter Selasky srq->srqn, &srq->db);
24333ec1ccbSHans Petter Selasky if (srq->db_index < 0) {
24433ec1ccbSHans Petter Selasky err = -ENOMEM;
24533ec1ccbSHans Petter Selasky goto err_out_icm;
24633ec1ccbSHans Petter Selasky }
24733ec1ccbSHans Petter Selasky }
24833ec1ccbSHans Petter Selasky }
24933ec1ccbSHans Petter Selasky
25033ec1ccbSHans Petter Selasky mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
25133ec1ccbSHans Petter Selasky if (IS_ERR(mailbox)) {
25233ec1ccbSHans Petter Selasky err = PTR_ERR(mailbox);
25333ec1ccbSHans Petter Selasky goto err_out_db;
25433ec1ccbSHans Petter Selasky }
25533ec1ccbSHans Petter Selasky
256*b633e08cSHans Petter Selasky err = mthca_alloc_srq_buf(dev, pd, srq, udata);
25733ec1ccbSHans Petter Selasky if (err)
25833ec1ccbSHans Petter Selasky goto err_out_mailbox;
25933ec1ccbSHans Petter Selasky
26033ec1ccbSHans Petter Selasky spin_lock_init(&srq->lock);
26133ec1ccbSHans Petter Selasky srq->refcount = 1;
26233ec1ccbSHans Petter Selasky init_waitqueue_head(&srq->wait);
26333ec1ccbSHans Petter Selasky mutex_init(&srq->mutex);
26433ec1ccbSHans Petter Selasky
26533ec1ccbSHans Petter Selasky if (mthca_is_memfree(dev))
266*b633e08cSHans Petter Selasky mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
26733ec1ccbSHans Petter Selasky else
268*b633e08cSHans Petter Selasky mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);
26933ec1ccbSHans Petter Selasky
27033ec1ccbSHans Petter Selasky err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);
27133ec1ccbSHans Petter Selasky
27233ec1ccbSHans Petter Selasky if (err) {
27333ec1ccbSHans Petter Selasky mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
27433ec1ccbSHans Petter Selasky goto err_out_free_buf;
27533ec1ccbSHans Petter Selasky }
27633ec1ccbSHans Petter Selasky
27733ec1ccbSHans Petter Selasky spin_lock_irq(&dev->srq_table.lock);
27833ec1ccbSHans Petter Selasky if (mthca_array_set(&dev->srq_table.srq,
27933ec1ccbSHans Petter Selasky srq->srqn & (dev->limits.num_srqs - 1),
28033ec1ccbSHans Petter Selasky srq)) {
28133ec1ccbSHans Petter Selasky spin_unlock_irq(&dev->srq_table.lock);
28233ec1ccbSHans Petter Selasky goto err_out_free_srq;
28333ec1ccbSHans Petter Selasky }
28433ec1ccbSHans Petter Selasky spin_unlock_irq(&dev->srq_table.lock);
28533ec1ccbSHans Petter Selasky
28633ec1ccbSHans Petter Selasky mthca_free_mailbox(dev, mailbox);
28733ec1ccbSHans Petter Selasky
28833ec1ccbSHans Petter Selasky srq->first_free = 0;
28933ec1ccbSHans Petter Selasky srq->last_free = srq->max - 1;
29033ec1ccbSHans Petter Selasky
29133ec1ccbSHans Petter Selasky attr->max_wr = srq->max - 1;
29233ec1ccbSHans Petter Selasky attr->max_sge = srq->max_gs;
29333ec1ccbSHans Petter Selasky
29433ec1ccbSHans Petter Selasky return 0;
29533ec1ccbSHans Petter Selasky
29633ec1ccbSHans Petter Selasky err_out_free_srq:
29733ec1ccbSHans Petter Selasky err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
29833ec1ccbSHans Petter Selasky if (err)
29933ec1ccbSHans Petter Selasky mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
30033ec1ccbSHans Petter Selasky
30133ec1ccbSHans Petter Selasky err_out_free_buf:
302*b633e08cSHans Petter Selasky if (!udata)
30333ec1ccbSHans Petter Selasky mthca_free_srq_buf(dev, srq);
30433ec1ccbSHans Petter Selasky
30533ec1ccbSHans Petter Selasky err_out_mailbox:
30633ec1ccbSHans Petter Selasky mthca_free_mailbox(dev, mailbox);
30733ec1ccbSHans Petter Selasky
30833ec1ccbSHans Petter Selasky err_out_db:
309*b633e08cSHans Petter Selasky if (!udata && mthca_is_memfree(dev))
31033ec1ccbSHans Petter Selasky mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
31133ec1ccbSHans Petter Selasky
31233ec1ccbSHans Petter Selasky err_out_icm:
31333ec1ccbSHans Petter Selasky mthca_table_put(dev, dev->srq_table.table, srq->srqn);
31433ec1ccbSHans Petter Selasky
31533ec1ccbSHans Petter Selasky err_out:
31633ec1ccbSHans Petter Selasky mthca_free(&dev->srq_table.alloc, srq->srqn);
31733ec1ccbSHans Petter Selasky
31833ec1ccbSHans Petter Selasky return err;
31933ec1ccbSHans Petter Selasky }
32033ec1ccbSHans Petter Selasky
get_srq_refcount(struct mthca_dev * dev,struct mthca_srq * srq)32133ec1ccbSHans Petter Selasky static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
32233ec1ccbSHans Petter Selasky {
32333ec1ccbSHans Petter Selasky int c;
32433ec1ccbSHans Petter Selasky
32533ec1ccbSHans Petter Selasky spin_lock_irq(&dev->srq_table.lock);
32633ec1ccbSHans Petter Selasky c = srq->refcount;
32733ec1ccbSHans Petter Selasky spin_unlock_irq(&dev->srq_table.lock);
32833ec1ccbSHans Petter Selasky
32933ec1ccbSHans Petter Selasky return c;
33033ec1ccbSHans Petter Selasky }
33133ec1ccbSHans Petter Selasky
/*
 * Tear down an SRQ: move it back to software ownership (HW2SW_SRQ),
 * remove it from the lookup table, wait for in-flight event handlers
 * to drop their references, then free buffers, doorbell, ICM and the
 * SRQ number.
 */
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

	/*
	 * Clear the table entry and drop our reference under the lock;
	 * mthca_srq_event() can no longer find this SRQ afterwards.
	 */
	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	/* Wait until any event handler still holding a reference is done. */
	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	/* Kernel SRQs own their buffer and (memfree) doorbell record. */
	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}
36533ec1ccbSHans Petter Selasky
mthca_modify_srq(struct ib_srq * ibsrq,struct ib_srq_attr * attr,enum ib_srq_attr_mask attr_mask,struct ib_udata * udata)36633ec1ccbSHans Petter Selasky int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
36733ec1ccbSHans Petter Selasky enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
36833ec1ccbSHans Petter Selasky {
36933ec1ccbSHans Petter Selasky struct mthca_dev *dev = to_mdev(ibsrq->device);
37033ec1ccbSHans Petter Selasky struct mthca_srq *srq = to_msrq(ibsrq);
37133ec1ccbSHans Petter Selasky int ret = 0;
37233ec1ccbSHans Petter Selasky
37333ec1ccbSHans Petter Selasky /* We don't support resizing SRQs (yet?) */
37433ec1ccbSHans Petter Selasky if (attr_mask & IB_SRQ_MAX_WR)
37533ec1ccbSHans Petter Selasky return -EINVAL;
37633ec1ccbSHans Petter Selasky
37733ec1ccbSHans Petter Selasky if (attr_mask & IB_SRQ_LIMIT) {
37833ec1ccbSHans Petter Selasky u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
37933ec1ccbSHans Petter Selasky if (attr->srq_limit > max_wr)
38033ec1ccbSHans Petter Selasky return -EINVAL;
38133ec1ccbSHans Petter Selasky
38233ec1ccbSHans Petter Selasky mutex_lock(&srq->mutex);
38333ec1ccbSHans Petter Selasky ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
38433ec1ccbSHans Petter Selasky mutex_unlock(&srq->mutex);
38533ec1ccbSHans Petter Selasky }
38633ec1ccbSHans Petter Selasky
38733ec1ccbSHans Petter Selasky return ret;
38833ec1ccbSHans Petter Selasky }
38933ec1ccbSHans Petter Selasky
mthca_query_srq(struct ib_srq * ibsrq,struct ib_srq_attr * srq_attr)39033ec1ccbSHans Petter Selasky int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
39133ec1ccbSHans Petter Selasky {
39233ec1ccbSHans Petter Selasky struct mthca_dev *dev = to_mdev(ibsrq->device);
39333ec1ccbSHans Petter Selasky struct mthca_srq *srq = to_msrq(ibsrq);
39433ec1ccbSHans Petter Selasky struct mthca_mailbox *mailbox;
39533ec1ccbSHans Petter Selasky struct mthca_arbel_srq_context *arbel_ctx;
39633ec1ccbSHans Petter Selasky struct mthca_tavor_srq_context *tavor_ctx;
39733ec1ccbSHans Petter Selasky int err;
39833ec1ccbSHans Petter Selasky
39933ec1ccbSHans Petter Selasky mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
40033ec1ccbSHans Petter Selasky if (IS_ERR(mailbox))
40133ec1ccbSHans Petter Selasky return PTR_ERR(mailbox);
40233ec1ccbSHans Petter Selasky
40333ec1ccbSHans Petter Selasky err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
40433ec1ccbSHans Petter Selasky if (err)
40533ec1ccbSHans Petter Selasky goto out;
40633ec1ccbSHans Petter Selasky
40733ec1ccbSHans Petter Selasky if (mthca_is_memfree(dev)) {
40833ec1ccbSHans Petter Selasky arbel_ctx = mailbox->buf;
40933ec1ccbSHans Petter Selasky srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
41033ec1ccbSHans Petter Selasky } else {
41133ec1ccbSHans Petter Selasky tavor_ctx = mailbox->buf;
41233ec1ccbSHans Petter Selasky srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
41333ec1ccbSHans Petter Selasky }
41433ec1ccbSHans Petter Selasky
41533ec1ccbSHans Petter Selasky srq_attr->max_wr = srq->max - 1;
41633ec1ccbSHans Petter Selasky srq_attr->max_sge = srq->max_gs;
41733ec1ccbSHans Petter Selasky
41833ec1ccbSHans Petter Selasky out:
41933ec1ccbSHans Petter Selasky mthca_free_mailbox(dev, mailbox);
42033ec1ccbSHans Petter Selasky
42133ec1ccbSHans Petter Selasky return err;
42233ec1ccbSHans Petter Selasky }
42333ec1ccbSHans Petter Selasky
/*
 * Dispatch an asynchronous event for SRQ number @srqn to its
 * ib_srq event handler, if any.  A reference is taken under the
 * table lock so the SRQ cannot be freed while the handler runs;
 * mthca_free_srq() waits on srq->wait for the count to drop.
 */
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	/* Drop our reference; wake anyone waiting to destroy the SRQ. */
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}
45533ec1ccbSHans Petter Selasky
45633ec1ccbSHans Petter Selasky /*
45733ec1ccbSHans Petter Selasky * This function must be called with IRQs disabled.
45833ec1ccbSHans Petter Selasky */
mthca_free_srq_wqe(struct mthca_srq * srq,u32 wqe_addr)45933ec1ccbSHans Petter Selasky void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
46033ec1ccbSHans Petter Selasky {
46133ec1ccbSHans Petter Selasky int ind;
46233ec1ccbSHans Petter Selasky struct mthca_next_seg *last_free;
46333ec1ccbSHans Petter Selasky
46433ec1ccbSHans Petter Selasky ind = wqe_addr >> srq->wqe_shift;
46533ec1ccbSHans Petter Selasky
46633ec1ccbSHans Petter Selasky spin_lock(&srq->lock);
46733ec1ccbSHans Petter Selasky
46833ec1ccbSHans Petter Selasky last_free = get_wqe(srq, srq->last_free);
46933ec1ccbSHans Petter Selasky *wqe_to_link(last_free) = ind;
47033ec1ccbSHans Petter Selasky last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
47133ec1ccbSHans Petter Selasky *wqe_to_link(get_wqe(srq, ind)) = -1;
47233ec1ccbSHans Petter Selasky srq->last_free = ind;
47333ec1ccbSHans Petter Selasky
47433ec1ccbSHans Petter Selasky spin_unlock(&srq->lock);
47533ec1ccbSHans Petter Selasky }
47633ec1ccbSHans Petter Selasky
/*
 * Post a chain of receive work requests to a Tavor-mode SRQ.
 *
 * WQEs are taken from the software free list (linked through the imm
 * field, see wqe_to_link()).  The previous WQE's ee_nds is updated
 * with MTHCA_NEXT_DBD to chain the new descriptor, and the receive
 * doorbell is rung in batches of at most
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB.
 *
 * On error, *bad_wr points at the offending work request and the
 * WQEs posted before it remain posted.
 */
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		/* next_ind == -1 means the free list is exhausted. */
		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			/* Roll back srq->last; this WQE was never chained. */
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		/* Terminate a short scatter list with an invalid-lkey entry. */
		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		/* Chain this descriptor after the previous one (DBD bit). */
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
57633ec1ccbSHans Petter Selasky
/*
 * Post a chain of receive work requests to an Arbel (mem-free) SRQ.
 *
 * Walks the caller's WR list, copying each request into the next free
 * WQE on the SRQ's free list, then publishes the new tail by updating
 * the doorbell record.  On failure, *bad_wr points at the first WR that
 * could not be posted and the error code is returned; WRs before it
 * have still been posted successfully.
 *
 * Returns 0 on success, -ENOMEM if the SRQ is full, -EINVAL if a WR
 * has more SGEs than the SRQ supports.
 */
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;		/* index of the WQE being filled */
	int next_ind;		/* free-list successor of that WQE */
	int nreq;		/* number of WRs successfully posted */
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		/* Take the head of the SRQ free list. */
		ind = srq->first_free;
		wqe = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		/* A negative link marks the end of the free list: SRQ full. */
		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		/* Copy the caller's scatter list into the WQE. */
		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		/* Terminate a short scatter list with an invalid-lkey segment. */
		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind] = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
64133ec1ccbSHans Petter Selasky
mthca_max_srq_sge(struct mthca_dev * dev)64233ec1ccbSHans Petter Selasky int mthca_max_srq_sge(struct mthca_dev *dev)
64333ec1ccbSHans Petter Selasky {
64433ec1ccbSHans Petter Selasky if (mthca_is_memfree(dev))
64533ec1ccbSHans Petter Selasky return dev->limits.max_sg;
64633ec1ccbSHans Petter Selasky
64733ec1ccbSHans Petter Selasky /*
64833ec1ccbSHans Petter Selasky * SRQ allocations are based on powers of 2 for Tavor,
64933ec1ccbSHans Petter Selasky * (although they only need to be multiples of 16 bytes).
65033ec1ccbSHans Petter Selasky *
65133ec1ccbSHans Petter Selasky * Therefore, we need to base the max number of sg entries on
65233ec1ccbSHans Petter Selasky * the largest power of 2 descriptor size that is <= to the
65333ec1ccbSHans Petter Selasky * actual max WQE descriptor size, rather than return the
65433ec1ccbSHans Petter Selasky * max_sg value given by the firmware (which is based on WQE
65533ec1ccbSHans Petter Selasky * sizes as multiples of 16, not powers of 2).
65633ec1ccbSHans Petter Selasky *
65733ec1ccbSHans Petter Selasky * If SRQ implementation is changed for Tavor to be based on
65833ec1ccbSHans Petter Selasky * multiples of 16, the calculation below can be deleted and
65933ec1ccbSHans Petter Selasky * the FW max_sg value returned.
66033ec1ccbSHans Petter Selasky */
66133ec1ccbSHans Petter Selasky return min_t(int, dev->limits.max_sg,
66233ec1ccbSHans Petter Selasky ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
66333ec1ccbSHans Petter Selasky sizeof (struct mthca_next_seg)) /
66433ec1ccbSHans Petter Selasky sizeof (struct mthca_data_seg));
66533ec1ccbSHans Petter Selasky }
66633ec1ccbSHans Petter Selasky
mthca_init_srq_table(struct mthca_dev * dev)66733ec1ccbSHans Petter Selasky int mthca_init_srq_table(struct mthca_dev *dev)
66833ec1ccbSHans Petter Selasky {
66933ec1ccbSHans Petter Selasky int err;
67033ec1ccbSHans Petter Selasky
67133ec1ccbSHans Petter Selasky if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
67233ec1ccbSHans Petter Selasky return 0;
67333ec1ccbSHans Petter Selasky
67433ec1ccbSHans Petter Selasky spin_lock_init(&dev->srq_table.lock);
67533ec1ccbSHans Petter Selasky
67633ec1ccbSHans Petter Selasky err = mthca_alloc_init(&dev->srq_table.alloc,
67733ec1ccbSHans Petter Selasky dev->limits.num_srqs,
67833ec1ccbSHans Petter Selasky dev->limits.num_srqs - 1,
67933ec1ccbSHans Petter Selasky dev->limits.reserved_srqs);
68033ec1ccbSHans Petter Selasky if (err)
68133ec1ccbSHans Petter Selasky return err;
68233ec1ccbSHans Petter Selasky
68333ec1ccbSHans Petter Selasky err = mthca_array_init(&dev->srq_table.srq,
68433ec1ccbSHans Petter Selasky dev->limits.num_srqs);
68533ec1ccbSHans Petter Selasky if (err)
68633ec1ccbSHans Petter Selasky mthca_alloc_cleanup(&dev->srq_table.alloc);
68733ec1ccbSHans Petter Selasky
68833ec1ccbSHans Petter Selasky return err;
68933ec1ccbSHans Petter Selasky }
69033ec1ccbSHans Petter Selasky
mthca_cleanup_srq_table(struct mthca_dev * dev)69133ec1ccbSHans Petter Selasky void mthca_cleanup_srq_table(struct mthca_dev *dev)
69233ec1ccbSHans Petter Selasky {
69333ec1ccbSHans Petter Selasky if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
69433ec1ccbSHans Petter Selasky return;
69533ec1ccbSHans Petter Selasky
69633ec1ccbSHans Petter Selasky mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
69733ec1ccbSHans Petter Selasky mthca_alloc_cleanup(&dev->srq_table.alloc);
69833ec1ccbSHans Petter Selasky }
699