1718cf2ccSPedro F. Giffuni /*-
24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause
3718cf2ccSPedro F. Giffuni *
4fb93f5c4SNavdeep Parhar * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
5fb93f5c4SNavdeep Parhar *
6fb93f5c4SNavdeep Parhar * This software is available to you under a choice of one of two
7fb93f5c4SNavdeep Parhar * licenses. You may choose to be licensed under the terms of the GNU
8fb93f5c4SNavdeep Parhar * General Public License (GPL) Version 2, available from the file
9fb93f5c4SNavdeep Parhar * COPYING in the main directory of this source tree, or the
10fb93f5c4SNavdeep Parhar * OpenIB.org BSD license below:
11fb93f5c4SNavdeep Parhar *
12fb93f5c4SNavdeep Parhar * Redistribution and use in source and binary forms, with or
13fb93f5c4SNavdeep Parhar * without modification, are permitted provided that the following
14fb93f5c4SNavdeep Parhar * conditions are met:
15fb93f5c4SNavdeep Parhar *
16fb93f5c4SNavdeep Parhar * - Redistributions of source code must retain the above
17fb93f5c4SNavdeep Parhar * copyright notice, this list of conditions and the following
18fb93f5c4SNavdeep Parhar * disclaimer.
19fb93f5c4SNavdeep Parhar *
20fb93f5c4SNavdeep Parhar * - Redistributions in binary form must reproduce the above
21fb93f5c4SNavdeep Parhar * copyright notice, this list of conditions and the following
22fb93f5c4SNavdeep Parhar * disclaimer in the documentation and/or other materials
23fb93f5c4SNavdeep Parhar * provided with the distribution.
24fb93f5c4SNavdeep Parhar *
25fb93f5c4SNavdeep Parhar * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26fb93f5c4SNavdeep Parhar * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27fb93f5c4SNavdeep Parhar * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28fb93f5c4SNavdeep Parhar * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29fb93f5c4SNavdeep Parhar * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30fb93f5c4SNavdeep Parhar * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31fb93f5c4SNavdeep Parhar * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32fb93f5c4SNavdeep Parhar * SOFTWARE.
33fb93f5c4SNavdeep Parhar */
34fb93f5c4SNavdeep Parhar #include <sys/cdefs.h>
35fb93f5c4SNavdeep Parhar #include "opt_inet.h"
36fb93f5c4SNavdeep Parhar
37fb93f5c4SNavdeep Parhar #ifdef TCP_OFFLOAD
38fb93f5c4SNavdeep Parhar #include <sys/types.h>
39fb93f5c4SNavdeep Parhar #include <sys/malloc.h>
40fb93f5c4SNavdeep Parhar #include <sys/socket.h>
41fb93f5c4SNavdeep Parhar #include <sys/socketvar.h>
42fb93f5c4SNavdeep Parhar #include <sys/sockio.h>
43fb93f5c4SNavdeep Parhar #include <sys/taskqueue.h>
44fb93f5c4SNavdeep Parhar #include <netinet/in.h>
45fb93f5c4SNavdeep Parhar #include <net/route.h>
46fb93f5c4SNavdeep Parhar
47fb93f5c4SNavdeep Parhar #include <netinet/in_systm.h>
48fb93f5c4SNavdeep Parhar #include <netinet/in_pcb.h>
49fb93f5c4SNavdeep Parhar #include <netinet/ip.h>
50fb93f5c4SNavdeep Parhar #include <netinet/ip_var.h>
51fb93f5c4SNavdeep Parhar #include <netinet/tcp_var.h>
52fb93f5c4SNavdeep Parhar #include <netinet/tcp.h>
53fb93f5c4SNavdeep Parhar #include <netinet/tcpip.h>
54fb93f5c4SNavdeep Parhar
55fb93f5c4SNavdeep Parhar #include <netinet/toecore.h>
56fb93f5c4SNavdeep Parhar
57fb93f5c4SNavdeep Parhar struct sge_iq;
58fb93f5c4SNavdeep Parhar struct rss_header;
59f7bc3934SJohn Baldwin struct cpl_set_tcb_rpl;
60fb93f5c4SNavdeep Parhar #include <linux/types.h>
61fb93f5c4SNavdeep Parhar #include "offload.h"
62fb93f5c4SNavdeep Parhar #include "tom/t4_tom.h"
63fb93f5c4SNavdeep Parhar
64fb93f5c4SNavdeep Parhar #include "iw_cxgbe.h"
65fb93f5c4SNavdeep Parhar #include "user.h"
66121684b7SNavdeep Parhar
/* Forward declaration; creds() is defined later in this file (not visible here). */
static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);
/*
 * Max immediate data allowed in a fast-register WR.  The trailing comment
 * indicates this was intended to become a SYSCTL-tunable; it is currently
 * a fixed compile-time default.
 */
static int max_fr_immd = T4_MAX_FR_IMMD;//SYSCTL parameter later...
69fb93f5c4SNavdeep Parhar
/*
 * Reserve 'ird' inbound-RDMA-read credits from the device-wide pool.
 * Returns 0 on success or -ENOMEM when the pool cannot cover the
 * request; exhaustion is logged as a warning.
 */
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int rc;

	spin_lock_irq(&dev->lock);
	if (dev->avail_ird >= ird) {
		dev->avail_ird -= ird;
		rc = 0;
	} else
		rc = -ENOMEM;
	spin_unlock_irq(&dev->lock);

	if (rc != 0)
		log(LOG_WARNING, "%s: device IRD resources exhausted\n",
		    device_get_nameunit(dev->rdev.adap->dev));

	return rc;
}
875c2bacdeSNavdeep Parhar
free_ird(struct c4iw_dev * dev,int ird)885c2bacdeSNavdeep Parhar static void free_ird(struct c4iw_dev *dev, int ird)
895c2bacdeSNavdeep Parhar {
905c2bacdeSNavdeep Parhar spin_lock_irq(&dev->lock);
915c2bacdeSNavdeep Parhar dev->avail_ird += ird;
925c2bacdeSNavdeep Parhar spin_unlock_irq(&dev->lock);
935c2bacdeSNavdeep Parhar }
94fb93f5c4SNavdeep Parhar
set_state(struct c4iw_qp * qhp,enum c4iw_qp_state state)95fb93f5c4SNavdeep Parhar static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
96fb93f5c4SNavdeep Parhar {
97fb93f5c4SNavdeep Parhar unsigned long flag;
98fb93f5c4SNavdeep Parhar spin_lock_irqsave(&qhp->lock, flag);
99fb93f5c4SNavdeep Parhar qhp->attr.state = state;
100fb93f5c4SNavdeep Parhar spin_unlock_irqrestore(&qhp->lock, flag);
101fb93f5c4SNavdeep Parhar }
102fb93f5c4SNavdeep Parhar
/*
 * Free all resources backing a QP's work queues — the reverse of
 * create_qp(): DMA-coherent SQ/RQ rings, the RQT region, the software
 * shadow rings, and finally the qids.  Always returns 0.
 */
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(rhp->ibdev.dma_device,
			wq->rq.memsize, wq->rq.queue,
			dma_unmap_addr(&wq->rq, mapping));
	dma_free_coherent(rhp->ibdev.dma_device,
			wq->sq.memsize, wq->sq.queue,
			dma_unmap_addr(&wq->sq, mapping));
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}
124fb93f5c4SNavdeep Parhar
/*
 * Allocate and initialize the HW egress queues (SQ and RQ) for a QP.
 *
 * Steps: allocate qids, software shadow rings (kernel QPs only), an RQT
 * region, DMA-coherent ring memory, and BAR2 doorbell/WC mappings, then
 * post a single FW_RI_RES_WR that writes both EQ contexts in firmware
 * and wait for its completion.  Returns 0 or a negative errno; on
 * failure the goto chain unwinds exactly what was allocated so far.
 */
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		struct t4_cq *rcq, struct t4_cq *scq,
		struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	int user = (uctx != &rdev->uctx);	/* user-mode QP? */
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret = 0;
	int eqsize;
	struct wrqe *wr;
	u64 sq_bar2_qoffset = 0, rq_bar2_qoffset = 0;

	/* Refuse to touch an adapter that has been stopped. */
	if (__predict_false(c4iw_stopped(rdev)))
		return -EIO;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	/* Software shadow rings are only kept for kernel QPs. */
	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2 and at least 16 deep.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	/*QP memory, allocate DMAable memory for Send & Receive Queues */
	wq->sq.queue = dma_alloc_coherent(rhp->ibdev.dma_device, wq->sq.memsize,
			&(wq->sq.dma_addr), GFP_KERNEL);
	if (!wq->sq.queue) {
		ret = -ENOMEM;
		goto free_hwaddr;
	}
	wq->sq.phys_addr = vtophys(wq->sq.queue);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
	memset(wq->sq.queue, 0, wq->sq.memsize);

	wq->rq.queue = dma_alloc_coherent(rhp->ibdev.dma_device,
			wq->rq.memsize, &(wq->rq.dma_addr), GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq_dma;
	}
	wq->rq.phys_addr = vtophys(wq->rq.queue);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
	memset(wq->rq.queue, 0, wq->rq.memsize);

	CTR5(KTR_IW_CXGBE,
	    "%s QP sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx",
	    __func__,
	    wq->sq.queue, (unsigned long long)wq->sq.phys_addr,
	    wq->rq.queue, (unsigned long long)wq->rq.phys_addr);

	/* Doorbell/WC regions, determine the BAR2 queue offset and qid. */
	t4_bar2_sge_qregs(rdev->adap, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, user,
			&sq_bar2_qoffset, &wq->sq.bar2_qid);
	t4_bar2_sge_qregs(rdev->adap, wq->rq.qid, T4_BAR2_QTYPE_EGRESS, user,
			&rq_bar2_qoffset, &wq->rq.bar2_qid);

	if (user) {
		/* Compute BAR2 DB/WC physical address(page-aligned) for
		 * Userspace mapping.
		 */
		wq->sq.bar2_pa = (rdev->bar2_pa + sq_bar2_qoffset) & PAGE_MASK;
		wq->rq.bar2_pa = (rdev->bar2_pa + rq_bar2_qoffset) & PAGE_MASK;
		CTR3(KTR_IW_CXGBE,
		    "%s BAR2 DB/WC sq base pa 0x%llx rq base pa 0x%llx",
		    __func__, (unsigned long long)wq->sq.bar2_pa,
		    (unsigned long long)wq->rq.bar2_pa);
	} else {
		/* Compute BAR2 DB/WC virtual address to access in kernel. */
		wq->sq.bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
				sq_bar2_qoffset);
		wq->rq.bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
				rq_bar2_qoffset);
		/*
		 * NOTE(review): the format string uses %p but the arguments
		 * are cast to unsigned long long -- verify the format/arg
		 * pairing (benign on LP64, but worth confirming).
		 */
		CTR3(KTR_IW_CXGBE, "%s BAR2 DB/WC sq base va %p rq base va %p",
		    __func__, (unsigned long long)wq->sq.bar2_va,
		    (unsigned long long)wq->rq.bar2_va);
	}

	wq->rdev = rdev;
	wq->rq.msn = 1;		/* first expected receive message sequence number */

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;	/* one res per EQ */

	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
	if (wr == NULL) {
		ret = -ENOMEM;
		goto free_rq_dma;
	}
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	/* Cookie lets the reply handler find and wake this waiter. */
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
	    rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		    X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;	/* second res entry describes the RQ */
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
	    rdev->hw_queue.t4_eq_status_entries;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
		    X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	/* Ship the WR to firmware and wait for the completion reply. */
	t4_wrq_tx(sc, wr);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid,
			NULL, __func__);
	if (ret)
		goto free_rq_dma;

	CTR5(KTR_IW_CXGBE,
	    "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx",
	    __func__, wq->sq.qid, wq->rq.qid,
	    (unsigned long long)wq->sq.bar2_va,
	    (unsigned long long)wq->rq.bar2_va);

	return 0;
free_rq_dma:
	dma_free_coherent(rhp->ibdev.dma_device,
			wq->rq.memsize, wq->rq.queue,
			dma_unmap_addr(&wq->rq, mapping));
free_sq_dma:
	dma_free_coherent(rhp->ibdev.dma_device,
			wq->sq.memsize, wq->sq.queue,
			dma_unmap_addr(&wq->sq, mapping));
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
337fb93f5c4SNavdeep Parhar
/*
 * Copy the payload described by wr->sg_list into the SQ ring as
 * immediate data at 'immdp', wrapping around the end of the ring as
 * needed.  'max' caps the total immediate length.  On success the data
 * is zero-padded to a 16B boundary, *plenp holds the payload length,
 * and the fw_ri_immd header is filled in.  Returns 0 or -EMSGSIZE.
 */
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		const struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		/* For inline data the SGE addr is used directly as a CPU pointer. */
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			/* Wrap to the start of the ring when we hit the end. */
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			/* Copy at most up to the end of the ring per pass. */
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	/* Zero-pad header + payload out to the next 16-byte boundary. */
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
376fb93f5c4SNavdeep Parhar
/*
 * Write an ISGL (scatter/gather list) for 'sg_list' into the ring at
 * 'isglp', wrapping flit writes from queue_end back to queue_start.
 * If 'plenp' is non-NULL it receives the total payload byte count.
 * Returns 0, or -EMSGSIZE if the 32-bit total length overflows.
 */
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		int num_sge, u32 *plenp)

{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		/* Unsigned wraparound check on the running total. */
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		/* First flit: lkey in the high 32 bits, length in the low. */
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		/* Second flit: the 64-bit address. */
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	/* Zero the flit following the last SGE. */
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}
407fb93f5c4SNavdeep Parhar
/*
 * Fill in an FW send work request from an ib_send_wr.
 *
 * Supports IB_WR_SEND and IB_WR_SEND_WITH_INV (each optionally
 * solicited), with the payload either copied inline (IB_SEND_INLINE)
 * or described by an SGL.  *len16 receives the WR size in 16-byte
 * units.  Returns 0, -EINVAL for an unsupported opcode or too many
 * SGEs, or an error from build_immd()/build_isgl().
 */
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
		const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			/* Copy the payload into the WR itself. */
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			/* Reference the payload via a scatter/gather list. */
			ret = build_isgl((__be64 *)sq->queue,
					(__be64 *)&sq->queue[sq->size],
					wqe->send.u.isgl_src,
					wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		/* Zero-length send: emit an empty immediate-data chunk. */
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
474fb93f5c4SNavdeep Parhar
/*
 * Fill in an FW RDMA WRITE work request from an ib_send_wr: remote
 * stag/address taken from the rdma_wr, payload either inline
 * (IB_SEND_INLINE) or via an SGL.  *len16 receives the WR size in
 * 16-byte units.  Returns 0, -EINVAL for too many SGEs, or an error
 * from build_immd()/build_isgl().
 */
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
		const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.immd_data = 0;
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			/* Copy the payload into the WR itself. */
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			/* Reference the payload via a scatter/gather list. */
			ret = build_isgl((__be64 *)sq->queue,
					(__be64 *)&sq->queue[sq->size],
					wqe->write.u.isgl_src,
					wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		/* Zero-length write: emit an empty immediate-data chunk. */
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
517fb93f5c4SNavdeep Parhar
/*
 * Fill in an FW RDMA READ work request.  At most one SGE is supported.
 * A read with no SGE or a zero-length SGE is encoded with the reserved
 * stag value 2 and zeroed lengths/addresses.  *len16 receives the WR
 * size in 16-byte units.  Returns 0 or -EINVAL.
 */
static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;

	if (wr->num_sge == 0 || wr->sg_list[0].length == 0) {
		/* Zero-length read. */
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	} else {
		const struct ib_sge *sge = &wr->sg_list[0];
		u64 raddr = rdma_wr(wr)->remote_addr;

		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(raddr >> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)raddr);
		wqe->read.stag_sink = cpu_to_be32(sge->lkey);
		wqe->read.plen = cpu_to_be32(sge->length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(sge->addr >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(sge->addr));
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}
547fb93f5c4SNavdeep Parhar
/*
 * Fill in a receive work request: build the ISGL for wr->sg_list
 * directly in the RQ ring and compute *len16 (WR size in 16B units).
 * Returns 0 or an error from build_isgl().
 */
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
		const struct ib_recv_wr *wr, u8 *len16)
{
	__be64 *ring_start = (__be64 *)qhp->wq.rq.queue;
	__be64 *ring_end = (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size];
	int rc;

	rc = build_isgl(ring_start, ring_end, &wqe->recv.isgl,
	    wr->sg_list, wr->num_sge, NULL);
	if (rc != 0)
		return rc;

	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
	    wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}
562fb93f5c4SNavdeep Parhar
/*
 * Build a local-invalidate work request for the rkey carried in the
 * ib_send_wr.  Always succeeds.
 */
static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
		u8 *len16)
{
	wqe->inv.r2 = 0;
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}
571fb93f5c4SNavdeep Parhar
free_qp_work(struct work_struct * work)5725c2bacdeSNavdeep Parhar static void free_qp_work(struct work_struct *work)
5735c2bacdeSNavdeep Parhar {
5745c2bacdeSNavdeep Parhar struct c4iw_ucontext *ucontext;
5755c2bacdeSNavdeep Parhar struct c4iw_qp *qhp;
5765c2bacdeSNavdeep Parhar struct c4iw_dev *rhp;
5775c2bacdeSNavdeep Parhar
5785c2bacdeSNavdeep Parhar qhp = container_of(work, struct c4iw_qp, free_work);
5795c2bacdeSNavdeep Parhar ucontext = qhp->ucontext;
5805c2bacdeSNavdeep Parhar rhp = qhp->rhp;
5815c2bacdeSNavdeep Parhar
5826bb03465SNavdeep Parhar CTR3(KTR_IW_CXGBE, "%s qhp %p ucontext %p", __func__,
5835c2bacdeSNavdeep Parhar qhp, ucontext);
5845c2bacdeSNavdeep Parhar destroy_qp(&rhp->rdev, &qhp->wq,
5855c2bacdeSNavdeep Parhar ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
5865c2bacdeSNavdeep Parhar
5875c2bacdeSNavdeep Parhar kfree(qhp);
5885c2bacdeSNavdeep Parhar }
5895c2bacdeSNavdeep Parhar
queue_qp_free(struct kref * kref)5905c2bacdeSNavdeep Parhar static void queue_qp_free(struct kref *kref)
5915c2bacdeSNavdeep Parhar {
5925c2bacdeSNavdeep Parhar struct c4iw_qp *qhp;
5935c2bacdeSNavdeep Parhar
5945c2bacdeSNavdeep Parhar qhp = container_of(kref, struct c4iw_qp, kref);
5955c2bacdeSNavdeep Parhar CTR2(KTR_IW_CXGBE, "%s qhp %p", __func__, qhp);
5965c2bacdeSNavdeep Parhar queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
5975c2bacdeSNavdeep Parhar }
5985c2bacdeSNavdeep Parhar
c4iw_qp_add_ref(struct ib_qp * qp)599fb93f5c4SNavdeep Parhar void c4iw_qp_add_ref(struct ib_qp *qp)
600fb93f5c4SNavdeep Parhar {
601fb93f5c4SNavdeep Parhar CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
6025c2bacdeSNavdeep Parhar kref_get(&to_c4iw_qp(qp)->kref);
603fb93f5c4SNavdeep Parhar }
604fb93f5c4SNavdeep Parhar
c4iw_qp_rem_ref(struct ib_qp * qp)605fb93f5c4SNavdeep Parhar void c4iw_qp_rem_ref(struct ib_qp *qp)
606fb93f5c4SNavdeep Parhar {
607fb93f5c4SNavdeep Parhar CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
6085c2bacdeSNavdeep Parhar kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
609fb93f5c4SNavdeep Parhar }
610fb93f5c4SNavdeep Parhar
complete_sq_drain_wr(struct c4iw_qp * qhp,const struct ib_send_wr * wr)611c3987b8eSHans Petter Selasky static void complete_sq_drain_wr(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
612401032c6SNavdeep Parhar {
613401032c6SNavdeep Parhar struct t4_cqe cqe = {};
614401032c6SNavdeep Parhar struct c4iw_cq *schp;
615401032c6SNavdeep Parhar unsigned long flag;
616401032c6SNavdeep Parhar struct t4_cq *cq;
617401032c6SNavdeep Parhar
618401032c6SNavdeep Parhar schp = to_c4iw_cq(qhp->ibqp.send_cq);
619401032c6SNavdeep Parhar cq = &schp->cq;
620401032c6SNavdeep Parhar
621401032c6SNavdeep Parhar PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
622401032c6SNavdeep Parhar cqe.u.drain_cookie = wr->wr_id;
623401032c6SNavdeep Parhar cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
624401032c6SNavdeep Parhar V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
625401032c6SNavdeep Parhar V_CQE_TYPE(1) |
626401032c6SNavdeep Parhar V_CQE_SWCQE(1) |
627401032c6SNavdeep Parhar V_CQE_QPID(qhp->wq.sq.qid));
628401032c6SNavdeep Parhar
629401032c6SNavdeep Parhar spin_lock_irqsave(&schp->lock, flag);
630401032c6SNavdeep Parhar cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
631401032c6SNavdeep Parhar cq->sw_queue[cq->sw_pidx] = cqe;
632401032c6SNavdeep Parhar t4_swcq_produce(cq);
633401032c6SNavdeep Parhar spin_unlock_irqrestore(&schp->lock, flag);
634401032c6SNavdeep Parhar
635401032c6SNavdeep Parhar spin_lock_irqsave(&schp->comp_handler_lock, flag);
636401032c6SNavdeep Parhar (*schp->ibcq.comp_handler)(&schp->ibcq,
637401032c6SNavdeep Parhar schp->ibcq.cq_context);
638401032c6SNavdeep Parhar spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
639401032c6SNavdeep Parhar }
640401032c6SNavdeep Parhar
/*
 * Receive-side counterpart of complete_sq_drain_wr(): complete a recv
 * WR in software with a T4_ERR_SWFLUSH CQE (TYPE=0) on the recv CQ when
 * the WQ is already in error.  The QPID field uses the SQ qid, which is
 * the queue-pair identifier shared by both directions.
 */
static void complete_rq_drain_wr(struct c4iw_qp *qhp, const struct ib_recv_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *rchp;
	unsigned long flag;
	struct t4_cq *cq;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	cq = &rchp->cq;

	PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
	/* Stash the caller's cookie so the poller can report it back. */
	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(qhp->wq.sq.qid));

	spin_lock_irqsave(&rchp->lock, flag);
	/* cq->gen must be sampled under the CQ lock. */
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&rchp->lock, flag);

	/* Notify the consumer under the dedicated handler lock. */
	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
				   rchp->ibcq.cq_context);
	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
670401032c6SNavdeep Parhar
/*
 * Build a FW_RI_FR_NSMR_TPTE_WR: a fast-register WR that carries the
 * complete TPT entry inline in the work request.  The caller
 * (c4iw_post_send) only selects this path when mpl_len <= 2, which is
 * why exactly two PBL slots are copied below.
 *
 * Returns 0 on success or -EINVAL if the MR page size is larger than
 * the driver supports.
 */
static int build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
    const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16)
{
	__be64 *p = (__be64 *)fr->pbl;

	if (wr->mr->page_size > C4IW_MAX_PAGE_SIZE)
		return -EINVAL;

	fr->r2 = cpu_to_be32(0);
	fr->stag = cpu_to_be32(mhp->ibmr.rkey);

	/* Valid NSMR TPTE keyed by the rkey's stag key, owned by our PD. */
	fr->tpte.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
	    V_FW_RI_TPTE_STAGKEY((mhp->ibmr.rkey & M_FW_RI_TPTE_STAGKEY)) |
	    V_FW_RI_TPTE_STAGSTATE(1) |
	    V_FW_RI_TPTE_STAGTYPE(FW_RI_STAG_NSMR) |
	    V_FW_RI_TPTE_PDID(mhp->attr.pdid));
	/* PS encodes log2(page size) relative to 4KB (hence the -12). */
	fr->tpte.locread_to_qpid = cpu_to_be32(
	    V_FW_RI_TPTE_PERM(c4iw_ib_to_tpt_access(wr->access)) |
	    V_FW_RI_TPTE_ADDRTYPE(FW_RI_VA_BASED_TO) |
	    V_FW_RI_TPTE_PS(ilog2(wr->mr->page_size) - 12));
	fr->tpte.nosnoop_pbladdr = cpu_to_be32(V_FW_RI_TPTE_PBLADDR(
	    PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
	fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
	/* 64-bit length and VA split into hi/lo 32-bit fields. */
	fr->tpte.len_hi = cpu_to_be32(mhp->ibmr.length >> 32);
	fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length & 0xffffffff);
	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);

	/*
	 * NOTE(review): mpl[1] is copied even when mpl_len == 1 —
	 * presumably the unused slot is ignored by firmware; confirm.
	 */
	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
	p[1] = cpu_to_be64((u64)mhp->mpl[1]);

	*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
	return 0;
}
7055c2bacdeSNavdeep Parhar
/*
 * Build a FW_RI_FR_NSMR_WR fast-register WR.  The MR's page list (PBL)
 * is attached either as a DSGL pointing at the DMA-able copy in
 * mhp->mpl_addr (when the adapter supports ULPTX memwrite DSGLs, DSGLs
 * are enabled, and the PBL is too big for immediate data) or inline as
 * immediate data following the WR header.
 *
 * Returns 0 on success, or -EINVAL if the page-list depth or page size
 * exceeds what the hardware supports.
 */
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
    const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
    bool dsgl_supported)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	/* PBL byte length, padded up to the 32-byte unit HW expects. */
	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
	int rem;

	if (mhp->mpl_len > t4_max_fr_depth(&mhp->rhp->rdev, use_dsgl))
		return -EINVAL;
	if (wr->mr->page_size > C4IW_MAX_PAGE_SIZE)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	/* pgsz_shift is log2(page size) relative to 4KB. */
	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
	/* 64-bit length and VA split into hi/lo 32-bit fields. */
	wqe->fr.len_hi = cpu_to_be32(mhp->ibmr.length >> 32);
	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length & 0xffffffff);
	wqe->fr.stag = cpu_to_be32(wr->key);
	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);

	if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
		struct fw_ri_dsgl *sglp;

		/* Byte-swap the PBL in place; HW reads it via DMA. */
		for (i = 0; i < mhp->mpl_len; i++)
			mhp->mpl[i] =
			    (__force u64)cpu_to_be64((u64)mhp->mpl[i]);

		/* Single-entry DSGL describing the whole PBL buffer. */
		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		/* Inline the PBL as immediate data after the header. */
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		/* Copy entries, wrapping at the end of the SQ ring. */
		for (i = 0; i < mhp->mpl_len; i++) {
			*p = cpu_to_be64((u64)mhp->mpl[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		BUG_ON(rem < 0);
		/* Zero-pad out to the 32-byte-rounded pbllen. */
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}

	return 0;
}
7735c2bacdeSNavdeep Parhar
/*
 * Post a chain of send work requests to the QP's send queue.
 *
 * Returns 0 on success or a negative errno; on failure *bad_wr points
 * at the first WR that could not be posted.  If the WQ is already in
 * error, WRs are completed in software via complete_sq_drain_wr()
 * instead of being handed to hardware.
 */
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
    const struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe = NULL;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;
	struct c4iw_rdev *rdev;

	qhp = to_c4iw_qp(ibqp);
	rdev = &qhp->rhp->rdev;
	/* Fail fast if the adapter has been stopped. */
	if (__predict_false(c4iw_stopped(rdev)))
		return -EIO;
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		/*
		 * NOTE(review): only the head WR is drained here, not the
		 * whole wr->next chain — confirm callers post one at a
		 * time in this state.
		 */
		complete_sq_drain_wr(qhp, wr);
		return err;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		/* WQE slot at the current hardware producer index. */
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		    qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		/* Translate the verbs opcode into a firmware WR. */
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
				/* Invalidate the local stag on completion. */
				c4iw_invalidate_mr(qhp->rhp,
						   wr->sg_list[0].lkey);
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			} else {
				fw_flags = 0;
			}
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_REG_MR: {
			struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);

			swsqe->opcode = FW_RI_FAST_REGISTER;
			/*
			 * Prefer the inline-TPTE fastreg WR when the
			 * firmware supports it, the MR is not yet valid,
			 * and the page list fits (<= 2 entries).
			 */
			if (rdev->adap->params.fr_nsmr_tpte_wr_support &&
			    !mhp->attr.state && mhp->mpl_len <= 2) {
				fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
				err = build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
				    mhp, &len16);
			} else {
				fw_opcode = FW_RI_FR_NSMR_WR;
				err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
				    mhp, &len16,
				    rdev->adap->params.ulptx_memwrite_dsgl);
			}
			if (err)
				break;
			mhp->attr.state = 1;
			break;
		}
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s post of type =%d TBD!", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		/* Record software-side state used by the CQ poller. */
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		CTR5(KTR_IW_CXGBE,
		    "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u",
		    __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		    swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		/* Accumulate consumed EQ entries for the doorbell ring. */
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}

	/* Ring the doorbell once for everything posted above. */
	t4_ring_sq_db(&qhp->wq, idx, wqe, rdev->adap->iwt.wc_en);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
915fb93f5c4SNavdeep Parhar
/*
 * Post a chain of receive work requests to the QP's receive queue.
 *
 * Returns 0 on success or a negative errno; on failure *bad_wr points
 * at the first WR that could not be posted.  If the WQ is already in
 * error, WRs are completed in software via complete_rq_drain_wr()
 * instead of being handed to hardware.
 */
int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
    const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	/* Fail fast if the adapter has been stopped. */
	if (__predict_false(c4iw_stopped(&qhp->rhp->rdev)))
		return -EIO;
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		/*
		 * NOTE(review): only the head WR is drained here, not the
		 * whole wr->next chain — confirm callers post one at a
		 * time in this state.
		 */
		complete_rq_drain_wr(qhp, wr);
		return err;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		/* WQE slot at the current hardware producer index. */
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		/* Stash the cookie so the CQ poller can report it back. */
		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		/* Accumulate consumed EQ entries for the doorbell ring. */
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}

	/* Ring the doorbell once for everything posted above. */
	t4_ring_rq_db(&qhp->wq, idx, wqe, qhp->rhp->rdev.adap->iwt.wc_en);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
981fb93f5c4SNavdeep Parhar
/*
 * Map a CQE error status to the iWARP TERMINATE message's layer/etype
 * and error code fields (RDMAP/DDP/MPA layers per RFC 5040/5041).
 * A NULL err_cqe yields a generic local catastrophic error.
 */
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	/* Was the failing op a send-with-invalidate variant? */
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	/* Tagged-buffer ops (RDMA write, inbound read response). */
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		/* Bounds violations are DDP-tagged or RDMAP depending on op. */
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		/* Unknown status: report a local catastrophic error. */
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
1117fb93f5c4SNavdeep Parhar
/*
 * Send an iWARP TERMINATE message to the peer over the QP's offloaded
 * TCP connection.  The layer/etype and ecode come from the QP's stored
 * attributes when an MPA-layer error was recorded, otherwise they are
 * derived from err_cqe via build_term_codes().  Silently returns on
 * allocation or tx-credit failure (best effort).
 *
 * NOTE(review): the gfp argument is unused here — allocation goes
 * through alloc_wrqe(); confirm whether it can be dropped at callers.
 */
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	int ret;
	struct fw_ri_wr *wqe;
	struct terminate_message *term;
	struct wrqe *wr;
	struct socket *so = qhp->ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, qhp->ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
	if (wr == NULL)
		return;
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR));
	wqe->flowid_len16 = cpu_to_be32(
		V_FW_WR_FLOWID(qhp->ep->hwtid) |
		V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		/* MPA-layer error already recorded on the QP. */
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		term->layer_etype = qhp->attr.layer_etype, /* unreachable */
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	/* Reserve tx credits before handing the WR to the queue. */
	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return;
	}
	t4_wrq_tx(qhp->rhp->rdev.adap, wr);
}
1159fb93f5c4SNavdeep Parhar
/*
 * Flush the QP's SQ and RQ, completing any outstanding WRs with flush
 * status, and notify the CQ consumers if anything was actually flushed.
 * Called with the qhp mutex held (via flush_qp from c4iw_modify_qp);
 * the CQ and QP spin locks are taken here, cq lock first, then qp lock.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		struct c4iw_cq *schp)
{
	int count;
	int rq_flushed, sq_flushed;
	unsigned long flag;

	CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp,
	    schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		/* Someone else already flushed this WQ; nothing to do. */
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;

	/* Drain the hardware RCQ, then flush the RQ into it. */
	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	sq_flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);

	/*
	 * Only call a completion handler if the CQ was armed and
	 * something was flushed into it above.
	 */
	if (schp == rchp) {
		/* SQ and RQ share one CQ: a single upcall covers both. */
		if (t4_clear_cq_armed(&rchp->cq) &&
		    (rq_flushed || sq_flushed)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
			    rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
	} else {
		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
			    rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
			    schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
	}
}
1220fb93f5c4SNavdeep Parhar
flush_qp(struct c4iw_qp * qhp)1221fb93f5c4SNavdeep Parhar static void flush_qp(struct c4iw_qp *qhp)
1222fb93f5c4SNavdeep Parhar {
1223fb93f5c4SNavdeep Parhar struct c4iw_cq *rchp, *schp;
1224fb93f5c4SNavdeep Parhar unsigned long flag;
1225fb93f5c4SNavdeep Parhar
12265c2bacdeSNavdeep Parhar rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
12275c2bacdeSNavdeep Parhar schp = to_c4iw_cq(qhp->ibqp.send_cq);
1228fb93f5c4SNavdeep Parhar
1229fb93f5c4SNavdeep Parhar t4_set_wq_in_error(&qhp->wq);
12305c2bacdeSNavdeep Parhar if (qhp->ibqp.uobject) {
1231fb93f5c4SNavdeep Parhar t4_set_cq_in_error(&rchp->cq);
1232fb93f5c4SNavdeep Parhar spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1233fb93f5c4SNavdeep Parhar (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1234fb93f5c4SNavdeep Parhar spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1235fb93f5c4SNavdeep Parhar if (schp != rchp) {
1236fb93f5c4SNavdeep Parhar t4_set_cq_in_error(&schp->cq);
1237fb93f5c4SNavdeep Parhar spin_lock_irqsave(&schp->comp_handler_lock, flag);
1238fb93f5c4SNavdeep Parhar (*schp->ibcq.comp_handler)(&schp->ibcq,
1239fb93f5c4SNavdeep Parhar schp->ibcq.cq_context);
1240fb93f5c4SNavdeep Parhar spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1241fb93f5c4SNavdeep Parhar }
1242fb93f5c4SNavdeep Parhar return;
1243fb93f5c4SNavdeep Parhar }
1244fb93f5c4SNavdeep Parhar __flush_qp(qhp, rchp, schp);
1245fb93f5c4SNavdeep Parhar }
1246fb93f5c4SNavdeep Parhar
/*
 * Post a FW_RI_WR FINI work request to take the connection out of RDMA
 * mode and wait for the firmware's completion.  Returns 0 on success and
 * an errno on failure.
 */
static int
rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
{
	struct c4iw_rdev *rdev = &rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));

	CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
	if (wr == NULL)
		/* NOTE(review): allocation failure is reported as success. */
		return (0);
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);

	/* Request a firmware completion so we can block on wr_wait below. */
	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	/* Cookie lets the reply handler find the waiter. */
	wqe->cookie = (unsigned long) &ep->com.wr_wait;
	wqe->u.fini.type = FW_RI_TYPE_FINI;

	c4iw_init_wr_wait(&ep->com.wr_wait);

	/* Charge tx credits before handing the WR to the hardware. */
	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return ret;
	}
	t4_wrq_tx(sc, wr);

	/* Block until firmware acknowledges the FINI. */
	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, ep->com.so, __func__);
	return ret;
}
1291fb93f5c4SNavdeep Parhar
/*
 * Build the zero-length "RTR" (ready-to-receive) message the MPA
 * initiator posts during connection startup, according to the
 * negotiated p2p type.  Fills in init->u only.
 */
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		/* 0B RDMA write carrying a single (empty) immediate. */
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
					   sizeof(struct fw_ri_immd),
					   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		/*
		 * 0B RDMA read request.  NOTE(review): the opcode is
		 * stored through u.write rather than u.read; presumably
		 * the opcode field sits at the same offset in both union
		 * members — TODO confirm against the fw_ri_init layout.
		 */
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}
1316fb93f5c4SNavdeep Parhar
131794036cffSNavdeep Parhar static int
creds(struct toepcb * toep,struct inpcb * inp,size_t wrsize)131894036cffSNavdeep Parhar creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
1319fb93f5c4SNavdeep Parhar {
1320fb93f5c4SNavdeep Parhar struct ofld_tx_sdesc *txsd;
1321fb93f5c4SNavdeep Parhar
1322fb93f5c4SNavdeep Parhar CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep , wrsize);
132394036cffSNavdeep Parhar INP_WLOCK(inp);
132453af6903SGleb Smirnoff if ((inp->inp_flags & INP_DROPPED) != 0) {
132594036cffSNavdeep Parhar INP_WUNLOCK(inp);
132694036cffSNavdeep Parhar return (EINVAL);
132794036cffSNavdeep Parhar }
1328fb93f5c4SNavdeep Parhar txsd = &toep->txsd[toep->txsd_pidx];
1329fb93f5c4SNavdeep Parhar txsd->tx_credits = howmany(wrsize, 16);
1330fb93f5c4SNavdeep Parhar txsd->plen = 0;
1331fb93f5c4SNavdeep Parhar KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
1332fb93f5c4SNavdeep Parhar ("%s: not enough credits (%d)", __func__, toep->tx_credits));
1333fb93f5c4SNavdeep Parhar toep->tx_credits -= txsd->tx_credits;
1334fb93f5c4SNavdeep Parhar if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
1335fb93f5c4SNavdeep Parhar toep->txsd_pidx = 0;
1336fb93f5c4SNavdeep Parhar toep->txsd_avail--;
133794036cffSNavdeep Parhar INP_WUNLOCK(inp);
1338fb93f5c4SNavdeep Parhar CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep ,
1339fb93f5c4SNavdeep Parhar txsd->tx_credits, toep->tx_credits, toep->txsd_pidx);
134094036cffSNavdeep Parhar return (0);
1341fb93f5c4SNavdeep Parhar }
1342fb93f5c4SNavdeep Parhar
/*
 * Post a FW_RI_WR INIT work request to move the connection into RDMA
 * mode and wait for the firmware's completion.  On return the toep's
 * ULP mode is switched to RDMA.  Returns 0 on success and an errno on
 * failure.
 */
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct c4iw_ep *ep = qhp->ep;
	struct c4iw_rdev *rdev = &qhp->rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
	if (wr == NULL)
		/* NOTE(review): allocation failure is reported as success. */
		return (0);
	wqe = wrtod(wr);
	/* Reserve IRD resources before committing to the INIT. */
	ret = alloc_ird(rhp, qhp->attr.max_ird);
	if (ret) {
		qhp->attr.max_ird = 0;
		free_wrqe(wr);
		return ret;
	}

	memset(wqe, 0, sizeof *wqe);

	/* Request a firmware completion so we can block on wr_wait below. */
	wqe->op_compl = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_WR) |
	    F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	/* Cookie lets the reply handler find the waiter. */
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
	    V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
	    V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	/* Translate negotiated MPA attributes into firmware flags. */
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
	    FW_RI_QP_RDMA_WRITE_ENABLE |
	    FW_RI_QP_BIND_ENABLE;
	/* Kernel QPs additionally get fast-register and STAG0 support. */
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
		    FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	/* RQT address is relative to the adapter's RQ memory region. */
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
	    sc->vres.rq.start);
	/* The MPA initiator must send the p2p RTR message. */
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&ep->com.wr_wait);

	/* Charge tx credits before handing the WR to the hardware. */
	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		free_ird(rhp, qhp->attr.max_ird);
		return ret;
	}
	t4_wrq_tx(sc, wr);

	/* Block until firmware acknowledges the INIT. */
	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, ep->com.so, __func__);

	toep->params.ulp_mode = ULP_MODE_RDMA;
	/* The IRD reservation was only needed while the INIT was in flight. */
	free_ird(rhp, qhp->attr.max_ird);

	return ret;
}
1433fb93f5c4SNavdeep Parhar
/*
 * QP state machine.  Applies attribute changes (when mask includes
 * C4IW_QP_ATTR_VALID_MODIFY, only legal in IDLE) and/or drives the QP
 * through a state transition (C4IW_QP_ATTR_NEXT_STATE).  'internal' is
 * nonzero when the transition originates inside the driver rather than
 * from a consumer.  Returns 0 or a negative errno.  May post a
 * TERMINATE, initiate a disconnect, and/or drop the QP's ep reference
 * after the qhp mutex is released.
 */
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp,
	    qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep);
	CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state,
	    (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > cur_max_read_depth(rhp)) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		/* All attr changes validated; commit atomically. */
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	/* Return EINPROGRESS if QP is already in transition state.
	 * Eg: CLOSING->IDLE transition or *->ERROR transition.
	 * This can happen while connection is switching(due to rdma_fini)
	 * from iWARP/RDDP to TOE mode and any inflight RDMA RX data will
	 * reach TOE driver -> TCP stack -> iWARP driver. In this way
	 * iWARP driver keep receiving inflight RDMA RX data until socket
	 * is closed or aborted. And if iWARP CM is in FPDU sate, then
	 * it tries to put QP in TERM state and disconnects endpoint.
	 * But as QP is already in transition state, this event is ignored.
	 */
	if ((qhp->attr.state >= C4IW_QP_STATE_ERROR) &&
	    (attrs->next_state == C4IW_QP_STATE_TERMINATE)) {
		ret = -EINPROGRESS;
		goto out;
	}

	/* Dispatch on (current state, requested next state). */
	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP. This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			/* Both QP and connection must hold a ref here. */
			BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				/* Consumer-initiated close: disconnect too. */
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			ep = qhp->ep;
			if (!internal) {
				c4iw_get_ep(&qhp->ep->com);
				terminate = 1;
				disconnect = 1;
			} else {
				/* Driver-initiated: only send TERM if asked. */
				terminate = qhp->attr.send_term;
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
			}
			break;
		case C4IW_QP_STATE_ERROR:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (!internal) {
				/* Abnormal close: abort the connection. */
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:

		/*
		 * Allow kernel users to move to ERROR for qp draining.
		 */
		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
		    C4IW_QP_STATE_ERROR)) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			/* Graceful close finished: release the ep. */
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		/* Queues must be fully drained before reuse. */
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printf("%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__,
	    qhp->ep, qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	BUG_ON(!ep);
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	/* Post-mutex work: TERM WR, disconnect, and ep deref. */
	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		__c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
		    GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state);
	return ret;
}
1683fb93f5c4SNavdeep Parhar
c4iw_destroy_qp(struct ib_qp * ib_qp,struct ib_udata * udata)1684b633e08cSHans Petter Selasky int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
1685fb93f5c4SNavdeep Parhar {
1686fb93f5c4SNavdeep Parhar struct c4iw_dev *rhp;
1687fb93f5c4SNavdeep Parhar struct c4iw_qp *qhp;
1688fb93f5c4SNavdeep Parhar struct c4iw_qp_attributes attrs;
1689fb93f5c4SNavdeep Parhar
1690fb93f5c4SNavdeep Parhar CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp);
1691fb93f5c4SNavdeep Parhar qhp = to_c4iw_qp(ib_qp);
1692fb93f5c4SNavdeep Parhar rhp = qhp->rhp;
1693fb93f5c4SNavdeep Parhar
1694fb93f5c4SNavdeep Parhar attrs.next_state = C4IW_QP_STATE_ERROR;
1695fb93f5c4SNavdeep Parhar if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1696fb93f5c4SNavdeep Parhar c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1697fb93f5c4SNavdeep Parhar else
1698fb93f5c4SNavdeep Parhar c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1699fb93f5c4SNavdeep Parhar wait_event(qhp->wait, !qhp->ep);
1700fb93f5c4SNavdeep Parhar
17015c2bacdeSNavdeep Parhar remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1702fb93f5c4SNavdeep Parhar
17035c2bacdeSNavdeep Parhar free_ird(rhp, qhp->attr.max_ird);
17045c2bacdeSNavdeep Parhar c4iw_qp_rem_ref(ib_qp);
1705fb93f5c4SNavdeep Parhar
1706fb93f5c4SNavdeep Parhar CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp,
1707fb93f5c4SNavdeep Parhar qhp->wq.sq.qid);
1708fb93f5c4SNavdeep Parhar return 0;
1709fb93f5c4SNavdeep Parhar }
1710fb93f5c4SNavdeep Parhar
1711fb93f5c4SNavdeep Parhar struct ib_qp *
c4iw_create_qp(struct ib_pd * pd,struct ib_qp_init_attr * attrs,struct ib_udata * udata)1712fb93f5c4SNavdeep Parhar c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1713fb93f5c4SNavdeep Parhar struct ib_udata *udata)
1714fb93f5c4SNavdeep Parhar {
1715fb93f5c4SNavdeep Parhar struct c4iw_dev *rhp;
1716fb93f5c4SNavdeep Parhar struct c4iw_qp *qhp;
1717fb93f5c4SNavdeep Parhar struct c4iw_pd *php;
1718fb93f5c4SNavdeep Parhar struct c4iw_cq *schp;
1719fb93f5c4SNavdeep Parhar struct c4iw_cq *rchp;
1720fb93f5c4SNavdeep Parhar struct c4iw_create_qp_resp uresp;
17215c2bacdeSNavdeep Parhar unsigned int sqsize, rqsize;
1722fb93f5c4SNavdeep Parhar struct c4iw_ucontext *ucontext;
17235c2bacdeSNavdeep Parhar int ret;
17245c2bacdeSNavdeep Parhar struct c4iw_mm_entry *sq_key_mm = NULL, *rq_key_mm = NULL;
17255c2bacdeSNavdeep Parhar struct c4iw_mm_entry *sq_db_key_mm = NULL, *rq_db_key_mm = NULL;
1726fb93f5c4SNavdeep Parhar
1727fb93f5c4SNavdeep Parhar CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
1728fb93f5c4SNavdeep Parhar
1729fb93f5c4SNavdeep Parhar if (attrs->qp_type != IB_QPT_RC)
1730fb93f5c4SNavdeep Parhar return ERR_PTR(-EINVAL);
1731fb93f5c4SNavdeep Parhar
1732fb93f5c4SNavdeep Parhar php = to_c4iw_pd(pd);
1733fb93f5c4SNavdeep Parhar rhp = php->rhp;
1734fb93f5c4SNavdeep Parhar schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1735fb93f5c4SNavdeep Parhar rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1736fb93f5c4SNavdeep Parhar if (!schp || !rchp)
1737fb93f5c4SNavdeep Parhar return ERR_PTR(-EINVAL);
1738fb93f5c4SNavdeep Parhar
1739fb93f5c4SNavdeep Parhar if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1740fb93f5c4SNavdeep Parhar return ERR_PTR(-EINVAL);
1741fb93f5c4SNavdeep Parhar
17425c2bacdeSNavdeep Parhar if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
1743fb93f5c4SNavdeep Parhar return ERR_PTR(-E2BIG);
17445c2bacdeSNavdeep Parhar rqsize = attrs->cap.max_recv_wr + 1;
17455c2bacdeSNavdeep Parhar if (rqsize < 8)
17465c2bacdeSNavdeep Parhar rqsize = 8;
1747fb93f5c4SNavdeep Parhar
17485c2bacdeSNavdeep Parhar if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
1749fb93f5c4SNavdeep Parhar return ERR_PTR(-E2BIG);
17505c2bacdeSNavdeep Parhar sqsize = attrs->cap.max_send_wr + 1;
17515c2bacdeSNavdeep Parhar if (sqsize < 8)
17525c2bacdeSNavdeep Parhar sqsize = 8;
1753fb93f5c4SNavdeep Parhar
1754fb93f5c4SNavdeep Parhar ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1755fb93f5c4SNavdeep Parhar
1756fb93f5c4SNavdeep Parhar qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1757fb93f5c4SNavdeep Parhar if (!qhp)
1758fb93f5c4SNavdeep Parhar return ERR_PTR(-ENOMEM);
1759fb93f5c4SNavdeep Parhar qhp->wq.sq.size = sqsize;
17605c2bacdeSNavdeep Parhar qhp->wq.sq.memsize =
17615c2bacdeSNavdeep Parhar (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
17625c2bacdeSNavdeep Parhar sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
17635c2bacdeSNavdeep Parhar qhp->wq.sq.flush_cidx = -1;
1764fb93f5c4SNavdeep Parhar qhp->wq.rq.size = rqsize;
17655c2bacdeSNavdeep Parhar qhp->wq.rq.memsize =
17665c2bacdeSNavdeep Parhar (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
17675c2bacdeSNavdeep Parhar sizeof(*qhp->wq.rq.queue);
1768fb93f5c4SNavdeep Parhar
1769fb93f5c4SNavdeep Parhar if (ucontext) {
1770fb93f5c4SNavdeep Parhar qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1771fb93f5c4SNavdeep Parhar qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1772fb93f5c4SNavdeep Parhar }
1773fb93f5c4SNavdeep Parhar
1774fb93f5c4SNavdeep Parhar CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu",
1775fb93f5c4SNavdeep Parhar __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1776fb93f5c4SNavdeep Parhar
1777fb93f5c4SNavdeep Parhar ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1778fb93f5c4SNavdeep Parhar ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1779fb93f5c4SNavdeep Parhar if (ret)
1780fb93f5c4SNavdeep Parhar goto err1;
1781fb93f5c4SNavdeep Parhar
1782fb93f5c4SNavdeep Parhar attrs->cap.max_recv_wr = rqsize - 1;
1783fb93f5c4SNavdeep Parhar attrs->cap.max_send_wr = sqsize - 1;
1784fb93f5c4SNavdeep Parhar attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1785fb93f5c4SNavdeep Parhar
1786fb93f5c4SNavdeep Parhar qhp->rhp = rhp;
1787fb93f5c4SNavdeep Parhar qhp->attr.pd = php->pdid;
1788fb93f5c4SNavdeep Parhar qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1789fb93f5c4SNavdeep Parhar qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1790fb93f5c4SNavdeep Parhar qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1791fb93f5c4SNavdeep Parhar qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1792fb93f5c4SNavdeep Parhar qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1793fb93f5c4SNavdeep Parhar qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1794fb93f5c4SNavdeep Parhar qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1795fb93f5c4SNavdeep Parhar qhp->attr.state = C4IW_QP_STATE_IDLE;
1796fb93f5c4SNavdeep Parhar qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1797fb93f5c4SNavdeep Parhar qhp->attr.enable_rdma_read = 1;
1798fb93f5c4SNavdeep Parhar qhp->attr.enable_rdma_write = 1;
1799fb93f5c4SNavdeep Parhar qhp->attr.enable_bind = 1;
18005c2bacdeSNavdeep Parhar qhp->attr.max_ord = 0;
18015c2bacdeSNavdeep Parhar qhp->attr.max_ird = 0;
18028d814a45SNavdeep Parhar qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1803fb93f5c4SNavdeep Parhar spin_lock_init(&qhp->lock);
1804fb93f5c4SNavdeep Parhar mutex_init(&qhp->mutex);
1805fb93f5c4SNavdeep Parhar init_waitqueue_head(&qhp->wait);
18065c2bacdeSNavdeep Parhar kref_init(&qhp->kref);
18075c2bacdeSNavdeep Parhar INIT_WORK(&qhp->free_work, free_qp_work);
1808fb93f5c4SNavdeep Parhar
18095c2bacdeSNavdeep Parhar ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1810fb93f5c4SNavdeep Parhar if (ret)
1811fb93f5c4SNavdeep Parhar goto err2;
1812fb93f5c4SNavdeep Parhar
1813fb93f5c4SNavdeep Parhar if (udata) {
18145c2bacdeSNavdeep Parhar sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
18155c2bacdeSNavdeep Parhar if (!sq_key_mm) {
1816fb93f5c4SNavdeep Parhar ret = -ENOMEM;
1817fb93f5c4SNavdeep Parhar goto err3;
1818fb93f5c4SNavdeep Parhar }
18195c2bacdeSNavdeep Parhar rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
18205c2bacdeSNavdeep Parhar if (!rq_key_mm) {
1821fb93f5c4SNavdeep Parhar ret = -ENOMEM;
1822fb93f5c4SNavdeep Parhar goto err4;
1823fb93f5c4SNavdeep Parhar }
18245c2bacdeSNavdeep Parhar sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
18255c2bacdeSNavdeep Parhar if (!sq_db_key_mm) {
1826fb93f5c4SNavdeep Parhar ret = -ENOMEM;
1827fb93f5c4SNavdeep Parhar goto err5;
1828fb93f5c4SNavdeep Parhar }
18295c2bacdeSNavdeep Parhar rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
18305c2bacdeSNavdeep Parhar if (!rq_db_key_mm) {
1831fb93f5c4SNavdeep Parhar ret = -ENOMEM;
1832fb93f5c4SNavdeep Parhar goto err6;
1833fb93f5c4SNavdeep Parhar }
1834fb93f5c4SNavdeep Parhar uresp.flags = 0;
1835fb93f5c4SNavdeep Parhar uresp.qid_mask = rhp->rdev.qpmask;
1836fb93f5c4SNavdeep Parhar uresp.sqid = qhp->wq.sq.qid;
1837fb93f5c4SNavdeep Parhar uresp.sq_size = qhp->wq.sq.size;
1838fb93f5c4SNavdeep Parhar uresp.sq_memsize = qhp->wq.sq.memsize;
1839fb93f5c4SNavdeep Parhar uresp.rqid = qhp->wq.rq.qid;
1840fb93f5c4SNavdeep Parhar uresp.rq_size = qhp->wq.rq.size;
1841fb93f5c4SNavdeep Parhar uresp.rq_memsize = qhp->wq.rq.memsize;
1842fb93f5c4SNavdeep Parhar spin_lock(&ucontext->mmap_lock);
18435c2bacdeSNavdeep Parhar uresp.ma_sync_key = 0;
1844fb93f5c4SNavdeep Parhar uresp.sq_key = ucontext->key;
1845fb93f5c4SNavdeep Parhar ucontext->key += PAGE_SIZE;
1846fb93f5c4SNavdeep Parhar uresp.rq_key = ucontext->key;
1847fb93f5c4SNavdeep Parhar ucontext->key += PAGE_SIZE;
1848fb93f5c4SNavdeep Parhar uresp.sq_db_gts_key = ucontext->key;
1849fb93f5c4SNavdeep Parhar ucontext->key += PAGE_SIZE;
1850fb93f5c4SNavdeep Parhar uresp.rq_db_gts_key = ucontext->key;
1851fb93f5c4SNavdeep Parhar ucontext->key += PAGE_SIZE;
1852fb93f5c4SNavdeep Parhar spin_unlock(&ucontext->mmap_lock);
1853fb93f5c4SNavdeep Parhar ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1854fb93f5c4SNavdeep Parhar if (ret)
1855fb93f5c4SNavdeep Parhar goto err7;
18565c2bacdeSNavdeep Parhar sq_key_mm->key = uresp.sq_key;
18575c2bacdeSNavdeep Parhar sq_key_mm->addr = qhp->wq.sq.phys_addr;
18585c2bacdeSNavdeep Parhar sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
18595c2bacdeSNavdeep Parhar CTR4(KTR_IW_CXGBE, "%s sq_key_mm %x, %x, %d", __func__,
18605c2bacdeSNavdeep Parhar sq_key_mm->key, sq_key_mm->addr,
18615c2bacdeSNavdeep Parhar sq_key_mm->len);
18625c2bacdeSNavdeep Parhar insert_mmap(ucontext, sq_key_mm);
18635c2bacdeSNavdeep Parhar rq_key_mm->key = uresp.rq_key;
18645c2bacdeSNavdeep Parhar rq_key_mm->addr = qhp->wq.rq.phys_addr;
18655c2bacdeSNavdeep Parhar rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
18665c2bacdeSNavdeep Parhar CTR4(KTR_IW_CXGBE, "%s rq_key_mm %x, %x, %d", __func__,
18675c2bacdeSNavdeep Parhar rq_key_mm->key, rq_key_mm->addr,
18685c2bacdeSNavdeep Parhar rq_key_mm->len);
18695c2bacdeSNavdeep Parhar insert_mmap(ucontext, rq_key_mm);
18705c2bacdeSNavdeep Parhar sq_db_key_mm->key = uresp.sq_db_gts_key;
18715c2bacdeSNavdeep Parhar sq_db_key_mm->addr = (u64)qhp->wq.sq.bar2_pa;
18725c2bacdeSNavdeep Parhar sq_db_key_mm->len = PAGE_SIZE;
18735c2bacdeSNavdeep Parhar CTR4(KTR_IW_CXGBE, "%s sq_db_key_mm %x, %x, %d", __func__,
18745c2bacdeSNavdeep Parhar sq_db_key_mm->key, sq_db_key_mm->addr,
18755c2bacdeSNavdeep Parhar sq_db_key_mm->len);
18765c2bacdeSNavdeep Parhar insert_mmap(ucontext, sq_db_key_mm);
18775c2bacdeSNavdeep Parhar rq_db_key_mm->key = uresp.rq_db_gts_key;
18785c2bacdeSNavdeep Parhar rq_db_key_mm->addr = (u64)qhp->wq.rq.bar2_pa;
18795c2bacdeSNavdeep Parhar rq_db_key_mm->len = PAGE_SIZE;
18805c2bacdeSNavdeep Parhar CTR4(KTR_IW_CXGBE, "%s rq_db_key_mm %x, %x, %d", __func__,
18815c2bacdeSNavdeep Parhar rq_db_key_mm->key, rq_db_key_mm->addr,
18825c2bacdeSNavdeep Parhar rq_db_key_mm->len);
18835c2bacdeSNavdeep Parhar insert_mmap(ucontext, rq_db_key_mm);
18845c2bacdeSNavdeep Parhar
18855c2bacdeSNavdeep Parhar qhp->ucontext = ucontext;
1886fb93f5c4SNavdeep Parhar }
1887fb93f5c4SNavdeep Parhar qhp->ibqp.qp_num = qhp->wq.sq.qid;
1888fb93f5c4SNavdeep Parhar init_timer(&(qhp->timer));
18895c2bacdeSNavdeep Parhar
18906bb03465SNavdeep Parhar CTR5(KTR_IW_CXGBE, "%s sq id %u size %u memsize %zu num_entries %u",
18915c2bacdeSNavdeep Parhar __func__, qhp->wq.sq.qid,
18925c2bacdeSNavdeep Parhar qhp->wq.sq.size, qhp->wq.sq.memsize, attrs->cap.max_send_wr);
18936bb03465SNavdeep Parhar CTR5(KTR_IW_CXGBE, "%s rq id %u size %u memsize %zu num_entries %u",
18945c2bacdeSNavdeep Parhar __func__, qhp->wq.rq.qid,
18955c2bacdeSNavdeep Parhar qhp->wq.rq.size, qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
1896fb93f5c4SNavdeep Parhar return &qhp->ibqp;
1897fb93f5c4SNavdeep Parhar err7:
18985c2bacdeSNavdeep Parhar kfree(rq_db_key_mm);
1899fb93f5c4SNavdeep Parhar err6:
19005c2bacdeSNavdeep Parhar kfree(sq_db_key_mm);
1901fb93f5c4SNavdeep Parhar err5:
19025c2bacdeSNavdeep Parhar kfree(rq_key_mm);
1903fb93f5c4SNavdeep Parhar err4:
19045c2bacdeSNavdeep Parhar kfree(sq_key_mm);
1905fb93f5c4SNavdeep Parhar err3:
1906fb93f5c4SNavdeep Parhar remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1907fb93f5c4SNavdeep Parhar err2:
1908fb93f5c4SNavdeep Parhar destroy_qp(&rhp->rdev, &qhp->wq,
1909fb93f5c4SNavdeep Parhar ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1910fb93f5c4SNavdeep Parhar err1:
1911fb93f5c4SNavdeep Parhar kfree(qhp);
1912fb93f5c4SNavdeep Parhar return ERR_PTR(ret);
1913fb93f5c4SNavdeep Parhar }
1914fb93f5c4SNavdeep Parhar
c4iw_ib_modify_qp(struct ib_qp * ibqp,struct ib_qp_attr * attr,int attr_mask,struct ib_udata * udata)1915fb93f5c4SNavdeep Parhar int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1916fb93f5c4SNavdeep Parhar int attr_mask, struct ib_udata *udata)
1917fb93f5c4SNavdeep Parhar {
1918fb93f5c4SNavdeep Parhar struct c4iw_dev *rhp;
1919fb93f5c4SNavdeep Parhar struct c4iw_qp *qhp;
1920fb93f5c4SNavdeep Parhar enum c4iw_qp_attr_mask mask = 0;
1921fb93f5c4SNavdeep Parhar struct c4iw_qp_attributes attrs;
1922fb93f5c4SNavdeep Parhar
1923fb93f5c4SNavdeep Parhar CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp);
1924fb93f5c4SNavdeep Parhar
1925fb93f5c4SNavdeep Parhar /* iwarp does not support the RTR state */
1926fb93f5c4SNavdeep Parhar if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1927fb93f5c4SNavdeep Parhar attr_mask &= ~IB_QP_STATE;
1928fb93f5c4SNavdeep Parhar
1929fb93f5c4SNavdeep Parhar /* Make sure we still have something left to do */
1930fb93f5c4SNavdeep Parhar if (!attr_mask)
1931fb93f5c4SNavdeep Parhar return 0;
1932fb93f5c4SNavdeep Parhar
1933fb93f5c4SNavdeep Parhar memset(&attrs, 0, sizeof attrs);
1934fb93f5c4SNavdeep Parhar qhp = to_c4iw_qp(ibqp);
1935fb93f5c4SNavdeep Parhar rhp = qhp->rhp;
1936fb93f5c4SNavdeep Parhar
1937fb93f5c4SNavdeep Parhar attrs.next_state = c4iw_convert_state(attr->qp_state);
1938fb93f5c4SNavdeep Parhar attrs.enable_rdma_read = (attr->qp_access_flags &
1939fb93f5c4SNavdeep Parhar IB_ACCESS_REMOTE_READ) ? 1 : 0;
1940fb93f5c4SNavdeep Parhar attrs.enable_rdma_write = (attr->qp_access_flags &
1941fb93f5c4SNavdeep Parhar IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1942fb93f5c4SNavdeep Parhar attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1943fb93f5c4SNavdeep Parhar
1944fb93f5c4SNavdeep Parhar
1945fb93f5c4SNavdeep Parhar mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1946fb93f5c4SNavdeep Parhar mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1947fb93f5c4SNavdeep Parhar (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1948fb93f5c4SNavdeep Parhar C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1949fb93f5c4SNavdeep Parhar C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1950fb93f5c4SNavdeep Parhar
1951fb93f5c4SNavdeep Parhar return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1952fb93f5c4SNavdeep Parhar }
1953fb93f5c4SNavdeep Parhar
c4iw_get_qp(struct ib_device * dev,int qpn)1954fb93f5c4SNavdeep Parhar struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1955fb93f5c4SNavdeep Parhar {
1956fb93f5c4SNavdeep Parhar CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn);
1957fb93f5c4SNavdeep Parhar return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1958fb93f5c4SNavdeep Parhar }
1959fb93f5c4SNavdeep Parhar
c4iw_ib_query_qp(struct ib_qp * ibqp,struct ib_qp_attr * attr,int attr_mask,struct ib_qp_init_attr * init_attr)1960fb93f5c4SNavdeep Parhar int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1961fb93f5c4SNavdeep Parhar int attr_mask, struct ib_qp_init_attr *init_attr)
1962fb93f5c4SNavdeep Parhar {
1963fb93f5c4SNavdeep Parhar struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
1964fb93f5c4SNavdeep Parhar
1965fb93f5c4SNavdeep Parhar memset(attr, 0, sizeof *attr);
1966fb93f5c4SNavdeep Parhar memset(init_attr, 0, sizeof *init_attr);
1967fb93f5c4SNavdeep Parhar attr->qp_state = to_ib_qp_state(qhp->attr.state);
19688d814a45SNavdeep Parhar init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
19698d814a45SNavdeep Parhar init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
19708d814a45SNavdeep Parhar init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
19718d814a45SNavdeep Parhar init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
19728d814a45SNavdeep Parhar init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
19738d814a45SNavdeep Parhar init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
1974fb93f5c4SNavdeep Parhar return 0;
1975fb93f5c4SNavdeep Parhar }
1976fb93f5c4SNavdeep Parhar #endif
1977