1 /*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/module.h>
34 #include <rdma/uverbs_ioctl.h>
35
36 #include "iw_cxgb4.h"
37
38 static int db_delay_usecs = 1;
39 module_param(db_delay_usecs, int, 0644);
40 MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");
41
42 static int ocqp_support = 1;
43 module_param(ocqp_support, int, 0644);
44 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
45
46 int db_fc_threshold = 1000;
47 module_param(db_fc_threshold, int, 0644);
48 MODULE_PARM_DESC(db_fc_threshold,
49 "QP count/threshold that triggers"
50 " automatic db flow control mode (default = 1000)");
51
52 int db_coalescing_threshold;
53 module_param(db_coalescing_threshold, int, 0644);
54 MODULE_PARM_DESC(db_coalescing_threshold,
55 "QP count/threshold that triggers"
56 " disabling db coalescing (default = 0)");
57
58 static int max_fr_immd = T4_MAX_FR_IMMD;
59 module_param(max_fr_immd, int, 0644);
60 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
61
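/*
 * Reserve 'ird' entries from the device-wide IRD pool, returning -ENOMEM
 * if the adapter's available IRD budget is exhausted.
 */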
62 static int alloc_ird(struct c4iw_dev *dev, u32 ird)
63 {
64 int ret = 0;
65
66 xa_lock_irq(&dev->qps);
67 if (ird <= dev->avail_ird)
68 dev->avail_ird -= ird;
69 else
70 ret = -ENOMEM;
71 xa_unlock_irq(&dev->qps);
72
73 if (ret)
74 dev_warn(&dev->rdev.lldi.pdev->dev,
75 "device IRD resources exhausted\n");
76
77 return ret;
78 }
79
80 static void free_ird(struct c4iw_dev *dev, int ird)
81 {
82 xa_lock_irq(&dev->qps);
83 dev->avail_ird += ird;
84 xa_unlock_irq(&dev->qps);
85 }
86
87 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
88 {
89 unsigned long flag;
90 spin_lock_irqsave(&qhp->lock, flag);
91 qhp->attr.state = state;
92 spin_unlock_irqrestore(&qhp->lock, flag);
93 }
94
95 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
96 {
97 c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
98 }
99
100 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
101 {
102 dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
103 dma_unmap_addr(sq, mapping));
104 }
105
106 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
107 {
108 if (t4_sq_onchip(sq))
109 dealloc_oc_sq(rdev, sq);
110 else
111 dealloc_host_sq(rdev, sq);
112 }
113
114 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
115 {
116 if (!ocqp_support || !ocqp_supported(&rdev->lldi))
117 return -ENOSYS;
118 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
119 if (!sq->dma_addr)
120 return -ENOMEM;
121 sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
122 rdev->lldi.vr->ocq.start;
123 sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
124 rdev->lldi.vr->ocq.start);
125 sq->flags |= T4_SQ_ONCHIP;
126 return 0;
127 }
128
129 static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
130 {
131 sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
132 &(sq->dma_addr), GFP_KERNEL);
133 if (!sq->queue)
134 return -ENOMEM;
135 sq->phys_addr = virt_to_phys(sq->queue);
136 dma_unmap_addr_set(sq, mapping, sq->dma_addr);
137 return 0;
138 }
139
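/*
 * For user QPs, first try to place the SQ in on-chip queue memory; fall
 * back to host DMA memory if on-chip allocation is unsupported or fails.
 */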
140 static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
141 {
142 int ret = -ENOSYS;
143 if (user)
144 ret = alloc_oc_sq(rdev, sq);
145 if (ret)
146 ret = alloc_host_sq(rdev, sq);
147 return ret;
148 }
149
150 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
151 struct c4iw_dev_ucontext *uctx, int has_rq)
152 {
153 /*
154 * uP clears EQ contexts when the connection exits rdma mode,
155 * so no need to post a RESET WR for these EQs.
156 */
157 dealloc_sq(rdev, &wq->sq);
158 kfree(wq->sq.sw_sq);
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
160
161 if (has_rq) {
162 dma_free_coherent(&rdev->lldi.pdev->dev,
163 wq->rq.memsize, wq->rq.queue,
164 dma_unmap_addr(&wq->rq, mapping));
165 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
166 kfree(wq->rq.sw_rq);
167 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
168 }
169 return 0;
170 }
171
172 /*
173 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
174 * then this is a user mapping so compute the page-aligned physical address
175 * for mapping.
176 */
177 void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
178 enum cxgb4_bar2_qtype qtype,
179 unsigned int *pbar2_qid, u64 *pbar2_pa)
180 {
181 u64 bar2_qoffset;
182 int ret;
183
184 ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
185 pbar2_pa ? 1 : 0,
186 &bar2_qoffset, pbar2_qid);
187 if (ret)
188 return NULL;
189
190 if (pbar2_pa)
191 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
192
193 if (is_t4(rdev->lldi.adapter_type))
194 return NULL;
195
196 return rdev->bar2_kva + bar2_qoffset;
197 }
198
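/*
 * Allocate the SQ/RQ qids, software queues, RQT and DMA queue memory for a
 * new QP, then post a FW_RI_RES_WR so the firmware writes the hardware EQ
 * contexts, and wait for its completion.
 */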
199 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
200 struct t4_cq *rcq, struct t4_cq *scq,
201 struct c4iw_dev_ucontext *uctx,
202 struct c4iw_wr_wait *wr_waitp,
203 int need_rq)
204 {
205 int user = (uctx != &rdev->uctx);
206 struct fw_ri_res_wr *res_wr;
207 struct fw_ri_res *res;
208 int wr_len;
209 struct sk_buff *skb;
210 int ret = 0;
211 int eqsize;
212
213 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
214 if (!wq->sq.qid)
215 return -ENOMEM;
216
217 if (need_rq) {
218 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
219 if (!wq->rq.qid) {
220 ret = -ENOMEM;
221 goto free_sq_qid;
222 }
223 }
224
225 if (!user) {
226 wq->sq.sw_sq = kzalloc_objs(*wq->sq.sw_sq, wq->sq.size);
227 if (!wq->sq.sw_sq) {
228 ret = -ENOMEM;
229 goto free_rq_qid;//FIXME
230 }
231
232 if (need_rq) {
233 wq->rq.sw_rq = kzalloc_objs(*wq->rq.sw_rq, wq->rq.size);
234 if (!wq->rq.sw_rq) {
235 ret = -ENOMEM;
236 goto free_sw_sq;
237 }
238 }
239 }
240
241 if (need_rq) {
242 /*
243 * RQT must be a power of 2 and at least 16 deep.
244 */
245 wq->rq.rqt_size =
246 roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
247 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
248 if (!wq->rq.rqt_hwaddr) {
249 ret = -ENOMEM;
250 goto free_sw_rq;
251 }
252 }
253
254 ret = alloc_sq(rdev, &wq->sq, user);
255 if (ret)
256 goto free_hwaddr;
257 memset(wq->sq.queue, 0, wq->sq.memsize);
258 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
259
260 if (need_rq) {
261 wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
262 wq->rq.memsize,
263 &wq->rq.dma_addr,
264 GFP_KERNEL);
265 if (!wq->rq.queue) {
266 ret = -ENOMEM;
267 goto free_sq;
268 }
269 pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
270 wq->sq.queue,
271 (unsigned long long)virt_to_phys(wq->sq.queue),
272 wq->rq.queue,
273 (unsigned long long)virt_to_phys(wq->rq.queue));
274 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
275 }
276
277 wq->db = rdev->lldi.db_reg;
278
279 wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
280 CXGB4_BAR2_QTYPE_EGRESS,
281 &wq->sq.bar2_qid,
282 user ? &wq->sq.bar2_pa : NULL);
283 if (need_rq)
284 wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
285 CXGB4_BAR2_QTYPE_EGRESS,
286 &wq->rq.bar2_qid,
287 user ? &wq->rq.bar2_pa : NULL);
288
289 /*
290 * User mode must have bar2 access.
291 */
292 if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
293 pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
294 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
295 ret = -EINVAL;
296 goto free_dma;
297 }
298
299 wq->rdev = rdev;
300 wq->rq.msn = 1;
301
302 /* build fw_ri_res_wr */
303 wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
304 if (need_rq)
305 wr_len += sizeof(*res);
306 skb = alloc_skb(wr_len, GFP_KERNEL);
307 if (!skb) {
308 ret = -ENOMEM;
309 goto free_dma;
310 }
311 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
312
313 res_wr = __skb_put_zero(skb, wr_len);
314 res_wr->op_nres = cpu_to_be32(
315 FW_WR_OP_V(FW_RI_RES_WR) |
316 FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
317 FW_WR_COMPL_F);
318 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
319 res_wr->cookie = (uintptr_t)wr_waitp;
320 res = res_wr->res;
321 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
322 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
323
324 /*
325 * eqsize is the number of 64B entries plus the status page size.
326 */
327 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
328 rdev->hw_queue.t4_eq_status_entries;
329
330 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
331 FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
332 FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
333 FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
334 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
335 FW_RI_RES_WR_IQID_V(scq->cqid));
336 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
337 FW_RI_RES_WR_DCAEN_V(0) |
338 FW_RI_RES_WR_DCACPU_V(0) |
339 FW_RI_RES_WR_FBMIN_V(2) |
340 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
341 FW_RI_RES_WR_FBMAX_V(3)) |
342 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
343 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
344 FW_RI_RES_WR_EQSIZE_V(eqsize));
345 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
346 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
347
348 if (need_rq) {
349 res++;
350 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
351 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
352
353 /*
354 * eqsize is the number of 64B entries plus the status page size
355 */
356 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
357 rdev->hw_queue.t4_eq_status_entries;
358 res->u.sqrq.fetchszm_to_iqid =
359 /* no host cidx updates */
360 cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
361 /* don't keep in chip cache */
362 FW_RI_RES_WR_CPRIO_V(0) |
363 /* set by uP at ri_init time */
364 FW_RI_RES_WR_PCIECHN_V(0) |
365 FW_RI_RES_WR_IQID_V(rcq->cqid));
366 res->u.sqrq.dcaen_to_eqsize =
367 cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
368 FW_RI_RES_WR_DCACPU_V(0) |
369 FW_RI_RES_WR_FBMIN_V(2) |
370 FW_RI_RES_WR_FBMAX_V(3) |
371 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
372 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
373 FW_RI_RES_WR_EQSIZE_V(eqsize));
374 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
375 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
376 }
377
378 c4iw_init_wr_wait(wr_waitp);
379 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
380 if (ret)
381 goto free_dma;
382
383 pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
384 wq->sq.qid, wq->rq.qid, wq->db,
385 wq->sq.bar2_va, wq->rq.bar2_va);
386
387 return 0;
388 free_dma:
389 if (need_rq)
390 dma_free_coherent(&rdev->lldi.pdev->dev,
391 wq->rq.memsize, wq->rq.queue,
392 dma_unmap_addr(&wq->rq, mapping));
393 free_sq:
394 dealloc_sq(rdev, &wq->sq);
395 free_hwaddr:
396 if (need_rq)
397 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
398 free_sw_rq:
399 if (need_rq)
400 kfree(wq->rq.sw_rq);
401 free_sw_sq:
402 kfree(wq->sq.sw_sq);
403 free_rq_qid:
404 if (need_rq)
405 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
406 free_sq_qid:
407 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
408 return ret;
409 }
410
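/*
 * Copy the send data inline into the WQE just after the fw_ri_immd header,
 * wrapping around the end of the SQ as needed and padding the result out
 * to a 16-byte boundary.
 */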
411 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
412 const struct ib_send_wr *wr, int max, u32 *plenp)
413 {
414 u8 *dstp, *srcp;
415 u32 plen = 0;
416 int i;
417 int rem, len;
418
419 dstp = (u8 *)immdp->data;
420 for (i = 0; i < wr->num_sge; i++) {
421 if ((plen + wr->sg_list[i].length) > max)
422 return -EMSGSIZE;
423 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
424 plen += wr->sg_list[i].length;
425 rem = wr->sg_list[i].length;
426 while (rem) {
427 if (dstp == (u8 *)&sq->queue[sq->size])
428 dstp = (u8 *)sq->queue;
429 if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
430 len = rem;
431 else
432 len = (u8 *)&sq->queue[sq->size] - dstp;
433 memcpy(dstp, srcp, len);
434 dstp += len;
435 srcp += len;
436 rem -= len;
437 }
438 }
439 len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
440 if (len)
441 memset(dstp, 0, len);
442 immdp->op = FW_RI_DATA_IMMD;
443 immdp->r1 = 0;
444 immdp->r2 = 0;
445 immdp->immdlen = cpu_to_be32(plen);
446 *plenp = plen;
447 return 0;
448 }
449
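/*
 * Build an immediate SGL (fw_ri_isgl) directly in the work queue, wrapping
 * each 8-byte flit back to the start of the queue when the end is reached.
 */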
450 static int build_isgl(__be64 *queue_start, __be64 *queue_end,
451 struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
452 int num_sge, u32 *plenp)
453
454 {
455 int i;
456 u32 plen = 0;
457 __be64 *flitp;
458
459 if ((__be64 *)isglp == queue_end)
460 isglp = (struct fw_ri_isgl *)queue_start;
461
462 flitp = (__be64 *)isglp->sge;
463
464 for (i = 0; i < num_sge; i++) {
465 if ((plen + sg_list[i].length) < plen)
466 return -EMSGSIZE;
467 plen += sg_list[i].length;
468 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
469 sg_list[i].length);
470 if (++flitp == queue_end)
471 flitp = queue_start;
472 *flitp = cpu_to_be64(sg_list[i].addr);
473 if (++flitp == queue_end)
474 flitp = queue_start;
475 }
476 *flitp = (__force __be64)0;
477 isglp->op = FW_RI_DATA_ISGL;
478 isglp->r1 = 0;
479 isglp->nsge = cpu_to_be16(num_sge);
480 isglp->r2 = 0;
481 if (plenp)
482 *plenp = plen;
483 return 0;
484 }
485
486 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
487 const struct ib_send_wr *wr, u8 *len16)
488 {
489 u32 plen;
490 int size;
491 int ret;
492
493 if (wr->num_sge > T4_MAX_SEND_SGE)
494 return -EINVAL;
495 switch (wr->opcode) {
496 case IB_WR_SEND:
497 if (wr->send_flags & IB_SEND_SOLICITED)
498 wqe->send.sendop_pkd = cpu_to_be32(
499 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
500 else
501 wqe->send.sendop_pkd = cpu_to_be32(
502 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
503 wqe->send.stag_inv = 0;
504 break;
505 case IB_WR_SEND_WITH_INV:
506 if (wr->send_flags & IB_SEND_SOLICITED)
507 wqe->send.sendop_pkd = cpu_to_be32(
508 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
509 else
510 wqe->send.sendop_pkd = cpu_to_be32(
511 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
512 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
513 break;
514
515 default:
516 return -EINVAL;
517 }
518 wqe->send.r3 = 0;
519 wqe->send.r4 = 0;
520
521 plen = 0;
522 if (wr->num_sge) {
523 if (wr->send_flags & IB_SEND_INLINE) {
524 ret = build_immd(sq, wqe->send.u.immd_src, wr,
525 T4_MAX_SEND_INLINE, &plen);
526 if (ret)
527 return ret;
528 size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
529 plen;
530 } else {
531 ret = build_isgl((__be64 *)sq->queue,
532 (__be64 *)&sq->queue[sq->size],
533 wqe->send.u.isgl_src,
534 wr->sg_list, wr->num_sge, &plen);
535 if (ret)
536 return ret;
537 size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
538 wr->num_sge * sizeof(struct fw_ri_sge);
539 }
540 } else {
541 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
542 wqe->send.u.immd_src[0].r1 = 0;
543 wqe->send.u.immd_src[0].r2 = 0;
544 wqe->send.u.immd_src[0].immdlen = 0;
545 size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
546 plen = 0;
547 }
548 *len16 = DIV_ROUND_UP(size, 16);
549 wqe->send.plen = cpu_to_be32(plen);
550 return 0;
551 }
552
553 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
554 const struct ib_send_wr *wr, u8 *len16)
555 {
556 u32 plen;
557 int size;
558 int ret;
559
560 if (wr->num_sge > T4_MAX_SEND_SGE)
561 return -EINVAL;
562
563 /*
564 * The iWARP protocol supports 64-bit immediate data, but the RDMA API
565 * limits it to 32 bits.
566 */
567 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
568 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
569 else
570 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
571 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
572 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
573 if (wr->num_sge) {
574 if (wr->send_flags & IB_SEND_INLINE) {
575 ret = build_immd(sq, wqe->write.u.immd_src, wr,
576 T4_MAX_WRITE_INLINE, &plen);
577 if (ret)
578 return ret;
579 size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
580 plen;
581 } else {
582 ret = build_isgl((__be64 *)sq->queue,
583 (__be64 *)&sq->queue[sq->size],
584 wqe->write.u.isgl_src,
585 wr->sg_list, wr->num_sge, &plen);
586 if (ret)
587 return ret;
588 size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
589 wr->num_sge * sizeof(struct fw_ri_sge);
590 }
591 } else {
592 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
593 wqe->write.u.immd_src[0].r1 = 0;
594 wqe->write.u.immd_src[0].r2 = 0;
595 wqe->write.u.immd_src[0].immdlen = 0;
596 size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
597 plen = 0;
598 }
599 *len16 = DIV_ROUND_UP(size, 16);
600 wqe->write.plen = cpu_to_be32(plen);
601 return 0;
602 }
603
604 static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
605 struct ib_send_wr *wr)
606 {
607 memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
608 memset(immdp->r1, 0, 6);
609 immdp->op = FW_RI_DATA_IMMD;
610 immdp->immdlen = 16;
611 }
612
613 static void build_rdma_write_cmpl(struct t4_sq *sq,
614 struct fw_ri_rdma_write_cmpl_wr *wcwr,
615 const struct ib_send_wr *wr, u8 *len16)
616 {
617 u32 plen;
618 int size;
619
620 /*
621 * This code assumes the struct fields preceding the write isgl
622 * fit in one 64B WR slot. This is because the WQE is built
623 * directly in the dma queue, and wrapping is only handled
624 * by the code building sgls, i.e. the "fixed part" of the wr
625 * structs must all fit in 64B. The WQE build code should probably be
626 * redesigned to avoid this restriction, but for now just add
627 * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
628 */
629 BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);
630
631 wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
632 wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
633 if (wr->next->opcode == IB_WR_SEND)
634 wcwr->stag_inv = 0;
635 else
636 wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
637 wcwr->r2 = 0;
638 wcwr->r3 = 0;
639
640 /* SEND_INV SGL */
641 if (wr->next->send_flags & IB_SEND_INLINE)
642 build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
643 else
644 build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
645 &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);
646
647 /* WRITE SGL */
648 build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
649 wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);
650
651 size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
652 wr->num_sge * sizeof(struct fw_ri_sge);
653 wcwr->plen = cpu_to_be32(plen);
654 *len16 = DIV_ROUND_UP(size, 16);
655 }
656
657 static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
658 u8 *len16)
659 {
660 if (wr->num_sge > 1)
661 return -EINVAL;
662 if (wr->num_sge && wr->sg_list[0].length) {
663 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
664 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
665 >> 32));
666 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
667 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
668 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
669 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
670 >> 32));
671 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
672 } else {
673 wqe->read.stag_src = cpu_to_be32(2);
674 wqe->read.to_src_hi = 0;
675 wqe->read.to_src_lo = 0;
676 wqe->read.stag_sink = cpu_to_be32(2);
677 wqe->read.plen = 0;
678 wqe->read.to_sink_hi = 0;
679 wqe->read.to_sink_lo = 0;
680 }
681 wqe->read.r2 = 0;
682 wqe->read.r5 = 0;
683 *len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
684 return 0;
685 }
686
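/*
 * Coalesce a WRITE + SEND(_WITH_INV) chain into a single
 * FW_RI_RDMA_WRITE_CMPL_WR work request and ring the SQ doorbell.
 */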
687 static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
688 {
689 bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
690 qhp->sq_sig_all;
691 bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
692 qhp->sq_sig_all;
693 struct t4_swsqe *swsqe;
694 union t4_wr *wqe;
695 u16 write_wrid;
696 u8 len16;
697 u16 idx;
698
699 /*
700 * The sw_sq entries still look like a WRITE and a SEND and consume
701 * 2 slots. The FW WR, however, will be a single uber-WR.
702 */
703 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
704 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
705 build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
706
707 /* WRITE swsqe */
708 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
709 swsqe->opcode = FW_RI_RDMA_WRITE;
710 swsqe->idx = qhp->wq.sq.pidx;
711 swsqe->complete = 0;
712 swsqe->signaled = write_signaled;
713 swsqe->flushed = 0;
714 swsqe->wr_id = wr->wr_id;
715 if (c4iw_wr_log) {
716 swsqe->sge_ts =
717 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
718 swsqe->host_time = ktime_get();
719 }
720
721 write_wrid = qhp->wq.sq.pidx;
722
723 /* just bump the sw_sq */
724 qhp->wq.sq.in_use++;
725 if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
726 qhp->wq.sq.pidx = 0;
727
728 /* SEND_WITH_INV swsqe */
729 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
730 if (wr->next->opcode == IB_WR_SEND)
731 swsqe->opcode = FW_RI_SEND;
732 else
733 swsqe->opcode = FW_RI_SEND_WITH_INV;
734 swsqe->idx = qhp->wq.sq.pidx;
735 swsqe->complete = 0;
736 swsqe->signaled = send_signaled;
737 swsqe->flushed = 0;
738 swsqe->wr_id = wr->next->wr_id;
739 if (c4iw_wr_log) {
740 swsqe->sge_ts =
741 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
742 swsqe->host_time = ktime_get();
743 }
744
745 wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
746 wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
747
748 init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
749 write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
750 t4_sq_produce(&qhp->wq, len16);
751 idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
752
753 t4_ring_sq_db(&qhp->wq, idx, wqe);
754 }
755
756 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
757 const struct ib_recv_wr *wr, u8 *len16)
758 {
759 int ret;
760
761 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
762 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
763 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
764 if (ret)
765 return ret;
766 *len16 = DIV_ROUND_UP(
767 sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
768 return 0;
769 }
770
771 static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
772 u8 *len16)
773 {
774 int ret;
775
776 ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
777 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
778 if (ret)
779 return ret;
780 *len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
781 wr->num_sge * sizeof(struct fw_ri_sge), 16);
782 return 0;
783 }
784
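/*
 * Build a FW_RI_FR_NSMR_TPTE_WR, the fastreg variant that carries the TPT
 * entry and up to two PBL entries inside the work request itself.
 */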
785 static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
786 const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
787 u8 *len16)
788 {
789 __be64 *p = (__be64 *)fr->pbl;
790
791 fr->r2 = cpu_to_be32(0);
792 fr->stag = cpu_to_be32(mhp->ibmr.rkey);
793
794 fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
795 FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
796 FW_RI_TPTE_STAGSTATE_V(1) |
797 FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
798 FW_RI_TPTE_PDID_V(mhp->attr.pdid));
799 fr->tpte.locread_to_qpid = cpu_to_be32(
800 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
801 FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
802 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
803 fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
804 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
805 fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
806 fr->tpte.len_hi = cpu_to_be32(0);
807 fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
808 fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
809 fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
810
811 p[0] = cpu_to_be64((u64)mhp->mpl[0]);
812 p[1] = cpu_to_be64((u64)mhp->mpl[1]);
813
814 *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
815 }
816
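/*
 * Build a FW_RI_FR_NSMR_WR fastreg work request. The PBL is carried either
 * as a DSGL (when supported and larger than max_fr_immd) or as immediate
 * data copied into the SQ, wrapping around the queue as needed.
 */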
817 static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
818 const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
819 u8 *len16, bool dsgl_supported)
820 {
821 struct fw_ri_immd *imdp;
822 __be64 *p;
823 int i;
824 int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
825 int rem;
826
827 if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
828 return -EINVAL;
829
830 wqe->fr.qpbinde_to_dcacpu = 0;
831 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
832 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
833 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
834 wqe->fr.len_hi = 0;
835 wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
836 wqe->fr.stag = cpu_to_be32(wr->key);
837 wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
838 wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
839 0xffffffff);
840
841 if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
842 struct fw_ri_dsgl *sglp;
843
844 for (i = 0; i < mhp->mpl_len; i++)
845 mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
846
847 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
848 sglp->op = FW_RI_DATA_DSGL;
849 sglp->r1 = 0;
850 sglp->nsge = cpu_to_be16(1);
851 sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
852 sglp->len0 = cpu_to_be32(pbllen);
853
854 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
855 } else {
856 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
857 imdp->op = FW_RI_DATA_IMMD;
858 imdp->r1 = 0;
859 imdp->r2 = 0;
860 imdp->immdlen = cpu_to_be32(pbllen);
861 p = (__be64 *)(imdp + 1);
862 rem = pbllen;
863 for (i = 0; i < mhp->mpl_len; i++) {
864 *p = cpu_to_be64((u64)mhp->mpl[i]);
865 rem -= sizeof(*p);
866 if (++p == (__be64 *)&sq->queue[sq->size])
867 p = (__be64 *)sq->queue;
868 }
869 while (rem) {
870 *p = 0;
871 rem -= sizeof(*p);
872 if (++p == (__be64 *)&sq->queue[sq->size])
873 p = (__be64 *)sq->queue;
874 }
875 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
876 + pbllen, 16);
877 }
878 return 0;
879 }
880
881 static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
882 u8 *len16)
883 {
884 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
885 wqe->inv.r2 = 0;
886 *len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
887 return 0;
888 }
889
890 void c4iw_qp_add_ref(struct ib_qp *qp)
891 {
892 pr_debug("ib_qp %p\n", qp);
893 refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
894 }
895
896 void c4iw_qp_rem_ref(struct ib_qp *qp)
897 {
898 pr_debug("ib_qp %p\n", qp);
899 if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
900 complete(&to_c4iw_qp(qp)->qp_rel_comp);
901 }
902
903 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
904 {
905 if (list_empty(entry))
906 list_add_tail(entry, head);
907 }
908
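/*
 * Ring the SQ doorbell for a kernel QP, or, if doorbell flow control is in
 * effect, put the QP on the flow-control list and accumulate the pending
 * pidx increment instead.
 */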
909 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
910 {
911 unsigned long flags;
912
913 xa_lock_irqsave(&qhp->rhp->qps, flags);
914 spin_lock(&qhp->lock);
915 if (qhp->rhp->db_state == NORMAL)
916 t4_ring_sq_db(&qhp->wq, inc, NULL);
917 else {
918 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
919 qhp->wq.sq.wq_pidx_inc += inc;
920 }
921 spin_unlock(&qhp->lock);
922 xa_unlock_irqrestore(&qhp->rhp->qps, flags);
923 return 0;
924 }
925
926 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
927 {
928 unsigned long flags;
929
930 xa_lock_irqsave(&qhp->rhp->qps, flags);
931 spin_lock(&qhp->lock);
932 if (qhp->rhp->db_state == NORMAL)
933 t4_ring_rq_db(&qhp->wq, inc, NULL);
934 else {
935 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
936 qhp->wq.rq.wq_pidx_inc += inc;
937 }
938 spin_unlock(&qhp->lock);
939 xa_unlock_irqrestore(&qhp->rhp->qps, flags);
940 return 0;
941 }
942
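/* Map an ib_wr_opcode onto the matching FW_RI opcode, or -EINVAL. */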
943 static int ib_to_fw_opcode(int ib_opcode)
944 {
945 int opcode;
946
947 switch (ib_opcode) {
948 case IB_WR_SEND_WITH_INV:
949 opcode = FW_RI_SEND_WITH_INV;
950 break;
951 case IB_WR_SEND:
952 opcode = FW_RI_SEND;
953 break;
954 case IB_WR_RDMA_WRITE:
955 opcode = FW_RI_RDMA_WRITE;
956 break;
957 case IB_WR_RDMA_WRITE_WITH_IMM:
958 opcode = FW_RI_WRITE_IMMEDIATE;
959 break;
960 case IB_WR_RDMA_READ:
961 case IB_WR_RDMA_READ_WITH_INV:
962 opcode = FW_RI_READ_REQ;
963 break;
964 case IB_WR_REG_MR:
965 opcode = FW_RI_FAST_REGISTER;
966 break;
967 case IB_WR_LOCAL_INV:
968 opcode = FW_RI_LOCAL_INV;
969 break;
970 default:
971 opcode = -EINVAL;
972 }
973 return opcode;
974 }
975
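/*
 * The QP has already been flushed, so complete this SQ work request by
 * inserting a software-generated T4_ERR_SWFLUSH drain CQE directly into
 * the send CQ and, if the CQ was armed, invoking its completion handler.
 */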
976 static int complete_sq_drain_wr(struct c4iw_qp *qhp,
977 const struct ib_send_wr *wr)
978 {
979 struct t4_cqe cqe = {};
980 struct c4iw_cq *schp;
981 unsigned long flag;
982 struct t4_cq *cq;
983 int opcode;
984
985 schp = to_c4iw_cq(qhp->ibqp.send_cq);
986 cq = &schp->cq;
987
988 opcode = ib_to_fw_opcode(wr->opcode);
989 if (opcode < 0)
990 return opcode;
991
992 cqe.u.drain_cookie = wr->wr_id;
993 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
994 CQE_OPCODE_V(opcode) |
995 CQE_TYPE_V(1) |
996 CQE_SWCQE_V(1) |
997 CQE_DRAIN_V(1) |
998 CQE_QPID_V(qhp->wq.sq.qid));
999
1000 spin_lock_irqsave(&schp->lock, flag);
1001 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
1002 cq->sw_queue[cq->sw_pidx] = cqe;
1003 t4_swcq_produce(cq);
1004 spin_unlock_irqrestore(&schp->lock, flag);
1005
1006 if (t4_clear_cq_armed(&schp->cq)) {
1007 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1008 (*schp->ibcq.comp_handler)(&schp->ibcq,
1009 schp->ibcq.cq_context);
1010 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1011 }
1012 return 0;
1013 }
1014
1015 static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
1016 const struct ib_send_wr *wr,
1017 const struct ib_send_wr **bad_wr)
1018 {
1019 int ret = 0;
1020
1021 while (wr) {
1022 ret = complete_sq_drain_wr(qhp, wr);
1023 if (ret) {
1024 *bad_wr = wr;
1025 break;
1026 }
1027 wr = wr->next;
1028 }
1029 return ret;
1030 }
1031
1032 static void complete_rq_drain_wr(struct c4iw_qp *qhp,
1033 const struct ib_recv_wr *wr)
1034 {
1035 struct t4_cqe cqe = {};
1036 struct c4iw_cq *rchp;
1037 unsigned long flag;
1038 struct t4_cq *cq;
1039
1040 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1041 cq = &rchp->cq;
1042
1043 cqe.u.drain_cookie = wr->wr_id;
1044 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
1045 CQE_OPCODE_V(FW_RI_SEND) |
1046 CQE_TYPE_V(0) |
1047 CQE_SWCQE_V(1) |
1048 CQE_DRAIN_V(1) |
1049 CQE_QPID_V(qhp->wq.sq.qid));
1050
1051 spin_lock_irqsave(&rchp->lock, flag);
1052 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
1053 cq->sw_queue[cq->sw_pidx] = cqe;
1054 t4_swcq_produce(cq);
1055 spin_unlock_irqrestore(&rchp->lock, flag);
1056
1057 if (t4_clear_cq_armed(&rchp->cq)) {
1058 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1059 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1060 rchp->ibcq.cq_context);
1061 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1062 }
1063 }
1064
1065 static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
1066 const struct ib_recv_wr *wr)
1067 {
1068 while (wr) {
1069 complete_rq_drain_wr(qhp, wr);
1070 wr = wr->next;
1071 }
1072 }
1073
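/*
 * Post a chain of send work requests. WRs are built in place in the SQ DMA
 * queue; the doorbell is rung once after the loop, or deferred through
 * ring_kernel_sq_db() when doorbells are currently turned off.
 */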
1074 int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1075 const struct ib_send_wr **bad_wr)
1076 {
1077 int err = 0;
1078 u8 len16 = 0;
1079 enum fw_wr_opcodes fw_opcode = 0;
1080 enum fw_ri_wr_flags fw_flags;
1081 struct c4iw_qp *qhp;
1082 struct c4iw_dev *rhp;
1083 union t4_wr *wqe = NULL;
1084 u32 num_wrs;
1085 struct t4_swsqe *swsqe;
1086 unsigned long flag;
1087 u16 idx = 0;
1088
1089 qhp = to_c4iw_qp(ibqp);
1090 rhp = qhp->rhp;
1091 spin_lock_irqsave(&qhp->lock, flag);
1092
1093 /*
1094 * If the qp has been flushed, then just insert a special
1095 * drain cqe.
1096 */
1097 if (qhp->wq.flushed) {
1098 spin_unlock_irqrestore(&qhp->lock, flag);
1099 err = complete_sq_drain_wrs(qhp, wr, bad_wr);
1100 return err;
1101 }
1102 num_wrs = t4_sq_avail(&qhp->wq);
1103 if (num_wrs == 0) {
1104 spin_unlock_irqrestore(&qhp->lock, flag);
1105 *bad_wr = wr;
1106 return -ENOMEM;
1107 }
1108
1109 /*
1110 * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
1111 * the response for small NVMe-oF READ requests. If the chain is
1112 * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
1113 * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
1114 * request, then build and post the write_cmpl WR. If any of the tests
1115 * below are not true, then we continue on with the traditional WRITE
1116 * and SEND WRs.
1117 */
1118 if (qhp->rhp->rdev.lldi.write_cmpl_support &&
1119 CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
1120 CHELSIO_T5 &&
1121 wr && wr->next && !wr->next->next &&
1122 wr->opcode == IB_WR_RDMA_WRITE &&
1123 wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
1124 (wr->next->opcode == IB_WR_SEND ||
1125 wr->next->opcode == IB_WR_SEND_WITH_INV) &&
1126 wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
1127 wr->next->num_sge == 1 && num_wrs >= 2) {
1128 post_write_cmpl(qhp, wr);
1129 spin_unlock_irqrestore(&qhp->lock, flag);
1130 return 0;
1131 }
1132
1133 while (wr) {
1134 if (num_wrs == 0) {
1135 err = -ENOMEM;
1136 *bad_wr = wr;
1137 break;
1138 }
1139 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
1140 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
1141
1142 fw_flags = 0;
1143 if (wr->send_flags & IB_SEND_SOLICITED)
1144 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
1145 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
1146 fw_flags |= FW_RI_COMPLETION_FLAG;
1147 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
1148 switch (wr->opcode) {
1149 case IB_WR_SEND_WITH_INV:
1150 case IB_WR_SEND:
1151 if (wr->send_flags & IB_SEND_FENCE)
1152 fw_flags |= FW_RI_READ_FENCE_FLAG;
1153 fw_opcode = FW_RI_SEND_WR;
1154 if (wr->opcode == IB_WR_SEND)
1155 swsqe->opcode = FW_RI_SEND;
1156 else
1157 swsqe->opcode = FW_RI_SEND_WITH_INV;
1158 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
1159 break;
1160 case IB_WR_RDMA_WRITE_WITH_IMM:
1161 if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
1162 err = -EINVAL;
1163 break;
1164 }
1165 fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
1166 fallthrough;
1167 case IB_WR_RDMA_WRITE:
1168 fw_opcode = FW_RI_RDMA_WRITE_WR;
1169 swsqe->opcode = FW_RI_RDMA_WRITE;
1170 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
1171 break;
1172 case IB_WR_RDMA_READ:
1173 case IB_WR_RDMA_READ_WITH_INV:
1174 fw_opcode = FW_RI_RDMA_READ_WR;
1175 swsqe->opcode = FW_RI_READ_REQ;
1176 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
1177 c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
1178 fw_flags = FW_RI_RDMA_READ_INVALIDATE;
1179 } else {
1180 fw_flags = 0;
1181 }
1182 err = build_rdma_read(wqe, wr, &len16);
1183 if (err)
1184 break;
1185 swsqe->read_len = wr->sg_list[0].length;
1186 if (!qhp->wq.sq.oldest_read)
1187 qhp->wq.sq.oldest_read = swsqe;
1188 break;
1189 case IB_WR_REG_MR: {
1190 struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
1191
1192 swsqe->opcode = FW_RI_FAST_REGISTER;
1193 if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
1194 !mhp->attr.state && mhp->mpl_len <= 2) {
1195 fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
1196 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
1197 mhp, &len16);
1198 } else {
1199 fw_opcode = FW_RI_FR_NSMR_WR;
1200 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
1201 mhp, &len16,
1202 rhp->rdev.lldi.ulptx_memwrite_dsgl);
1203 if (err)
1204 break;
1205 }
1206 mhp->attr.state = 1;
1207 break;
1208 }
1209 case IB_WR_LOCAL_INV:
1210 if (wr->send_flags & IB_SEND_FENCE)
1211 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
1212 fw_opcode = FW_RI_INV_LSTAG_WR;
1213 swsqe->opcode = FW_RI_LOCAL_INV;
1214 err = build_inv_stag(wqe, wr, &len16);
1215 c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
1216 break;
1217 default:
1218 pr_warn("%s post of type=%d TBD!\n", __func__,
1219 wr->opcode);
1220 err = -EINVAL;
1221 }
1222 if (err) {
1223 *bad_wr = wr;
1224 break;
1225 }
1226 swsqe->idx = qhp->wq.sq.pidx;
1227 swsqe->complete = 0;
1228 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
1229 qhp->sq_sig_all;
1230 swsqe->flushed = 0;
1231 swsqe->wr_id = wr->wr_id;
1232 if (c4iw_wr_log) {
1233 swsqe->sge_ts = cxgb4_read_sge_timestamp(
1234 rhp->rdev.lldi.ports[0]);
1235 swsqe->host_time = ktime_get();
1236 }
1237
1238 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
1239
1240 pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
1241 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
1242 swsqe->opcode, swsqe->read_len);
1243 wr = wr->next;
1244 num_wrs--;
1245 t4_sq_produce(&qhp->wq, len16);
1246 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1247 }
1248 if (!rhp->rdev.status_page->db_off) {
1249 t4_ring_sq_db(&qhp->wq, idx, wqe);
1250 spin_unlock_irqrestore(&qhp->lock, flag);
1251 } else {
1252 spin_unlock_irqrestore(&qhp->lock, flag);
1253 ring_kernel_sq_db(qhp, idx);
1254 }
1255 return err;
1256 }
1257
1258 int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1259 const struct ib_recv_wr **bad_wr)
1260 {
1261 int err = 0;
1262 struct c4iw_qp *qhp;
1263 union t4_recv_wr *wqe = NULL;
1264 u32 num_wrs;
1265 u8 len16 = 0;
1266 unsigned long flag;
1267 u16 idx = 0;
1268
1269 qhp = to_c4iw_qp(ibqp);
1270 spin_lock_irqsave(&qhp->lock, flag);
1271
1272 /*
1273 * If the qp has been flushed, then just insert a special
1274 * drain cqe.
1275 */
1276 if (qhp->wq.flushed) {
1277 spin_unlock_irqrestore(&qhp->lock, flag);
1278 complete_rq_drain_wrs(qhp, wr);
1279 return err;
1280 }
1281 num_wrs = t4_rq_avail(&qhp->wq);
1282 if (num_wrs == 0) {
1283 spin_unlock_irqrestore(&qhp->lock, flag);
1284 *bad_wr = wr;
1285 return -ENOMEM;
1286 }
1287 while (wr) {
1288 if (wr->num_sge > T4_MAX_RECV_SGE) {
1289 err = -EINVAL;
1290 *bad_wr = wr;
1291 break;
1292 }
1293 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
1294 qhp->wq.rq.wq_pidx *
1295 T4_EQ_ENTRY_SIZE);
1296 if (num_wrs)
1297 err = build_rdma_recv(qhp, wqe, wr, &len16);
1298 else
1299 err = -ENOMEM;
1300 if (err) {
1301 *bad_wr = wr;
1302 break;
1303 }
1304
1305 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
1306 if (c4iw_wr_log) {
1307 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
1308 cxgb4_read_sge_timestamp(
1309 qhp->rhp->rdev.lldi.ports[0]);
1310 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
1311 ktime_get();
1312 }
1313
1314 wqe->recv.opcode = FW_RI_RECV_WR;
1315 wqe->recv.r1 = 0;
1316 wqe->recv.wrid = qhp->wq.rq.pidx;
1317 wqe->recv.r2[0] = 0;
1318 wqe->recv.r2[1] = 0;
1319 wqe->recv.r2[2] = 0;
1320 wqe->recv.len16 = len16;
1321 pr_debug("cookie 0x%llx pidx %u\n",
1322 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
1323 t4_rq_produce(&qhp->wq, len16);
1324 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1325 wr = wr->next;
1326 num_wrs--;
1327 }
1328 if (!qhp->rhp->rdev.status_page->db_off) {
1329 t4_ring_rq_db(&qhp->wq, idx, wqe);
1330 spin_unlock_irqrestore(&qhp->lock, flag);
1331 } else {
1332 spin_unlock_irqrestore(&qhp->lock, flag);
1333 ring_kernel_rq_db(qhp, idx);
1334 }
1335 return err;
1336 }
1337
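/*
 * Queue this SRQ work request on the pending-WR ring; it will be copied
 * into the hardware SRQ later, once earlier (out-of-order) slots have
 * been consumed.
 */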
1338 static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
1339 u64 wr_id, u8 len16)
1340 {
1341 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
1342
1343 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
1344 __func__, srq->cidx, srq->pidx, srq->wq_pidx,
1345 srq->in_use, srq->ooo_count,
1346 (unsigned long long)wr_id, srq->pending_cidx,
1347 srq->pending_pidx, srq->pending_in_use);
1348 pwr->wr_id = wr_id;
1349 pwr->len16 = len16;
1350 memcpy(&pwr->wqe, wqe, len16 * 16);
1351 t4_srq_produce_pending_wr(srq);
1352 }
1353
1354 int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
1355 const struct ib_recv_wr **bad_wr)
1356 {
1357 union t4_recv_wr *wqe, lwqe;
1358 struct c4iw_srq *srq;
1359 unsigned long flag;
1360 u8 len16 = 0;
1361 u16 idx = 0;
1362 int err = 0;
1363 u32 num_wrs;
1364
1365 srq = to_c4iw_srq(ibsrq);
1366 spin_lock_irqsave(&srq->lock, flag);
1367 num_wrs = t4_srq_avail(&srq->wq);
1368 if (num_wrs == 0) {
1369 spin_unlock_irqrestore(&srq->lock, flag);
1370 return -ENOMEM;
1371 }
1372 while (wr) {
1373 if (wr->num_sge > T4_MAX_RECV_SGE) {
1374 err = -EINVAL;
1375 *bad_wr = wr;
1376 break;
1377 }
1378 wqe = &lwqe;
1379 if (num_wrs)
1380 err = build_srq_recv(wqe, wr, &len16);
1381 else
1382 err = -ENOMEM;
1383 if (err) {
1384 *bad_wr = wr;
1385 break;
1386 }
1387
1388 wqe->recv.opcode = FW_RI_RECV_WR;
1389 wqe->recv.r1 = 0;
1390 wqe->recv.wrid = srq->wq.pidx;
1391 wqe->recv.r2[0] = 0;
1392 wqe->recv.r2[1] = 0;
1393 wqe->recv.r2[2] = 0;
1394 wqe->recv.len16 = len16;
1395
1396 if (srq->wq.ooo_count ||
1397 srq->wq.pending_in_use ||
1398 srq->wq.sw_rq[srq->wq.pidx].valid) {
1399 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
1400 } else {
1401 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
1402 srq->wq.sw_rq[srq->wq.pidx].valid = 1;
1403 c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
1404 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
1405 __func__, srq->wq.cidx,
1406 srq->wq.pidx, srq->wq.wq_pidx,
1407 srq->wq.in_use,
1408 (unsigned long long)wr->wr_id);
1409 t4_srq_produce(&srq->wq, len16);
1410 idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
1411 }
1412 wr = wr->next;
1413 num_wrs--;
1414 }
1415 if (idx)
1416 t4_ring_srq_db(&srq->wq, idx, len16, wqe);
1417 spin_unlock_irqrestore(&srq->lock, flag);
1418 return err;
1419 }
1420
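/*
 * Translate the status in a completion error CQE into the iWARP TERMINATE
 * layer/etype and error code to be reported to the peer.
 */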
1421 static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
1422 u8 *ecode)
1423 {
1424 int status;
1425 int tagged;
1426 int opcode;
1427 int rqtype;
1428 int send_inv;
1429
1430 if (!err_cqe) {
1431 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1432 *ecode = 0;
1433 return;
1434 }
1435
1436 status = CQE_STATUS(err_cqe);
1437 opcode = CQE_OPCODE(err_cqe);
1438 rqtype = RQ_TYPE(err_cqe);
1439 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
1440 (opcode == FW_RI_SEND_WITH_SE_INV);
1441 tagged = (opcode == FW_RI_RDMA_WRITE) ||
1442 (rqtype && (opcode == FW_RI_READ_RESP));
1443
1444 switch (status) {
1445 case T4_ERR_STAG:
1446 if (send_inv) {
1447 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1448 *ecode = RDMAP_CANT_INV_STAG;
1449 } else {
1450 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1451 *ecode = RDMAP_INV_STAG;
1452 }
1453 break;
1454 case T4_ERR_PDID:
1455 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1456 if ((opcode == FW_RI_SEND_WITH_INV) ||
1457 (opcode == FW_RI_SEND_WITH_SE_INV))
1458 *ecode = RDMAP_CANT_INV_STAG;
1459 else
1460 *ecode = RDMAP_STAG_NOT_ASSOC;
1461 break;
1462 case T4_ERR_QPID:
1463 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1464 *ecode = RDMAP_STAG_NOT_ASSOC;
1465 break;
1466 case T4_ERR_ACCESS:
1467 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1468 *ecode = RDMAP_ACC_VIOL;
1469 break;
1470 case T4_ERR_WRAP:
1471 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1472 *ecode = RDMAP_TO_WRAP;
1473 break;
1474 case T4_ERR_BOUND:
1475 if (tagged) {
1476 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1477 *ecode = DDPT_BASE_BOUNDS;
1478 } else {
1479 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1480 *ecode = RDMAP_BASE_BOUNDS;
1481 }
1482 break;
1483 case T4_ERR_INVALIDATE_SHARED_MR:
1484 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1485 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1486 *ecode = RDMAP_CANT_INV_STAG;
1487 break;
1488 case T4_ERR_ECC:
1489 case T4_ERR_ECC_PSTAG:
1490 case T4_ERR_INTERNAL_ERR:
1491 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1492 *ecode = 0;
1493 break;
1494 case T4_ERR_OUT_OF_RQE:
1495 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1496 *ecode = DDPU_INV_MSN_NOBUF;
1497 break;
1498 case T4_ERR_PBL_ADDR_BOUND:
1499 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1500 *ecode = DDPT_BASE_BOUNDS;
1501 break;
1502 case T4_ERR_CRC:
1503 *layer_type = LAYER_MPA|DDP_LLP;
1504 *ecode = MPA_CRC_ERR;
1505 break;
1506 case T4_ERR_MARKER:
1507 *layer_type = LAYER_MPA|DDP_LLP;
1508 *ecode = MPA_MARKER_ERR;
1509 break;
1510 case T4_ERR_PDU_LEN_ERR:
1511 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1512 *ecode = DDPU_MSG_TOOBIG;
1513 break;
1514 case T4_ERR_DDP_VERSION:
1515 if (tagged) {
1516 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1517 *ecode = DDPT_INV_VERS;
1518 } else {
1519 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1520 *ecode = DDPU_INV_VERS;
1521 }
1522 break;
1523 case T4_ERR_RDMA_VERSION:
1524 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1525 *ecode = RDMAP_INV_VERS;
1526 break;
1527 case T4_ERR_OPCODE:
1528 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1529 *ecode = RDMAP_INV_OPCODE;
1530 break;
1531 case T4_ERR_DDP_QUEUE_NUM:
1532 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1533 *ecode = DDPU_INV_QN;
1534 break;
1535 case T4_ERR_MSN:
1536 case T4_ERR_MSN_GAP:
1537 case T4_ERR_MSN_RANGE:
1538 case T4_ERR_IRD_OVERFLOW:
1539 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1540 *ecode = DDPU_INV_MSN_RANGE;
1541 break;
1542 case T4_ERR_TBIT:
1543 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1544 *ecode = 0;
1545 break;
1546 case T4_ERR_MO:
1547 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1548 *ecode = DDPU_INV_MO;
1549 break;
1550 default:
1551 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1552 *ecode = 0;
1553 break;
1554 }
1555 }
1556
1557 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1558 gfp_t gfp)
1559 {
1560 struct fw_ri_wr *wqe;
1561 struct sk_buff *skb;
1562 struct terminate_message *term;
1563
1564 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
1565 qhp->ep->hwtid);
1566
1567 skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
1568 if (WARN_ON(!skb))
1569 return;
1570
1571 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1572
1573 wqe = __skb_put_zero(skb, sizeof(*wqe));
1574 wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
1575 wqe->flowid_len16 = cpu_to_be32(
1576 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1577 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1578
1579 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1580 wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
1581 term = (struct terminate_message *)wqe->u.terminate.termmsg;
1582 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1583 term->layer_etype = qhp->attr.layer_etype;
1584 term->ecode = qhp->attr.ecode;
1585 } else
1586 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
1587 c4iw_ofld_send(&qhp->rhp->rdev, skb);
1588 }
1589
1590 /*
1591 * Assumes qhp lock is held.
1592 */
1593 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1594 struct c4iw_cq *schp)
1595 {
1596 int count;
1597 int rq_flushed = 0, sq_flushed;
1598 unsigned long flag;
1599 struct ib_event ev;
1600
1601 pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
1602
1603 /* locking hierarchy: cqs lock first, then qp lock. */
1604 spin_lock_irqsave(&rchp->lock, flag);
1605 if (schp != rchp)
1606 spin_lock(&schp->lock);
1607 spin_lock(&qhp->lock);
1608 if (qhp->srq && qhp->attr.state == C4IW_QP_STATE_ERROR &&
1609 qhp->ibqp.event_handler) {
1610 ev.device = qhp->ibqp.device;
1611 ev.element.qp = &qhp->ibqp;
1612 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1613 qhp->ibqp.event_handler(&ev, qhp->ibqp.qp_context);
1614 }
1615
1616 if (qhp->wq.flushed) {
1617 spin_unlock(&qhp->lock);
1618 if (schp != rchp)
1619 spin_unlock(&schp->lock);
1620 spin_unlock_irqrestore(&rchp->lock, flag);
1621 return;
1622 }
1623 qhp->wq.flushed = 1;
1624 t4_set_wq_in_error(&qhp->wq, 0);
1625
1626 c4iw_flush_hw_cq(rchp, qhp);
1627 if (!qhp->srq) {
1628 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1629 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1630 }
1631
1632 if (schp != rchp)
1633 c4iw_flush_hw_cq(schp, qhp);
1634 sq_flushed = c4iw_flush_sq(qhp);
1635
1636 spin_unlock(&qhp->lock);
1637 if (schp != rchp)
1638 spin_unlock(&schp->lock);
1639 spin_unlock_irqrestore(&rchp->lock, flag);
1640
1641 if (schp == rchp) {
1642 if ((rq_flushed || sq_flushed) &&
1643 t4_clear_cq_armed(&rchp->cq)) {
1644 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1645 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1646 rchp->ibcq.cq_context);
1647 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1648 }
1649 } else {
1650 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
1651 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1652 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1653 rchp->ibcq.cq_context);
1654 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1655 }
1656 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
1657 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1658 (*schp->ibcq.comp_handler)(&schp->ibcq,
1659 schp->ibcq.cq_context);
1660 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1661 }
1662 }
1663 }
1664
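/*
 * Move the QP's work queues into the error/flushed state: user QPs just get
 * their CQs marked in error and their completion handlers invoked, while
 * kernel QPs are flushed into the software CQs via __flush_qp().
 */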
1665 static void flush_qp(struct c4iw_qp *qhp)
1666 {
1667 struct c4iw_cq *rchp, *schp;
1668 unsigned long flag;
1669
1670 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1671 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1672
1673 if (qhp->ibqp.uobject) {
1674
1675 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
1676 if (qhp->wq.flushed)
1677 return;
1678
1679 qhp->wq.flushed = 1;
1680 t4_set_wq_in_error(&qhp->wq, 0);
1681 t4_set_cq_in_error(&rchp->cq);
1682 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1683 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1684 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1685 if (schp != rchp) {
1686 t4_set_cq_in_error(&schp->cq);
1687 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1688 (*schp->ibcq.comp_handler)(&schp->ibcq,
1689 schp->ibcq.cq_context);
1690 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1691 }
1692 return;
1693 }
1694 __flush_qp(qhp, rchp, schp);
1695 }
1696
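/*
 * Post a FW_RI_INIT_WR of type FINI to take the connection out of RDMA
 * mode and wait for the firmware to complete it.
 */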
1697 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1698 struct c4iw_ep *ep)
1699 {
1700 struct fw_ri_wr *wqe;
1701 int ret;
1702 struct sk_buff *skb;
1703
1704 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
1705
1706 skb = skb_dequeue(&ep->com.ep_skb_list);
1707 if (WARN_ON(!skb))
1708 return -ENOMEM;
1709
1710 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1711
1712 wqe = __skb_put_zero(skb, sizeof(*wqe));
1713 wqe->op_compl = cpu_to_be32(
1714 FW_WR_OP_V(FW_RI_INIT_WR) |
1715 FW_WR_COMPL_F);
1716 wqe->flowid_len16 = cpu_to_be32(
1717 FW_WR_FLOWID_V(ep->hwtid) |
1718 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1719 wqe->cookie = (uintptr_t)ep->com.wr_waitp;
1720
1721 wqe->u.fini.type = FW_RI_TYPE_FINI;
1722
1723 ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
1724 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1725
1726 pr_debug("ret %d\n", ret);
1727 return ret;
1728 }
1729
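/*
 * Build the p2p RTR message (a 0B RDMA write or 0B read request, depending
 * on the negotiated p2p type) that the firmware issues when this side
 * initiates the MPA exchange.
 */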
1730 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1731 {
1732 pr_debug("p2p_type = %d\n", p2p_type);
1733 memset(&init->u, 0, sizeof(init->u));
1734 switch (p2p_type) {
1735 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1736 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1737 init->u.write.stag_sink = cpu_to_be32(1);
1738 init->u.write.to_sink = cpu_to_be64(1);
1739 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1740 init->u.write.len16 = DIV_ROUND_UP(
1741 sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
1742 break;
1743 case FW_RI_INIT_P2PTYPE_READ_REQ:
1744 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1745 init->u.read.stag_src = cpu_to_be32(1);
1746 init->u.read.to_src_lo = cpu_to_be32(1);
1747 init->u.read.stag_sink = cpu_to_be32(1);
1748 init->u.read.to_sink_lo = cpu_to_be32(1);
1749 init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
1750 break;
1751 }
1752 }
1753
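/*
 * Reserve IRD resources and post a FW_RI_INIT_WR of type INIT that moves
 * the QP into RDMA mode, carrying the negotiated MPA and queue attributes.
 */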
1754 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1755 {
1756 struct fw_ri_wr *wqe;
1757 int ret;
1758 struct sk_buff *skb;
1759
1760 pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
1761 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1762
1763 skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
1764 if (!skb) {
1765 ret = -ENOMEM;
1766 goto out;
1767 }
1768 ret = alloc_ird(rhp, qhp->attr.max_ird);
1769 if (ret) {
1770 qhp->attr.max_ird = 0;
1771 kfree_skb(skb);
1772 goto out;
1773 }
1774 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1775
1776 wqe = __skb_put_zero(skb, sizeof(*wqe));
1777 wqe->op_compl = cpu_to_be32(
1778 FW_WR_OP_V(FW_RI_INIT_WR) |
1779 FW_WR_COMPL_F);
1780 wqe->flowid_len16 = cpu_to_be32(
1781 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1782 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1783
1784 wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
1785
1786 wqe->u.init.type = FW_RI_TYPE_INIT;
1787 wqe->u.init.mpareqbit_p2ptype =
1788 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1789 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
1790 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1791 if (qhp->attr.mpa_attr.recv_marker_enabled)
1792 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1793 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1794 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1795 if (qhp->attr.mpa_attr.crc_enabled)
1796 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1797
1798 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1799 FW_RI_QP_RDMA_WRITE_ENABLE |
1800 FW_RI_QP_BIND_ENABLE;
1801 if (!qhp->ibqp.uobject)
1802 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1803 FW_RI_QP_STAG0_ENABLE;
1804 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1805 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1806 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1807 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1808 if (qhp->srq) {
1809 wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
1810 qhp->srq->idx);
1811 } else {
1812 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1813 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1814 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1815 rhp->rdev.lldi.vr->rq.start);
1816 }
1817 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1818 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1819 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1820 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1821 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1822 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1823 if (qhp->attr.mpa_attr.initiator)
1824 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1825
1826 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
1827 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1828 if (!ret)
1829 goto out;
1830
1831 free_ird(rhp, qhp->attr.max_ird);
1832 out:
1833 pr_debug("ret %d\n", ret);
1834 return ret;
1835 }
1836
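/*
 * Core QP state machine.  Transitions handled below:
 *
 *   IDLE     -> RTS        needs LLP stream handle + MPA attrs, posts INIT
 *   IDLE     -> ERROR      flush the queues
 *   RTS      -> CLOSING    graceful close, posts FINI
 *   RTS      -> TERMINATE  mark in error, optionally send TERMINATE
 *   RTS      -> ERROR      abortive close
 *   CLOSING  -> IDLE       internal only, completes a graceful close
 *   CLOSING  -> ERROR
 *   ERROR    -> IDLE       only once both queues are empty
 *
 * Everything else is -EINVAL.  "internal" marks calls that originate inside
 * the driver (typically connection teardown paths) rather than from a verbs
 * consumer; it relaxes some checks and selects GFP_ATOMIC for the follow-up
 * terminate/disconnect work.
 */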
1837 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1838 enum c4iw_qp_attr_mask mask,
1839 struct c4iw_qp_attributes *attrs,
1840 int internal)
1841 {
1842 int ret = 0;
1843 struct c4iw_qp_attributes newattr = qhp->attr;
1844 int disconnect = 0;
1845 int terminate = 0;
1846 int abort = 0;
1847 int free = 0;
1848 struct c4iw_ep *ep = NULL;
1849
1850 pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
1851 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1852 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1853
1854 mutex_lock(&qhp->mutex);
1855
1856 /* Process attr changes if in IDLE */
1857 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1858 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1859 ret = -EIO;
1860 goto out;
1861 }
1862 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1863 newattr.enable_rdma_read = attrs->enable_rdma_read;
1864 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1865 newattr.enable_rdma_write = attrs->enable_rdma_write;
1866 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1867 newattr.enable_bind = attrs->enable_bind;
1868 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1869 if (attrs->max_ord > c4iw_max_read_depth) {
1870 ret = -EINVAL;
1871 goto out;
1872 }
1873 newattr.max_ord = attrs->max_ord;
1874 }
1875 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1876 if (attrs->max_ird > cur_max_read_depth(rhp)) {
1877 ret = -EINVAL;
1878 goto out;
1879 }
1880 newattr.max_ird = attrs->max_ird;
1881 }
1882 qhp->attr = newattr;
1883 }
1884
1885 if (mask & C4IW_QP_ATTR_SQ_DB) {
1886 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
1887 goto out;
1888 }
1889 if (mask & C4IW_QP_ATTR_RQ_DB) {
1890 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
1891 goto out;
1892 }
1893
1894 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1895 goto out;
1896 if (qhp->attr.state == attrs->next_state)
1897 goto out;
1898
1899 switch (qhp->attr.state) {
1900 case C4IW_QP_STATE_IDLE:
1901 switch (attrs->next_state) {
1902 case C4IW_QP_STATE_RTS:
1903 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1904 ret = -EINVAL;
1905 goto out;
1906 }
1907 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1908 ret = -EINVAL;
1909 goto out;
1910 }
1911 qhp->attr.mpa_attr = attrs->mpa_attr;
1912 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1913 qhp->ep = qhp->attr.llp_stream_handle;
1914 set_state(qhp, C4IW_QP_STATE_RTS);
1915
1916 /*
1917 * Ref the endpoint here and deref when we
1918 * disassociate the endpoint from the QP. This
1919 * happens in CLOSING->IDLE transition or *->ERROR
1920 * transition.
1921 */
1922 c4iw_get_ep(&qhp->ep->com);
1923 ret = rdma_init(rhp, qhp);
1924 if (ret)
1925 goto err;
1926 break;
1927 case C4IW_QP_STATE_ERROR:
1928 set_state(qhp, C4IW_QP_STATE_ERROR);
1929 flush_qp(qhp);
1930 break;
1931 default:
1932 ret = -EINVAL;
1933 goto out;
1934 }
1935 break;
1936 case C4IW_QP_STATE_RTS:
1937 switch (attrs->next_state) {
1938 case C4IW_QP_STATE_CLOSING:
1939 t4_set_wq_in_error(&qhp->wq, 0);
1940 set_state(qhp, C4IW_QP_STATE_CLOSING);
1941 ep = qhp->ep;
1942 if (!internal) {
1943 abort = 0;
1944 disconnect = 1;
1945 c4iw_get_ep(&qhp->ep->com);
1946 }
1947 ret = rdma_fini(rhp, qhp, ep);
1948 if (ret)
1949 goto err;
1950 break;
1951 case C4IW_QP_STATE_TERMINATE:
1952 t4_set_wq_in_error(&qhp->wq, 0);
1953 set_state(qhp, C4IW_QP_STATE_TERMINATE);
1954 qhp->attr.layer_etype = attrs->layer_etype;
1955 qhp->attr.ecode = attrs->ecode;
1956 ep = qhp->ep;
1957 if (!internal) {
1958 c4iw_get_ep(&ep->com);
1959 terminate = 1;
1960 disconnect = 1;
1961 } else {
1962 terminate = qhp->attr.send_term;
1963 ret = rdma_fini(rhp, qhp, ep);
1964 if (ret)
1965 goto err;
1966 }
1967 break;
1968 case C4IW_QP_STATE_ERROR:
1969 t4_set_wq_in_error(&qhp->wq, 0);
1970 set_state(qhp, C4IW_QP_STATE_ERROR);
1971 if (!internal) {
1972 disconnect = 1;
1973 ep = qhp->ep;
1974 c4iw_get_ep(&qhp->ep->com);
1975 }
1976 goto err;
1977 break;
1978 default:
1979 ret = -EINVAL;
1980 goto out;
1981 }
1982 break;
1983 case C4IW_QP_STATE_CLOSING:
1984
1985 /*
1986 * Allow kernel users to move to ERROR for qp draining.
1987 */
1988 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1989 C4IW_QP_STATE_ERROR)) {
1990 ret = -EINVAL;
1991 goto out;
1992 }
1993 switch (attrs->next_state) {
1994 case C4IW_QP_STATE_IDLE:
1995 flush_qp(qhp);
1996 set_state(qhp, C4IW_QP_STATE_IDLE);
1997 qhp->attr.llp_stream_handle = NULL;
1998 c4iw_put_ep(&qhp->ep->com);
1999 qhp->ep = NULL;
2000 wake_up(&qhp->wait);
2001 break;
2002 case C4IW_QP_STATE_ERROR:
2003 goto err;
2004 default:
2005 ret = -EINVAL;
2006 goto err;
2007 }
2008 break;
2009 case C4IW_QP_STATE_ERROR:
2010 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
2011 ret = -EINVAL;
2012 goto out;
2013 }
2014 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
2015 ret = -EINVAL;
2016 goto out;
2017 }
2018 set_state(qhp, C4IW_QP_STATE_IDLE);
2019 break;
2020 case C4IW_QP_STATE_TERMINATE:
2021 if (!internal) {
2022 ret = -EINVAL;
2023 goto out;
2024 }
2025 goto err;
2026 break;
2027 default:
2028 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
2029 ret = -EINVAL;
2030 goto err;
2031 break;
2032 }
2033 goto out;
2034 err:
2035 pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
2036 qhp->wq.sq.qid);
2037
2038 /* disassociate the LLP connection */
2039 qhp->attr.llp_stream_handle = NULL;
2040 if (!ep)
2041 ep = qhp->ep;
2042 qhp->ep = NULL;
2043 set_state(qhp, C4IW_QP_STATE_ERROR);
2044 free = 1;
2045 abort = 1;
2046 flush_qp(qhp);
2047 wake_up(&qhp->wait);
2048 out:
2049 mutex_unlock(&qhp->mutex);
2050
2051 if (terminate)
2052 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
2053
2054 /*
2055 * If disconnect is 1, then we need to initiate a disconnect
2056 * on the EP. This can be a normal close (RTS->CLOSING) or
2057 * an abnormal close (RTS/CLOSING->ERROR).
2058 */
2059 if (disconnect) {
2060 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
2061 GFP_KERNEL);
2062 c4iw_put_ep(&ep->com);
2063 }
2064
2065 /*
2066 * If free is 1, then we've disassociated the EP from the QP
2067 * and we need to dereference the EP.
2068 */
2069 if (free)
2070 c4iw_put_ep(&ep->com);
2071 pr_debug("exit state %d\n", qhp->attr.state);
2072 return ret;
2073 }
2074
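/*
 * Destroy verb: drive the QP to ERROR (using an internal transition when a
 * TERMINATE is already in flight), wait for the endpoint to be
 * disassociated, unlink the QP from the qps xarray and the db flow-control
 * list, return its IRD credits, then drop the last reference and free the
 * hardware queues once qp_rel_comp completes.
 */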
2075 int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
2076 {
2077 struct c4iw_dev *rhp;
2078 struct c4iw_qp *qhp;
2079 struct c4iw_ucontext *ucontext;
2080 struct c4iw_qp_attributes attrs;
2081
2082 qhp = to_c4iw_qp(ib_qp);
2083 rhp = qhp->rhp;
2084 ucontext = qhp->ucontext;
2085
2086 attrs.next_state = C4IW_QP_STATE_ERROR;
2087 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
2088 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2089 else
2090 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
2091 wait_event(qhp->wait, !qhp->ep);
2092
2093 xa_lock_irq(&rhp->qps);
2094 __xa_erase(&rhp->qps, qhp->wq.sq.qid);
2095 if (!list_empty(&qhp->db_fc_entry))
2096 list_del_init(&qhp->db_fc_entry);
2097 xa_unlock_irq(&rhp->qps);
2098 free_ird(rhp, qhp->attr.max_ird);
2099
2100 c4iw_qp_rem_ref(ib_qp);
2101
2102 wait_for_completion(&qhp->qp_rel_comp);
2103
2104 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
2105 pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
2106
2107 destroy_qp(&rhp->rdev, &qhp->wq,
2108 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
2109
2110 c4iw_put_wr_wait(qhp->wr_waitp);
2111 return 0;
2112 }
2113
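/*
 * Create verb.  Queue depths are the requested capacities plus one slot,
 * clamped to a minimum of 8, sized with the EQ status entries and, for user
 * QPs, rounded up to a page so the queues can be mmapped.  User QPs get a
 * set of mmap keys back in uresp: SQ/RQ memory, SQ/RQ doorbell-GTS pages
 * and, for on-chip SQs, the MA sync register page.
 */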
2114 int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
2115 struct ib_udata *udata)
2116 {
2117 struct ib_pd *pd = qp->pd;
2118 struct c4iw_dev *rhp;
2119 struct c4iw_qp *qhp = to_c4iw_qp(qp);
2120 struct c4iw_pd *php;
2121 struct c4iw_cq *schp;
2122 struct c4iw_cq *rchp;
2123 struct c4iw_create_qp_resp uresp;
2124 unsigned int sqsize, rqsize = 0;
2125 struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
2126 udata, struct c4iw_ucontext, ibucontext);
2127 int ret;
2128 struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
2129 struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
2130
2131 if (attrs->qp_type != IB_QPT_RC || attrs->create_flags)
2132 return -EOPNOTSUPP;
2133
2134 php = to_c4iw_pd(pd);
2135 rhp = php->rhp;
2136 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
2137 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
2138 if (!schp || !rchp)
2139 return -EINVAL;
2140
2141 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
2142 return -EINVAL;
2143
2144 if (!attrs->srq) {
2145 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
2146 return -E2BIG;
2147 rqsize = attrs->cap.max_recv_wr + 1;
2148 if (rqsize < 8)
2149 rqsize = 8;
2150 }
2151
2152 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
2153 return -E2BIG;
2154 sqsize = attrs->cap.max_send_wr + 1;
2155 if (sqsize < 8)
2156 sqsize = 8;
2157
2158 qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
2159 if (!qhp->wr_waitp)
2160 return -ENOMEM;
2161
2162 qhp->wq.sq.size = sqsize;
2163 qhp->wq.sq.memsize =
2164 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2165 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
2166 qhp->wq.sq.flush_cidx = -1;
2167 if (!attrs->srq) {
2168 qhp->wq.rq.size = rqsize;
2169 qhp->wq.rq.memsize =
2170 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2171 sizeof(*qhp->wq.rq.queue);
2172 }
2173
2174 if (ucontext) {
2175 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
2176 if (!attrs->srq)
2177 qhp->wq.rq.memsize =
2178 roundup(qhp->wq.rq.memsize, PAGE_SIZE);
2179 }
2180
2181 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
2182 ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2183 qhp->wr_waitp, !attrs->srq);
2184 if (ret)
2185 goto err_free_wr_wait;
2186
2187 attrs->cap.max_recv_wr = rqsize - 1;
2188 attrs->cap.max_send_wr = sqsize - 1;
2189 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
2190
2191 qhp->rhp = rhp;
2192 qhp->attr.pd = php->pdid;
2193 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
2194 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
2195 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
2196 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
2197 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
2198 if (!attrs->srq) {
2199 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
2200 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
2201 }
2202 qhp->attr.state = C4IW_QP_STATE_IDLE;
2203 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
2204 qhp->attr.enable_rdma_read = 1;
2205 qhp->attr.enable_rdma_write = 1;
2206 qhp->attr.enable_bind = 1;
2207 qhp->attr.max_ord = 0;
2208 qhp->attr.max_ird = 0;
2209 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
2210 spin_lock_init(&qhp->lock);
2211 mutex_init(&qhp->mutex);
2212 init_waitqueue_head(&qhp->wait);
2213 init_completion(&qhp->qp_rel_comp);
2214 refcount_set(&qhp->qp_refcnt, 1);
2215
2216 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
2217 if (ret)
2218 goto err_destroy_qp;
2219
2220 if (udata && ucontext) {
2221 sq_key_mm = kmalloc_obj(*sq_key_mm);
2222 if (!sq_key_mm) {
2223 ret = -ENOMEM;
2224 goto err_remove_handle;
2225 }
2226 if (!attrs->srq) {
2227 rq_key_mm = kmalloc_obj(*rq_key_mm);
2228 if (!rq_key_mm) {
2229 ret = -ENOMEM;
2230 goto err_free_sq_key;
2231 }
2232 }
2233 sq_db_key_mm = kmalloc_obj(*sq_db_key_mm);
2234 if (!sq_db_key_mm) {
2235 ret = -ENOMEM;
2236 goto err_free_rq_key;
2237 }
2238 if (!attrs->srq) {
2239 rq_db_key_mm = kmalloc_obj(*rq_db_key_mm);
2240 if (!rq_db_key_mm) {
2241 ret = -ENOMEM;
2242 goto err_free_sq_db_key;
2243 }
2244 }
2245 memset(&uresp, 0, sizeof(uresp));
2246 if (t4_sq_onchip(&qhp->wq.sq)) {
2247 ma_sync_key_mm = kmalloc_obj(*ma_sync_key_mm);
2248 if (!ma_sync_key_mm) {
2249 ret = -ENOMEM;
2250 goto err_free_rq_db_key;
2251 }
2252 uresp.flags = C4IW_QPF_ONCHIP;
2253 }
2254 if (rhp->rdev.lldi.write_w_imm_support)
2255 uresp.flags |= C4IW_QPF_WRITE_W_IMM;
2256 uresp.qid_mask = rhp->rdev.qpmask;
2257 uresp.sqid = qhp->wq.sq.qid;
2258 uresp.sq_size = qhp->wq.sq.size;
2259 uresp.sq_memsize = qhp->wq.sq.memsize;
2260 if (!attrs->srq) {
2261 uresp.rqid = qhp->wq.rq.qid;
2262 uresp.rq_size = qhp->wq.rq.size;
2263 uresp.rq_memsize = qhp->wq.rq.memsize;
2264 }
2265 spin_lock(&ucontext->mmap_lock);
2266 if (ma_sync_key_mm) {
2267 uresp.ma_sync_key = ucontext->key;
2268 ucontext->key += PAGE_SIZE;
2269 }
2270 uresp.sq_key = ucontext->key;
2271 ucontext->key += PAGE_SIZE;
2272 if (!attrs->srq) {
2273 uresp.rq_key = ucontext->key;
2274 ucontext->key += PAGE_SIZE;
2275 }
2276 uresp.sq_db_gts_key = ucontext->key;
2277 ucontext->key += PAGE_SIZE;
2278 if (!attrs->srq) {
2279 uresp.rq_db_gts_key = ucontext->key;
2280 ucontext->key += PAGE_SIZE;
2281 }
2282 spin_unlock(&ucontext->mmap_lock);
2283 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2284 if (ret)
2285 goto err_free_ma_sync_key;
2286 sq_key_mm->key = uresp.sq_key;
2287 sq_key_mm->addr = 0;
2288 sq_key_mm->vaddr = qhp->wq.sq.queue;
2289 sq_key_mm->dma_addr = qhp->wq.sq.dma_addr;
2290 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
2291 insert_flag_to_mmap(&rhp->rdev, sq_key_mm, sq_key_mm->addr);
2292 insert_mmap(ucontext, sq_key_mm);
2293 if (!attrs->srq) {
2294 rq_key_mm->key = uresp.rq_key;
2295 rq_key_mm->addr = 0;
2296 rq_key_mm->vaddr = qhp->wq.rq.queue;
2297 rq_key_mm->dma_addr = qhp->wq.rq.dma_addr;
2298 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
2299 insert_flag_to_mmap(&rhp->rdev, rq_key_mm,
2300 rq_key_mm->addr);
2301 insert_mmap(ucontext, rq_key_mm);
2302 }
2303 sq_db_key_mm->key = uresp.sq_db_gts_key;
2304 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
2305 sq_db_key_mm->vaddr = NULL;
2306 sq_db_key_mm->dma_addr = 0;
2307 sq_db_key_mm->len = PAGE_SIZE;
2308 insert_flag_to_mmap(&rhp->rdev, sq_db_key_mm,
2309 sq_db_key_mm->addr);
2310 insert_mmap(ucontext, sq_db_key_mm);
2311 if (!attrs->srq) {
2312 rq_db_key_mm->key = uresp.rq_db_gts_key;
2313 rq_db_key_mm->addr =
2314 (u64)(unsigned long)qhp->wq.rq.bar2_pa;
2315 rq_db_key_mm->len = PAGE_SIZE;
2316 rq_db_key_mm->vaddr = NULL;
2317 rq_db_key_mm->dma_addr = 0;
2318 insert_flag_to_mmap(&rhp->rdev, rq_db_key_mm,
2319 rq_db_key_mm->addr);
2320 insert_mmap(ucontext, rq_db_key_mm);
2321 }
2322 if (ma_sync_key_mm) {
2323 ma_sync_key_mm->key = uresp.ma_sync_key;
2324 ma_sync_key_mm->addr =
2325 (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
2326 PCIE_MA_SYNC_A) & PAGE_MASK;
2327 ma_sync_key_mm->len = PAGE_SIZE;
2328 ma_sync_key_mm->vaddr = NULL;
2329 ma_sync_key_mm->dma_addr = 0;
2330 insert_flag_to_mmap(&rhp->rdev, ma_sync_key_mm,
2331 ma_sync_key_mm->addr);
2332 insert_mmap(ucontext, ma_sync_key_mm);
2333 }
2334
2335 qhp->ucontext = ucontext;
2336 }
2337 if (!attrs->srq) {
2338 qhp->wq.qp_errp =
2339 &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
2340 } else {
2341 qhp->wq.qp_errp =
2342 &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
2343 qhp->wq.srqidxp =
2344 &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
2345 }
2346
2347 qhp->ibqp.qp_num = qhp->wq.sq.qid;
2348 if (attrs->srq)
2349 qhp->srq = to_c4iw_srq(attrs->srq);
2350 INIT_LIST_HEAD(&qhp->db_fc_entry);
2351 pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
2352 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
2353 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
2354 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
2355 return 0;
2356 err_free_ma_sync_key:
2357 kfree(ma_sync_key_mm);
2358 err_free_rq_db_key:
2359 if (!attrs->srq)
2360 kfree(rq_db_key_mm);
2361 err_free_sq_db_key:
2362 kfree(sq_db_key_mm);
2363 err_free_rq_key:
2364 if (!attrs->srq)
2365 kfree(rq_key_mm);
2366 err_free_sq_key:
2367 kfree(sq_key_mm);
2368 err_remove_handle:
2369 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
2370 err_destroy_qp:
2371 destroy_qp(&rhp->rdev, &qhp->wq,
2372 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
2373 err_free_wr_wait:
2374 c4iw_put_wr_wait(qhp->wr_waitp);
2375 return ret;
2376 }
2377
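/*
 * ib_qp modify verb: translate the generic ib_qp_attr/attr_mask into the
 * driver's c4iw_qp_attributes and mask.  RTR is silently dropped since
 * iWARP has no such state.  Note the overloading of sq_psn/rq_psn: on T4
 * parts they carry doorbell index increments for db-full recovery rather
 * than packet sequence numbers, e.g. (sketch of a hypothetical caller):
 *
 *   attr.sq_psn = idx_inc;
 *   c4iw_ib_modify_qp(ibqp, &attr, IB_QP_SQ_PSN, NULL);
 */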
2378 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2379 int attr_mask, struct ib_udata *udata)
2380 {
2381 struct c4iw_dev *rhp;
2382 struct c4iw_qp *qhp;
2383 enum c4iw_qp_attr_mask mask = 0;
2384 struct c4iw_qp_attributes attrs = {};
2385
2386 pr_debug("ib_qp %p\n", ibqp);
2387
2388 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2389 return -EOPNOTSUPP;
2390
2391 /* iwarp does not support the RTR state */
2392 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
2393 attr_mask &= ~IB_QP_STATE;
2394
2395 /* Make sure we still have something left to do */
2396 if (!attr_mask)
2397 return 0;
2398
2399 qhp = to_c4iw_qp(ibqp);
2400 rhp = qhp->rhp;
2401
2402 attrs.next_state = c4iw_convert_state(attr->qp_state);
2403 attrs.enable_rdma_read = (attr->qp_access_flags &
2404 IB_ACCESS_REMOTE_READ) ? 1 : 0;
2405 attrs.enable_rdma_write = (attr->qp_access_flags &
2406 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2407 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
2408
2409
2410 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
2411 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
2412 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
2413 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
2414 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
2415
2416 /*
2417 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
2418 * ringing the queue db when we're in DB_FULL mode.
2419 * Only allow this on T4 devices.
2420 */
2421 attrs.sq_db_inc = attr->sq_psn;
2422 attrs.rq_db_inc = attr->rq_psn;
2423 mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
2424 mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
2425 if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
2426 (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
2427 return -EINVAL;
2428
2429 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
2430 }
2431
2432 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
2433 {
2434 pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
2435 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
2436 }
2437
2438 void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
2439 {
2440 struct ib_event event = {};
2441
2442 event.device = &srq->rhp->ibdev;
2443 event.element.srq = &srq->ibsrq;
2444 event.event = IB_EVENT_SRQ_LIMIT_REACHED;
2445 ib_dispatch_event(&event);
2446 }
2447
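/*
 * SRQ modify verb.  A zero attribute mask from user space is used as a
 * software trigger to dispatch the SRQ limit-reached event (see the XXX
 * below); resizing via IB_SRQ_MAX_WR is not supported, and IB_SRQ_LIMIT
 * from a kernel consumer arms the SRQ with the requested limit.
 */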
2448 int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
2449 enum ib_srq_attr_mask srq_attr_mask,
2450 struct ib_udata *udata)
2451 {
2452 struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
2453 int ret = 0;
2454
2455 /*
2456 * XXX 0 mask == a SW interrupt for srq_limit reached...
2457 */
2458 if (udata && !srq_attr_mask) {
2459 c4iw_dispatch_srq_limit_reached_event(srq);
2460 goto out;
2461 }
2462
2463 /* no support for this yet */
2464 if (srq_attr_mask & IB_SRQ_MAX_WR) {
2465 ret = -EINVAL;
2466 goto out;
2467 }
2468
2469 if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
2470 srq->armed = true;
2471 srq->srq_limit = attr->srq_limit;
2472 }
2473 out:
2474 return ret;
2475 }
2476
2477 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2478 int attr_mask, struct ib_qp_init_attr *init_attr)
2479 {
2480 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
2481
2482 memset(attr, 0, sizeof(*attr));
2483 memset(init_attr, 0, sizeof(*init_attr));
2484 attr->qp_state = to_ib_qp_state(qhp->attr.state);
2485 attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
2486 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
2487 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
2488 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
2489 init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
2490 init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
2491 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
2492 return 0;
2493 }
2494
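/*
 * Release an SRQ's hardware resources: issue a FW_RI_RES_WR RESET using the
 * skb that was pre-allocated at create time (so teardown never has to
 * allocate memory), then free the queue memory, the RQT range, the software
 * request array and the qid.
 */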
2495 static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2496 struct c4iw_wr_wait *wr_waitp)
2497 {
2498 struct c4iw_rdev *rdev = &srq->rhp->rdev;
2499 struct sk_buff *skb = srq->destroy_skb;
2500 struct t4_srq *wq = &srq->wq;
2501 struct fw_ri_res_wr *res_wr;
2502 struct fw_ri_res *res;
2503 int wr_len;
2504
2505 wr_len = sizeof(*res_wr) + sizeof(*res);
2506 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
2507
2508 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
2509 memset(res_wr, 0, wr_len);
2510 res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
2511 FW_RI_RES_WR_NRES_V(1) |
2512 FW_WR_COMPL_F);
2513 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
2514 res_wr->cookie = (uintptr_t)wr_waitp;
2515 res = res_wr->res;
2516 res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
2517 res->u.srq.op = FW_RI_RES_OP_RESET;
2518 res->u.srq.srqid = cpu_to_be32(srq->idx);
2519 res->u.srq.eqid = cpu_to_be32(wq->qid);
2520
2521 c4iw_init_wr_wait(wr_waitp);
2522 c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
2523
2524 dma_free_coherent(&rdev->lldi.pdev->dev,
2525 wq->memsize, wq->queue,
2526 dma_unmap_addr(wq, mapping));
2527 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2528 kfree(wq->sw_rq);
2529 c4iw_put_qpid(rdev, wq->qid, uctx);
2530 }
2531
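/*
 * Allocate the SRQ hardware queue: a qid, software tracking arrays (kernel
 * SRQs only), an RQT range and DMA-coherent queue memory, then program the
 * egress queue with a FW_RI_RES_WR WRITE.  User SRQs must fall inside a
 * BAR2 doorbell range because their doorbells are rung from user space.
 */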
2532 static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2533 struct c4iw_wr_wait *wr_waitp)
2534 {
2535 struct c4iw_rdev *rdev = &srq->rhp->rdev;
2536 int user = (uctx != &rdev->uctx);
2537 struct t4_srq *wq = &srq->wq;
2538 struct fw_ri_res_wr *res_wr;
2539 struct fw_ri_res *res;
2540 struct sk_buff *skb;
2541 int wr_len;
2542 int eqsize;
2543 int ret = -ENOMEM;
2544
2545 wq->qid = c4iw_get_qpid(rdev, uctx);
2546 if (!wq->qid)
2547 goto err;
2548
2549 if (!user) {
2550 wq->sw_rq = kzalloc_objs(*wq->sw_rq, wq->size);
2551 if (!wq->sw_rq)
2552 goto err_put_qpid;
2553 wq->pending_wrs = kzalloc_objs(*srq->wq.pending_wrs,
2554 srq->wq.size);
2555 if (!wq->pending_wrs)
2556 goto err_free_sw_rq;
2557 }
2558
2559 wq->rqt_size = wq->size;
2560 wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
2561 if (!wq->rqt_hwaddr)
2562 goto err_free_pending_wrs;
2563 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
2564 T4_RQT_ENTRY_SHIFT;
2565
2566 wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
2567 &wq->dma_addr, GFP_KERNEL);
2568 if (!wq->queue)
2569 goto err_free_rqtpool;
2570
2571 dma_unmap_addr_set(wq, mapping, wq->dma_addr);
2572
2573 wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
2574 &wq->bar2_qid,
2575 user ? &wq->bar2_pa : NULL);
2576
2577 /*
2578 * User mode must have bar2 access.
2579 */
2580
2581 if (user && !wq->bar2_va) {
2582 pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
2583 pci_name(rdev->lldi.pdev), wq->qid);
2584 ret = -EINVAL;
2585 goto err_free_queue;
2586 }
2587
2588 /* build fw_ri_res_wr */
2589 wr_len = sizeof(*res_wr) + sizeof(*res);
2590
2591 skb = alloc_skb(wr_len, GFP_KERNEL);
2592 if (!skb)
2593 goto err_free_queue;
2594 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
2595
2596 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
2597 memset(res_wr, 0, wr_len);
2598 res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
2599 FW_RI_RES_WR_NRES_V(1) |
2600 FW_WR_COMPL_F);
2601 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
2602 res_wr->cookie = (uintptr_t)wr_waitp;
2603 res = res_wr->res;
2604 res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
2605 res->u.srq.op = FW_RI_RES_OP_WRITE;
2606
2607 /*
2608 * eqsize is the number of 64B entries plus the status page size.
2609 */
2610 eqsize = wq->size * T4_RQ_NUM_SLOTS +
2611 rdev->hw_queue.t4_eq_status_entries;
2612 res->u.srq.eqid = cpu_to_be32(wq->qid);
2613 res->u.srq.fetchszm_to_iqid =
2614 /* no host cidx updates */
2615 cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
2616 FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
2617 FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
2618 FW_RI_RES_WR_FETCHRO_V(0)); /* relaxed_ordering */
2619 res->u.srq.dcaen_to_eqsize =
2620 cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
2621 FW_RI_RES_WR_DCACPU_V(0) |
2622 FW_RI_RES_WR_FBMIN_V(2) |
2623 FW_RI_RES_WR_FBMAX_V(3) |
2624 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
2625 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
2626 FW_RI_RES_WR_EQSIZE_V(eqsize));
2627 res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
2628 res->u.srq.srqid = cpu_to_be32(srq->idx);
2629 res->u.srq.pdid = cpu_to_be32(srq->pdid);
2630 res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
2631 res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
2632 rdev->lldi.vr->rq.start);
2633
2634 c4iw_init_wr_wait(wr_waitp);
2635
2636 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
2637 if (ret)
2638 goto err_free_queue;
2639
2640 pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
2641 " bar2_addr %p rqt addr 0x%x size %d\n",
2642 __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
2643 (u64)virt_to_phys(wq->queue), wq->bar2_va,
2644 wq->rqt_hwaddr, wq->rqt_size);
2645
2646 return 0;
2647 err_free_queue:
2648 dma_free_coherent(&rdev->lldi.pdev->dev,
2649 wq->memsize, wq->queue,
2650 dma_unmap_addr(wq, mapping));
2651 err_free_rqtpool:
2652 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2653 err_free_pending_wrs:
2654 if (!user)
2655 kfree(wq->pending_wrs);
2656 err_free_sw_rq:
2657 if (!user)
2658 kfree(wq->sw_rq);
2659 err_put_qpid:
2660 c4iw_put_qpid(rdev, wq->qid, uctx);
2661 err:
2662 return ret;
2663 }
2664
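/*
 * Copy a work request into the SRQ ring 8 bytes at a time, wrapping the
 * destination pointer back to the start of the queue whenever it runs off
 * the end.  len16 is the WR length in 16-byte units, hence two 64-bit
 * copies per iteration.
 */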
2665 void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
2666 {
2667 u64 *src, *dst;
2668
2669 src = (u64 *)wqe;
2670 dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
2671 while (len16) {
2672 *dst++ = *src++;
2673 if (dst >= (u64 *)&srq->queue[srq->size])
2674 dst = (u64 *)srq->queue;
2675 *dst++ = *src++;
2676 if (dst >= (u64 *)&srq->queue[srq->size])
2677 dst = (u64 *)srq->queue;
2678 len16--;
2679 }
2680 }
2681
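/*
 * Create an SRQ.  The RQ/RQT depth is rounded up to a power of two with a
 * floor of 16 entries, and the destroy skb is allocated up front so that
 * teardown cannot fail on memory allocation.  As with QPs, user SRQs get
 * mmap keys for the queue memory and the doorbell-GTS page.
 */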
2682 int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
2683 struct ib_udata *udata)
2684 {
2685 struct ib_pd *pd = ib_srq->pd;
2686 struct c4iw_dev *rhp;
2687 struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
2688 struct c4iw_pd *php;
2689 struct c4iw_create_srq_resp uresp;
2690 struct c4iw_ucontext *ucontext;
2691 struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
2692 int rqsize;
2693 int ret;
2694 int wr_len;
2695
2696 if (attrs->srq_type != IB_SRQT_BASIC)
2697 return -EOPNOTSUPP;
2698
2699 pr_debug("%s ib_pd %p\n", __func__, pd);
2700
2701 php = to_c4iw_pd(pd);
2702 rhp = php->rhp;
2703
2704 if (!rhp->rdev.lldi.vr->srq.size)
2705 return -EINVAL;
2706 if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
2707 return -E2BIG;
2708 if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
2709 return -E2BIG;
2710
2711 /*
2712 * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
2713 */
2714 rqsize = attrs->attr.max_wr + 1;
2715 rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
2716
2717 ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
2718 ibucontext);
2719
2720 srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
2721 if (!srq->wr_waitp)
2722 return -ENOMEM;
2723
2724 srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
2725 if (srq->idx < 0) {
2726 ret = -ENOMEM;
2727 goto err_free_wr_wait;
2728 }
2729
2730 wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
2731 srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
2732 if (!srq->destroy_skb) {
2733 ret = -ENOMEM;
2734 goto err_free_srq_idx;
2735 }
2736
2737 srq->rhp = rhp;
2738 srq->pdid = php->pdid;
2739
2740 srq->wq.size = rqsize;
2741 srq->wq.memsize =
2742 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2743 sizeof(*srq->wq.queue);
2744 if (ucontext)
2745 srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
2746
2747 ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
2748 &rhp->rdev.uctx, srq->wr_waitp);
2749 if (ret)
2750 goto err_free_skb;
2751 attrs->attr.max_wr = rqsize - 1;
2752
2753 if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
2754 srq->flags = T4_SRQ_LIMIT_SUPPORT;
2755
2756 if (udata) {
2757 srq_key_mm = kmalloc_obj(*srq_key_mm);
2758 if (!srq_key_mm) {
2759 ret = -ENOMEM;
2760 goto err_free_queue;
2761 }
2762 srq_db_key_mm = kmalloc_obj(*srq_db_key_mm);
2763 if (!srq_db_key_mm) {
2764 ret = -ENOMEM;
2765 goto err_free_srq_key_mm;
2766 }
2767 memset(&uresp, 0, sizeof(uresp));
2768 uresp.flags = srq->flags;
2769 uresp.qid_mask = rhp->rdev.qpmask;
2770 uresp.srqid = srq->wq.qid;
2771 uresp.srq_size = srq->wq.size;
2772 uresp.srq_memsize = srq->wq.memsize;
2773 uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
2774 spin_lock(&ucontext->mmap_lock);
2775 uresp.srq_key = ucontext->key;
2776 ucontext->key += PAGE_SIZE;
2777 uresp.srq_db_gts_key = ucontext->key;
2778 ucontext->key += PAGE_SIZE;
2779 spin_unlock(&ucontext->mmap_lock);
2780 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2781 if (ret)
2782 goto err_free_srq_db_key_mm;
2783 srq_key_mm->key = uresp.srq_key;
2784 srq_key_mm->addr = 0;
2785 srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
2786 srq_key_mm->vaddr = srq->wq.queue;
2787 srq_key_mm->dma_addr = srq->wq.dma_addr;
2788 insert_flag_to_mmap(&rhp->rdev, srq_key_mm, srq_key_mm->addr);
2789 insert_mmap(ucontext, srq_key_mm);
2790 srq_db_key_mm->key = uresp.srq_db_gts_key;
2791 srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
2792 srq_db_key_mm->len = PAGE_SIZE;
2793 srq_db_key_mm->vaddr = NULL;
2794 srq_db_key_mm->dma_addr = 0;
2795 insert_flag_to_mmap(&rhp->rdev, srq_db_key_mm,
2796 srq_db_key_mm->addr);
2797 insert_mmap(ucontext, srq_db_key_mm);
2798 }
2799
2800 pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
2801 __func__, srq->wq.qid, srq->idx, srq->wq.size,
2802 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
2803
2804 spin_lock_init(&srq->lock);
2805 return 0;
2806
2807 err_free_srq_db_key_mm:
2808 kfree(srq_db_key_mm);
2809 err_free_srq_key_mm:
2810 kfree(srq_key_mm);
2811 err_free_queue:
2812 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2813 srq->wr_waitp);
2814 err_free_skb:
2815 kfree_skb(srq->destroy_skb);
2816 err_free_srq_idx:
2817 c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2818 err_free_wr_wait:
2819 c4iw_put_wr_wait(srq->wr_waitp);
2820 return ret;
2821 }
2822
2823 int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
2824 {
2825 struct c4iw_dev *rhp;
2826 struct c4iw_srq *srq;
2827 struct c4iw_ucontext *ucontext;
2828
2829 srq = to_c4iw_srq(ibsrq);
2830 rhp = srq->rhp;
2831
2832 pr_debug("%s id %d\n", __func__, srq->wq.qid);
2833 ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
2834 ibucontext);
2835 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2836 srq->wr_waitp);
2837 c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2838 c4iw_put_wr_wait(srq->wr_waitp);
2839 return 0;
2840 }
2841