// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <linux/memremap.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

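/*
 * Illustrative usage note (not from the original file): rw.c is normally
 * built into the ib_core module, so the knob above can be set at load
 * time, e.g.:
 *
 *	modprobe ib_core force_mr=1
 *
 * This forces the MR code path even on devices that could use plain SGEs,
 * which is useful for exercising the registration logic during testing.
 */
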
/*
 * Report whether memory registration should be used. Memory registration
 * must be used for iWARP devices because of iWARP-specific limitations.
 * Memory registration is also enabled if registering memory might yield
 * better performance than using multiple SGE entries, see
 * rdma_rw_io_needs_mr().
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (dev->attrs.max_sgl_rd)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * For RDMA READs we must use MRs on iWARP and can optionally use them as an
 * optimization otherwise. Additionally we have a debug option to force usage
 * of MRs to help testing this code path.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (dir == DMA_FROM_DEVICE) {
		if (rdma_protocol_iwarp(dev, port_num))
			return true;
		if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
			return true;
	}
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
					   bool pi_support)
{
	u32 max_pages;

	if (pi_support)
		max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_pages = dev->attrs.max_fast_reg_page_list_len;

	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, max_pages, 256);
}

static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
{
	int count = 0;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	return count;
}

/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	count += rdma_rw_inv_key(reg);

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < 0 || ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct rdma_rw_reg_ctx *prev = NULL;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		prev = reg;
		offset = 0;
	}

	if (prev)
		prev->wr.wr.next = NULL;

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
		      qp->max_read_sge;
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			sge->addr = sg_dma_address(sg) + offset;
			sge->length = sg_dma_len(sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
			&ctx->map.wrs[i + 1].wr : NULL;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = sg_dma_address(sg) + offset;
	ctx->single.sge.length = sg_dma_len(sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}

/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @sg_offset: current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the work queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	struct sg_table sgt = {
		.sgl = sg,
		.orig_nents = sg_cnt,
	};
	int ret;

	ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
	if (ret)
		return ret;
	sg_cnt = sgt.nents;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
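
/*
 * Illustrative sketch (not from the original file): a ULP that wants the
 * peer's data pulled into a local scatterlist might call the helper like
 * this; "sgl", "sg_cnt", "remote_addr" and "rkey" are assumed to come
 * from the ULP's own protocol exchange:
 *
 *	struct rdma_rw_ctx ctx;
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
 *			       remote_addr, rkey, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;	/* e.g. -EAGAIN if the MR pool ran dry */
 *
 * On success the return value is the number of WQE slots the eventual
 * post will consume, which callers can check against their send queue
 * accounting.
 */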

/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs: signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the work queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	struct sg_table sgt = {
		.sgl = sg,
		.orig_nents = sg_cnt,
	};
	struct sg_table prot_sgt = {
		.sgl = prot_sg,
		.orig_nents = prot_sg_cnt,
	};
	struct ib_rdma_wr *rdma_wr;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large: sg_cnt=%u, prot_sg_cnt=%u, pages_per_mr=%u\n",
		       sg_cnt, prot_sg_cnt, pages_per_mr);
		return -EINVAL;
	}

	ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
	if (ret)
		return ret;

	if (prot_sg_cnt) {
		ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
		if (ret)
			goto out_unmap_sg;
	}

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->reg = kzalloc(sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->reg->mr) {
		ret = -EAGAIN;
		goto out_free_ctx;
	}

	count += rdma_rw_inv_key(ctx->reg);

	memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));

	ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg,
			      prot_sgt.nents, NULL, SZ_4K);
	if (unlikely(ret)) {
		pr_err("failed to map PI sg (%u)\n",
		       sgt.nents + prot_sgt.nents);
		goto out_destroy_sig_mr;
	}

	ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
	ctx->reg->reg_wr.wr.wr_cqe = NULL;
	ctx->reg->reg_wr.wr.num_sge = 0;
	ctx->reg->reg_wr.wr.send_flags = 0;
	ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	ctx->reg->reg_wr.mr = ctx->reg->mr;
	ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
	count++;

	ctx->reg->sge.addr = ctx->reg->mr->iova;
	ctx->reg->sge.length = ctx->reg->mr->length;
	if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
		ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;

	rdma_wr = &ctx->reg->wr;
	rdma_wr->wr.sg_list = &ctx->reg->sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
	count++;

	return count;

out_destroy_sig_mr:
	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
out_free_ctx:
	kfree(ctx->reg);
out_unmap_prot_sg:
	if (prot_sgt.nents)
		ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
out_unmap_sg:
	ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
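
/*
 * Illustrative sketch (not from the original file): a block-storage target
 * checking T10-DIF on the wire might set up a signature context roughly
 * like this; the exact ib_sig_attrs contents are an assumption and depend
 * on the ULP's protection format:
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.mem.sig_type	= IB_SIG_TYPE_NONE,
 *		.wire.sig_type	= IB_SIG_TYPE_T10_DIF,
 *	};
 *	int ret;
 *
 *	ret = rdma_rw_ctx_signature_init(&ctx, qp, port_num, sgl, sg_cnt,
 *			prot_sgl, prot_sg_cnt, &sig_attrs,
 *			remote_addr, rkey, DMA_FROM_DEVICE);
 *
 * Note that both @sg_cnt and @prot_sg_cnt must fit into a single signature
 * MR here, unlike rdma_rw_ctx_init() which splits large payloads across
 * multiple MRs.
 */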

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs. If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed. If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
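
/*
 * Illustrative sketch (not from the original file): a ULP that wants to
 * issue a protocol response in the same doorbell as the RDMA WRITEs can
 * chain its own send WR and post the whole thing itself; "rsp_wr" is a
 * hypothetical, fully initialized IB_WR_SEND request:
 *
 *	struct ib_send_wr *first;
 *
 *	first = rdma_rw_ctx_wrs(&ctx, qp, port_num, NULL, &rsp_wr);
 *	ret = ib_post_send(qp, first, NULL);
 *
 * Because @chain_wr is set, @cqe may be NULL and the completion semantics
 * of the chained WR apply instead.
 */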

/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed. If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted. If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);
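
/*
 * Illustrative lifecycle sketch (not from the original file): the common
 * pattern pairs the init, post and destroy helpers, with the context torn
 * down from the completion handler; "done_cqe" is a hypothetical ib_cqe
 * wired to the caller's completion routine:
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
 *			       remote_addr, rkey, dir);
 *	if (ret < 0)
 *		return ret;
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, &done_cqe, NULL);
 *
 *	later, once the done_cqe completion has fired:
 *
 *	rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, sg_cnt, dir);
 *
 * Destroying the context before the WRs complete would return MRs to the
 * pool and unmap memory that the HCA may still be accessing.
 */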

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			 u32 port_num, struct scatterlist *sg, u32 sg_cnt,
			 enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
	kfree(ctx->reg);

	if (prot_sg_cnt)
		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device: device handling the connection
 * @port_num: port num to which the connection is bound
 * @maxpages: maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move a payload of
 * @maxpages pages. The returned value is used during transport creation to
 * compute max_rdma_ctxs and the size of the transport's Send and
 * Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
			       unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device, false);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
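
/*
 * Illustrative worked example (not from the original file): on the MR
 * path, rdma_rw_fr_page_list_len() caps a fast-reg MR at 256 pages, so a
 * transport sized for 1024-page payloads needs
 * DIV_ROUND_UP(1024, 256) = 4 MRs per rdma_rw_ctx:
 *
 *	unsigned int factor = rdma_rw_mr_factor(device, port_num, 1024);
 *	(factor == 4 on such a device)
 *
 * A device whose max_fast_reg_page_list_len is below 256 would need
 * correspondingly more MRs.
 */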

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional WRs per context for the registration
	 * and the invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
	    rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get..
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
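
/*
 * Illustrative arithmetic (not from the original file): a caller asking
 * for attr->cap.max_rdma_ctxs = 128 on a device that takes the MR path
 * gets factor = 3 (read/write WR + reg WR + inv WR), so max_send_wr grows
 * by 3 * 128 = 384 slots, subject to the dev->attrs.max_qp_wr clamp above.
 */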

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, true);
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, false);
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				max_num_sg, 0);
		if (ret) {
			pr_err("%s: failed to allocate %u MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
		if (ret) {
			pr_err("%s: failed to allocate %u SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}