xref: /linux/drivers/infiniband/ulp/iser/iser_memory.c (revision feb7c1e38bccfd18cc06677cb648ed2340788fe8)
/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *mem_reg);
static
int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
		     struct iser_data_buf *mem,
		     struct iser_reg_resources *rsc,
		     struct iser_mem_reg *mem_reg);

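/*
 * Registration strategy vtables. One of these is selected per device in
 * iser_assign_reg_ops() below: fastreg_ops registers memory with
 * IB_WR_REG_MR work requests, while fmr_ops goes through the older
 * FMR pool API.
 */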
static struct iser_reg_ops fastreg_ops = {
	.alloc_reg_res	= iser_alloc_fastreg_pool,
	.free_reg_res	= iser_free_fastreg_pool,
	.reg_mem	= iser_fast_reg_mr,
	.unreg_mem	= iser_unreg_mem_fastreg,
	.reg_desc_get	= iser_reg_desc_get_fr,
	.reg_desc_put	= iser_reg_desc_put_fr,
};

static struct iser_reg_ops fmr_ops = {
	.alloc_reg_res	= iser_alloc_fmr_pool,
	.free_reg_res	= iser_free_fmr_pool,
	.reg_mem	= iser_fast_reg_fmr,
	.unreg_mem	= iser_unreg_mem_fmr,
	.reg_desc_get	= iser_reg_desc_get_fmr,
	.reg_desc_put	= iser_reg_desc_put_fmr,
};

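/*
 * Completion handler for registration work requests; these normally
 * complete unsignaled, so reaching here indicates an error worth logging.
 */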
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	iser_err_comp(wc, "memreg");
}

int iser_assign_reg_ops(struct iser_device *device)
{
	struct ib_device *ib_dev = device->ib_device;

	/* Assign function handles - based on FMR support */
	if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
	    ib_dev->map_phys_fmr && ib_dev->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->reg_ops = &fmr_ops;
	} else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->reg_ops = &fastreg_ops;
	} else {
		iser_err("IB device supports neither FMR nor FastReg, can't register memory\n");
		return -1;
	}

	return 0;
}

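/* Take a free fastreg descriptor off the connection's pool list */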
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);

	return desc;
}

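/* Return a fastreg descriptor to the connection's pool list */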
void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	list_add(&desc->list, &fr_pool->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);
}

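/*
 * The FMR flavor never takes the descriptor off the pool list (see the
 * no-op put below), so no locking is needed here.
 */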
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;

	return list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
}

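/* NOOP: FMR descriptors are never removed from the pool list */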
void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc)
{
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be
 * less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where several fragments of the same page are present in the SG as
 * consecutive elements, and it handles a single-entry SG as well.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sg;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/*
		 * Address of the first page in the contiguous chunk;
		 * masking is relevant for the very first SG entry,
		 * which might be unaligned.
		 */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

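/*
 * DMA-map the task's scatterlist for the given direction and record
 * that this direction is now mapped in the task.
 */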
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

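/*
 * Fast path for a fully contiguous buffer: no registration is needed,
 * so use the device's local DMA lkey and the global MR rkey directly.
 */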
static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->pd->local_dma_lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

/**
 * iser_fast_reg_fmr - Registers physical memory via the FMR pool
 *
 * returns: 0 on success, errno code on failure
 */
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_page_vec *page_vec = rsc->page_vec;
	struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   page_vec->pages,
				   &page_vec->offset,
				   &page_vec->data_size);
	page_vec->length = plen;
	if (plen * SIZE_4K < page_vec->data_size) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr = ib_fmr_pool_map_phys(fmr_pool,
				   page_vec->pages,
				   page_vec->length,
				   page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	reg->sge.lkey = fmr->fmr->lkey;
	reg->rkey = fmr->fmr->rkey;
	reg->sge.addr = page_vec->pages[0] + page_vec->offset;
	reg->sge.length = page_vec->data_size;
	reg->mem_h = fmr;

	iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * Does nothing if the memory was not registered via FMR.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

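/*
 * Unregister fastreg-registered memory by handing the descriptor back
 * to the connection's pool.
 */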
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
				      reg->mem_h);
	reg->mem_h = NULL;
}

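/*
 * Fill a T10-DIF signature domain from the SCSI command's protection
 * parameters.
 */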
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

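/*
 * Map the SCSI protection operation onto signature attributes: INSERT
 * and STRIP operations carry protection information on only one side
 * (wire or memory), while PASS operations carry it on both.
 */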
static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}

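/* Translate the SCSI protection check flags into iSER's check mask */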
static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}

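/*
 * Build a local-invalidate work request for the MR's current rkey, then
 * bump the key so the next registration of this MR uses a fresh rkey.
 */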
static void
iser_inv_rkey(struct ib_send_wr *inv_wr,
	      struct ib_mr *mr,
	      struct ib_cqe *cqe)
{
	u32 rkey;

	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_cqe = cqe;
	inv_wr->ex.invalidate_rkey = mr->rkey;
	inv_wr->send_flags = 0;
	inv_wr->num_sge = 0;

	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

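/*
 * Chain a signature handover work request that binds the data (and,
 * when present, protection) registrations under one signature-enabled
 * MR, which is then advertised for the RDMA operation.
 */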
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_pi_context *pi_ctx,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_sig_handover_wr *wr;
	int ret;

	memset(sig_attrs, 0, sizeof(*sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

	if (!pi_ctx->sig_mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), pi_ctx->sig_mr, cqe);

	wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
	wr->wr.opcode = IB_WR_REG_SIG_MR;
	wr->wr.wr_cqe = cqe;
	wr->wr.sg_list = &data_reg->sge;
	wr->wr.num_sge = 1;
	wr->wr.send_flags = 0;
	wr->sig_attrs = sig_attrs;
	wr->sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		wr->prot = &prot_reg->sge;
	else
		wr->prot = NULL;
	wr->access_flags = IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_READ |
			   IB_ACCESS_REMOTE_WRITE;
	pi_ctx->sig_mr_valid = 0;

	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
	sig_reg->rkey = pi_ctx->sig_mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}

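/*
 * Map the SG list onto the MR's page list and chain an IB_WR_REG_MR
 * work request, invalidating the previous rkey first when needed.
 */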
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_mr *mr = rsc->mr;
	struct ib_reg_wr *wr;
	int n;

	if (!rsc->mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);

	n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K);
	if (unlikely(n != mem->size)) {
		iser_err("failed to map sg (%d/%d)\n",
			 n, mem->size);
		return n < 0 ? n : -EINVAL;
	}

	wr = reg_wr(iser_tx_next_wr(tx_desc));
	wr->wr.opcode = IB_WR_REG_MR;
	wr->wr.wr_cqe = cqe;
	wr->wr.send_flags = 0;
	wr->wr.num_sge = 0;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE  |
		     IB_ACCESS_REMOTE_WRITE |
		     IB_ACCESS_REMOTE_READ;

	rsc->mr_valid = 0;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = mr->iova;
	reg->sge.length = mr->length;

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
		 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);

	return 0;
}

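/*
 * Register the protection/data SG lists: a contiguous buffer can use
 * the DMA key directly, anything else goes through the device's
 * registration ops on the descriptor's resources.
 */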
static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
}

static int
iser_reg_data_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}

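/*
 * Register a task's RDMA buffers for the given direction. A single-entry
 * SG with no protection information can use the global DMA key (unless
 * iser_always_reg forces registration); otherwise a descriptor is taken
 * from the pool, and for protected commands the data and protection
 * registrations are bound together under a signature MR.
 */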
int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir)
{
	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_data_buf *mem = &task->data[dir];
	struct iser_mem_reg *reg = &task->rdma_reg[dir];
	struct iser_mem_reg *data_reg;
	struct iser_fr_desc *desc = NULL;
	bool use_dma_key;
	int err;

	use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
		       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);

	if (!use_dma_key) {
		desc = device->reg_ops->reg_desc_get(ib_conn);
		reg->mem_h = desc;
	}

	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
		data_reg = reg;
	else
		data_reg = &task->desc.data_reg;

	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
	if (unlikely(err))
		goto err_reg;

	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg *prot_reg = &task->desc.prot_reg;

		if (scsi_prot_sg_count(task->sc)) {
			mem = &task->prot[dir];
			err = iser_reg_prot_sg(task, mem, desc,
					       use_dma_key, prot_reg);
			if (unlikely(err))
				goto err_reg;
		}

		err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
				      prot_reg, reg);
		if (unlikely(err))
			goto err_reg;

		desc->pi_ctx->sig_protected = 1;
	}

	return 0;

err_reg:
	if (desc)
		device->reg_ops->reg_desc_put(ib_conn, desc);

	return err;
}

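/* Release the resources taken by iser_reg_rdma_mem() for this direction */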
void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
			 enum iser_data_dir dir)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	device->reg_ops->unreg_mem(task, dir);
}