/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"

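/* FRMR state machine, as driven by this file:
 *
 *   FRMR_IS_FREE  -> FRMR_IS_INUSE   claimed by rds_ib_post_reg_frmr()
 *   FRMR_IS_INUSE -> FRMR_IS_FREE    LOCAL_INV completed successfully
 *   FRMR_IS_INUSE -> FRMR_IS_STALE   a post or completion failed
 *
 * Leaving FRMR_IS_INUSE also drops i_fastreg_inuse_count and wakes
 * anyone waiting on rds_ib_ring_empty_wait, which is what the helper
 * below encapsulates.
 */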
static inline void
rds_transition_frwr_state(struct rds_ib_mr *ibmr,
			  enum rds_ib_fr_state old_state,
			  enum rds_ib_fr_state new_state)
{
	if (cmpxchg(&ibmr->u.frmr.fr_state,
		    old_state, new_state) == old_state &&
	    old_state == FRMR_IS_INUSE) {
		/* enforce order of ibmr->u.frmr.fr_state update
		 * before decrementing i_fastreg_inuse_count
		 */
		smp_mb__before_atomic();
		atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
		if (waitqueue_active(&rds_ib_ring_empty_wait))
			wake_up(&rds_ib_ring_empty_wait);
	}
}

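/* Allocate an MR for FRMR use, preferring to reuse one from the pool's
 * free list.  npages only selects the pool (8K vs 1M message sizes);
 * a newly created MR is always sized for pool->max_pages.
 */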
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
					   int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	frmr = &ibmr->u.frmr;
	frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
			 pool->max_pages);
	if (IS_ERR(frmr->mr)) {
		pr_warn("RDS/IB: %s failed to allocate MR\n", __func__);
		err = PTR_ERR(frmr->mr);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	if (atomic_read(&pool->item_count) > pool->max_items_soft)
		pool->max_items_soft = pool->max_items;

	frmr->fr_state = FRMR_IS_FREE;
	init_waitqueue_head(&frmr->fr_inv_done);
	init_waitqueue_head(&frmr->fr_reg_done);
	return ibmr;

out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

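/* Hand an MR back to its pool.  MRs on the drop_list are destined to
 * be torn down by the flush worker; MRs on the free_list may be reused
 * by a later rds_ib_try_reuse_ibmr().
 */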
static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (drop)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
}

static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct ib_reg_wr reg_wr;
	int ret, off = 0;

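	/* i_fastreg_wrs is a credit counter bounding the fast-reg and
	 * invalidate WRs outstanding on this QP.  Spin until we can take
	 * a credit; the CQE handler gives it back once the WR completes.
	 */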
	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

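	/* Translate the DMA-mapped scatterlist into the MR's page list,
	 * using a zero-based virtual address.  A short count means the
	 * scatterlist could not be fully described by this MR.
	 */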
	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
				&off, PAGE_SIZE);
	if (unlikely(ret != ibmr->sg_dma_len)) {
		ret = ret < 0 ? ret : -EINVAL;
		goto out_inc;
	}

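	/* Claim the MR: only a FREE MR may be registered, so losing this
	 * cmpxchg means another context owns it and we must back out.
	 */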
	if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) {
		ret = -EBUSY;
		goto out_inc;
	}

	atomic_inc(&ibmr->ic->i_fastreg_inuse_count);

	/* Perform a WR for the fast_reg_mr.  Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR.  The key used is a rolling 8-bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
	frmr->fr_reg = true;

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
	if (unlikely(ret)) {
		/* Failure here can be because of -ENOMEM as well */
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);

		if (printk_ratelimit())
			pr_warn("RDS/IB: %s returned error(%d)\n",
				__func__, ret);
		goto out_inc;
	}

	/* Wait for the registration to complete, to prevent an invalid
	 * access error caused by the memory region being accessed while
	 * its registration is still pending.
	 */
	wait_event(frmr->fr_reg_done, !frmr->fr_reg);

	return ret;

out_inc:
	atomic_inc(&ibmr->ic->i_fastreg_wrs);
	return ret;
}

static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
			   struct rds_ib_mr_pool *pool,
			   struct rds_ib_mr *ibmr,
			   struct scatterlist *sg, unsigned int sg_len)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	int i;
	u32 len;
	int ret = 0;

	/* Tear down the old mapping state in the ibmr before filling it
	 * with the new sg values.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = sg;
	ibmr->sg_len = sg_len;
	ibmr->sg_dma_len = 0;
	frmr->sg_byte_len = 0;
	WARN_ON(ibmr->sg_dma_len);
	ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
					 DMA_BIDIRECTIONAL);
	if (unlikely(!ibmr->sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	frmr->sg_byte_len = 0;
	frmr->dma_npages = 0;
	len = 0;

	ret = -EINVAL;
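	/* The MR describes the region as a list of whole pages, so only
	 * the first sg entry may begin at a non-zero page offset and only
	 * the last may end short of a page boundary; reject anything
	 * else and count the pages as we go.
	 */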
	for (i = 0; i < ibmr->sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
		u64 dma_addr = sg_dma_address(&ibmr->sg[i]);

		frmr->sg_byte_len += dma_len;
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < ibmr->sg_dma_len - 1)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		len += dma_len;
	}
	frmr->dma_npages += len >> PAGE_SHIFT;

	if (frmr->dma_npages > ibmr->pool->max_pages) {
		ret = -EMSGSIZE;
		goto out_unmap;
	}

	ret = rds_ib_post_reg_frmr(ibmr);
	if (ret)
		goto out_unmap;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);

	return ret;

out_unmap:
	ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
			DMA_BIDIRECTIONAL);
	ibmr->sg_dma_len = 0;
	return ret;
}

static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
{
	struct ib_send_wr *s_wr;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
	int ret = -EINVAL;

	if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
		goto out;

	if (frmr->fr_state != FRMR_IS_INUSE)
		goto out;

	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

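	/* Post a signaled LOCAL_INV WR to invalidate the current rkey.
	 * The CQE handler clears fr_inv, moves the MR out of
	 * FRMR_IS_INUSE and wakes fr_inv_done.
	 */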
	frmr->fr_inv = true;
	s_wr = &frmr->fr_wr;

	memset(s_wr, 0, sizeof(*s_wr));
	s_wr->wr_id = (unsigned long)(void *)ibmr;
	s_wr->opcode = IB_WR_LOCAL_INV;
	s_wr->ex.invalidate_rkey = frmr->mr->rkey;
	s_wr->send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
	if (unlikely(ret)) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
		frmr->fr_inv = false;
		/* enforce order of frmr->fr_inv update
		 * before incrementing i_fastreg_wrs
		 */
		smp_mb__before_atomic();
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
		goto out;
	}

	/* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition in order
	 * to:
	 * 1) avoid silly bouncing between "clean_list" and "drop_list"
	 *    triggered by "rds_ib_reg_frmr", since it releases frmr
	 *    regions whose state is not "FRMR_IS_FREE" right away.
	 * 2) prevent an invalid access error in a race between a pending
	 *    "IB_WR_LOCAL_INV" operation and a teardown ("dma_unmap_sg",
	 *    "put_page") plus de-registration ("ib_dereg_mr") of the
	 *    corresponding memory region.
	 */
	wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);

out:
	return ret;
}

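/* Completion handler shared by the IB_WR_REG_MR and IB_WR_LOCAL_INV
 * WRs posted above; wc->wr_id carries the ibmr.  A failed completion
 * marks the MR stale and shoots down the connection so that it gets
 * re-established.
 */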
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (wc->status != IB_WC_SUCCESS) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
		if (rds_conn_up(ic->conn))
			rds_ib_conn_error(ic->conn,
					  "frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
					  &ic->conn->c_laddr,
					  &ic->conn->c_faddr,
					  wc->status,
					  ib_wc_status_msg(wc->status),
					  wc->vendor_err);
	}

	if (frmr->fr_inv) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
		frmr->fr_inv = false;
		wake_up(&frmr->fr_inv_done);
	}

	if (frmr->fr_reg) {
		frmr->fr_reg = false;
		wake_up(&frmr->fr_reg_done);
	}

	/* enforce order of frmr->{fr_reg,fr_inv} update
	 * before incrementing i_fastreg_wrs
	 */
	smp_mb__before_atomic();
	atomic_inc(&ic->i_fastreg_wrs);
}

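/* Invalidate the MRs on "list" and free up to "goal" of them.  This is
 * the teardown half of the pool flush kicked off by rds_ib_free_frmr().
 */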
void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
		       unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_frmr *frmr;
	int ret = 0, ret2;
	unsigned int freed = *nfreed;

	/* Post a LOCAL_INV WR for every MR that still has a DMA mapping */
	list_for_each_entry(ibmr, list, unmap_list) {
		if (ibmr->sg_dma_len) {
			ret2 = rds_ib_post_inv(ibmr);
			if (ret2 && !ret)
				ret = ret2;
		}
	}

	if (ret)
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		*unpinned += ibmr->sg_len;
		frmr = &ibmr->u.frmr;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
			/* Don't de-allocate if the MR is not free yet */
			if (frmr->fr_state == FRMR_IS_INUSE)
				continue;

			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			if (frmr->mr)
				ib_dereg_mr(frmr->mr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}

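/* Register the pages backing "sg" and return an MR whose rkey (stored
 * through "key") a peer can use for RDMA.  The allocation loop insists
 * on an MR in FRMR_IS_FREE state; anything else is queued for dropping
 * and another attempt is made.
 */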
struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
				  struct rds_ib_connection *ic,
				  struct scatterlist *sg,
				  unsigned long nents, u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int ret;

	if (!ic) {
		/* TODO: Add FRWR support for RDS_GET_MR using proxy qp */
		return ERR_PTR(-EOPNOTSUPP);
	}

	do {
		if (ibmr)
			rds_ib_free_frmr(ibmr, true);
		ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
		if (IS_ERR(ibmr))
			return ibmr;
		frmr = &ibmr->u.frmr;
	} while (frmr->fr_state != FRMR_IS_FREE);

	ibmr->ic = ic;
	ibmr->device = rds_ibdev;
	ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
	if (ret == 0) {
		*key = frmr->mr->rkey;
	} else {
		rds_ib_free_frmr(ibmr, false);
		ibmr = ERR_PTR(ret);
	}

	return ibmr;
}

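/* Queue an MR for release: a stale MR must be dropped and re-created,
 * anything else can go back for invalidation and reuse.
 */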
void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (frmr->fr_state == FRMR_IS_STALE)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}