/*
 * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
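
/*
 * A quick worked example of the computation above (illustrative only,
 * assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): a vec with addr = 0x1ffc
 * and bytes = 8 spans the last four bytes of page 1 and the first four
 * bytes of page 2, so
 *
 *	((0x1ffc + 8 + 0xfff) >> 12) - (0x1ffc >> 12) = 3 - 1 = 2 pages,
 *
 * even though only 8 bytes were asked for.
 */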

static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		kref_get(&insert->r_kref);
	}
	return NULL;
}
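
/*
 * Callers use rds_mr_tree_walk() in two modes: a NULL 'insert' performs a
 * pure lookup, while a non-NULL 'insert' links the new MR in when the key
 * is absent.  A sketch of the pattern at the call sites below (taking
 * rs->rs_rdma_lock is the caller's job):
 *
 *	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 *	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
 *	found = rds_mr_tree_walk(&rs->rs_rdma_keys, key, new_mr);
 *	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
 *
 * On insert the tree takes its own reference via kref_get(); a non-NULL
 * return means an MR with that key already existed and nothing was linked.
 */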

/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, kref_read(&mr->r_kref));

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct kref *kref)
{
	struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);

	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		kref_put(&mr->r_kref, __rds_put_mr_final);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.  Pinning is all-or-nothing: a
 * partial pin is undone and reported as -EFAULT.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret;

	if (write)
		gup_flags |= FOLL_WRITE;

	ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
	if (ret >= 0 && ret < nr_pages) {
		unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	struct scatterlist *sg = NULL;
	unsigned int nr_pages;
	struct page **pages = NULL;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents = 0;
	int need_odp = 0;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
	    PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
		    (args->vec.addr + args->vec.bytes)) {
		ret = -EINVAL;
		goto out;
	}

	if (!can_do_mlock()) {
		ret = -EPERM;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of the MR irrespective of the underlying
	 * transport.  To account for unaligned MR regions, subtract one
	 * from nr_pages.
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kzalloc_objs(struct page *, nr_pages);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc_obj(struct rds_mr);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	kref_init(&mr->r_kref);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret == -EOPNOTSUPP) {
		need_odp = 1;
	} else if (ret <= 0) {
		goto out;
	} else {
		nents = ret;
		sg = kmalloc_objs(*sg, nents);
		if (!sg) {
			ret = -ENOMEM;
			goto out;
		}
		WARN_ON(!nents);
		sg_init_table(sg, nents);

		/* Stick all pages into the scatterlist */
		for (i = 0; i < nents; i++)
			sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

		rdsdebug("RDS: trans_private nents is %u\n", nents);
	}
	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(
		sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
		args->vec.addr, args->vec.bytes,
		need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);

	if (IS_ERR(trans_private)) {
		/* In the ODP case, we don't GUP pages, so there is
		 * nothing to release.
		 */
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = PTR_ERR(trans_private);
		/* Trigger connection so that it's ready for the next retry */
		if (ret == -ENODEV && cp)
			rds_conn_connect_if_down(cp->cp_conn);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	if (need_odp)
		cookie = rds_rdma_make_cookie(mr->r_key, 0);
	else
		cookie = rds_rdma_make_cookie(mr->r_key,
					      args->vec.addr & ~PAGE_MASK);
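
	/* The cookie packing itself lives in rds_rdma_make_cookie() (see
	 * rds.h); as a sketch of the layout, the R_Key occupies the low
	 * 32 bits and the byte offset the high 32 bits, roughly
	 *
	 *	cookie = (u64)r_key | ((u64)offset << 32);
	 */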
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		kref_get(&mr->r_kref);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		kref_put(&mr->r_kref, __rds_put_mr_final);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
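
/*
 * A hypothetical userspace sketch of this setsockopt path (not part of
 * this file; fd, buf and len are assumed to be set up by the caller):
 *
 *	uint64_t cookie;
 *	struct rds_get_mr_args marg = {
 *		.vec	     = { .addr = (uint64_t)(unsigned long)buf,
 *				 .bytes = len },
 *		.cookie_addr = (uint64_t)(unsigned long)&cookie,
 *		.flags	     = RDS_RDMA_USE_ONCE,
 *	};
 *	if (setsockopt(fd, SOL_RDS, RDS_GET_MR, &marg, sizeof(marg)) < 0)
 *		handle_error();
 *
 * On success, 'cookie' holds the <R_Key, offset> pair to hand to the peer.
 */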

int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	kref_put(&mr->r_kref, __rds_put_mr_final);
	return 0;
}
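
/*
 * A hedged userspace sketch of the matching teardown (illustrative only;
 * 'cookie' is the value produced by RDS_GET_MR above):
 *
 *	struct rds_free_mr_args farg = {
 *		.cookie = cookie,
 *		.flags	= RDS_RDMA_INVALIDATE,
 *	};
 *	setsockopt(fd, SOL_RDS, RDS_FREE_MR, &farg, sizeof(farg));
 *
 * Passing .cookie = 0 instead asks the transport to flush all unused MRs,
 * per the special case above.
 */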

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	/* Get a reference so that the MR won't go away before calling
	 * sync_mr() below.
	 */
	kref_get(&mr->r_kref);

	/* If it is going to be freed, remove it from the tree now so
	 * that no other thread can find it and free it.
	 */
	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* Release the reference held above. */
	kref_put(&mr->r_kref, __rds_put_mr_final);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me)
		kref_put(&mr->r_kref, __rds_put_mr_final);
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	if (ro->op_odp_mr) {
		kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
	} else {
		for (i = 0; i < ro->op_nents; i++) {
			struct page *page = sg_page(&ro->op_sg[i]);

			/* Mark the page dirty if it was possibly modified,
			 * which is the case for an RDMA READ, which copies
			 * from remote to local memory.
			 */
			unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
		}
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
	ro->op_odp_mr = NULL;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark the page dirty: it was possibly modified, since the atomic
	 * operation hands the previous value of the remote word back to
	 * this local page. */
	unpin_user_pages_dirty_lock(&page, 1, true);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}

/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	if (args->nr_local > UIO_MAXIOV)
		return -EMSGSIZE;

	iov->iov = kzalloc_objs(struct rds_iovec, args->nr_local);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {

		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op.
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;
	bool odp_supported = true;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}
	/* odp-mr is not supported for multiple requests within one message */
	if (args->nr_local != 1)
		odp_supported = false;

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kzalloc_objs(struct page *, nr_pages);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	op->op_odp_mr = NULL;

	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (IS_ERR(op->op_sg)) {
		ret = PTR_ERR(op->op_sg);
		goto out_pages;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc_obj(struct rds_notifier);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR).
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* no need to check; rds_rdma_pages() verified nr will be positive */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if ((!odp_supported && ret <= 0) ||
		    (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
			goto out_pages;

		if (ret == -EOPNOTSUPP) {
			struct rds_mr *local_odp_mr;

			if (!rs->rs_transport->get_mr) {
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			local_odp_mr = kzalloc_obj(*local_odp_mr);
			if (!local_odp_mr) {
				ret = -ENOMEM;
				goto out_pages;
			}
			RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
			kref_init(&local_odp_mr->r_kref);
			local_odp_mr->r_trans = rs->rs_transport;
			local_odp_mr->r_sock = rs;
			local_odp_mr->r_trans_private =
				rs->rs_transport->get_mr(
					NULL, 0, rs, &local_odp_mr->r_key, NULL,
					iov->addr, iov->bytes, ODP_VIRTUAL);
			if (IS_ERR(local_odp_mr->r_trans_private)) {
				ret = PTR_ERR(local_odp_mr->r_trans_private);
				rdsdebug("get_mr ret %d %p\n", ret,
					 local_odp_mr->r_trans_private);
				kfree(local_odp_mr);
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
				 local_odp_mr, local_odp_mr->r_trans_private);
			op->op_odp_mr = local_odp_mr;
			op->op_odp_addr = iov->addr;
		}

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
					offset);

			sg_dma_len(sg) = sg->length;
			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
			       sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
	}
	op->op_bytes = nr_bytes;
	ret = 0;

out_pages:
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
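
/*
 * For orientation, a hedged userspace sketch of how an RDMA op reaches
 * this function (illustrative only; fd, buf, len, cookie and my_token are
 * assumed to exist on the caller's side):
 *
 *	struct rds_iovec liov = { (uint64_t)(unsigned long)buf, len };
 *	struct rds_rdma_args rargs = {
 *		.cookie		= cookie,   // from the peer's RDS_GET_MR
 *		.remote_vec	= { .addr = 0, .bytes = len },
 *		.local_vec_addr	= (uint64_t)(unsigned long)&liov,
 *		.nr_local	= 1,
 *		.flags		= RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
 *		.user_token	= my_token,
 *	};
 *
 * packed into a control message with cmsg_level = SOL_RDS and
 * cmsg_type = RDS_CMSG_RDMA_ARGS and handed to sendmsg().  By the time
 * control reaches here, rds_rdma_extra_size() has already copied the
 * local iovec array into 'vec'.
 */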

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote.
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		kref_get(&mr->r_kref);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private,
				     DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}
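
/*
 * The userspace side of this is tiny: the cookie itself is the control
 * payload.  A hedged sketch (illustrative only; 'msg' and 'cookie' are
 * assumed to be set up by the caller):
 *
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_RDS;
 *	cm->cmsg_type  = RDS_CMSG_RDMA_DEST;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(rds_rdma_cookie_t));
 *	memcpy(CMSG_DATA(cm), &cookie, sizeof(cookie));
 */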

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	 || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (IS_ERR(rm->atomic.op_sg)) {
		ret = PTR_ERR(rm->atomic.op_sg);
		goto err;
	}

	/* verify the local address is 8-byte aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc_obj(*rm->atomic.op_notifier);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		unpin_user_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}
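
/*
 * A hedged sketch of the masked compare-and-swap variant from userspace
 * (illustrative only; fd, cookie, raddr, mask, oldval, newval and
 * my_token are assumed, and 'result' must be 8-byte aligned):
 *
 *	uint64_t result;
 *	struct rds_atomic_args aarg = {
 *		.cookie	     = cookie,
 *		.local_addr  = (uint64_t)(unsigned long)&result,
 *		.remote_addr = raddr,
 *		.m_cswp	     = { .compare      = oldval,
 *				 .swap	       = newval,
 *				 .compare_mask = mask,
 *				 .swap_mask    = mask },
 *		.flags	     = RDS_RDMA_NOTIFY_ME,
 *		.user_token  = my_token,
 *	};
 *
 * sent as cmsg_type RDS_CMSG_MASKED_ATOMIC_CSWP at cmsg_level SOL_RDS.
 * As the switch above shows, the unmasked RDS_CMSG_ATOMIC_CSWP path is
 * the same hardware op with both masks forced to ~0.
 */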
955