/*
 * Copyright (c) 2006, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/errqueue.h>

#include "rds.h"

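/*
 * Fixed payload size, in bytes, of each known extension header type.
 * A size of zero marks a type that carries no data; the lookups in
 * rds_find_next_ext_space() and rds_message_add_extension() treat it
 * as unknown.
 */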
static unsigned int	rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]	= 0,
[RDS_EXTHDR_VERSION]	= sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]	= sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]	= sizeof(struct rds_ext_header_rdma_dest),
[RDS_EXTHDR_RDMA_BYTES]	= sizeof(struct rds_ext_header_rdma_bytes),
[RDS_EXTHDR_NPATHS]	= sizeof(__be16),
[RDS_EXTHDR_GEN_NUM]	= sizeof(__be32),
[RDS_EXTHDR_SPORT_IDX]	= 1,
};

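/* Take a reference on @rm; paired with rds_message_put(). */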
void rds_message_addref(struct rds_message *rm)
{
	rdsdebug("addref rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
	refcount_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

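/*
 * Append one completion cookie to @info's batch.  Returns false when
 * the batch already holds RDS_MAX_ZCOOKIES entries and the caller
 * must queue a fresh rds_msg_zcopy_info instead.
 */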
static inline bool rds_zcookie_add(struct rds_msg_zcopy_info *info, u32 cookie)
{
	struct rds_zcopy_cookies *ck = &info->zcookies;
	int ncookies = ck->num;

	if (ncookies == RDS_MAX_ZCOOKIES)
		return false;
	ck->cookies[ncookies] = cookie;
	ck->num = ++ncookies;
	return true;
}

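/* A znotifier is embedded in its rds_msg_zcopy_info; map it back. */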
static struct rds_msg_zcopy_info *rds_info_from_znotifier(struct rds_znotifier *znotif)
{
	return container_of(znotif, struct rds_msg_zcopy_info, znotif);
}

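/*
 * Free every notification still queued on @q.  The list is spliced
 * onto a private head under the lock so the entries can be freed
 * without holding it.
 */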
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
{
	unsigned long flags;
	LIST_HEAD(copy);
	struct rds_msg_zcopy_info *info, *tmp;

	spin_lock_irqsave(&q->lock, flags);
	list_splice(&q->zcookie_head, &copy);
	INIT_LIST_HEAD(&q->zcookie_head);
	spin_unlock_irqrestore(&q->lock, flags);

	list_for_each_entry_safe(info, tmp, &copy, rs_zcookie_next) {
		list_del(&info->rs_zcookie_next);
		kfree(info);
	}
}

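/*
 * Hand a zerocopy completion cookie to the socket.  Cookies are
 * batched: piggyback onto the rds_msg_zcopy_info at the head of
 * rs_zcookie_queue if that batch still has room, and only queue the
 * znotifier's own info struct when the queue is empty or the head
 * batch is full.
 */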
static void rds_rm_zerocopy_callback(struct rds_sock *rs,
				     struct rds_znotifier *znotif)
{
	struct rds_msg_zcopy_info *info;
	struct rds_msg_zcopy_queue *q;
	u32 cookie = znotif->z_cookie;
	struct rds_zcopy_cookies *ck;
	struct list_head *head;
	unsigned long flags;

	mm_unaccount_pinned_pages(&znotif->z_mmp);
	q = &rs->rs_zcookie_queue;
	spin_lock_irqsave(&q->lock, flags);
	head = &q->zcookie_head;
	if (!list_empty(head)) {
		info = list_first_entry(head, struct rds_msg_zcopy_info,
					rs_zcookie_next);
		if (rds_zcookie_add(info, cookie)) {
			spin_unlock_irqrestore(&q->lock, flags);
			kfree(rds_info_from_znotifier(znotif));
			/* caller invokes rds_wake_sk_sleep() */
			return;
		}
	}

	info = rds_info_from_znotifier(znotif);
	ck = &info->zcookies;
	memset(ck, 0, sizeof(*ck));
	WARN_ON(!rds_zcookie_add(info, cookie));
	list_add_tail(&info->rs_zcookie_next, &q->zcookie_head);

	spin_unlock_irqrestore(&q->lock, flags);
	/* caller invokes rds_wake_sk_sleep() */
}

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
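/*
 * Release everything a message owns once the last reference is gone:
 * the data pages (put_page() for pages pinned by the zerocopy path,
 * __free_page() otherwise), any pending zerocopy notification, and
 * the rdma/atomic sub-ops along with their MRs.  Messages flagged
 * RDS_MSG_PAGEVEC wrap caller-owned pages (see
 * rds_message_map_pages()) and are not purged at all.
 */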
static void rds_message_purge(struct rds_message *rm)
{
	struct rds_znotifier *znotifier;
	unsigned long i, flags;
	bool zcopy;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	spin_lock_irqsave(&rm->m_rs_lock, flags);
	znotifier = rm->data.op_mmp_znotifier;
	rm->data.op_mmp_znotifier = NULL;
	zcopy = !!znotifier;

	if (rm->m_rs) {
		struct rds_sock *rs = rm->m_rs;

		if (znotifier) {
			rds_rm_zerocopy_callback(rs, znotifier);
			rds_wake_sk_sleep(rs);
		}
		sock_put(rds_rs_to_sk(rs));
		rm->m_rs = NULL;
	} else if (znotifier) {
		/*
		 * Zerocopy can fail before the message is queued on the
		 * socket, so there is no rs to carry the notification.
		 */
		mm_unaccount_pinned_pages(&znotifier->z_mmp);
		kfree(rds_info_from_znotifier(znotifier));
	}
	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	for (i = 0; i < rm->data.op_nents; i++) {
		/* XXX will have to put_page for page refs */
		if (!zcopy)
			__free_page(sg_page(&rm->data.op_sg[i]));
		else
			put_page(sg_page(&rm->data.op_sg[i]));
	}
	rm->data.op_nents = 0;

	if (rm->rdma.op_active)
		rds_rdma_free_op(&rm->rdma);
	if (rm->rdma.op_rdma_mr)
		kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final);

	if (rm->atomic.op_active)
		rds_atomic_free_op(&rm->atomic);
	if (rm->atomic.op_rdma_mr)
		kref_put(&rm->atomic.op_rdma_mr->r_kref, __rds_put_mr_final);
}

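/* Drop a reference on @rm; the final put purges and frees the message. */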
void rds_message_put(struct rds_message *rm)
{
	rdsdebug("put rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
	WARN(!refcount_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
	if (refcount_dec_and_test(&rm->m_refcount)) {
		BUG_ON(!list_empty(&rm->m_sock_item));
		BUG_ON(!list_empty(&rm->m_conn_item));
		rds_message_purge(rm);

		kfree(rm);
	}
}
EXPORT_SYMBOL_GPL(rds_message_put);

void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq)
{
	hdr->h_flags = 0;
	hdr->h_sport = sport;
	hdr->h_dport = dport;
	hdr->h_sequence = cpu_to_be64(seq);
	/* see rds_find_next_ext_space() for the reason why we memset the
	 * ext header
	 */
	memset(hdr->h_exthdr, RDS_EXTHDR_NONE, RDS_HEADER_EXT_SPACE);
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);

/*
 * Find the next place we can add an RDS header extension of a
 * specific length.  Extension headers are pushed one after the
 * other.  In the following, the number after the colon is the number
 * of bytes:
 *
 *	[ type1:1 dta1:len1 [ type2:1 dta2:len2 ] ... ] RDS_EXTHDR_NONE
 *
 * If the extension headers fill the complete extension header space
 * (16 bytes), the trailing RDS_EXTHDR_NONE is omitted.
 */
static int rds_find_next_ext_space(struct rds_header *hdr, unsigned int len,
				   u8 **ext_start)
{
	unsigned int ext_len;
	unsigned int type;
	int ind = 0;

	while ((ind + 1 + len) <= RDS_HEADER_EXT_SPACE) {
		if (hdr->h_exthdr[ind] == RDS_EXTHDR_NONE) {
			*ext_start = hdr->h_exthdr + ind;
			return 0;
		}

		type = hdr->h_exthdr[ind];

		ext_len = (type < __RDS_EXTHDR_MAX) ? rds_exthdr_size[type] : 0;
		WARN_ONCE(!ext_len, "Unknown ext hdr type %d\n", type);
		if (!ext_len)
			return -EINVAL;

		/* ind points to a valid ext hdr with known length */
		ind += 1 + ext_len;
	}

	/* no room for extension */
	return -ENOSPC;
}

/* The ext hdr space is prefilled with zero from the kzalloc() */
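/*
 * Returns 1 if the extension was added, 0 if @type is unknown or the
 * extension space is exhausted.
 */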
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data)
{
	unsigned char *dst;
	unsigned int len;

	len = (type < __RDS_EXTHDR_MAX) ? rds_exthdr_size[type] : 0;
	if (!len)
		return 0;

	if (rds_find_next_ext_space(hdr, len, &dst))
		return 0;

	*dst++ = type;
	memcpy(dst, data, len);

	return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);

/*
 * If a message has extension headers, retrieve them here.
 * Call like this:
 *
 * unsigned int pos = 0;
 *
 * while (1) {
 *	buflen = sizeof(buffer);
 *	type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *	if (type == RDS_EXTHDR_NONE)
 *		break;
 *	...
 * }
 */
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen)
{
	unsigned int offset, ext_type, ext_len;
	u8 *src = hdr->h_exthdr;

	offset = *pos;
	if (offset >= RDS_HEADER_EXT_SPACE)
		goto none;

	/* Get the extension type and length. For now, the
	 * length is implied by the extension type. */
	ext_type = src[offset++];

	if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
		goto none;
	ext_len = rds_exthdr_size[ext_type];
	if (offset + ext_len > RDS_HEADER_EXT_SPACE)
		goto none;

	*pos = offset + ext_len;
	if (ext_len < *buflen)
		*buflen = ext_len;
	memcpy(buf, src + offset, *buflen);
	return ext_type;

none:
	*pos = RDS_HEADER_EXT_SPACE;
	*buflen = 0;
	return RDS_EXTHDR_NONE;
}

int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
	struct rds_ext_header_rdma_dest ext_hdr;

	ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
	ext_hdr.h_rdma_offset = cpu_to_be32(offset);
	return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr);
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);

/*
 * Each rds_message is allocated with extra space for the scatterlist
 * entries the RDS ops will need.  This keeps the number of memory
 * allocations down; each RDS op can then grab SG entries from the
 * pool when initializing its part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
	struct rds_message *rm;

	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
		return NULL;

	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
	if (!rm)
		goto out;

	rm->m_used_sgs = 0;
	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

	refcount_set(&rm->m_refcount, 1);
	INIT_LIST_HEAD(&rm->m_sock_item);
	INIT_LIST_HEAD(&rm->m_conn_item);
	spin_lock_init(&rm->m_rs_lock);
	init_waitqueue_head(&rm->m_flush_wait);

out:
	return rm;
}

/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
	struct scatterlist *sg_first = (struct scatterlist *)&rm[1];
	struct scatterlist *sg_ret;

	if (nents <= 0) {
		pr_warn("rds: alloc sgs failed! nents <= 0\n");
		return ERR_PTR(-EINVAL);
	}

	if (rm->m_used_sgs + nents > rm->m_total_sgs) {
		pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
			rm->m_total_sgs, rm->m_used_sgs, nents);
		return ERR_PTR(-ENOMEM);
	}

	sg_ret = &sg_first[rm->m_used_sgs];
	sg_init_table(sg_ret, nents);
	rm->m_used_sgs += nents;

	return sg_ret;
}
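
/*
 * A minimal usage sketch for the two helpers above (this mirrors what
 * rds_message_map_pages() below does): size the trailing sg pool when
 * allocating the message, then carve entries out of it per op.
 *
 *	extra = num_sgs * sizeof(struct scatterlist);
 *	rm = rds_message_alloc(extra, gfp);
 *	if (!rm)
 *		return ERR_PTR(-ENOMEM);
 *	sg = rds_message_alloc_sgs(rm, num_sgs);
 *	if (IS_ERR(sg))
 *		...
 */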
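/*
 * Build a message around caller-provided pages: each sg entry points
 * at one of @page_addrs.  RDS_MSG_PAGEVEC records that the pages are
 * not owned by the message, so rds_message_purge() leaves them alone.
 */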
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
	struct rds_message *rm;
	unsigned int i;
	int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
	int extra_bytes = num_sgs * sizeof(struct scatterlist);

	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
	if (!rm)
		return ERR_PTR(-ENOMEM);

	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
	rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (IS_ERR(rm->data.op_sg)) {
		void *err = ERR_CAST(rm->data.op_sg);

		rds_message_put(rm);
		return err;
	}

	for (i = 0; i < rm->data.op_nents; ++i) {
		sg_set_page(&rm->data.op_sg[i],
			    virt_to_page((void *)page_addrs[i]),
			    PAGE_SIZE, 0);
	}

	return rm;
}

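/*
 * Zerocopy path: instead of copying, pin the user pages with
 * iov_iter_get_pages2() and reference them from the sg list.  The
 * pinned pages are accounted through mm_account_pinned_pages(), and
 * a znotifier is attached so that a completion cookie reaches the
 * socket when the message is purged.
 */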
static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *from)
{
	struct scatterlist *sg;
	int ret = 0;
	int length = iov_iter_count(from);
	struct rds_msg_zcopy_info *info;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

	/*
	 * now allocate and copy in the data payload.
	 */
	sg = rm->data.op_sg;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	INIT_LIST_HEAD(&info->rs_zcookie_next);
	rm->data.op_mmp_znotifier = &info->znotif;
	if (mm_account_pinned_pages(&rm->data.op_mmp_znotifier->z_mmp,
				    length)) {
		ret = -ENOMEM;
		goto err;
	}
	while (iov_iter_count(from)) {
		struct page *pages;
		size_t start;
		ssize_t copied;

		copied = iov_iter_get_pages2(from, &pages, PAGE_SIZE,
					     1, &start);
		if (copied < 0) {
			struct mmpin *mmp;
			int i;

			for (i = 0; i < rm->data.op_nents; i++)
				put_page(sg_page(&rm->data.op_sg[i]));
			mmp = &rm->data.op_mmp_znotifier->z_mmp;
			mm_unaccount_pinned_pages(mmp);
			ret = -EFAULT;
			goto err;
		}
		length -= copied;
		sg_set_page(sg, pages, copied, start);
		rm->data.op_nents++;
		sg++;
	}
	WARN_ON_ONCE(length != 0);
	return ret;
err:
	kfree(info);
	rm->data.op_mmp_znotifier = NULL;
	return ret;
}

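/*
 * Copy the payload in @from into pages allocated on demand, or hand
 * off to the zerocopy path when @zcopy is set.
 */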
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy)
{
	unsigned long to_copy, nbytes;
	unsigned long sg_off;
	struct scatterlist *sg;
	int ret = 0;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

	/* now allocate and copy in the data payload. */
	sg = rm->data.op_sg;
	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

	if (zcopy)
		return rds_message_zcopy_from_user(rm, from);

	while (iov_iter_count(from)) {
		if (!sg_page(sg)) {
			ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
						       GFP_HIGHUSER);
			if (ret)
				return ret;
			rm->data.op_nents++;
			sg_off = 0;
		}

		to_copy = min_t(unsigned long, iov_iter_count(from),
				sg->length - sg_off);

		rds_stats_add(s_copy_from_user, to_copy);
		nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
					     to_copy, from);
		if (nbytes != to_copy)
			return -EFAULT;

		sg_off += to_copy;

		if (sg_off == sg->length)
			sg++;
	}

	return ret;
}

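/*
 * Copy a received message's payload out to @to.  Returns the number
 * of bytes copied, or -EFAULT when a copy fails.
 */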
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_message *rm;
	struct scatterlist *sg;
	unsigned long to_copy;
	unsigned long vec_off;
	int copied;
	int ret;
	u32 len;

	rm = container_of(inc, struct rds_message, m_inc);
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	sg = rm->data.op_sg;
	vec_off = 0;
	copied = 0;

	while (iov_iter_count(to) && copied < len) {
		to_copy = min_t(unsigned long, iov_iter_count(to),
				sg->length - vec_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
					to_copy, to);
		if (ret != to_copy)
			return -EFAULT;

		vec_off += to_copy;
		copied += to_copy;

		if (vec_off == sg->length) {
			vec_off = 0;
			sg++;
		}
	}

	return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
	wait_event_interruptible(rm->m_flush_wait,
				 !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}

void rds_message_unmapped(struct rds_message *rm)
{
	clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
	wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);