1 /* 2 * Copyright (c) 2006 Oracle. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 #include <linux/kernel.h> 34 #include <linux/slab.h> 35 #include <linux/export.h> 36 #include <linux/skbuff.h> 37 #include <linux/list.h> 38 #include <linux/errqueue.h> 39 40 #include "rds.h" 41 42 static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = { 43 [RDS_EXTHDR_NONE] = 0, 44 [RDS_EXTHDR_VERSION] = sizeof(struct rds_ext_header_version), 45 [RDS_EXTHDR_RDMA] = sizeof(struct rds_ext_header_rdma), 46 [RDS_EXTHDR_RDMA_DEST] = sizeof(struct rds_ext_header_rdma_dest), 47 [RDS_EXTHDR_NPATHS] = sizeof(u16), 48 [RDS_EXTHDR_GEN_NUM] = sizeof(u32), 49 }; 50 51 52 void rds_message_addref(struct rds_message *rm) 53 { 54 rdsdebug("addref rm %p ref %d\n", rm, refcount_read(&rm->m_refcount)); 55 refcount_inc(&rm->m_refcount); 56 } 57 EXPORT_SYMBOL_GPL(rds_message_addref); 58 59 static inline bool skb_zcookie_add(struct sk_buff *skb, u32 cookie) 60 { 61 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 62 int ncookies; 63 u32 *ptr; 64 65 if (serr->ee.ee_origin != SO_EE_ORIGIN_ZCOOKIE) 66 return false; 67 ncookies = serr->ee.ee_data; 68 if (ncookies == SO_EE_ORIGIN_MAX_ZCOOKIES) 69 return false; 70 ptr = skb_put(skb, sizeof(u32)); 71 *ptr = cookie; 72 serr->ee.ee_data = ++ncookies; 73 return true; 74 } 75 76 static void rds_rm_zerocopy_callback(struct rds_sock *rs, 77 struct rds_znotifier *znotif) 78 { 79 struct sock *sk = rds_rs_to_sk(rs); 80 struct sk_buff *skb, *tail; 81 struct sock_exterr_skb *serr; 82 unsigned long flags; 83 struct sk_buff_head *q; 84 u32 cookie = znotif->z_cookie; 85 86 q = &sk->sk_error_queue; 87 spin_lock_irqsave(&q->lock, flags); 88 tail = skb_peek_tail(q); 89 90 if (tail && skb_zcookie_add(tail, cookie)) { 91 spin_unlock_irqrestore(&q->lock, flags); 92 mm_unaccount_pinned_pages(&znotif->z_mmp); 93 consume_skb(rds_skb_from_znotifier(znotif)); 94 sk->sk_error_report(sk); 95 return; 96 } 97 98 skb = rds_skb_from_znotifier(znotif); 99 serr = SKB_EXT_ERR(skb); 100 memset(&serr->ee, 0, sizeof(serr->ee)); 101 serr->ee.ee_errno = 0; 102 serr->ee.ee_origin = SO_EE_ORIGIN_ZCOOKIE; 103 serr->ee.ee_info = 0; 104 WARN_ON(!skb_zcookie_add(skb, cookie)); 105 106 __skb_queue_tail(q, skb); 107 108 spin_unlock_irqrestore(&q->lock, flags); 109 sk->sk_error_report(sk); 110 111 mm_unaccount_pinned_pages(&znotif->z_mmp); 112 } 113 114 /* 115 * This relies on dma_map_sg() not touching sg[].page during merging. 116 */ 117 static void rds_message_purge(struct rds_message *rm) 118 { 119 unsigned long i, flags; 120 bool zcopy = false; 121 122 if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags))) 123 return; 124 125 spin_lock_irqsave(&rm->m_rs_lock, flags); 126 if (rm->m_rs) { 127 struct rds_sock *rs = rm->m_rs; 128 129 if (rm->data.op_mmp_znotifier) { 130 zcopy = true; 131 rds_rm_zerocopy_callback(rs, rm->data.op_mmp_znotifier); 132 rm->data.op_mmp_znotifier = NULL; 133 } 134 sock_put(rds_rs_to_sk(rs)); 135 rm->m_rs = NULL; 136 } 137 spin_unlock_irqrestore(&rm->m_rs_lock, flags); 138 139 for (i = 0; i < rm->data.op_nents; i++) { 140 /* XXX will have to put_page for page refs */ 141 if (!zcopy) 142 __free_page(sg_page(&rm->data.op_sg[i])); 143 else 144 put_page(sg_page(&rm->data.op_sg[i])); 145 } 146 rm->data.op_nents = 0; 147 148 if (rm->rdma.op_active) 149 rds_rdma_free_op(&rm->rdma); 150 if (rm->rdma.op_rdma_mr) 151 rds_mr_put(rm->rdma.op_rdma_mr); 152 153 if (rm->atomic.op_active) 154 rds_atomic_free_op(&rm->atomic); 155 if (rm->atomic.op_rdma_mr) 156 rds_mr_put(rm->atomic.op_rdma_mr); 157 } 158 159 void rds_message_put(struct rds_message *rm) 160 { 161 rdsdebug("put rm %p ref %d\n", rm, refcount_read(&rm->m_refcount)); 162 WARN(!refcount_read(&rm->m_refcount), "danger refcount zero on %p\n", rm); 163 if (refcount_dec_and_test(&rm->m_refcount)) { 164 BUG_ON(!list_empty(&rm->m_sock_item)); 165 BUG_ON(!list_empty(&rm->m_conn_item)); 166 rds_message_purge(rm); 167 168 kfree(rm); 169 } 170 } 171 EXPORT_SYMBOL_GPL(rds_message_put); 172 173 void rds_message_populate_header(struct rds_header *hdr, __be16 sport, 174 __be16 dport, u64 seq) 175 { 176 hdr->h_flags = 0; 177 hdr->h_sport = sport; 178 hdr->h_dport = dport; 179 hdr->h_sequence = cpu_to_be64(seq); 180 hdr->h_exthdr[0] = RDS_EXTHDR_NONE; 181 } 182 EXPORT_SYMBOL_GPL(rds_message_populate_header); 183 184 int rds_message_add_extension(struct rds_header *hdr, unsigned int type, 185 const void *data, unsigned int len) 186 { 187 unsigned int ext_len = sizeof(u8) + len; 188 unsigned char *dst; 189 190 /* For now, refuse to add more than one extension header */ 191 if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE) 192 return 0; 193 194 if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type]) 195 return 0; 196 197 if (ext_len >= RDS_HEADER_EXT_SPACE) 198 return 0; 199 dst = hdr->h_exthdr; 200 201 *dst++ = type; 202 memcpy(dst, data, len); 203 204 dst[len] = RDS_EXTHDR_NONE; 205 return 1; 206 } 207 EXPORT_SYMBOL_GPL(rds_message_add_extension); 208 209 /* 210 * If a message has extension headers, retrieve them here. 211 * Call like this: 212 * 213 * unsigned int pos = 0; 214 * 215 * while (1) { 216 * buflen = sizeof(buffer); 217 * type = rds_message_next_extension(hdr, &pos, buffer, &buflen); 218 * if (type == RDS_EXTHDR_NONE) 219 * break; 220 * ... 221 * } 222 */ 223 int rds_message_next_extension(struct rds_header *hdr, 224 unsigned int *pos, void *buf, unsigned int *buflen) 225 { 226 unsigned int offset, ext_type, ext_len; 227 u8 *src = hdr->h_exthdr; 228 229 offset = *pos; 230 if (offset >= RDS_HEADER_EXT_SPACE) 231 goto none; 232 233 /* Get the extension type and length. For now, the 234 * length is implied by the extension type. */ 235 ext_type = src[offset++]; 236 237 if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX) 238 goto none; 239 ext_len = rds_exthdr_size[ext_type]; 240 if (offset + ext_len > RDS_HEADER_EXT_SPACE) 241 goto none; 242 243 *pos = offset + ext_len; 244 if (ext_len < *buflen) 245 *buflen = ext_len; 246 memcpy(buf, src + offset, *buflen); 247 return ext_type; 248 249 none: 250 *pos = RDS_HEADER_EXT_SPACE; 251 *buflen = 0; 252 return RDS_EXTHDR_NONE; 253 } 254 255 int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset) 256 { 257 struct rds_ext_header_rdma_dest ext_hdr; 258 259 ext_hdr.h_rdma_rkey = cpu_to_be32(r_key); 260 ext_hdr.h_rdma_offset = cpu_to_be32(offset); 261 return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr)); 262 } 263 EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension); 264 265 /* 266 * Each rds_message is allocated with extra space for the scatterlist entries 267 * rds ops will need. This is to minimize memory allocation count. Then, each rds op 268 * can grab SGs when initializing its part of the rds_message. 269 */ 270 struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp) 271 { 272 struct rds_message *rm; 273 274 if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message)) 275 return NULL; 276 277 rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp); 278 if (!rm) 279 goto out; 280 281 rm->m_used_sgs = 0; 282 rm->m_total_sgs = extra_len / sizeof(struct scatterlist); 283 284 refcount_set(&rm->m_refcount, 1); 285 INIT_LIST_HEAD(&rm->m_sock_item); 286 INIT_LIST_HEAD(&rm->m_conn_item); 287 spin_lock_init(&rm->m_rs_lock); 288 init_waitqueue_head(&rm->m_flush_wait); 289 290 out: 291 return rm; 292 } 293 294 /* 295 * RDS ops use this to grab SG entries from the rm's sg pool. 296 */ 297 struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents) 298 { 299 struct scatterlist *sg_first = (struct scatterlist *) &rm[1]; 300 struct scatterlist *sg_ret; 301 302 WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs); 303 WARN_ON(!nents); 304 305 if (rm->m_used_sgs + nents > rm->m_total_sgs) 306 return NULL; 307 308 sg_ret = &sg_first[rm->m_used_sgs]; 309 sg_init_table(sg_ret, nents); 310 rm->m_used_sgs += nents; 311 312 return sg_ret; 313 } 314 315 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len) 316 { 317 struct rds_message *rm; 318 unsigned int i; 319 int num_sgs = ceil(total_len, PAGE_SIZE); 320 int extra_bytes = num_sgs * sizeof(struct scatterlist); 321 322 rm = rds_message_alloc(extra_bytes, GFP_NOWAIT); 323 if (!rm) 324 return ERR_PTR(-ENOMEM); 325 326 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); 327 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 328 rm->data.op_nents = ceil(total_len, PAGE_SIZE); 329 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); 330 if (!rm->data.op_sg) { 331 rds_message_put(rm); 332 return ERR_PTR(-ENOMEM); 333 } 334 335 for (i = 0; i < rm->data.op_nents; ++i) { 336 sg_set_page(&rm->data.op_sg[i], 337 virt_to_page(page_addrs[i]), 338 PAGE_SIZE, 0); 339 } 340 341 return rm; 342 } 343 344 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from, 345 bool zcopy) 346 { 347 unsigned long to_copy, nbytes; 348 unsigned long sg_off; 349 struct scatterlist *sg; 350 int ret = 0; 351 int length = iov_iter_count(from); 352 353 rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from)); 354 355 /* 356 * now allocate and copy in the data payload. 357 */ 358 sg = rm->data.op_sg; 359 sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */ 360 361 if (zcopy) { 362 int total_copied = 0; 363 struct sk_buff *skb; 364 365 skb = alloc_skb(SO_EE_ORIGIN_MAX_ZCOOKIES * sizeof(u32), 366 GFP_KERNEL); 367 if (!skb) 368 return -ENOMEM; 369 rm->data.op_mmp_znotifier = RDS_ZCOPY_SKB(skb); 370 if (mm_account_pinned_pages(&rm->data.op_mmp_znotifier->z_mmp, 371 length)) { 372 ret = -ENOMEM; 373 goto err; 374 } 375 while (iov_iter_count(from)) { 376 struct page *pages; 377 size_t start; 378 ssize_t copied; 379 380 copied = iov_iter_get_pages(from, &pages, PAGE_SIZE, 381 1, &start); 382 if (copied < 0) { 383 struct mmpin *mmp; 384 int i; 385 386 for (i = 0; i < rm->data.op_nents; i++) 387 put_page(sg_page(&rm->data.op_sg[i])); 388 mmp = &rm->data.op_mmp_znotifier->z_mmp; 389 mm_unaccount_pinned_pages(mmp); 390 ret = -EFAULT; 391 goto err; 392 } 393 total_copied += copied; 394 iov_iter_advance(from, copied); 395 length -= copied; 396 sg_set_page(sg, pages, copied, start); 397 rm->data.op_nents++; 398 sg++; 399 } 400 WARN_ON_ONCE(length != 0); 401 return ret; 402 err: 403 consume_skb(skb); 404 rm->data.op_mmp_znotifier = NULL; 405 return ret; 406 } /* zcopy */ 407 408 while (iov_iter_count(from)) { 409 if (!sg_page(sg)) { 410 ret = rds_page_remainder_alloc(sg, iov_iter_count(from), 411 GFP_HIGHUSER); 412 if (ret) 413 return ret; 414 rm->data.op_nents++; 415 sg_off = 0; 416 } 417 418 to_copy = min_t(unsigned long, iov_iter_count(from), 419 sg->length - sg_off); 420 421 rds_stats_add(s_copy_from_user, to_copy); 422 nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off, 423 to_copy, from); 424 if (nbytes != to_copy) 425 return -EFAULT; 426 427 sg_off += to_copy; 428 429 if (sg_off == sg->length) 430 sg++; 431 } 432 433 return ret; 434 } 435 436 int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) 437 { 438 struct rds_message *rm; 439 struct scatterlist *sg; 440 unsigned long to_copy; 441 unsigned long vec_off; 442 int copied; 443 int ret; 444 u32 len; 445 446 rm = container_of(inc, struct rds_message, m_inc); 447 len = be32_to_cpu(rm->m_inc.i_hdr.h_len); 448 449 sg = rm->data.op_sg; 450 vec_off = 0; 451 copied = 0; 452 453 while (iov_iter_count(to) && copied < len) { 454 to_copy = min_t(unsigned long, iov_iter_count(to), 455 sg->length - vec_off); 456 to_copy = min_t(unsigned long, to_copy, len - copied); 457 458 rds_stats_add(s_copy_to_user, to_copy); 459 ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off, 460 to_copy, to); 461 if (ret != to_copy) 462 return -EFAULT; 463 464 vec_off += to_copy; 465 copied += to_copy; 466 467 if (vec_off == sg->length) { 468 vec_off = 0; 469 sg++; 470 } 471 } 472 473 return copied; 474 } 475 476 /* 477 * If the message is still on the send queue, wait until the transport 478 * is done with it. This is particularly important for RDMA operations. 479 */ 480 void rds_message_wait(struct rds_message *rm) 481 { 482 wait_event_interruptible(rm->m_flush_wait, 483 !test_bit(RDS_MSG_MAPPED, &rm->m_flags)); 484 } 485 486 void rds_message_unmapped(struct rds_message *rm) 487 { 488 clear_bit(RDS_MSG_MAPPED, &rm->m_flags); 489 wake_up_interruptible(&rm->m_flush_wait); 490 } 491 EXPORT_SYMBOL_GPL(rds_message_unmapped); 492 493