// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
#include <trace/events/sunrpc.h>

static void _copy_to_pages(struct page **, size_t, const char *, size_t);

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + quadlen;
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	len = be32_to_cpu(*p++);
	if (len > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
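
/*
 * Example (illustrative sketch, not part of this file): encoding a
 * 5-byte opaque. XDR_QUADLEN(5) is 2, so the data occupies two XDR
 * words and the final three bytes are zero padding:
 *
 *	p = xdr_encode_opaque_fixed(p, "hello", 5);
 *
 * copies the five bytes, writes three bytes of zero padding, and
 * returns p advanced by two XDR words.
 */
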
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 */
void xdr_terminate_string(const struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

size_t xdr_buf_pagecount(const struct xdr_buf *buf)
{
	if (!buf->page_len)
		return 0;
	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			bvec_set_page(&buf->bvec[i], buf->pages[i], PAGE_SIZE,
				      0);
		}
	}
	return 0;
}

void
xdr_free_bvec(struct xdr_buf *buf)
{
	kfree(buf->bvec);
	buf->bvec = NULL;
}
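
/*
 * Example (illustrative sketch, not part of this file): a transport
 * that wants a bio_vec view of the page data might do:
 *
 *	err = xdr_alloc_bvec(buf, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	... hand buf->bvec to the I/O path ...
 *	xdr_free_bvec(buf);
 *
 * xdr_alloc_bvec() is a no-op when buf->bvec is already populated or
 * when the buffer carries no page data.
 */
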
/**
 * xdr_buf_to_bvec - Copy components of an xdr_buf into a bio_vec array
 * @bvec: bio_vec array to populate
 * @bvec_size: element count of @bvec
 * @xdr: xdr_buf to be copied
 *
 * Returns the number of entries consumed in @bvec.
 */
unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
			     const struct xdr_buf *xdr)
{
	const struct kvec *head = xdr->head;
	const struct kvec *tail = xdr->tail;
	unsigned int count = 0;

	if (head->iov_len) {
		bvec_set_virt(bvec++, head->iov_base, head->iov_len);
		++count;
	}

	if (xdr->page_len) {
		unsigned int offset, len, remaining;
		struct page **pages = xdr->pages;

		offset = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining > 0) {
			len = min_t(unsigned int, remaining,
				    PAGE_SIZE - offset);
			bvec_set_page(bvec++, *pages++, len, offset);
			remaining -= len;
			offset = 0;
			if (unlikely(++count > bvec_size))
				goto bvec_overflow;
		}
	}

	if (tail->iov_len) {
		bvec_set_virt(bvec, tail->iov_base, tail->iov_len);
		if (unlikely(++count > bvec_size))
			goto bvec_overflow;
	}

	return count;

bvec_overflow:
	pr_warn_once("%s: bio_vec array overflow\n", __func__);
	return count - 1;
}
EXPORT_SYMBOL_GPL(xdr_buf_to_bvec);

/**
 * xdr_inline_pages - Prepare receive buffer for a large reply
 * @xdr: xdr_buf into which reply will be placed
 * @offset: expected offset where data payload will start, in bytes
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;
	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
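
/*
 * Example (illustrative sketch, not part of this file): an NFS READ
 * decoder typically splits the receive buffer around the payload so
 * that the fixed reply header lands in head[0], the data lands in
 * the supplied pages, and trailing words land in tail[0]:
 *
 *	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
 *			 args->pages, args->pgbase, args->count);
 *
 * Here "replen" (in XDR words), "args->pages", "args->pgbase", and
 * "args->count" are hypothetical caller-provided values.
 */
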
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_left_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgto_base must be < pgfrom_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_left_pages(struct page **pages, size_t pgto_base,
		       size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgfrom_base <= pgto_base);

	if (!len)
		return;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		if (pgto_base >= PAGE_SIZE) {
			pgto_base = 0;
			pgto++;
		}
		if (pgfrom_base >= PAGE_SIZE) {
			pgfrom_base = 0;
			pgfrom++;
		}

		copy = len;
		if (copy > (PAGE_SIZE - pgto_base))
			copy = PAGE_SIZE - pgto_base;
		if (copy > (PAGE_SIZE - pgfrom_base))
			copy = PAGE_SIZE - pgfrom_base;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

		pgto_base += copy;
		pgfrom_base += copy;

	} while ((len -= copy) != 0);
}

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	if (!len)
		return;

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
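
/*
 * Worked example of the page vector addressing used above (assuming
 * PAGE_SIZE is 4096): a byte at offset 100 into pages[2] has the
 * linear address (2 << PAGE_SHIFT) + 100 = 8292. Shifting it right
 * by 5000 bytes gives address 13292, which is offset 1004 into
 * pages[3] (13292 >> PAGE_SHIFT == 3, 13292 & ~PAGE_MASK == 1004).
 */
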
/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	if (!len)
		return;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	if (!len)
		return;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
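
/*
 * Example (illustrative sketch, not part of this file): copying an
 * 8-byte verifier that lives somewhere in the page data out into a
 * flat buffer:
 *
 *	u8 verf[8];
 *
 *	_copy_from_pages(verf, buf->pages, buf->page_base + offset, 8);
 *
 * where "offset" is a hypothetical byte offset into the page data.
 */
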
static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base,
			     unsigned int len)
{
	if (base >= iov->iov_len)
		return;
	if (len > iov->iov_len - base)
		len = iov->iov_len - base;
	memset(iov->iov_base + base, 0, len);
}

/**
 * xdr_buf_pages_zero
 * @buf: xdr_buf
 * @pgbase: beginning offset
 * @len: length
 */
static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase,
			       unsigned int len)
{
	struct page **pages = buf->pages;
	struct page **page;
	char *vpage;
	unsigned int zero;

	if (!len)
		return;
	if (pgbase >= buf->page_len) {
		xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len);
		return;
	}
	if (pgbase + len > buf->page_len) {
		xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len);
		len = buf->page_len - pgbase;
	}

	pgbase += buf->page_base;

	page = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		zero = PAGE_SIZE - pgbase;
		if (zero > len)
			zero = len;

		vpage = kmap_atomic(*page);
		memset(vpage + pgbase, 0, zero);
		kunmap_atomic(vpage);

		flush_dcache_page(*page);
		pgbase = 0;
		page++;

	} while ((len -= zero) != 0);
}

static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf,
					      unsigned int buflen, gfp_t gfp)
{
	unsigned int i, npages, pagelen;

	if (!(buf->flags & XDRBUF_SPARSE_PAGES))
		return buflen;
	if (buflen <= buf->head->iov_len)
		return buflen;
	pagelen = buflen - buf->head->iov_len;
	if (pagelen > buf->page_len)
		pagelen = buf->page_len;
	npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		/* Skip entries the caller has already populated */
		if (buf->pages[i])
			continue;
		buf->pages[i] = alloc_page(gfp);
		if (likely(buf->pages[i]))
			continue;
		/* Allocation failed: trim the expansion to what we got */
		buflen -= pagelen;
		pagelen = i << PAGE_SHIFT;
		if (pagelen > buf->page_base)
			buflen += pagelen - buf->page_base;
		break;
	}
	return buflen;
}

static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len)
{
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;
	unsigned int free_space, newlen;

	if (sum > buf->len) {
		free_space = min_t(unsigned int, sum - buf->len, len);
		newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space,
						   GFP_KERNEL);
		free_space = newlen - buf->len;
		buf->len = newlen;
		len -= free_space;
		if (!len)
			return;
	}

	if (buf->buflen > sum) {
		/* Expand the tail buffer */
		free_space = min_t(unsigned int, buf->buflen - sum, len);
		tail->iov_len += free_space;
		buf->len += free_space;
	}
}
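
/*
 * Example (illustrative sketch, not part of this file): a receive
 * buffer can be set up with NULL entries in its page vector and the
 * XDRBUF_SPARSE_PAGES flag set:
 *
 *	buf->flags |= XDRBUF_SPARSE_PAGES;
 *
 * xdr_buf_try_expand() then has xdr_buf_pages_fill_sparse() allocate
 * backing pages on demand, and shortens the expansion if an
 * allocation fails rather than returning an error.
 */
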
static void xdr_buf_tail_copy_right(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	const struct kvec *tail = buf->tail;
	unsigned int to = base + shift;

	if (to >= tail->iov_len)
		return;
	if (len + to > tail->iov_len)
		len = tail->iov_len - to;
	memmove(tail->iov_base + to, tail->iov_base + base, len);
}

static void xdr_buf_pages_copy_right(const struct xdr_buf *buf,
				     unsigned int base, unsigned int len,
				     unsigned int shift)
{
	const struct kvec *tail = buf->tail;
	unsigned int to = base + shift;
	unsigned int pglen = 0;
	unsigned int talen = 0, tato = 0;

	if (base >= buf->page_len)
		return;
	if (len > buf->page_len - base)
		len = buf->page_len - base;
	if (to >= buf->page_len) {
		tato = to - buf->page_len;
		if (tail->iov_len >= len + tato)
			talen = len;
		else if (tail->iov_len > tato)
			talen = tail->iov_len - tato;
	} else if (len + to >= buf->page_len) {
		pglen = buf->page_len - to;
		talen = len - pglen;
		if (talen > tail->iov_len)
			talen = tail->iov_len;
	} else
		pglen = len;

	_copy_from_pages(tail->iov_base + tato, buf->pages,
			 buf->page_base + base + pglen, talen);
	_shift_data_right_pages(buf->pages, buf->page_base + to,
				buf->page_base + base, pglen);
}

static void xdr_buf_head_copy_right(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	const struct kvec *head = buf->head;
	const struct kvec *tail = buf->tail;
	unsigned int to = base + shift;
	unsigned int pglen = 0, pgto = 0;
	unsigned int talen = 0, tato = 0;

	if (base >= head->iov_len)
		return;
	if (len > head->iov_len - base)
		len = head->iov_len - base;
	if (to >= buf->page_len + head->iov_len) {
		tato = to - buf->page_len - head->iov_len;
		talen = len;
	} else if (to >= head->iov_len) {
		pgto = to - head->iov_len;
		pglen = len;
		if (pgto + pglen > buf->page_len) {
			talen = pgto + pglen - buf->page_len;
			pglen -= talen;
		}
	} else {
		pglen = len - to;
		if (pglen > buf->page_len) {
			talen = pglen - buf->page_len;
			pglen = buf->page_len;
		}
	}

	len -= talen;
	base += len;
	if (talen + tato > tail->iov_len)
		talen = tail->iov_len > tato ? tail->iov_len - tato : 0;
	memcpy(tail->iov_base + tato, head->iov_base + base, talen);

	len -= pglen;
	base -= pglen;
	_copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base,
		       pglen);

	base -= len;
	memmove(head->iov_base + to, head->iov_base + base, len);
}

static void xdr_buf_tail_shift_right(const struct xdr_buf *buf,
				     unsigned int base, unsigned int len,
				     unsigned int shift)
{
	const struct kvec *tail = buf->tail;

	if (base >= tail->iov_len || !shift || !len)
		return;
	xdr_buf_tail_copy_right(buf, base, len, shift);
}

static void xdr_buf_pages_shift_right(const struct xdr_buf *buf,
				      unsigned int base, unsigned int len,
				      unsigned int shift)
{
	if (!shift || !len)
		return;
	if (base >= buf->page_len) {
		xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift);
		return;
	}
	if (base + len > buf->page_len)
		xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len,
					 shift);
	xdr_buf_pages_copy_right(buf, base, len, shift);
}

static void xdr_buf_head_shift_right(const struct xdr_buf *buf,
				     unsigned int base, unsigned int len,
				     unsigned int shift)
{
	const struct kvec *head = buf->head;

	if (!shift)
		return;
	if (base >= head->iov_len) {
		xdr_buf_pages_shift_right(buf, base - head->iov_len, len,
					  shift);
		return;
	}
	if (base + len > head->iov_len)
		xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len,
					  shift);
	xdr_buf_head_copy_right(buf, base, len, shift);
}

static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base,
				   unsigned int len, unsigned int shift)
{
	const struct kvec *tail = buf->tail;

	if (base >= tail->iov_len)
		return;
	if (len > tail->iov_len - base)
		len = tail->iov_len - base;
	/* Shift data into head */
	if (shift > buf->page_len + base) {
		const struct kvec *head = buf->head;
		unsigned int hdto =
			head->iov_len + buf->page_len + base - shift;
		unsigned int hdlen = len;

		if (WARN_ONCE(shift > head->iov_len + buf->page_len + base,
			      "SUNRPC: Misaligned data.\n"))
			return;
		if (hdto + hdlen > head->iov_len)
			hdlen = head->iov_len - hdto;
		memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen);
		base += hdlen;
		len -= hdlen;
		if (!len)
			return;
	}
	/* Shift data into pages */
	if (shift > base) {
		unsigned int pgto = buf->page_len + base - shift;
		unsigned int pglen = len;

		if (pgto + pglen > buf->page_len)
			pglen = buf->page_len - pgto;
		_copy_to_pages(buf->pages, buf->page_base + pgto,
			       tail->iov_base + base, pglen);
		base += pglen;
		len -= pglen;
		if (!len)
			return;
	}
	memmove(tail->iov_base + base - shift, tail->iov_base + base, len);
}

static void xdr_buf_pages_copy_left(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	unsigned int pgto;

	if (base >= buf->page_len)
		return;
	if (len > buf->page_len - base)
		len = buf->page_len - base;
	/* Shift data into head */
	if (shift > base) {
		const struct kvec *head = buf->head;
		unsigned int hdto = head->iov_len + base - shift;
		unsigned int hdlen = len;

		if (WARN_ONCE(shift > head->iov_len + base,
			      "SUNRPC: Misaligned data.\n"))
			return;
		if (hdto + hdlen > head->iov_len)
			hdlen = head->iov_len - hdto;
		_copy_from_pages(head->iov_base + hdto, buf->pages,
				 buf->page_base + base, hdlen);
		base += hdlen;
		len -= hdlen;
		if (!len)
			return;
	}
	pgto = base - shift;
	_shift_data_left_pages(buf->pages, buf->page_base + pgto,
			       buf->page_base + base, len);
}

static void xdr_buf_tail_shift_left(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	if (!shift || !len)
		return;
	xdr_buf_tail_copy_left(buf, base, len, shift);
}

static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
				     unsigned int base, unsigned int len,
				     unsigned int shift)
{
	if (!shift || !len)
		return;
	if (base >= buf->page_len) {
		xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift);
		return;
	}
	xdr_buf_pages_copy_left(buf, base, len, shift);
	len += base;
	if (len <= buf->page_len)
		return;
	xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
}

static void xdr_buf_head_shift_left(const struct xdr_buf *buf,
				    unsigned int base, unsigned int len,
				    unsigned int shift)
{
	const struct kvec *head = buf->head;
	unsigned int bytes;

	if (!shift || !len)
		return;

	if (shift > base) {
		bytes = (shift - base);
		if (bytes >= len)
			return;
		base += bytes;
		len -= bytes;
	}

	if (base < head->iov_len) {
		bytes = min_t(unsigned int, len, head->iov_len - base);
		memmove(head->iov_base + (base - shift),
			head->iov_base + base, bytes);
		base += bytes;
		len -= bytes;
	}
	xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift);
}

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: new length of buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0], setting it to
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len)
{
	struct kvec *head = buf->head;
	unsigned int shift, buflen = max(buf->len, len);

	WARN_ON_ONCE(len > head->iov_len);
	if (head->iov_len > buflen) {
		buf->buflen -= head->iov_len - buflen;
		head->iov_len = buflen;
	}
	if (len >= head->iov_len)
		return 0;
	shift = head->iov_len - len;
	xdr_buf_try_expand(buf, shift);
	xdr_buf_head_shift_right(buf, len, buflen - len, shift);
	head->iov_len = len;
	buf->buflen -= shift;
	buf->len -= shift;
	return shift;
}
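
/*
 * Worked example: if head->iov_len is 160 bytes but the decoder has
 * consumed only 128 bytes of RPC header, xdr_shrink_bufhead(buf, 128)
 * shifts the remaining 32 bytes right into the page data (and the
 * tail, if necessary), so that the payload starts exactly at the page
 * boundary the upper layer expects.
 */
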
/**
 * xdr_shrink_pagelen - shrinks buf->pages to @len bytes
 * @buf: xdr_buf
 * @len: new page buffer length
 *
 * The extra data is not lost, but is instead moved into buf->tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len)
{
	unsigned int shift, buflen = buf->len - buf->head->iov_len;

	WARN_ON_ONCE(len > buf->page_len);
	if (buf->head->iov_len >= buf->len || len > buflen)
		buflen = len;
	if (buf->page_len > buflen) {
		buf->buflen -= buf->page_len - buflen;
		buf->page_len = buflen;
	}
	if (len >= buf->page_len)
		return 0;
	shift = buf->page_len - len;
	xdr_buf_try_expand(buf, shift);
	xdr_buf_pages_shift_right(buf, len, buflen - len, shift);
	buf->page_len = len;
	buf->len -= shift;
	buf->buflen -= shift;
	return shift;
}

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos)
{
	unsigned int blen = xdr->buf->len;

	xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0;
}

static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos)
{
	xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len);
}

/**
 * xdr_page_pos - Return the current offset from the start of the xdr pages
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_page_pos(const struct xdr_stream *xdr)
{
	unsigned int pos = xdr_stream_pos(xdr);

	WARN_ON(pos < xdr->buf->head[0].iov_len);
	return pos - xdr->buf->head[0].iov_len;
}
EXPORT_SYMBOL_GPL(xdr_page_pos);
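
/*
 * Worked example: with buf->len == 200 bytes, XDR_QUADLEN(200) is 50
 * words. If 30 words remain to be decoded (xdr->nwords == 30), then
 * xdr_stream_pos() returns (50 - 30) << 2 == 80 bytes from the start
 * of the stream.
 */
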
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_reset_scratch_buffer(xdr);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer into which to encode data
 */
void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf)
{
	xdr_reset_scratch_buffer(xdr);

	xdr->buf = buf;
	xdr->page_ptr = buf->pages;
	xdr->iov = NULL;
	xdr->p = page_address(*xdr->page_ptr);
	xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
	xdr->rqst = NULL;
}
EXPORT_SYMBOL_GPL(xdr_init_encode_pages);

/**
 * __xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it. But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
void __xdr_commit_encode(struct xdr_stream *xdr)
{
	size_t shift = xdr->scratch.iov_len;
	void *page;

	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr_reset_scratch_buffer(xdr);
}
EXPORT_SYMBOL_GPL(__xdr_commit_encode);
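
/*
 * Example (illustrative sketch, not part of this file): a typical
 * encode sequence pairs xdr_init_encode() with xdr_reserve_space():
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf,
 *			req->rq_snd_buf.head[0].iov_base, req);
 *	p = xdr_reserve_space(&xdr, 4 + 4);
 *	if (!p)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(MY_OP);
 *	*p = cpu_to_be32(arg);
 *
 * "req", "MY_OP", and "arg" are hypothetical caller-provided values.
 */
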
/*
 * The buffer space to be reserved crosses the boundary between
 * xdr->buf->head and xdr->buf->pages, or between two pages
 * in xdr->buf->pages.
 */
static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
						   size_t nbytes)
{
	int space_left;
	int frag1bytes, frag2bytes;
	void *p;

	if (nbytes > PAGE_SIZE)
		goto out_overflow; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		goto out_overflow; /* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;

	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries. Encode into the next
	 * page, then copy it back later in xdr_commit_encode. We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);

	/*
	 * xdr->p is where the next encode will start after
	 * xdr_commit_encode() has shifted this one back:
	 */
	p = page_address(*xdr->page_ptr);
	xdr->p = p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	if (space_left - frag1bytes >= PAGE_SIZE)
		xdr->end = p + PAGE_SIZE;
	else
		xdr->end = p + space_left - frag1bytes;

	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 *
 * The returned pointer is valid only until the next call to
 * xdr_reserve_space() or xdr_commit_encode() on @xdr. The current
 * implementation of this API guarantees that space reserved for a
 * four-byte data item remains valid until @xdr is destroyed, but
 * that might not always be true in the future.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
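
/*
 * Example (illustrative sketch, not part of this file): because the
 * pointer returned by xdr_reserve_space() is invalidated by the next
 * reservation, each item should be written before reserving more
 * space:
 *
 *	p = xdr_reserve_space(xdr, 8);
 *	if (!p)
 *		return -EMSGSIZE;
 *	p = xdr_encode_hyper(p, offset);	// write before the
 *	p = xdr_reserve_space(xdr, 4);		// next reservation
 *
 * "offset" is a hypothetical 64-bit value supplied by the caller.
 */
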
/**
 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * The size argument passed to xdr_reserve_space() is determined based
 * on the number of bytes remaining in the current page to avoid
 * invalidating iov_base pointers when xdr_commit_encode() is called.
 *
 * Return values:
 *   %0: success
 *   %-EMSGSIZE: not enough space is available in @xdr
 */
int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes)
{
	size_t thislen;
	__be32 *p;

	/*
	 * svcrdma requires every READ payload to start somewhere
	 * in xdr->pages.
	 */
	if (xdr->iov == xdr->buf->head) {
		xdr->iov = NULL;
		xdr->end = xdr->p;
	}

	/* XXX: Let's find a way to make this more efficient */
	while (nbytes) {
		thislen = xdr->buf->page_len % PAGE_SIZE;
		thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);

		p = xdr_reserve_space(xdr, thislen);
		if (!p)
			return -EMSGSIZE;

		nbytes -= thislen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);

/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen)
		xdr->end = head->iov_base + head->iov_len;
	/* (otherwise assume xdr->end is already set) */
	xdr->page_ptr--;
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
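
/*
 * Example (illustrative sketch, not part of this file): a server-side
 * encoder can note the current length, attempt to encode an entry,
 * and roll back if it does not fit:
 *
 *	unsigned int start = xdr->buf->len;
 *
 *	if (encode_one_entry(xdr, entry) < 0)	// hypothetical helper
 *		xdr_truncate_encode(xdr, start);
 *
 * This is roughly the pattern used when filling directory-listing
 * replies entry by entry.
 */
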
/**
 * xdr_truncate_decode - Truncate a decoding stream
 * @xdr: pointer to struct xdr_stream
 * @len: Number of bytes to remove
 */
void xdr_truncate_decode(struct xdr_stream *xdr, size_t len)
{
	unsigned int nbytes = xdr_align_size(len);

	xdr->buf->len -= nbytes;
	xdr->nwords -= XDR_QUADLEN(nbytes);
}
EXPORT_SYMBOL_GPL(xdr_truncate_decode);

/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: array of pages to insert
 * @base: starting offset of first data byte in @pages
 * @len: number of data bytes in @pages to insert
 *
 * After the @pages are added, the tail iovec is instantiated pointing to
 * end of the head buffer, and the stream is set up to encode subsequent
 * items into the tail.
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		     unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *tail = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	tail->iov_base = xdr->p;
	tail->iov_len = 0;
	xdr->iov = tail;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		tail->iov_base = (char *)xdr->p + (len & 3);
		tail->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
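
/*
 * Example (illustrative sketch, not part of this file): an NFS WRITE
 * encoder hands the caller's page vector to the stream after the
 * fixed arguments, so the payload is sent without copying:
 *
 *	xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
 *
 * "args" is a hypothetical structure carrying the write payload.
 */
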
static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
				unsigned int base, unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	if (unlikely(base > len))
		base = len;
	xdr->p = (__be32 *)(iov->iov_base + base);
	xdr->end = (__be32 *)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
	return len - base;
}

static unsigned int xdr_set_tail_base(struct xdr_stream *xdr,
				      unsigned int base, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;

	xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len);
	return xdr_set_iov(xdr, buf->tail, base, len);
}

static void xdr_stream_unmap_current_page(struct xdr_stream *xdr)
{
	if (xdr->page_kaddr) {
		kunmap_local(xdr->page_kaddr);
		xdr->page_kaddr = NULL;
	}
}

static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
				      unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return 0;
	else
		maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	xdr_stream_unmap_current_page(xdr);
	xdr_stream_page_set_pos(xdr, base);
	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];

	if (PageHighMem(*xdr->page_ptr)) {
		xdr->page_kaddr = kmap_local_page(*xdr->page_ptr);
		kaddr = xdr->page_kaddr;
	} else
		kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32 *)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32 *)(kaddr + pgend);
	xdr->iov = NULL;
	return len;
}

static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
			 unsigned int len)
{
	if (xdr_set_page_base(xdr, base, len) == 0) {
		base -= xdr->buf->page_len;
		xdr_set_tail_base(xdr, base, len);
	}
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;
	if (newbase < xdr->buf->page_len)
		xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr));
	else
		xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr));
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head)
		xdr_set_page(xdr, 0, xdr_stream_remaining(xdr));
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	xdr->buf = buf;
	xdr->page_kaddr = NULL;
	xdr_reset_scratch_buffer(xdr);
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
	    xdr_set_page_base(xdr, 0, buf->len) == 0)
		xdr_set_iov(xdr, buf->tail, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
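
/*
 * Example (illustrative sketch, not part of this file): decoding a
 * reply that was received entirely into pages:
 *
 *	struct xdr_stream xdr;
 *	struct xdr_buf buf;
 *	__be32 *p;
 *	u32 count;
 *
 *	xdr_init_decode_pages(&xdr, &buf, pages, len);
 *	p = xdr_inline_decode(&xdr, 4);
 *	if (!p)
 *		return -EIO;
 *	count = be32_to_cpup(p);
 *	...
 *	xdr_finish_decode(&xdr);
 *
 * xdr_finish_decode() drops any highmem kmap taken while walking the
 * page data.
 */
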
/**
 * xdr_finish_decode - Clean up the xdr_stream after decoding data.
 * @xdr: pointer to xdr_stream struct
 */
void xdr_finish_decode(struct xdr_stream *xdr)
{
	xdr_stream_unmap_current_page(xdr);
}
EXPORT_SYMBOL(xdr_finish_decode);

static __be32 *__xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		goto out_overflow;
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	if (!xdr_set_next_buffer(xdr))
		goto out_overflow;
	cpdest += cplen;
	nbytes -= cplen;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (unlikely(nbytes == 0))
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		goto out_overflow;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
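
/*
 * Example (illustrative sketch, not part of this file): when a data
 * item may straddle a buffer or page boundary, the caller provides a
 * scratch buffer so xdr_inline_decode() can linearize it:
 *
 *	char scratch[16];
 *	__be32 *p;
 *
 *	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
 *	p = xdr_inline_decode(xdr, 12);
 *	if (!p)
 *		return -EIO;
 *	// p points either into the buffer or at scratch[]
 */
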
static void xdr_realign_pages(struct xdr_stream *xdr)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->head;
	unsigned int cur = xdr_stream_pos(xdr);
	unsigned int copied;

	/* Realign pages to current pointer position */
	if (iov->iov_len > cur) {
		copied = xdr_shrink_bufhead(buf, cur);
		trace_rpc_xdr_alignment(xdr, cur, copied);
		xdr_set_page(xdr, 0, buf->page_len);
	}
}

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int copied;

	if (xdr->nwords == 0)
		return 0;

	xdr_realign_pages(xdr);
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		copied = xdr_shrink_pagelen(buf, len);
		trace_rpc_xdr_alignment(xdr, len, copied);
	}
	return len;
}

/**
 * xdr_read_pages - align page-based XDR data to current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + @len
 * bytes is moved into the XDR tail[]. The xdr_stream current position is
 * then advanced past that data to align to the next XDR object in the tail.
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int base, end, pglen;

	pglen = xdr_align_pages(xdr, nwords << 2);
	if (pglen == 0)
		return 0;

	base = (nwords << 2) - pglen;
	end = xdr_stream_remaining(xdr) - pglen;

	xdr_set_tail_base(xdr, base, end);
	return len <= pglen ? len : pglen;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
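
/*
 * Example (illustrative sketch, not part of this file): an NFS READ
 * reply decoder aligns the payload into the page vector and then
 * continues decoding the items that follow it:
 *
 *	count = xdr_read_pages(xdr, count);
 *	// count now reflects how much payload the pages can hold;
 *	// decoding resumes at the first XDR word after the payload
 */
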
/**
 * xdr_set_pagelen - Sets the length of the XDR pages
 * @xdr: pointer to xdr_stream struct
 * @len: new length of the XDR page data
 *
 * Either grows or shrinks the length of the xdr pages by setting pagelen to
 * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas
 * when growing any data beyond the current pointer is moved into the tail.
 */
void xdr_set_pagelen(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	size_t remaining = xdr_stream_remaining(xdr);
	size_t base = 0;

	if (len < buf->page_len) {
		base = buf->page_len - len;
		xdr_shrink_pagelen(buf, len);
	} else {
		xdr_buf_head_shift_right(buf, xdr_stream_pos(xdr),
					 buf->page_len, remaining);
		if (len > buf->page_len)
			xdr_buf_try_expand(buf, len - buf->page_len);
	}
	xdr_set_tail_base(xdr, base, remaining);
}
EXPORT_SYMBOL_GPL(xdr_set_pagelen);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
		       unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);

/**
 * xdr_stream_subsegment - set @subbuf to a portion of @xdr
 * @xdr: an xdr_stream set up for decoding
 * @subbuf: the result buffer
 * @nbytes: length of @xdr to extract, in bytes
 *
 * Sets up @subbuf to represent a portion of @xdr. The portion
 * starts at the current offset in @xdr, and extends for a length
 * of @nbytes. If this is successful, @xdr is advanced to the next
 * XDR data item following that portion.
 *
 * Return values:
 *   %true: @subbuf has been initialized, and @xdr has been advanced.
 *   %false: a bounds error has occurred
 */
bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
			   unsigned int nbytes)
{
	unsigned int start = xdr_stream_pos(xdr);
	unsigned int remaining, len;

	/* Extract @subbuf and bounds-check the fn arguments */
	if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
		return false;

	/* Advance @xdr by @nbytes */
	for (remaining = nbytes; remaining;) {
		if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
			return false;

		len = (char *)xdr->end - (char *)xdr->p;
		if (remaining <= len) {
			xdr->p = (__be32 *)((char *)xdr->p +
					(remaining + xdr_pad_size(nbytes)));
			break;
		}

		xdr->p = (__be32 *)((char *)xdr->p + len);
		xdr->end = xdr->p;
		remaining -= len;
	}

	xdr_stream_set_pos(xdr, start + nbytes);
	return true;
}
EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
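
/*
 * Example (illustrative sketch, not part of this file): a server
 * decoding a WRITE request can carve the payload out of the request
 * buffer without copying it:
 *
 *	struct xdr_buf payload;
 *
 *	if (!xdr_stream_subsegment(xdr, &payload, count))
 *		return false;
 *	// "payload" now aliases head/pages/tail data of the request,
 *	// and xdr has been advanced past the payload and its padding
 */
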
/**
 * xdr_stream_move_subsegment - Move part of a stream to another position
 * @xdr: the source xdr_stream
 * @offset: the source offset of the segment
 * @target: the target offset of the segment
 * @length: the number of bytes to move
 *
 * Moves @length bytes from @offset to @target in the xdr_stream, overwriting
 * anything in its space. Returns the number of bytes in the segment.
 */
unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
					unsigned int target, unsigned int length)
{
	struct xdr_buf buf;
	unsigned int shift;

	if (offset < target) {
		shift = target - offset;
		if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0)
			return 0;
		xdr_buf_head_shift_right(&buf, 0, length, shift);
	} else if (offset > target) {
		shift = offset - target;
		if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0)
			return 0;
		xdr_buf_head_shift_left(&buf, shift, length, shift);
	}
	return length;
}
EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment);

/**
 * xdr_stream_zero - zero out a portion of an xdr_stream
 * @xdr: an xdr_stream to zero out
 * @offset: the starting point in the stream
 * @length: the number of bytes to zero
 */
unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
			     unsigned int length)
{
	struct xdr_buf buf;

	if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0)
		return 0;
	if (buf.head[0].iov_len)
		xdr_buf_iov_zero(buf.head, 0, buf.head[0].iov_len);
	if (buf.page_len > 0)
		xdr_buf_pages_zero(&buf, 0, buf.page_len);
	if (buf.tail[0].iov_len)
		xdr_buf_iov_zero(buf.tail, 0, buf.tail[0].iov_len);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_stream_zero);
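
/*
 * Example (illustrative sketch, not part of this file): a READ_PLUS
 * style decoder can materialize a hole in the file data by zeroing
 * the corresponding span of the reply in place:
 *
 *	if (xdr_stream_zero(xdr, hole_offset, hole_len) != hole_len)
 *		return -EIO;
 *
 * "hole_offset" and "hole_len" are hypothetical values taken from a
 * decoded hole segment.
 */
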
/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf,
				      void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base,
			    void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf,
				     void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base,
			   void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
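
/*
 * Example (illustrative sketch, not part of this file): peeking at a
 * 32-bit count that may live in the head, the pages, or the tail:
 *
 *	u32 count;
 *
 *	if (xdr_decode_word(buf, offset, &count))
 *		return -EINVAL;
 *
 * read_bytes_from_xdr_buf() handles the case where the word spans
 * two of the buffer's segments.
 */
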
/* Returns 0 on success, or else a negative error code. */
static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base,
			    struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
				   avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
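/*
 * Illustrative sketch (not part of the original source): decoding an
 * array of 4-byte counters via the xdr_array2_desc machinery. The
 * containing struct, callback, and the array_maxlen cap are all
 * hypothetical names invented for this example.
 */
struct example_counters {
	struct xdr_array2_desc desc;
	u32 sum;
};

static __maybe_unused int example_decode_counter(struct xdr_array2_desc *desc,
						 void *elem)
{
	struct example_counters *ctrs =
		container_of(desc, struct example_counters, desc);

	/* @elem points at one elem_size-byte element, possibly reassembled
	 * from several xdr_buf segments by xdr_xcode_array2(). */
	ctrs->sum += be32_to_cpup((__be32 *)elem);
	return 0;
}

static __maybe_unused int example_sum_counters(const struct xdr_buf *buf,
					       unsigned int base, u32 *sum)
{
	struct example_counters ctrs = {
		.desc = {
			.elem_size = 4,
			.array_maxlen = 1024,	/* assumed sanity cap */
			.xcode = example_decode_counter,
		},
	};
	int err;

	err = xdr_decode_array2(buf, base, &ctrs.desc);
	if (err)
		return err;
	*sum = ctrs.sum;
	return 0;
}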
int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
		      struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
		      struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset,
		    unsigned int len,
		    int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
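/*
 * Illustrative sketch (not part of the original source): using
 * xdr_process_buf() to total the bytes it visits. A real caller would
 * typically feed each scatterlist entry to a hashing or crypto routine
 * instead; the actor and wrapper names here are hypothetical.
 */
static __maybe_unused int example_count_actor(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;	/* one head, page, or tail fragment */
	return 0;		/* non-zero would abort the walk */
}

static __maybe_unused int example_count_bytes(const struct xdr_buf *buf,
					      unsigned int offset,
					      unsigned int len,
					      unsigned int *total)
{
	*total = 0;
	return xdr_process_buf(buf, offset, len, example_count_actor, total);
}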
/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);

/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the object would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
				     size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;
		ret = -ENOMEM;
	}
	*ptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);

/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);

/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the string would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
				     size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmemdup_nul(p, ret, gfp_flags);

		if (s != NULL) {
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
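/*
 * Illustrative sketch (not part of the original source): decoding an
 * NFS-style component name into a fixed on-stack buffer. The 64-byte
 * limit is an assumption chosen for the example.
 */
static __maybe_unused int example_decode_name(struct xdr_stream *xdr)
{
	char name[64 + 1];	/* reserve one byte for the NUL terminator */
	ssize_t len;

	/* Pass sizeof(name) - 1 so the '\0' appended by
	 * xdr_stream_decode_string() always fits in @name. */
	len = xdr_stream_decode_string(xdr, name, sizeof(name) - 1);
	if (len < 0)
		return len;	/* -EBADMSG or -EMSGSIZE */
	pr_debug("decoded %zd-byte name: %s\n", len, name);
	return 0;
}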
/**
 * xdr_stream_decode_opaque_auth - Decode struct opaque_auth (RFC5531 S8.2)
 * @xdr: pointer to xdr_stream
 * @flavor: location to store decoded flavor
 * @body: location to store pointer to the decoded body
 * @body_len: location to store length of decoded body
 *
 * Return values:
 *   On success, returns the number of buffer bytes consumed
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the decoded size of the body field exceeds 400 octets
 */
ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
				      void **body, unsigned int *body_len)
{
	ssize_t ret, len;

	len = xdr_stream_decode_u32(xdr, flavor);
	if (unlikely(len < 0))
		return len;
	ret = xdr_stream_decode_opaque_inline(xdr, body, RPC_MAX_AUTH_SIZE);
	if (unlikely(ret < 0))
		return ret;
	*body_len = ret;
	return len + ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_auth);

/**
 * xdr_stream_encode_opaque_auth - Encode struct opaque_auth (RFC5531 S8.2)
 * @xdr: pointer to xdr_stream
 * @flavor: verifier flavor to encode
 * @body: content of body to encode
 * @body_len: length of body to encode
 *
 * Return values:
 *   On success, returns length in bytes of XDR buffer consumed
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of @body exceeds 400 octets
 */
ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor,
				      void *body, unsigned int body_len)
{
	ssize_t ret, len;

	if (unlikely(body_len > RPC_MAX_AUTH_SIZE))
		return -EMSGSIZE;
	len = xdr_stream_encode_u32(xdr, flavor);
	if (unlikely(len < 0))
		return len;
	ret = xdr_stream_encode_opaque(xdr, body, body_len);
	if (unlikely(ret < 0))
		return ret;
	return len + ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_encode_opaque_auth);
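/*
 * Illustrative sketch (not part of the original source): emitting an
 * AUTH_NONE verifier, i.e. flavor RPC_AUTH_NULL with a zero-length body.
 * A NULL @body is fine here because the body length is zero.
 */
static __maybe_unused int example_encode_null_verf(struct xdr_stream *xdr)
{
	ssize_t ret;

	ret = xdr_stream_encode_opaque_auth(xdr, RPC_AUTH_NULL, NULL, 0);
	return ret < 0 ? ret : 0;	/* ret is bytes of XDR consumed */
}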