// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
#include <trace/events/sunrpc.h>

static void _copy_to_pages(struct page **, size_t, const char *, size_t);

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	len = be32_to_cpu(*p++);
	if (len > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
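
/*
 * Illustrative sketch only: how a caller might encode a counted opaque
 * with xdr_encode_opaque(). The data and buffer sizing are hypothetical.
 *
 *	u8 data[12];
 *	__be32 buf[1 + XDR_QUADLEN(sizeof(data))];
 *	__be32 *end;
 *
 *	end = xdr_encode_opaque(buf, data, sizeof(data));
 *	// buf[0] now holds the on-the-wire length; the data follows,
 *	// zero-padded to the next 32-bit boundary.
 */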

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

size_t
xdr_buf_pagecount(struct xdr_buf *buf)
{
	if (!buf->page_len)
		return 0;
	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			buf->bvec[i].bv_page = buf->pages[i];
			buf->bvec[i].bv_len = PAGE_SIZE;
			buf->bvec[i].bv_offset = 0;
		}
	}
	return 0;
}

void
xdr_free_bvec(struct xdr_buf *buf)
{
	kfree(buf->bvec);
	buf->bvec = NULL;
}

/**
 * xdr_inline_pages - Prepare receive buffer for a large reply
 * @xdr: xdr_buf into which reply will be placed
 * @offset: expected offset where data payload will start, in bytes
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 *
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;
	if ((xdr->page_len & 3) == 0)
		tail->iov_len -= sizeof(__be32);

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
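
/*
 * Illustrative sketch only: how a READ-style request might point its
 * reply xdr_buf at page cache pages. All names here are hypothetical.
 *
 *	// rcvbuf->head[0] already has room for the RPC/NFS reply header;
 *	// replen is the encoded length of that header, in bytes.
 *	xdr_inline_pages(rcvbuf, replen, args->pages,
 *			 args->pgbase, args->count);
 *	// The payload will now land directly in args->pages, and any
 *	// trailing XDR data is steered into rcvbuf->tail[0].
 */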

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_left_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgto_base must be < pgfrom_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_left_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgfrom_base <= pgto_base);

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		if (pgto_base >= PAGE_SIZE) {
			pgto_base = 0;
			pgto++;
		}
		if (pgfrom_base >= PAGE_SIZE) {
			pgfrom_base = 0;
			pgfrom++;
		}

		copy = len;
		if (copy > (PAGE_SIZE - pgto_base))
			copy = PAGE_SIZE - pgto_base;
		if (copy > (PAGE_SIZE - pgfrom_base))
			copy = PAGE_SIZE - pgfrom_base;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

		pgto_base += copy;
		pgfrom_base += copy;

	} while ((len -= copy) != 0);
}

static void
_shift_data_left_tail(struct xdr_buf *buf, unsigned int pgto, size_t len)
{
	struct kvec *tail = buf->tail;

	if (len > tail->iov_len)
		len = tail->iov_len;

	_copy_to_pages(buf->pages,
		       buf->page_base + pgto,
		       (char *)tail->iov_base,
		       len);
	tail->iov_len -= len;

	if (tail->iov_len > 0)
		memmove((char *)tail->iov_base,
				tail->iov_base + len,
				tail->iov_len);
}

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

static unsigned int
_shift_data_right_tail(struct xdr_buf *buf, unsigned int pgfrom, size_t len)
{
	struct kvec *tail = buf->tail;
	unsigned int tailbuf_len;
	unsigned int result = 0;
	size_t copy;

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		if (len > free_space)
			len = free_space;

		tail->iov_len += free_space;
		copy = len;

		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - free_space);
			result += tail->iov_len - free_space;
		} else
			copy = tail->iov_len;

		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				 buf->pages,
				 buf->page_base + pgfrom,
				 copy);
		result += copy;
	}

	return result;
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
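
/*
 * Illustrative sketch only: copying the first bytes of an xdr_buf's
 * page array into a flat buffer. The buffer size is hypothetical.
 *
 *	char hdr[64];
 *	size_t n = min_t(size_t, sizeof(hdr), buf->page_len);
 *
 *	_copy_from_pages(hdr, buf->pages, buf->page_base, n);
 */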

/**
 * _zero_pages
 * @pages: array of pages
 * @pgbase: beginning page vector address
 * @len: length
 */
static void
_zero_pages(struct page **pages, size_t pgbase, size_t len)
{
	struct page **page;
	char *vpage;
	size_t zero;

	page = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		zero = PAGE_SIZE - pgbase;
		if (zero > len)
			zero = len;

		vpage = kmap_atomic(*page);
		memset(vpage + pgbase, 0, zero);
		kunmap_atomic(vpage);

		flush_dcache_page(*page);
		pgbase = 0;
		page++;

	} while ((len -= zero) != 0);
}

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static unsigned int
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;
	unsigned int result;

	result = 0;
	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
			result += copy;
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0) {
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
			result += copy;
		}
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
			result += copy;
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
		result += copy;
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}

/**
 * xdr_shrink_pagelen - shrinks buf->pages by up to @len bytes
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * The extra data is not lost, but is instead moved into buf->tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	unsigned int pglen = buf->page_len;
	unsigned int result;

	if (len > buf->page_len)
		len = buf->page_len;

	result = _shift_data_right_tail(buf, pglen - len, len);
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;

	return result;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_page_pos - Return the current offset from the start of the xdr pages
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_page_pos(const struct xdr_stream *xdr)
{
	unsigned int pos = xdr_stream_pos(xdr);

	WARN_ON(pos < xdr->buf->head[0].iov_len);
	return pos - xdr->buf->head[0].iov_len;
}
EXPORT_SYMBOL_GPL(xdr_page_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	xdr_reset_scratch_buffer(xdr);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
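
/*
 * Illustrative sketch only: typical encode-side setup. The xdr_buf is
 * assumed to have been prepared by the transport, and OP_HYPOTHETICAL
 * and `argument` are placeholder names.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL, req);
 *	p = xdr_reserve_space(&xdr, 2 * sizeof(__be32));
 *	if (!p)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(OP_HYPOTHETICAL);
 *	*p = cpu_to_be32(argument);
 */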

/**
 * xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it.  But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
inline void xdr_commit_encode(struct xdr_stream *xdr)
{
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	memcpy(xdr->scratch.iov_base, page, shift);
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr_reset_scratch_buffer(xdr);
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);

static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
		size_t nbytes)
{
	__be32 *p;
	int space_left;
	int frag1bytes, frag2bytes;

	if (nbytes > PAGE_SIZE)
		goto out_overflow; /* Bigger buffers require special handling */
	if (xdr->buf->len + nbytes > xdr->buf->buflen)
		goto out_overflow; /* Sorry, we're totally out of space */
	frag1bytes = (xdr->end - xdr->p) << 2;
	frag2bytes = nbytes - frag1bytes;
	if (xdr->iov)
		xdr->iov->iov_len += frag1bytes;
	else
		xdr->buf->page_len += frag1bytes;
	xdr->page_ptr++;
	xdr->iov = NULL;
	/*
	 * If the last encode didn't end exactly on a page boundary, the
	 * next one will straddle boundaries.  Encode into the next
	 * page, then copy it back later in xdr_commit_encode.  We use
	 * the "scratch" iov to track any temporarily unused fragment of
	 * space at the end of the previous buffer:
	 */
	xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);
	p = page_address(*xdr->page_ptr);
	/*
	 * Note this is where the next encode will start after we've
	 * shifted this one back:
	 */
	xdr->p = (void *)p + frag2bytes;
	space_left = xdr->buf->buflen - xdr->buf->len;
	xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
	xdr->buf->page_len += frag2bytes;
	xdr->buf->len += nbytes;
	return p;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	xdr_commit_encode(xdr);
	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return xdr_get_next_encode_buffer(xdr, nbytes);
	xdr->p = q;
	if (xdr->iov)
		xdr->iov->iov_len += nbytes;
	else
		xdr->buf->page_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
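
/*
 * Illustrative sketch only: reserving space for a counted opaque.
 * xdr_reserve_space() rounds the request up to a 32-bit boundary, so
 * 4 + len bytes covers the length word plus the padded data.
 *
 *	__be32 *p = xdr_reserve_space(&xdr, 4 + len);
 *
 *	if (!p)
 *		return -EMSGSIZE;
 *	xdr_encode_opaque(p, data, len);
 */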

/**
 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
 * @xdr: pointer to xdr_stream
 * @vec: pointer to a kvec array
 * @nbytes: number of bytes to reserve
 *
 * Reserves enough buffer space to encode 'nbytes' of data and stores the
 * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
 * determined based on the number of bytes remaining in the current page to
 * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
 */
int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
{
	int thislen;
	int v = 0;
	__be32 *p;

	/*
	 * svcrdma requires every READ payload to start somewhere
	 * in xdr->pages.
	 */
	if (xdr->iov == xdr->buf->head) {
		xdr->iov = NULL;
		xdr->end = xdr->p;
	}

	while (nbytes) {
		thislen = xdr->buf->page_len % PAGE_SIZE;
		thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);

		p = xdr_reserve_space(xdr, thislen);
		if (!p)
			return -EIO;

		vec[v].iov_base = p;
		vec[v].iov_len = thislen;
		v++;
		nbytes -= thislen;
	}

	return v;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);

/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 *
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen)
		xdr->end = head->iov_base + head->iov_len;
	/* (otherwise assume xdr->end is already set) */
	xdr->page_ptr--;
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
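
/*
 * Illustrative sketch only: remembering a position so that a partially
 * encoded item can be backed out on error. encode_entry() is a
 * hypothetical helper.
 *
 *	unsigned int start = xdr->buf->len;
 *
 *	if (encode_entry(xdr, entry) < 0) {
 *		// Drop the half-written entry from the stream.
 *		xdr_truncate_encode(xdr, start);
 *	}
 */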

/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing.  Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	int end_offset = buf->len + left_in_this_buf;

	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	if (newbuflen > buf->buflen)
		return 0;
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
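
/*
 * Illustrative sketch only: attaching a WRITE-style payload held in
 * page cache pages after the fixed arguments have been encoded. The
 * args fields are hypothetical.
 *
 *	// Encode the fixed-length arguments first ...
 *	xdr_write_pages(&xdr, args->pages, args->pgbase, args->count);
 *	// ... the XDR pad and tail bookkeeping is now handled for us.
 */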

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32 *)iov->iov_base;
	xdr->end = (__be32 *)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32 *)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32 *)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
			 unsigned int len)
{
	if (xdr_set_page_base(xdr, base, len) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	xdr_set_page(xdr, newbase, PAGE_SIZE);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head)
		xdr_set_page(xdr, 0, PAGE_SIZE);
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
		     struct rpc_rqst *rqst)
{
	xdr->buf = buf;
	xdr_reset_scratch_buffer(xdr);
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	else
		xdr_set_iov(xdr, buf->head, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
	xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
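
/*
 * Illustrative sketch only: decoding a message whose XDR data lives
 * entirely in pages. The pages/len variables are hypothetical.
 *
 *	struct xdr_stream xdr;
 *	struct xdr_buf buf;
 *	__be32 *p;
 *
 *	xdr_init_decode_pages(&xdr, &buf, pages, len);
 *	p = xdr_inline_decode(&xdr, sizeof(__be32));
 *	if (!p)
 *		return -EBADMSG;
 *	count = be32_to_cpup(p);
 */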

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		goto out_overflow;
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	if (!xdr_set_next_buffer(xdr))
		goto out_overflow;
	cpdest += cplen;
	nbytes -= cplen;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (unlikely(nbytes == 0))
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		goto out_overflow;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
	trace_rpc_xdr_overflow(xdr, nbytes);
	return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
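
/*
 * Illustrative sketch only: a decoder that may cross a head/page
 * boundary can provide a scratch buffer, which xdr_inline_decode()
 * falls back to via xdr_copy_to_scratch(). Sizes are hypothetical.
 *
 *	char scratch[16];
 *	__be32 *p;
 *
 *	xdr_set_scratch_buffer(&xdr, scratch, sizeof(scratch));
 *	p = xdr_inline_decode(&xdr, 16);
 *	if (!p)
 *		return -EBADMSG;
 *	// p points either into the xdr_buf or at scratch[].
 */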
1232 */ 1233 xdr->p = (__be32 *)((char *)iov->iov_base + padding); 1234 xdr->end = (__be32 *)((char *)iov->iov_base + end); 1235 xdr->page_ptr = NULL; 1236 xdr->nwords = XDR_QUADLEN(end - padding); 1237 return len; 1238 } 1239 EXPORT_SYMBOL_GPL(xdr_read_pages); 1240 1241 uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length) 1242 { 1243 struct xdr_buf *buf = xdr->buf; 1244 unsigned int from, bytes; 1245 unsigned int shift = 0; 1246 1247 if ((offset + length) < offset || 1248 (offset + length) > buf->page_len) 1249 length = buf->page_len - offset; 1250 1251 xdr_realign_pages(xdr); 1252 from = xdr_page_pos(xdr); 1253 bytes = xdr->nwords << 2; 1254 if (length < bytes) 1255 bytes = length; 1256 1257 /* Move page data to the left */ 1258 if (from > offset) { 1259 shift = min_t(unsigned int, bytes, buf->page_len - from); 1260 _shift_data_left_pages(buf->pages, 1261 buf->page_base + offset, 1262 buf->page_base + from, 1263 shift); 1264 bytes -= shift; 1265 1266 /* Move tail data into the pages, if necessary */ 1267 if (bytes > 0) 1268 _shift_data_left_tail(buf, offset + shift, bytes); 1269 } 1270 1271 xdr->nwords -= XDR_QUADLEN(length); 1272 xdr_set_page(xdr, from + length, PAGE_SIZE); 1273 return length; 1274 } 1275 EXPORT_SYMBOL_GPL(xdr_align_data); 1276 1277 uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length) 1278 { 1279 struct xdr_buf *buf = xdr->buf; 1280 unsigned int bytes; 1281 unsigned int from; 1282 unsigned int truncated = 0; 1283 1284 if ((offset + length) < offset || 1285 (offset + length) > buf->page_len) 1286 length = buf->page_len - offset; 1287 1288 xdr_realign_pages(xdr); 1289 from = xdr_page_pos(xdr); 1290 bytes = xdr->nwords << 2; 1291 1292 if (offset + length + bytes > buf->page_len) { 1293 unsigned int shift = (offset + length + bytes) - buf->page_len; 1294 unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift); 1295 truncated = shift - res; 1296 xdr->nwords -= XDR_QUADLEN(truncated); 1297 bytes -= shift; 1298 } 1299 1300 /* Now move the page data over and zero pages */ 1301 if (bytes > 0) 1302 _shift_data_right_pages(buf->pages, 1303 buf->page_base + offset + length, 1304 buf->page_base + from, 1305 bytes); 1306 _zero_pages(buf->pages, buf->page_base + offset, length); 1307 1308 buf->len += length - (from - offset) - truncated; 1309 xdr_set_page(xdr, offset + length, PAGE_SIZE); 1310 return length; 1311 } 1312 EXPORT_SYMBOL_GPL(xdr_expand_hole); 1313 1314 /** 1315 * xdr_enter_page - decode data from the XDR page 1316 * @xdr: pointer to xdr_stream struct 1317 * @len: number of bytes of page data 1318 * 1319 * Moves data beyond the current pointer position from the XDR head[] buffer 1320 * into the page list. Any data that lies beyond current position + "len" 1321 * bytes is moved into the XDR tail[]. The current pointer is then 1322 * repositioned at the beginning of the first XDR page. 1323 */ 1324 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) 1325 { 1326 len = xdr_align_pages(xdr, len); 1327 /* 1328 * Position current pointer at beginning of tail, and 1329 * set remaining message length. 

uint64_t xdr_align_data(struct xdr_stream *xdr, uint64_t offset, uint32_t length)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int from, bytes;
	unsigned int shift = 0;

	if ((offset + length) < offset ||
	    (offset + length) > buf->page_len)
		length = buf->page_len - offset;

	xdr_realign_pages(xdr);
	from = xdr_page_pos(xdr);
	bytes = xdr->nwords << 2;
	if (length < bytes)
		bytes = length;

	/* Move page data to the left */
	if (from > offset) {
		shift = min_t(unsigned int, bytes, buf->page_len - from);
		_shift_data_left_pages(buf->pages,
				       buf->page_base + offset,
				       buf->page_base + from,
				       shift);
		bytes -= shift;

		/* Move tail data into the pages, if necessary */
		if (bytes > 0)
			_shift_data_left_tail(buf, offset + shift, bytes);
	}

	xdr->nwords -= XDR_QUADLEN(length);
	xdr_set_page(xdr, from + length, PAGE_SIZE);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_align_data);

uint64_t xdr_expand_hole(struct xdr_stream *xdr, uint64_t offset, uint64_t length)
{
	struct xdr_buf *buf = xdr->buf;
	unsigned int bytes;
	unsigned int from;
	unsigned int truncated = 0;

	if ((offset + length) < offset ||
	    (offset + length) > buf->page_len)
		length = buf->page_len - offset;

	xdr_realign_pages(xdr);
	from = xdr_page_pos(xdr);
	bytes = xdr->nwords << 2;

	if (offset + length + bytes > buf->page_len) {
		unsigned int shift = (offset + length + bytes) - buf->page_len;
		unsigned int res = _shift_data_right_tail(buf, from + bytes - shift, shift);
		truncated = shift - res;
		xdr->nwords -= XDR_QUADLEN(truncated);
		bytes -= shift;
	}

	/* Now move the page data over and zero pages */
	if (bytes > 0)
		_shift_data_right_pages(buf->pages,
					buf->page_base + offset + length,
					buf->page_base + from,
					bytes);
	_zero_pages(buf->pages, buf->page_base + offset, length);

	buf->len += length - (from - offset) - truncated;
	xdr_set_page(xdr, offset + length, PAGE_SIZE);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_expand_hole);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
		       unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
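
/*
 * Illustrative sketch only: carving out the region an integrity check
 * should cover. The offset and checklen values are hypothetical.
 *
 *	struct xdr_buf subbuf;
 *
 *	if (xdr_buf_subsegment(buf, &subbuf, offset, checklen))
 *		return -EMSGSIZE;
 *	// subbuf now aliases buf: no data was copied.
 */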

/**
 * xdr_stream_subsegment - set @subbuf to a portion of @xdr
 * @xdr: an xdr_stream set up for decoding
 * @subbuf: the result buffer
 * @nbytes: length of @xdr to extract, in bytes
 *
 * Sets up @subbuf to represent a portion of @xdr. The portion
 * starts at the current offset in @xdr, and extends for a length
 * of @nbytes. If this is successful, @xdr is advanced to the next
 * position following that portion.
 *
 * Return values:
 *   %true: @subbuf has been initialized, and @xdr has been advanced.
 *   %false: a bounds error has occurred
 */
bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
			   unsigned int nbytes)
{
	unsigned int remaining, offset, len;

	if (xdr_buf_subsegment(xdr->buf, subbuf, xdr_stream_pos(xdr), nbytes))
		return false;

	if (subbuf->head[0].iov_len)
		if (!__xdr_inline_decode(xdr, subbuf->head[0].iov_len))
			return false;

	remaining = subbuf->page_len;
	offset = subbuf->page_base;
	while (remaining) {
		len = min_t(unsigned int, remaining, PAGE_SIZE) - offset;

		if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
			return false;
		if (!__xdr_inline_decode(xdr, len))
			return false;

		remaining -= len;
		offset = 0;
	}

	return true;
}
EXPORT_SYMBOL_GPL(xdr_stream_subsegment);

/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
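
/*
 * Illustrative sketch only: pulling a fixed-size cookie out of an
 * xdr_buf regardless of which segment(s) it spans.
 *
 *	__be32 cookie[2];
 *
 *	if (read_bytes_from_xdr_buf(buf, offset, cookie, sizeof(cookie)))
 *		return -EIO;
 */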

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
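
/*
 * Illustrative sketch only: decoding a fixed-stride array with a
 * per-element callback. demo_elem() and the sizes are hypothetical.
 *
 *	static int demo_elem(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		// interpret one desc->elem_size chunk here
 *		return 0;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size	= 8,
 *		.array_maxlen	= 64,
 *		.xcode		= demo_elem,
 *	};
 *
 *	err = xdr_decode_array2(buf, base, &desc);
 */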

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);

/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);

/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the object would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
		size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;
		ret = -ENOMEM;
	}
	*ptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);

/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
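
/*
 * Illustrative sketch only: decoding a counted string into a fixed
 * on-stack buffer, leaving room for the added NUL terminator.
 *
 *	char name[64];
 *	ssize_t len;
 *
 *	len = xdr_stream_decode_string(&xdr, name, sizeof(name));
 *	if (len < 0)
 *		return len;
 */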

/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the string would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
		size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmalloc(ret + 1, gfp_flags);
		if (s != NULL) {
			memcpy(s, p, ret);
			s[ret] = '\0';
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);