/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
			       (char *)head->iov_base +
			       head->iov_len - offs,
			       copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur) {
		xdr_shrink_bufhead(buf, iov->iov_len - cur);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		xdr_shrink_pagelen(buf, buf->page_len - len);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);

/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 *  to worry about doing a potentially overlapping copy.
	 *  This assumes the object is at most half the length of the
	 *  tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
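
/*
 * Usage sketch (illustrative only, not part of the interfaces above):
 * a minimal example of how the encode- and decode-side stream helpers
 * are typically combined by an XDR routine.  The function names and the
 * one-word-plus-opaque layout are hypothetical; a real caller first sets
 * the stream up with xdr_init_encode() or xdr_init_decode() on a suitably
 * sized struct xdr_buf.
 *
 *	static int example_encode(struct xdr_stream *xdr,
 *				  const struct xdr_netobj *cookie)
 *	{
 *		__be32 *p;
 *
 *		// reserve the length word plus the quad-aligned opaque body
 *		p = xdr_reserve_space(xdr, 4 + XDR_QUADLEN(cookie->len) * 4);
 *		if (p == NULL)
 *			return -EMSGSIZE;	// encode buffer overflow
 *		xdr_encode_opaque(p, cookie->data, cookie->len);
 *		return 0;
 *	}
 *
 *	static int example_decode(struct xdr_stream *xdr, u32 *count)
 *	{
 *		__be32 *p;
 *
 *		// one 32-bit word; NULL means we ran off the end of the buffer
 *		p = xdr_inline_decode(xdr, 4);
 *		if (p == NULL)
 *			return -EIO;
 *		*count = be32_to_cpup(p);
 *		return 0;
 *	}
 */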