/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
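/*
 * Illustrative sketch (not part of the original file): fixed-length opaque
 * fields, for example an 8-byte verifier defined by a protocol, carry no
 * length word; xdr_encode_opaque_fixed() copies the bytes and zero-pads up
 * to the next 32-bit boundary.  The names below are hypothetical.
 */
#define XDR_EXAMPLE_VERF_SIZE	8
static inline u32 *xdr_example_encode_verf(u32 *p, const u8 *verf)
{
	/* copies 8 bytes; no padding needed since 8 is already a multiple of 4 */
	return xdr_encode_opaque_fixed(p, verf, XDR_EXAMPLE_VERF_SIZE);
}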
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
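/*
 * Illustrative sketch (not in the original file): once the inline part of a
 * request has been encoded into head[0], a payload carried in pages can be
 * attached with xdr_encode_pages(); any pad bytes needed to keep the stream
 * 32-bit aligned end up in tail[0].  All names below are hypothetical.
 */
static inline void xdr_example_attach_payload(struct xdr_buf *sndbuf,
					      struct page **payload_pages,
					      unsigned int pgbase,
					      unsigned int count)
{
	/* sndbuf->len and sndbuf->buflen grow by count plus the XDR pad */
	xdr_encode_pages(sndbuf, payload_pages, pgbase, count);
}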
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
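/*
 * Illustrative sketch (not in the original file): the helpers above address
 * data in a page vector by the linear offset (i << PAGE_CACHE_SHIFT) + base.
 * This hypothetical helper shows the reverse mapping from such an offset to
 * a page pointer plus the offset within that page.
 */
static inline struct page *xdr_example_offset_to_page(struct page **pages,
						      size_t offset,
						      size_t *off_in_page)
{
	*off_in_page = offset & ~PAGE_CACHE_MASK;	/* byte within the page */
	return pages[offset >> PAGE_CACHE_SHIFT];	/* the page itself */
}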
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
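/*
 * Illustrative sketch (not in the original file): xdr_shift_buf() shrinks
 * head[0] by 'len' bytes, sliding the displaced bytes into the page data
 * (and page data into the tail).  A typical use, mirroring what
 * xdr_read_pages() does further down, is to trim head[0] so that it ends at
 * a known decode position.  The helper below is hypothetical and assumes
 * 'p' points inside head[0].
 */
static inline void xdr_example_trim_head_to(struct xdr_buf *buf, char *p)
{
	struct kvec *head = buf->head;
	ssize_t excess = (char *)head->iov_base + head->iov_len - p;

	if (excess > 0)
		xdr_shift_buf(buf, excess);
}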
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);
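/*
 * Illustrative sketch (not in the original file): the typical encode-side
 * pattern is xdr_init_encode(), one or more xdr_reserve_space() calls for
 * the inline words, and optionally xdr_write_pages() for a page-borne
 * payload.  The argument layout below (one offset word, one count word) and
 * all names are hypothetical.
 */
static inline int xdr_example_encode_write_args(struct xdr_buf *buf,
						struct page **pages,
						unsigned int count)
{
	struct xdr_stream xdr;
	uint32_t *p;

	xdr_init_encode(&xdr, buf, NULL);
	p = xdr_reserve_space(&xdr, 8);		/* offset word + count word */
	if (p == NULL)
		return -EMSGSIZE;		/* scratch buffer exhausted */
	*p++ = htonl(0);			/* hypothetical offset */
	*p++ = htonl(count);
	xdr_write_pages(&xdr, pages, 0, count);	/* payload follows in pages */
	return 0;
}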
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char *kaddr = page_address(xdr->buf->pages[0]);

	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of the first page, and
	 * set remaining message length.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (uint32_t *)(kaddr + xdr->buf->page_base);
	xdr->end = (uint32_t *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
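/*
 * Illustrative sketch (not in the original file): a READ-style reply decoder
 * typically pulls the inline count out of head[0] with xdr_inline_decode()
 * and then realigns the page data at the current position with
 * xdr_read_pages().  "rcvbuf", "p" and the reply layout are hypothetical.
 */
static inline int xdr_example_decode_read_reply(struct xdr_buf *rcvbuf,
						uint32_t *p,
						unsigned int *countp)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, rcvbuf, p);
	p = xdr_inline_decode(&xdr, 4);		/* one 32-bit count word */
	if (p == NULL)
		return -EIO;
	*countp = ntohl(*p);
	xdr_read_pages(&xdr, *countp);		/* align page data to the count */
	return 0;
}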
/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates empty intersection by setting
 * length of subiov to zero.  Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}

int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32 raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
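/*
 * Illustrative sketch (not in the original file): read_bytes_from_xdr_buf()
 * copies a byte range out of an xdr_buf regardless of whether it straddles
 * the head, the pages and the tail.  The hypothetical helper below uses it
 * to fetch a 64-bit big-endian quantity at a given byte offset.
 */
static inline int xdr_example_read_cookie(struct xdr_buf *buf,
					  unsigned int offset, u64 *cookie)
{
	u32 raw[2];
	int err;

	err = read_bytes_from_xdr_buf(buf, offset, raw, sizeof(raw));
	if (err)
		return err;
	/* assemble the two network-order words into a host-order u64 */
	*cookie = ((u64)ntohl(raw[0]) << 32) | ntohl(raw[1]);
	return 0;
}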
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32 tail_offset = buf->head[0].iov_len + buf->page_len;
	u32 obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;

	}
	return 0;
out:
	return -1;
}

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
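/*
 * Illustrative sketch (not in the original file): decoding an XDR array of
 * fixed-size elements with xdr_decode_array2().  The element layout here (a
 * single 32-bit id), the bounds, and all names are hypothetical; real users
 * of this interface supply their own xdr_array2_desc and xcode callback,
 * which is invoked once per element.
 */
static int xdr_example_decode_id(struct xdr_array2_desc *desc, void *elem)
{
	u32 id = ntohl(*(u32 *)elem);

	return (id != 0) ? 0 : -EINVAL;		/* reject a zero id, say */
}

static inline int xdr_example_decode_id_array(struct xdr_buf *buf,
					      unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size    = 4,		/* one 32-bit word per element */
		.array_maxlen = 64,		/* hypothetical upper bound */
		.xcode        = xdr_example_decode_id,
	};

	/* array_len is read from the buffer during decode */
	return xdr_decode_array2(buf, base, &desc);
}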