/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
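
/*
 * Example (an illustrative sketch, not part of the original file): encoding
 * a 5-byte opaque with xdr_encode_opaque() emits one length word followed by
 * XDR_QUADLEN(5) = 2 data words, the last three bytes of which are the zero
 * padding required by RFC 1832.  The 'scratch' and 'cookie' names are
 * hypothetical.
 *
 *	u32 scratch[4];
 *	u8 cookie[5] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };
 *	u32 *p = scratch;
 *
 *	p = xdr_encode_opaque(p, cookie, sizeof(cookie));
 *	(p now equals scratch + 3: one length word plus two padded data words)
 */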

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}


/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *            they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
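
/*
 * Worked example of the page vector addressing used above (illustrative,
 * assuming a 4096-byte PAGE_CACHE_SIZE): a byte address of 5000 selects
 * pages[5000 >> PAGE_CACHE_SHIFT] = pages[1], and 5000 & ~PAGE_CACHE_MASK
 * = 904 is the starting offset within that page.  _shift_data_right_pages()
 * walks such addresses backwards from the end of the area, which is why
 * overlapping source and destination ranges are moved safely.
 */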

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
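
/*
 * Example (illustrative, assuming 4096-byte pages): _copy_from_pages() with
 * pgbase 4050 and len 100 is performed as two memcpy() calls, 46 bytes from
 * the end of pages[0] and the remaining 54 bytes from the start of pages[1],
 * each done under a temporary kmap_atomic() mapping.
 */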

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
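
/*
 * Worked example of the shift above (illustrative): with a 128-byte head,
 * 4096 bytes of page data and a 400-byte tail, xdr_shift_buf(buf, 8) first
 * moves the existing tail contents along by 8 bytes and copies the last 8
 * bytes of page data into the freed space, then shifts the page data right
 * by 8 bytes and copies the last 8 bytes of the head into the start of the
 * pages, finally trimming head->iov_len to 120.
 */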

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);
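
/*
 * Illustrative encode-side usage (a sketch, not taken from a real caller;
 * 'buf', 'verf_data', 'verf_len', 'pages' and 'count' are hypothetical):
 *
 *	struct xdr_stream xdr;
 *	uint32_t *p;
 *
 *	xdr_init_encode(&xdr, buf, buf->head[0].iov_base);
 *	p = xdr_reserve_space(&xdr, 4 + (XDR_QUADLEN(verf_len) << 2));
 *	if (p == NULL)
 *		return -EMSGSIZE;
 *	p = xdr_encode_opaque(p, verf_data, verf_len);
 *	xdr_write_pages(&xdr, pages, 0, count);
 */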

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
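
/*
 * Illustrative decode-side usage (a sketch; the reply layout and the
 * 'count' variable are hypothetical): pull a length word out of the head,
 * then line the page data up behind the current position.
 *
 *	struct xdr_stream xdr;
 *	uint32_t *p;
 *	u32 count;
 *
 *	xdr_init_decode(&xdr, buf, buf->head[0].iov_base);
 *	p = xdr_inline_decode(&xdr, 4);
 *	if (p == NULL)
 *		return -EIO;
 *	count = ntohl(*p);
 *	xdr_read_pages(&xdr, count);
 */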

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates empty intersection by setting
 * length of subiov to zero.  Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}
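
/*
 * Example (illustrative): with a 100-byte head and at least 4 bytes of page
 * data, reading 8 bytes at base 96 with read_bytes_from_xdr_buf() carves out
 * the matching subsegment and then copies 4 bytes from the end of the head
 * followed by 4 bytes from the start of the pages:
 *
 *	u8 cookie[8];
 *
 *	if (read_bytes_from_xdr_buf(buf, 96, cookie, sizeof(cookie)))
 *		return -EIO;
 */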

int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
	u32	obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;

	}
	return 0;
out:
	return -1;
}
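
/*
 * Example (illustrative): xdr_encode_word() and xdr_decode_word() above
 * round-trip a single 32-bit value at an arbitrary byte offset, doing the
 * byte-order conversion internally.  The offset of 12 is arbitrary.
 *
 *	u32 value;
 *
 *	if (xdr_encode_word(buf, 12, 42) || xdr_decode_word(buf, 12, &value))
 *		return -EIO;
 *	(value is now 42)
 */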

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
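
/*
 * Illustrative sketch of driving xdr_decode_array2() (not taken from a real
 * caller; the element layout, the 'check_u32' callback and the maximum of 64
 * elements are hypothetical).  Each element is a single 32-bit word here,
 * and the callback runs once per element, either on data mapped in place or
 * on a bounce buffer when an element straddles two segments.
 *
 *	static int check_u32(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		return ntohl(*(u32 *) elem) != 0 ? 0 : -EINVAL;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size = 4,
 *		.array_maxlen = 64,
 *		.xcode = check_u32,
 *	};
 *
 *	err = xdr_decode_array2(buf, base, &desc);
 */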