/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + quadlen;
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC 1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
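
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * encoding a 6-byte verifier as a variable-length opaque.  All names
 * below are hypothetical.
 *
 *	u32 buf[4];
 *	u8 verf[6] = { 1, 2, 3, 4, 5, 6 };
 *	u32 *p;
 *
 *	p = xdr_encode_opaque(buf, verf, sizeof(verf));
 *
 * buf[0] now holds htonl(6), bytes 4..9 hold verf, bytes 10..11 are
 * zero padding, and p == buf + 3.
 */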

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
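
/*
 * In-place string decode sketch (illustrative; "p" is assumed to point
 * at an XDR-encoded string inside a received buffer, and the bound
 * NFS_MAXNAMLEN is just an example):
 *
 *	char *name;
 *	int namelen;
 *
 *	p = xdr_decode_string_inplace(p, &name, &namelen, NFS_MAXNAMLEN);
 *	if (p == NULL)
 *		return -EIO;		string exceeded maxlen
 *
 * Note that "name" points into the XDR buffer itself and is not
 * NUL-terminated; namelen bounds all accesses.
 */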

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
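
/*
 * Illustrative sketch (hypothetical names): attaching page data to a
 * send buffer whose head has already been encoded.
 *
 *	struct xdr_buf *xdr = &req->rq_snd_buf;
 *
 *	xdr_encode_pages(xdr, args->pages, args->pgbase, args->count);
 *
 * If args->count is not a multiple of 4, the tail kvec now points at
 * up to three zero pad bytes in the head's scratch space, keeping the
 * transmitted stream 32-bit aligned.
 */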

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}


/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 * 	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
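
/*
 * Worked example of the page-vector addressing used by the helpers in
 * this block (assuming PAGE_CACHE_SIZE == 4096): a memory area that
 * starts 100 bytes into pages[2] has the address
 * (2 << PAGE_CACHE_SHIFT) + 100 == 8292.  Going the other way,
 * 8292 >> PAGE_CACHE_SHIFT == 2 recovers the page index, and
 * 8292 & ~PAGE_CACHE_MASK == 100 recovers the offset within the page.
 */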

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
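
/*
 * Typical encode sequence (an illustrative sketch; the request and
 * argument names are hypothetical, modelled on how NFS XDR routines
 * drive this interface):
 *
 *	struct xdr_stream xdr;
 *	u32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL);
 *	p = xdr_reserve_space(&xdr, 2 * 4);
 *	if (p == NULL)
 *		return -EMSGSIZE;	ran out of scratch space
 *	*p++ = htonl(args->offset);
 *	*p++ = htonl(args->count);
 */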

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
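
/*
 * Typical decode sequence (illustrative sketch, hypothetical names;
 * "p" is the start of the reply body inside the receive buffer):
 *
 *	struct xdr_stream xdr;
 *	u32 *p;
 *
 *	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
 *	p = xdr_inline_decode(&xdr, 2 * 4);
 *	if (p == NULL)
 *		return -EIO;		reply was short
 *	res->count = ntohl(*p++);
 *	res->eof   = ntohl(*p++);
 */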

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
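
/*
 * Sketch of a READ-style reply decode (hypothetical names): once the
 * fixed-size part of the reply has been pulled out of head[0], align
 * the opaque file data with the page vector, then keep decoding the
 * trailing words, which now come from the tail:
 *
 *	count = ntohl(*p++);		length of the page data
 *	xdr_read_pages(&xdr, count);
 *	p = xdr_inline_decode(&xdr, 4);	reads from tail[0]
 */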

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char *kaddr = page_address(xdr->buf->pages[0]);

	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of the first page of
	 * data, and clamp the remaining length to what fits in that page.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (uint32_t *)(kaddr + xdr->buf->page_base);
	xdr->end = (uint32_t *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates empty intersection by setting
 * length of subiov to zero.  Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or len is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}
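
/*
 * Sketch (hypothetical names): carving out the region covered by an
 * RPCSEC_GSS integrity checksum without copying any data.
 *
 *	struct xdr_buf integ_buf;
 *
 *	if (xdr_buf_subsegment(rcv_buf, &integ_buf, offset, integ_len))
 *		return -EIO;		offset/len out of bounds
 *
 * The head, pages and tail of integ_buf alias the corresponding bytes
 * of rcv_buf.
 */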

/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}
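
/*
 * Sketch (hypothetical names): extracting a fixed-size verifier that
 * may straddle the head, pages and tail of a receive buffer.
 *
 *	u8 verf[8];
 *
 *	if (read_bytes_from_xdr_buf(rcv_buf, offset, verf, sizeof(verf)))
 *		return -EIO;		buffer too short
 */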

/* obj is assumed to point to allocated memory of size at least len: */
int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}

int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
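
/*
 * Sketch: the word helpers pair the byte helpers above with byte-order
 * conversion, so a counter anywhere in the buffer can be updated in
 * place (hypothetical names):
 *
 *	u32 count;
 *
 *	if (xdr_decode_word(buf, offset, &count) ||
 *	    xdr_encode_word(buf, offset, count + 1))
 *		return -EIO;
 */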

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
	u32	obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;
	}
	return 0;
out:
	return -1;
}

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
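
/*
 * Sketch of driving the array coder (hypothetical names): the caller
 * fills in a struct xdr_array2_desc with the element size, a bound on
 * the element count, and an xcode callback that encodes or decodes a
 * single element:
 *
 *	static int entry_xcode(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		... encode or decode one desc->elem_size byte element
 *		    at "elem", returning 0 on success ...
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size	= 12,
 *		.array_maxlen	= MAX_ENTRIES,
 *		.xcode		= entry_xcode,
 *	};
 *	err = xdr_decode_array2(buf, base, &desc);
 *
 * On decode, desc.array_len returns the on-the-wire element count.
 */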