Lines Matching +full:data +full:- +full:shift

1 // SPDX-License-Identifier: GPL-2.0-only
31 unsigned int quadlen = XDR_QUADLEN(obj->len);
34 *p++ = cpu_to_be32(obj->len);
35 memcpy(p, obj->data, obj->len);
36 return p + XDR_QUADLEN(obj->len);
47 obj->len = len;
48 obj->data = (u8 *) p;
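
The two netobj helpers above pair up: the encoder writes a length word, the payload bytes, and zero padding, while the decoder points obj->data into the buffer in place. A minimal round-trip sketch (demo_* names are illustrative, not from xdr.c; these sketches assume <linux/sunrpc/xdr.h>):

#include <linux/sunrpc/xdr.h>

static __be32 *demo_netobj_roundtrip(__be32 *p, const struct xdr_netobj *in,
				     struct xdr_netobj *out)
{
	/* Writes length + data, zero-padded to the next 32-bit boundary */
	__be32 *next = xdr_encode_netobj(p, in);

	/* Decodes in place: out->data aliases the buffer, no copy is made */
	if (!xdr_decode_netobj(p, out))
		return NULL;	/* length exceeded XDR_MAX_NETOBJ */
	return next;
}
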
54 * xdr_encode_opaque_fixed - Encode fixed length opaque data
56 * @ptr: pointer to data to encode (or NULL)
57 * @nbytes: size of data.
59 * Copy the array of data of length nbytes at ptr to the XDR buffer
60 * at position p, then align to the next 32-bit boundary by padding
71 unsigned int padding = (quadlen << 2) - nbytes;
84 * xdr_encode_opaque - Encode variable length opaque data
86 * @ptr: pointer to data to encode (or NULL)
87 * @nbytes: size of data.
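
To make the padding arithmetic above concrete: XDR_QUADLEN() rounds a byte count up to 32-bit words, so for nbytes = 5, quadlen = 2 and padding = (2 << 2) - 5 = 3 zero bytes, as RFC 1832 requires. A hedged sketch of encoding a variable-length opaque (demo name illustrative):

static __be32 *demo_encode_name(__be32 *p)
{
	static const char name[] = "alice";	/* 5 bytes of data */

	/* Emits one length word (5), "alice", then 3 zero pad bytes,
	 * and returns a pointer at the next 32-bit boundary.
	 */
	return xdr_encode_opaque(p, name, sizeof(name) - 1);
}
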
121 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
130 kaddr = kmap_atomic(buf->pages[0]);
131 kaddr[buf->page_base + len] = '\0';
138 if (!buf->page_len)
140 return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
148 if (n != 0 && buf->bvec == NULL) {
149 buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
150 if (!buf->bvec)
151 return -ENOMEM;
153 bvec_set_page(&buf->bvec[i], buf->pages[i], PAGE_SIZE,
163 kfree(buf->bvec);
164 buf->bvec = NULL;
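
A sketch of the usual pairing for the two helpers above (assumed caller, not from xdr.c): xdr_alloc_bvec() builds a bio_vec array shadowing buf->pages only if one is not already present, and xdr_free_bvec() releases it; the array must be rebuilt if the page vector changes.

static int demo_map_for_transport(struct xdr_buf *buf)
{
	int err = xdr_alloc_bvec(buf, GFP_KERNEL);

	if (err)
		return err;		/* -ENOMEM */
	/* ... hand buf->bvec to the transport for sending ... */
	xdr_free_bvec(buf);
	return 0;
}
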
168 * xdr_buf_to_bvec - Copy components of an xdr_buf into a bio_vec array
178 const struct kvec *head = xdr->head;
179 const struct kvec *tail = xdr->tail;
182 if (head->iov_len) {
183 bvec_set_virt(bvec++, head->iov_base, head->iov_len);
187 if (xdr->page_len) {
189 struct page **pages = xdr->pages;
191 offset = offset_in_page(xdr->page_base);
192 remaining = xdr->page_len;
195 PAGE_SIZE - offset);
197 remaining -= len;
204 if (tail->iov_len) {
205 bvec_set_virt(bvec, tail->iov_base, tail->iov_len);
214 return count - 1;
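
Sizing the destination array for xdr_buf_to_bvec(): in the worst case it needs one entry for head, one per page, and one for tail; the "return count - 1" above is the overflow case when the array was sized too small. A hypothetical caller (kcalloc/kfree from <linux/slab.h>):

static unsigned int demo_to_bvec(const struct xdr_buf *xdr)
{
	/* head + one entry per page + tail, echoing xdr_buf_pagecount() */
	unsigned int n = 2 + ((xdr->page_base + xdr->page_len +
			       PAGE_SIZE - 1) >> PAGE_SHIFT);
	struct bio_vec *bvec = kcalloc(n, sizeof(*bvec), GFP_KERNEL);
	unsigned int count = 0;

	if (bvec) {
		count = xdr_buf_to_bvec(bvec, n, xdr);
		kfree(bvec);
	}
	return count;
}
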
218 * xdr_inline_pages - Prepare receive buffer for a large reply
220 * @offset: expected offset where data payload will start, in bytes
223 * @len: expected size of the upper layer data payload, in bytes
230 struct kvec *head = xdr->head;
231 struct kvec *tail = xdr->tail;
232 char *buf = (char *)head->iov_base;
233 unsigned int buflen = head->iov_len;
235 head->iov_len = offset;
237 xdr->pages = pages;
238 xdr->page_base = base;
239 xdr->page_len = len;
241 tail->iov_base = buf + offset;
242 tail->iov_len = buflen - offset;
243 xdr->buflen += len;
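
The effect of xdr_inline_pages() on the three-part buffer, sketched for a READ-style reply (an assumed caller, values illustrative): the head is clipped to the protocol header, the payload pages are spliced in, and whatever followed @offset in the old head becomes the tail.

static void demo_prepare_reply_buf(struct xdr_buf *buf, unsigned int hdrsize,
				   struct page **pages, unsigned int count)
{
	/* Before: head = [0, head->iov_len), no pages.
	 * After:  head = [0, hdrsize), pages carry @count payload bytes,
	 *         tail = old head bytes from hdrsize onward, and
	 *         buf->buflen grows by @count.
	 */
	xdr_inline_pages(buf, hdrsize, pages, 0, count);
}
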
295 if (copy > (PAGE_SIZE - pgto_base))
296 copy = PAGE_SIZE - pgto_base;
297 if (copy > (PAGE_SIZE - pgfrom_base))
298 copy = PAGE_SIZE - pgfrom_base;
313 } while ((len -= copy) != 0);
356 pgto--;
360 pgfrom--;
368 pgto_base -= copy;
369 pgfrom_base -= copy;
381 } while ((len -= copy) != 0);
388 * @p: pointer to source data
391 * Copies data from an arbitrary memory location into an array of pages
392 * The copy is assumed to be non-overlapping.
408 copy = PAGE_SIZE - pgbase;
416 len -= copy;
435 * @pgbase: offset of source data
438 * Copies data into an arbitrary memory location from an array of pages
439 * The copy is assumed to be non-overlapping.
455 copy = PAGE_SIZE - pgbase;
470 } while ((len -= copy) != 0);
477 if (base >= iov->iov_len)
479 if (len > iov->iov_len - base)
480 len = iov->iov_len - base;
481 memset(iov->iov_base + base, 0, len);
493 struct page **pages = buf->pages;
500 if (pgbase >= buf->page_len) {
501 xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len);
504 if (pgbase + len > buf->page_len) {
505 xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len);
506 len = buf->page_len - pgbase;
509 pgbase += buf->page_base;
515 zero = PAGE_SIZE - pgbase;
527 } while ((len -= zero) != 0);
535 if (!(buf->flags & XDRBUF_SPARSE_PAGES))
537 if (buflen <= buf->head->iov_len)
539 pagelen = buflen - buf->head->iov_len;
540 if (pagelen > buf->page_len)
541 pagelen = buf->page_len;
542 npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
544 if (buf->pages[i])
546 buf->pages[i] = alloc_page(gfp);
547 if (likely(buf->pages[i]))
549 buflen -= pagelen;
551 if (pagelen > buf->page_base)
552 buflen += pagelen - buf->page_base;
560 struct kvec *head = buf->head;
561 struct kvec *tail = buf->tail;
562 unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;
565 if (sum > buf->len) {
566 free_space = min_t(unsigned int, sum - buf->len, len);
567 newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space,
569 free_space = newlen - buf->len;
570 buf->len = newlen;
571 len -= free_space;
576 if (buf->buflen > sum) {
578 free_space = min_t(unsigned int, buf->buflen - sum, len);
579 tail->iov_len += free_space;
580 buf->len += free_space;
586 unsigned int shift)
588 const struct kvec *tail = buf->tail;
589 unsigned int to = base + shift;
591 if (to >= tail->iov_len)
593 if (len + to > tail->iov_len)
594 len = tail->iov_len - to;
595 memmove(tail->iov_base + to, tail->iov_base + base, len);
600 unsigned int shift)
602 const struct kvec *tail = buf->tail;
603 unsigned int to = base + shift;
607 if (base >= buf->page_len)
609 if (len > buf->page_len - base)
610 len = buf->page_len - base;
611 if (to >= buf->page_len) {
612 tato = to - buf->page_len;
613 if (tail->iov_len >= len + tato)
615 else if (tail->iov_len > tato)
616 talen = tail->iov_len - tato;
617 } else if (len + to >= buf->page_len) {
618 pglen = buf->page_len - to;
619 talen = len - pglen;
620 if (talen > tail->iov_len)
621 talen = tail->iov_len;
625 _copy_from_pages(tail->iov_base + tato, buf->pages,
626 buf->page_base + base + pglen, talen);
627 _shift_data_right_pages(buf->pages, buf->page_base + to,
628 buf->page_base + base, pglen);
633 unsigned int shift)
635 const struct kvec *head = buf->head;
636 const struct kvec *tail = buf->tail;
637 unsigned int to = base + shift;
641 if (base >= head->iov_len)
643 if (len > head->iov_len - base)
644 len = head->iov_len - base;
645 if (to >= buf->page_len + head->iov_len) {
646 tato = to - buf->page_len - head->iov_len;
648 } else if (to >= head->iov_len) {
649 pgto = to - head->iov_len;
651 if (pgto + pglen > buf->page_len) {
652 talen = pgto + pglen - buf->page_len;
653 pglen -= talen;
656 pglen = len - (head->iov_len - to);
657 if (pglen > buf->page_len) {
658 talen = pglen - buf->page_len;
659 pglen = buf->page_len;
663 len -= talen;
665 if (talen + tato > tail->iov_len)
666 talen = tail->iov_len > tato ? tail->iov_len - tato : 0;
667 memcpy(tail->iov_base + tato, head->iov_base + base, talen);
669 len -= pglen;
670 base -= pglen;
671 _copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base,
674 base -= len;
675 memmove(head->iov_base + to, head->iov_base + base, len);
680 unsigned int shift)
682 const struct kvec *tail = buf->tail;
684 if (base >= tail->iov_len || !shift || !len)
686 xdr_buf_tail_copy_right(buf, base, len, shift);
691 unsigned int shift)
693 if (!shift || !len)
695 if (base >= buf->page_len) {
696 xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift);
699 if (base + len > buf->page_len)
700 xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len,
701 shift);
702 xdr_buf_pages_copy_right(buf, base, len, shift);
707 unsigned int shift)
709 const struct kvec *head = buf->head;
711 if (!shift)
713 if (base >= head->iov_len) {
714 xdr_buf_pages_shift_right(buf, base - head->iov_len, len,
715 shift);
718 if (base + len > head->iov_len)
719 xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len,
720 shift);
721 xdr_buf_head_copy_right(buf, base, len, shift);
725 unsigned int len, unsigned int shift)
727 const struct kvec *tail = buf->tail;
729 if (base >= tail->iov_len)
731 if (len > tail->iov_len - base)
732 len = tail->iov_len - base;
733 /* Shift data into head */
734 if (shift > buf->page_len + base) {
735 const struct kvec *head = buf->head;
737 head->iov_len + buf->page_len + base - shift;
740 if (WARN_ONCE(shift > head->iov_len + buf->page_len + base,
741 "SUNRPC: Misaligned data.\n"))
743 if (hdto + hdlen > head->iov_len)
744 hdlen = head->iov_len - hdto;
745 memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen);
747 len -= hdlen;
751 /* Shift data into pages */
752 if (shift > base) {
753 unsigned int pgto = buf->page_len + base - shift;
756 if (pgto + pglen > buf->page_len)
757 pglen = buf->page_len - pgto;
758 _copy_to_pages(buf->pages, buf->page_base + pgto,
759 tail->iov_base + base, pglen);
761 len -= pglen;
765 memmove(tail->iov_base + base - shift, tail->iov_base + base, len);
770 unsigned int shift)
774 if (base >= buf->page_len)
776 if (len > buf->page_len - base)
777 len = buf->page_len - base;
778 /* Shift data into head */
779 if (shift > base) {
780 const struct kvec *head = buf->head;
781 unsigned int hdto = head->iov_len + base - shift;
784 if (WARN_ONCE(shift > head->iov_len + base,
785 "SUNRPC: Misaligned data.\n"))
787 if (hdto + hdlen > head->iov_len)
788 hdlen = head->iov_len - hdto;
789 _copy_from_pages(head->iov_base + hdto, buf->pages,
790 buf->page_base + base, hdlen);
792 len -= hdlen;
796 pgto = base - shift;
797 _shift_data_left_pages(buf->pages, buf->page_base + pgto,
798 buf->page_base + base, len);
803 unsigned int shift)
805 if (!shift || !len)
807 xdr_buf_tail_copy_left(buf, base, len, shift);
812 unsigned int shift)
814 if (!shift || !len)
816 if (base >= buf->page_len) {
817 xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift);
820 xdr_buf_pages_copy_left(buf, base, len, shift);
822 if (len <= buf->page_len)
824 xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
829 unsigned int shift)
831 const struct kvec *head = buf->head;
834 if (!shift || !len)
837 if (shift > base) {
838 bytes = (shift - base);
842 len -= bytes;
845 if (base < head->iov_len) {
846 bytes = min_t(unsigned int, len, head->iov_len - base);
847 memmove(head->iov_base + (base - shift),
848 head->iov_base + base, bytes);
850 len -= bytes;
852 xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift);
858 * @len: new length of buf->head[0]
860 * Shrinks XDR buffer's header kvec buf->head[0], setting it to
861 * 'len' bytes. The extra data is not lost, but is instead
866 struct kvec *head = buf->head;
867 unsigned int shift, buflen = max(buf->len, len);
869 WARN_ON_ONCE(len > head->iov_len);
870 if (head->iov_len > buflen) {
871 buf->buflen -= head->iov_len - buflen;
872 head->iov_len = buflen;
874 if (len >= head->iov_len)
876 shift = head->iov_len - len;
877 xdr_buf_try_expand(buf, shift);
878 xdr_buf_head_shift_right(buf, len, buflen - len, shift);
879 head->iov_len = len;
880 buf->buflen -= shift;
881 buf->len -= shift;
882 return shift;
886 * xdr_shrink_pagelen - shrinks buf->pages to @len bytes
890 * The extra data is not lost, but is instead moved into buf->tail.
895 unsigned int shift, buflen = buf->len - buf->head->iov_len;
897 WARN_ON_ONCE(len > buf->page_len);
898 if (buf->head->iov_len >= buf->len || len > buflen)
900 if (buf->page_len > buflen) {
901 buf->buflen -= buf->page_len - buflen;
902 buf->page_len = buflen;
904 if (len >= buf->page_len)
906 shift = buf->page_len - len;
907 xdr_buf_try_expand(buf, shift);
908 xdr_buf_pages_shift_right(buf, len, buflen - len, shift);
909 buf->page_len = len;
910 buf->len -= shift;
911 buf->buflen -= shift;
912 return shift;
916 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
921 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
927 unsigned int blen = xdr->buf->len;
929 xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0;
934 xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len);
938 * xdr_page_pos - Return the current offset from the start of the xdr pages
945 WARN_ON(pos < xdr->buf->head[0].iov_len);
946 return pos - xdr->buf->head[0].iov_len;
951 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
953 * @buf: pointer to XDR buffer in which to encode data
960 * data. With the new scheme, the xdr_stream manages the details
967 struct kvec *iov = buf->head;
968 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
972 xdr->buf = buf;
973 xdr->iov = iov;
974 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
975 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
976 BUG_ON(iov->iov_len > scratch_len);
978 if (p != xdr->p && p != NULL) {
981 BUG_ON(p < xdr->p || p > xdr->end);
982 len = (char *)p - (char *)xdr->p;
983 xdr->p = p;
984 buf->len += len;
985 iov->iov_len += len;
987 xdr->rqst = rqst;
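
A minimal encode sketch under the conventions above (the rqst argument may be NULL outside the RPC client; demo names are illustrative): initialize the stream over the buffer's head, reserve aligned space with xdr_reserve_space(), then fill it.

static int demo_encode_two_words(struct xdr_stream *xdr, struct xdr_buf *buf)
{
	__be32 *p;

	xdr_init_encode(xdr, buf, buf->head[0].iov_base, NULL);
	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
	if (!p)
		return -EMSGSIZE;	/* would run past buf->buflen */
	*p++ = cpu_to_be32(42);		/* values are placeholders */
	*p = cpu_to_be32(7);
	return 0;
}
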
992 * xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages
994 * @buf: pointer to XDR buffer into which to encode data
1004 xdr->buf = buf;
1005 xdr->page_ptr = pages;
1006 xdr->iov = NULL;
1007 xdr->p = page_address(*pages);
1008 xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
1009 xdr->rqst = rqst;
1014 * __xdr_commit_encode - Ensure all data is written to buffer
1018 * temporary location to write to, then later copying the data into
1024 * data might be read.
1028 size_t shift = xdr->scratch.iov_len;
1031 page = page_address(*xdr->page_ptr);
1032 memcpy(xdr->scratch.iov_base, page, shift);
1033 memmove(page, page + shift, (void *)xdr->p - page);
1040 * xdr->buf->head and xdr->buf->pages, or between two pages
1041 * in xdr->buf->pages.
1052 if (xdr->buf->len + nbytes > xdr->buf->buflen)
1054 frag1bytes = (xdr->end - xdr->p) << 2;
1055 frag2bytes = nbytes - frag1bytes;
1056 if (xdr->iov)
1057 xdr->iov->iov_len += frag1bytes;
1059 xdr->buf->page_len += frag1bytes;
1060 xdr->page_ptr++;
1061 xdr->iov = NULL;
1070 xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);
1073 * xdr->p is where the next encode will start after
1076 p = page_address(*xdr->page_ptr);
1077 xdr->p = p + frag2bytes;
1078 space_left = xdr->buf->buflen - xdr->buf->len;
1079 if (space_left - frag1bytes >= PAGE_SIZE)
1080 xdr->end = p + PAGE_SIZE;
1082 xdr->end = p + space_left - frag1bytes;
1084 xdr->buf->page_len += frag2bytes;
1085 xdr->buf->len += nbytes;
1093 * xdr_reserve_space - Reserve buffer space for sending
1098 * bytes of data. If so, update the total xdr_buf length, and
1104 * four-byte data item remains valid until @xdr is destroyed, but
1109 __be32 *p = xdr->p;
1113 /* align nbytes on the next 32-bit boundary */
1117 if (unlikely(q > xdr->end || q < p))
1119 xdr->p = q;
1120 if (xdr->iov)
1121 xdr->iov->iov_len += nbytes;
1123 xdr->buf->page_len += nbytes;
1124 xdr->buf->len += nbytes;
1130 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
1140 * %-EMSGSIZE: not enough space is available in @xdr
1149 * in xdr->pages.
1151 if (xdr->iov == xdr->buf->head) {
1152 xdr->iov = NULL;
1153 xdr->end = xdr->p;
1158 thislen = xdr->buf->page_len % PAGE_SIZE;
1159 thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);
1163 return -EMSGSIZE;
1165 nbytes -= thislen;
1173 * xdr_truncate_encode - truncate an encode buffer
1177 * Truncates the xdr stream, so that xdr->buf->len == len,
1178 * and xdr->p points at offset len from the start of the buffer, and
1181 * If this means moving xdr->p to a different buffer, we assume that
1187 * cache pages (as in a zero-copy server read reply), except for the
1193 struct xdr_buf *buf = xdr->buf;
1194 struct kvec *head = buf->head;
1195 struct kvec *tail = buf->tail;
1199 if (len > buf->len) {
1205 fraglen = min_t(int, buf->len - len, tail->iov_len);
1206 tail->iov_len -= fraglen;
1207 buf->len -= fraglen;
1208 if (tail->iov_len) {
1209 xdr->p = tail->iov_base + tail->iov_len;
1210 WARN_ON_ONCE(!xdr->end);
1211 WARN_ON_ONCE(!xdr->iov);
1215 fraglen = min_t(int, buf->len - len, buf->page_len);
1216 buf->page_len -= fraglen;
1217 buf->len -= fraglen;
1219 new = buf->page_base + buf->page_len;
1221 xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
1223 if (buf->page_len) {
1224 xdr->p = page_address(*xdr->page_ptr);
1225 xdr->end = (void *)xdr->p + PAGE_SIZE;
1226 xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
1227 WARN_ON_ONCE(xdr->iov);
1231 xdr->end = head->iov_base + head->iov_len;
1232 /* (otherwise assume xdr->end is already set) */
1233 xdr->page_ptr--;
1234 head->iov_len = len;
1235 buf->len = len;
1236 xdr->p = head->iov_base + head->iov_len;
1237 xdr->iov = buf->head;
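
Sketch of the pattern the comment above describes (a zero-copy read reply; names illustrative): encode assuming the maximum payload, then roll the stream back once the actual byte count is known.

static void demo_fixup_reply(struct xdr_stream *xdr, unsigned int start,
			     unsigned int actual)
{
	/* buf->len becomes start + actual; xdr->p follows it back */
	xdr_truncate_encode(xdr, start + actual);
}
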
1242 * xdr_truncate_decode - Truncate a decoding stream
1251 xdr->buf->len -= nbytes;
1252 xdr->nwords -= XDR_QUADLEN(nbytes);
1257 * xdr_restrict_buflen - decrease available buffer space
1262 * If we've already used too much space in the buffer, returns -1.
1264 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
1265 * and ensures xdr->end is set at most offset newbuflen from the start
1270 struct xdr_buf *buf = xdr->buf;
1271 int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
1272 int end_offset = buf->len + left_in_this_buf;
1274 if (newbuflen < 0 || newbuflen < buf->len)
1275 return -1;
1276 if (newbuflen > buf->buflen)
1279 xdr->end = (void *)xdr->end + newbuflen - end_offset;
1280 buf->buflen = newbuflen;
1286 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
1289 * @base: starting offset of first data byte in @pages
1290 * @len: number of data bytes in @pages to insert
1299 struct xdr_buf *buf = xdr->buf;
1300 struct kvec *tail = buf->tail;
1302 buf->pages = pages;
1303 buf->page_base = base;
1304 buf->page_len = len;
1306 tail->iov_base = xdr->p;
1307 tail->iov_len = 0;
1308 xdr->iov = tail;
1311 unsigned int pad = 4 - (len & 3);
1313 BUG_ON(xdr->p >= xdr->end);
1314 tail->iov_base = (char *)xdr->p + (len & 3);
1315 tail->iov_len += pad;
1317 *xdr->p++ = 0;
1319 buf->buflen += len;
1320 buf->len += len;
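
A hedged sketch of sending a page-based opaque with xdr_write_pages() (assumed caller): the length word goes into the head, the payload rides in the page list, and the helper supplies the XDR pad for a non-multiple-of-four @len.

static int demo_send_payload(struct xdr_stream *xdr, struct page **pages,
			     unsigned int base, unsigned int len)
{
	__be32 *p = xdr_reserve_space(xdr, XDR_UNIT);

	if (!p)
		return -EMSGSIZE;
	*p = cpu_to_be32(len);			/* opaque<> length word */
	xdr_write_pages(xdr, pages, base, len);	/* data + implicit pad */
	return 0;
}
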
1327 if (len > iov->iov_len)
1328 len = iov->iov_len;
1331 xdr->p = (__be32*)(iov->iov_base + base);
1332 xdr->end = (__be32*)(iov->iov_base + len);
1333 xdr->iov = iov;
1334 xdr->page_ptr = NULL;
1335 return len - base;
1341 struct xdr_buf *buf = xdr->buf;
1343 xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len);
1344 return xdr_set_iov(xdr, buf->tail, base, len);
1349 if (xdr->page_kaddr) {
1350 kunmap_local(xdr->page_kaddr);
1351 xdr->page_kaddr = NULL;
1364 maxlen = xdr->buf->page_len;
1368 maxlen -= base;
1374 base += xdr->buf->page_base;
1377 xdr->page_ptr = &xdr->buf->pages[pgnr];
1379 if (PageHighMem(*xdr->page_ptr)) {
1380 xdr->page_kaddr = kmap_local_page(*xdr->page_ptr);
1381 kaddr = xdr->page_kaddr;
1383 kaddr = page_address(*xdr->page_ptr);
1386 xdr->p = (__be32*)(kaddr + pgoff);
1391 xdr->end = (__be32*)(kaddr + pgend);
1392 xdr->iov = NULL;
1400 base -= xdr->buf->page_len;
1409 newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
1410 newbase -= xdr->buf->page_base;
1411 if (newbase < xdr->buf->page_len)
1419 if (xdr->page_ptr != NULL)
1421 else if (xdr->iov == xdr->buf->head)
1423 return xdr->p != xdr->end;
1427 * xdr_init_decode - Initialize an xdr_stream for decoding data.
1429 * @buf: pointer to XDR buffer from which to decode data
1436 xdr->buf = buf;
1437 xdr->page_kaddr = NULL;
1439 xdr->nwords = XDR_QUADLEN(buf->len);
1440 if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
1441 xdr_set_page_base(xdr, 0, buf->len) == 0)
1442 xdr_set_iov(xdr, buf->tail, 0, buf->len);
1443 if (p != NULL && p > xdr->p && xdr->end >= p) {
1444 xdr->nwords -= p - xdr->p;
1445 xdr->p = p;
1447 xdr->rqst = rqst;
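
A minimal decode sketch mirroring the encode side (rqst may be NULL; demo names illustrative): xdr_inline_decode() returns NULL on buffer overflow, and xdr_finish_decode() drops any highmem mapping taken while crossing pages.

static int demo_decode_two_words(struct xdr_stream *xdr, struct xdr_buf *buf,
				 u32 *a, u32 *b)
{
	__be32 *p;

	xdr_init_decode(xdr, buf, buf->head[0].iov_base, NULL);
	p = xdr_inline_decode(xdr, 2 * XDR_UNIT);
	if (unlikely(!p))
		return -EBADMSG;	/* reply shorter than expected */
	*a = be32_to_cpup(p++);
	*b = be32_to_cpup(p);
	xdr_finish_decode(xdr);
	return 0;
}
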
1452 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
1454 * @buf: pointer to XDR buffer from which to decode data
1462 buf->pages = pages;
1463 buf->page_len = len;
1464 buf->buflen = len;
1465 buf->len = len;
1471 * xdr_finish_decode - Clean up the xdr_stream after decoding data.
1483 __be32 *p = xdr->p;
1486 if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
1488 xdr->p = q;
1489 xdr->nwords -= nwords;
1496 char *cpdest = xdr->scratch.iov_base;
1497 size_t cplen = (char *)xdr->end - (char *)xdr->p;
1499 if (nbytes > xdr->scratch.iov_len)
1508 nbytes -= cplen;
1513 return xdr->scratch.iov_base;
1520 * xdr_inline_decode - Retrieve XDR data to decode
1522 * @nbytes: number of bytes of data to decode
1525 * 'nbytes' more bytes of data starting at the current position.
1534 return xdr->p;
1535 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
1549 struct xdr_buf *buf = xdr->buf;
1550 struct kvec *iov = buf->head;
1555 if (iov->iov_len > cur) {
1558 xdr_set_page(xdr, 0, buf->page_len);
1564 struct xdr_buf *buf = xdr->buf;
1568 if (xdr->nwords == 0)
1572 if (nwords > xdr->nwords) {
1573 nwords = xdr->nwords;
1576 if (buf->page_len <= len)
1577 len = buf->page_len;
1578 else if (nwords < xdr->nwords) {
1579 /* Truncate page data and move it into the tail */
1587 * xdr_read_pages - align page-based XDR data to current pointer position
1589 * @len: number of bytes of page data
1591 * Moves data beyond the current pointer position from the XDR head[] buffer
1592 * into the page list. Any data that lies beyond current position + @len
1594 * then advanced past that data to align to the next XDR object in the tail.
1607 base = (nwords << 2) - pglen;
1608 end = xdr_stream_remaining(xdr) - pglen;
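
Sketch of the common READ-reply use of xdr_read_pages() (assumed caller): decode the opaque length from the head, then align that many payload bytes to the page list; a shorter return value signals a truncated reply.

static int demo_decode_read_body(struct xdr_stream *xdr, u32 *count)
{
	__be32 *p = xdr_inline_decode(xdr, XDR_UNIT);

	if (unlikely(!p))
		return -EBADMSG;
	*count = be32_to_cpup(p);
	if (xdr_read_pages(xdr, *count) < *count)
		return -EBADMSG;	/* fewer payload bytes than claimed */
	return 0;
}
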
1616 * xdr_set_pagelen - Sets the length of the XDR pages
1618 * @len: new length of the XDR page data
1621 * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas
1622 * when growing any data beyond the current pointer is moved into the tail.
1628 struct xdr_buf *buf = xdr->buf;
1632 if (len < buf->page_len) {
1633 base = buf->page_len - len;
1637 buf->page_len, remaining);
1638 if (len > buf->page_len)
1639 xdr_buf_try_expand(buf, len - buf->page_len);
1646 * xdr_enter_page - decode data from the XDR page
1648 * @len: number of bytes of page data
1650 * Moves data beyond the current pointer position from the XDR head[] buffer
1651 * into the page list. Any data that lies beyond current position + "len"
1671 buf->head[0] = *iov;
1672 buf->tail[0] = empty_iov;
1673 buf->page_len = 0;
1674 buf->buflen = buf->len = iov->iov_len;
1679 * xdr_buf_subsegment - set subbuf to a portion of buf
1690 * Returns -1 if base or length are out of bounds.
1695 subbuf->buflen = subbuf->len = len;
1696 if (base < buf->head[0].iov_len) {
1697 subbuf->head[0].iov_base = buf->head[0].iov_base + base;
1698 subbuf->head[0].iov_len = min_t(unsigned int, len,
1699 buf->head[0].iov_len - base);
1700 len -= subbuf->head[0].iov_len;
1703 base -= buf->head[0].iov_len;
1704 subbuf->head[0].iov_base = buf->head[0].iov_base;
1705 subbuf->head[0].iov_len = 0;
1708 if (base < buf->page_len) {
1709 subbuf->page_len = min(buf->page_len - base, len);
1710 base += buf->page_base;
1711 subbuf->page_base = base & ~PAGE_MASK;
1712 subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1713 len -= subbuf->page_len;
1716 base -= buf->page_len;
1717 subbuf->pages = buf->pages;
1718 subbuf->page_base = 0;
1719 subbuf->page_len = 0;
1722 if (base < buf->tail[0].iov_len) {
1723 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
1724 subbuf->tail[0].iov_len = min_t(unsigned int, len,
1725 buf->tail[0].iov_len - base);
1726 len -= subbuf->tail[0].iov_len;
1729 base -= buf->tail[0].iov_len;
1730 subbuf->tail[0].iov_base = buf->tail[0].iov_base;
1731 subbuf->tail[0].iov_len = 0;
1735 return -1;
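
A sketch of carving out a region with xdr_buf_subsegment() (hypothetical caller): the resulting subbuf shares the parent's head, pages and tail memory, so no bytes are copied, and a non-zero return means the range fell outside @buf.

static int demo_inspect_region(const struct xdr_buf *buf, unsigned int base,
			       unsigned int len)
{
	struct xdr_buf sub;

	if (xdr_buf_subsegment(buf, &sub, base, len))
		return -EINVAL;		/* out of bounds */
	/* ... walk sub.head[0] / sub.pages / sub.tail[0] in place ... */
	return 0;
}
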
1741 * xdr_stream_subsegment - set @subbuf to a portion of @xdr
1749 * XDR data item following that portion.
1761 /* Extract @subbuf and bounds-check the fn arguments */
1762 if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
1767 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
1770 len = (char *)xdr->end - (char *)xdr->p;
1772 xdr->p = (__be32 *)((char *)xdr->p +
1777 xdr->p = (__be32 *)((char *)xdr->p + len);
1778 xdr->end = xdr->p;
1779 remaining -= len;
1788 * xdr_stream_move_subsegment - Move part of a stream to another position
1801 unsigned int shift;
1804 shift = target - offset;
1805 if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0)
1807 xdr_buf_head_shift_right(&buf, 0, length, shift);
1809 shift = offset - target;
1810 if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0)
1812 xdr_buf_head_shift_left(&buf, shift, length, shift);
1819 * xdr_stream_zero - zero out a portion of an xdr_stream
1829 if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0)
1842 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1856 if (buf->tail[0].iov_len) {
1857 cur = min_t(size_t, buf->tail[0].iov_len, trim);
1858 buf->tail[0].iov_len -= cur;
1859 trim -= cur;
1864 if (buf->page_len) {
1865 cur = min_t(unsigned int, buf->page_len, trim);
1866 buf->page_len -= cur;
1867 trim -= cur;
1872 if (buf->head[0].iov_len) {
1873 cur = min_t(size_t, buf->head[0].iov_len, trim);
1874 buf->head[0].iov_len -= cur;
1875 trim -= cur;
1878 buf->len -= (len - trim);
1887 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1888 memcpy(obj, subbuf->head[0].iov_base, this_len);
1889 len -= this_len;
1891 this_len = min_t(unsigned int, len, subbuf->page_len);
1892 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1893 len -= this_len;
1895 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1896 memcpy(obj, subbuf->tail[0].iov_base, this_len);
1919 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1920 memcpy(subbuf->head[0].iov_base, obj, this_len);
1921 len -= this_len;
1923 this_len = min_t(unsigned int, len, subbuf->page_len);
1924 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
1925 len -= this_len;
1927 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1928 memcpy(subbuf->tail[0].iov_base, obj, this_len);
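
The exported wrappers around the two __ helpers above bounds-check with xdr_buf_subsegment() and then copy across head, pages and tail transparently. A small sketch (demo name illustrative) that peeks one word, much as xdr_decode_word() does:

static int demo_peek_word(const struct xdr_buf *buf, unsigned int base,
			  u32 *val)
{
	__be32 raw;
	int err = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(raw));

	if (err)
		return err;		/* range fell outside @buf */
	*val = be32_to_cpu(raw);
	return 0;
}
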
1977 if (xdr_encode_word(buf, base, desc->array_len) != 0)
1978 return -EINVAL;
1980 if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
1981 desc->array_len > desc->array_maxlen ||
1982 (unsigned long) base + 4 + desc->array_len *
1983 desc->elem_size > buf->len)
1984 return -EINVAL;
1988 if (!desc->xcode)
1991 todo = desc->array_len * desc->elem_size;
1994 if (todo && base < buf->head->iov_len) {
1995 c = buf->head->iov_base + base;
1997 buf->head->iov_len - base);
1998 todo -= avail_here;
2000 while (avail_here >= desc->elem_size) {
2001 err = desc->xcode(desc, c);
2004 c += desc->elem_size;
2005 avail_here -= desc->elem_size;
2009 elem = kmalloc(desc->elem_size, GFP_KERNEL);
2010 err = -ENOMEM;
2015 err = desc->xcode(desc, elem);
2023 base = buf->head->iov_len; /* align to start of pages */
2027 base -= buf->head->iov_len;
2028 if (todo && base < buf->page_len) {
2031 avail_here = min(todo, buf->page_len - base);
2032 todo -= avail_here;
2034 base += buf->page_base;
2035 ppages = buf->pages + (base >> PAGE_SHIFT);
2037 avail_page = min_t(unsigned int, PAGE_SIZE - base,
2042 avail_here -= avail_page;
2043 if (copied || avail_page < desc->elem_size) {
2045 desc->elem_size - copied);
2047 elem = kmalloc(desc->elem_size,
2049 err = -ENOMEM;
2055 err = desc->xcode(desc, elem);
2061 if (copied == desc->elem_size)
2066 if (copied == desc->elem_size) {
2067 err = desc->xcode(desc, elem);
2073 avail_page -= l;
2076 while (avail_page >= desc->elem_size) {
2077 err = desc->xcode(desc, c);
2080 c += desc->elem_size;
2081 avail_page -= desc->elem_size;
2085 desc->elem_size - copied);
2087 elem = kmalloc(desc->elem_size,
2089 err = -ENOMEM;
2095 err = desc->xcode(desc, elem);
2101 if (copied == desc->elem_size)
2106 if (copied == desc->elem_size) {
2107 err = desc->xcode(desc, elem);
2123 base = buf->page_len; /* align to start of tail */
2127 base -= buf->page_len;
2129 c = buf->tail->iov_base + base;
2131 unsigned int l = desc->elem_size - copied;
2137 err = desc->xcode(desc, elem);
2141 todo -= l;
2145 err = desc->xcode(desc, c);
2148 c += desc->elem_size;
2149 todo -= desc->elem_size;
2164 if (base >= buf->len)
2165 return -EINVAL;
2174 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
2175 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
2176 return -EINVAL;
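
Sketch of driving the array2 machinery above (modeled loosely on its in-tree user in fs/nfs_common/nfsacl.c; demo names and sizes are illustrative): desc->xcode is invoked once per fixed-size element, with the helper stitching together elements that straddle head, pages and tail.

static int demo_xcode_elem(struct xdr_array2_desc *desc, void *elem)
{
	/* interpret or fill one desc->elem_size byte element */
	return 0;			/* non-zero aborts the walk */
}

static int demo_decode_array(const struct xdr_buf *buf, unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size	= 12,
		.array_maxlen	= 1024,
		.xcode		= demo_xcode_elem,
	};

	return xdr_decode_array2(buf, base, &desc);
}
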
2184 int (*actor)(struct scatterlist *, void *), void *data)
2192 if (offset >= buf->head[0].iov_len) {
2193 offset -= buf->head[0].iov_len;
2195 thislen = buf->head[0].iov_len - offset;
2198 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
2199 ret = actor(sg, data);
2203 len -= thislen;
2208 if (offset >= buf->page_len) {
2209 offset -= buf->page_len;
2211 page_len = buf->page_len - offset;
2214 len -= page_len;
2215 page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
2216 i = (offset + buf->page_base) >> PAGE_SHIFT;
2217 thislen = PAGE_SIZE - page_offset;
2221 sg_set_page(sg, buf->pages[i], thislen, page_offset);
2222 ret = actor(sg, data);
2225 page_len -= thislen;
2234 if (offset < buf->tail[0].iov_len) {
2235 thislen = buf->tail[0].iov_len - offset;
2238 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
2239 ret = actor(sg, data);
2240 len -= thislen;
2243 ret = -EINVAL;
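
A sketch of using xdr_process_buf() (hypothetical actor): the callback receives one scatterlist entry per contiguous region, in head, pages, tail order, and may abort the walk by returning non-zero; the GSS code uses this shape to checksum a buffer.

static int demo_count_sg(struct scatterlist *sg, void *data)
{
	*(unsigned int *)data += sg->length;
	return 0;			/* non-zero would stop the walk */
}

static int demo_measure(const struct xdr_buf *buf)
{
	unsigned int total = 0;

	/* Visits the head, then each page fragment, then the tail */
	return xdr_process_buf(buf, 0, buf->len, demo_count_sg, &total);
}
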
2250 * xdr_stream_decode_opaque - Decode variable length opaque
2252 * @ptr: location to store opaque data
2257 * %-EBADMSG on XDR buffer overflow
2258 * %-EMSGSIZE on overflow of storage buffer @ptr
2274 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
2276 * @ptr: location to store pointer to opaque data
2282 * %-EBADMSG on XDR buffer overflow
2283 * %-EMSGSIZE if the size of the object would exceed @maxlen
2284 * %-ENOMEM on memory allocation failure
2297 ret = -ENOMEM;
2305 * xdr_stream_decode_string - Decode variable length string
2311 * On success, returns length of NUL-terminated string stored in *@str
2312 * %-EBADMSG on XDR buffer overflow
2313 * %-EMSGSIZE on overflow of storage buffer @str
2332 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
2339 * On success, returns length of NUL-terminated string stored in *@ptr
2340 * %-EBADMSG on XDR buffer overflow
2341 * %-EMSGSIZE if the size of the string would exceed @maxlen
2342 * %-ENOMEM on memory allocation failure
2357 ret = -ENOMEM;
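
Sketch of the dup variant in use (assumed caller; the 256-byte cap is illustrative): on success *name points at a freshly allocated NUL-terminated copy that the caller must kfree().

static ssize_t demo_get_name(struct xdr_stream *xdr, char **name)
{
	/* Returns the string length, or -EBADMSG/-EMSGSIZE/-ENOMEM */
	return xdr_stream_decode_string_dup(xdr, name, 256, GFP_KERNEL);
}
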
2365 * xdr_stream_decode_opaque_auth - Decode struct opaque_auth (RFC5531 S8.2)
2373 * %-EBADMSG on XDR buffer overflow
2374 * %-EMSGSIZE if the decoded size of the body field exceeds 400 octets
2393 * xdr_stream_encode_opaque_auth - Encode struct opaque_auth (RFC5531 S8.2)
2401 * %-EBADMSG on XDR buffer overflow
2402 * %-EMSGSIZE if the size of @body exceeds 400 octets
2410 return -EMSGSIZE;