/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUXKPI_LINUX_SCATTERLIST_H_
#define	_LINUXKPI_LINUX_SCATTERLIST_H_

#include <sys/types.h>
#include <sys/sf_buf.h>

#include <linux/page.h>
#include <linux/slab.h>
#include <linux/mm.h>

struct bus_dmamap;
struct scatterlist {
	unsigned long page_link;
#define	SG_PAGE_LINK_CHAIN	0x1UL
#define	SG_PAGE_LINK_LAST	0x2UL
#define	SG_PAGE_LINK_MASK	0x3UL
	unsigned int offset;
	unsigned int length;
	dma_addr_t dma_address;
	struct bus_dmamap *dma_map;	/* FreeBSD specific */
};

CTASSERT((sizeof(struct scatterlist) & SG_PAGE_LINK_MASK) == 0);
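
/*
 * The two low bits of page_link tag the entry, borrowing alignment
 * bits from the pointer stored in the same word, i.e.
 * page_link = (unsigned long)ptr | flags.  With SG_PAGE_LINK_CHAIN
 * set, the word carries a pointer to the next scatterlist chunk
 * instead of a page; with SG_PAGE_LINK_LAST set, the entry
 * terminates the list.  The CTASSERT above keeps the structure size
 * a multiple of four so that every element of a scatterlist array
 * stays aligned enough to leave those two bits free.
 */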

struct sg_table {
	struct scatterlist *sgl;
	unsigned int nents;
	unsigned int orig_nents;
};

struct sg_page_iter {
	struct scatterlist *sg;
	unsigned int sg_pgoffset;
	unsigned int maxents;
	struct {
		unsigned int nents;
		int	pg_advance;
	} internal;
};

struct sg_dma_page_iter {
	struct sg_page_iter base;
};

#define	SCATTERLIST_MAX_SEGMENT	(-1U & ~(PAGE_SIZE - 1))

#define	SG_MAX_SINGLE_ALLOC	(PAGE_SIZE / sizeof(struct scatterlist))

#define	SG_MAGIC		0x87654321UL
#define	SG_CHAIN		SG_PAGE_LINK_CHAIN
#define	SG_END			SG_PAGE_LINK_LAST

#define	sg_is_chain(sg)		((sg)->page_link & SG_PAGE_LINK_CHAIN)
#define	sg_is_last(sg)		((sg)->page_link & SG_PAGE_LINK_LAST)
#define	sg_chain_ptr(sg)	\
	((struct scatterlist *) ((sg)->page_link & ~SG_PAGE_LINK_MASK))

#define	sg_dma_address(sg)	(sg)->dma_address
#define	sg_dma_len(sg)		(sg)->length

#define	for_each_sg_page(sgl, iter, nents, pgoffset)			\
	for (_sg_iter_init(sgl, iter, nents, pgoffset);			\
	     (iter)->sg; _sg_iter_next(iter))
#define	for_each_sg_dma_page(sgl, iter, nents, pgoffset)		\
	for_each_sg_page(sgl, &(iter)->base, nents, pgoffset)

#define	for_each_sg(sglist, sg, sgmax, iter)				\
	for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))

#define	for_each_sgtable_sg(sgt, sg, i) \
	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)

#define	for_each_sgtable_page(sgt, iter, pgoffset) \
	for_each_sg_page((sgt)->sgl, iter, (sgt)->orig_nents, pgoffset)

#define	for_each_sgtable_dma_sg(sgt, sg, iter)				\
	for_each_sg((sgt)->sgl, sg, (sgt)->nents, iter)

#define	for_each_sgtable_dma_page(sgt, iter, pgoffset)			\
	for_each_sg_dma_page((sgt)->sgl, iter, (sgt)->nents, pgoffset)
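
/*
 * Iteration sketch for a hypothetical driver (program_hw() is a
 * placeholder, not part of this header): walk the CPU-side entries
 * with for_each_sgtable_sg(), which covers orig_nents entries, and
 * the DMA-mapped view with for_each_sgtable_dma_sg(), which covers
 * nents entries:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sgtable_sg(sgt, sg, i)
 *		printf("seg %d: length %u\n", i, sg->length);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 */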

typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);

static inline void
sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & SG_PAGE_LINK_MASK;

	sg->page_link = page_link | (unsigned long)page;
}

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return ((struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK));
}

static inline void
sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen,
	    ((uintptr_t)buf) & (PAGE_SIZE - 1));
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return (NULL);
	sg++;
	if (sg_is_chain(sg))
		sg = sg_chain_ptr(sg);
	return (sg);
}

static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
	return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
}

static inline void *
sg_virt(struct scatterlist *sg)
{

	return ((void *)((unsigned long)page_address(sg_page(sg)) + sg->offset));
}

static inline void
sg_chain(struct scatterlist *prv, unsigned int prv_nents,
    struct scatterlist *sgl)
{
	struct scatterlist *sg = &prv[prv_nents - 1];

	sg->offset = 0;
	sg->length = 0;
	sg->page_link = ((unsigned long)sgl |
	    SG_PAGE_LINK_CHAIN) & ~SG_PAGE_LINK_LAST;
}

static inline void
sg_mark_end(struct scatterlist *sg)
{
	sg->page_link |= SG_PAGE_LINK_LAST;
	sg->page_link &= ~SG_PAGE_LINK_CHAIN;
}

static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
	bzero(sg, sizeof(*sg) * nents);
	sg_mark_end(&sg[nents - 1]);
}

static inline void
sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
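
/*
 * Minimal single-entry setup, as a sketch (the buffer is
 * hypothetical): wrap a kernel virtual buffer in a one-entry list
 * before handing it to code that expects a scatterlist.
 *
 *	struct scatterlist sg;
 *	char buf[64];
 *
 *	sg_init_one(&sg, buf, sizeof(buf));
 *
 * sg_set_buf() relies on virt_to_page(), so the buffer must be
 * mappable that way; the offset within the page is derived from the
 * low bits of the address.
 */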

static inline struct scatterlist *
sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC)
		return ((void *)__get_free_page(gfp_mask));
	else
		return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
}

static inline void
sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC)
		free_page((unsigned long)sg);
	else
		kfree(sg);
}
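
/*
 * Worked example for the allocator split above, assuming 4 KB pages
 * and the 32-byte struct scatterlist this header defines on LP64:
 * SG_MAX_SINGLE_ALLOC = 4096 / 32 = 128, so a full 128-entry chunk
 * comes straight from __get_free_page() while any smaller chunk is
 * served by kmalloc().  sg_kfree() must mirror that decision when
 * releasing a chunk, which is why both take the entry count.
 */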

static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
    bool skip_first_chunk, sg_free_fn * free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}

static inline void
sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}

static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
    unsigned int max_ents, struct scatterlist *first_chunk,
    gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return (-EINVAL);
	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size;
		unsigned int alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			if (prv)
				table->nents = ++table->orig_nents;

			return (-ENOMEM);
		}
		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return (0);
}
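
/*
 * Shape of the table built above when nents exceeds max_ents: every
 * full chunk provides max_ents - 1 usable entries and sacrifices its
 * last slot as a chain entry pointing at the next chunk.  For
 * example, with max_ents = 4 and nents = 6:
 *
 *	chunk 0: [e0][e1][e2][chain]  ->  chunk 1: [e3][e4][e5|END]
 *
 * sg_next() follows chain entries transparently, so callers can
 * treat the result as one logical list.
 */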

static inline int
sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
	    NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return (ret);
}
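
/*
 * Allocation sketch for a hypothetical caller; note the Linux
 * convention of negative errno return values:
 *
 *	struct sg_table sgt;
 *	int error;
 *
 *	error = sg_alloc_table(&sgt, 16, GFP_KERNEL);
 *	if (error != 0)
 *		return (error);		// -EINVAL or -ENOMEM
 *	// ... fill the 16 entries with sg_set_page()/sg_set_buf() ...
 *	sg_free_table(&sgt);
 */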

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
static inline struct scatterlist *
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment,
    struct scatterlist *prv, unsigned int left_pages,
    gfp_t gfp_mask)
#else
static inline int
__sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    unsigned int max_segment, gfp_t gfp_mask)
#endif
{
	unsigned int i, segs, cur, len;
	int rc;
	struct scatterlist *s;

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	if (prv != NULL) {
		panic(
		    "Support for prv != NULL not implemented in "
		    "__sg_alloc_table_from_pages()");
	}
#endif

	if (__predict_false(!max_segment || offset_in_page(max_segment)))
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
		return (ERR_PTR(-EINVAL));
#else
		return (-EINVAL);
#endif

	len = 0;
	for (segs = i = 1; i < count; ++i) {
		len += PAGE_SIZE;
		if (len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			++segs;
			len = 0;
		}
	}
	if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
		return (ERR_PTR(rc));
#else
		return (rc);
#endif

	cur = 0;
	for (i = 0, s = sgt->sgl; i < sgt->orig_nents; i++) {
		unsigned long seg_size;
		unsigned int j;

		len = 0;
		for (j = cur + 1; j < count; ++j) {
			len += PAGE_SIZE;
			if (len >= max_segment || page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		seg_size = ((j - cur) << PAGE_SHIFT) - off;
		sg_set_page(s, pages[cur], MIN(size, seg_size), off);
		size -= seg_size;
		off = 0;
		cur = j;

		s = sg_next(s);
	}
	KASSERT(s != NULL, ("s is NULL after loop in __sg_alloc_table_from_pages()"));

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	if (left_pages == 0)
		sg_mark_end(s);

	return (s);
#else
	return (0);
#endif
}

static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
    struct page **pages, unsigned int count,
    unsigned long off, unsigned long size,
    gfp_t gfp_mask)
{

#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	return (PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, NULL, 0, gfp_mask)));
#else
	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    SCATTERLIST_MAX_SEGMENT, gfp_mask));
#endif
}
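
/*
 * Sketch of building a table directly from a page array; pages and
 * npages are hypothetical.  Physically contiguous pages are
 * coalesced into single entries of up to SCATTERLIST_MAX_SEGMENT
 * bytes:
 *
 *	struct sg_table sgt;
 *	int error;
 *
 *	error = sg_alloc_table_from_pages(&sgt, pages, npages, 0,
 *	    (unsigned long)npages << PAGE_SHIFT, GFP_KERNEL);
 *	if (error != 0)
 *		return (error);
 */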

static inline int
sg_alloc_table_from_pages_segment(struct sg_table *sgt,
    struct page **pages, unsigned int count, unsigned int off,
    unsigned long size, unsigned int max_segment, gfp_t gfp_mask)
{
#if defined(LINUXKPI_VERSION) && LINUXKPI_VERSION >= 51300
	return (PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    max_segment, NULL, 0, gfp_mask)));
#else
	return (__sg_alloc_table_from_pages(sgt, pages, count, off, size,
	    max_segment, gfp_mask));
#endif
}

static inline int
sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return (nents);
}

static inline void
__sg_page_iter_start(struct sg_page_iter *piter,
    struct scatterlist *sglist, unsigned int nents,
    unsigned long pgoffset)
{
	piter->internal.pg_advance = 0;
	piter->internal.nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

static inline void
_sg_iter_next(struct sg_page_iter *iter)
{
	struct scatterlist *sg;
	unsigned int pgcount;

	sg = iter->sg;
	pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	++iter->sg_pgoffset;
	while (iter->sg_pgoffset >= pgcount) {
		iter->sg_pgoffset -= pgcount;
		sg = sg_next(sg);
		--iter->maxents;
		if (sg == NULL || iter->maxents == 0)
			break;
		pgcount = (sg->offset + sg->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}
	iter->sg = sg;
}

static inline int
sg_page_count(struct scatterlist *sg)
{
	return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}
#define	sg_dma_page_count(sg) \
	sg_page_count(sg)

static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
	unsigned int pgcount;

	if (piter->internal.nents == 0)
		return (0);
	if (piter->sg == NULL)
		return (0);

	piter->sg_pgoffset += piter->internal.pg_advance;
	piter->internal.pg_advance = 1;

	while (1) {
		pgcount = sg_page_count(piter->sg);
		if (likely(piter->sg_pgoffset < pgcount))
			break;
		piter->sg_pgoffset -= pgcount;
		piter->sg = sg_next(piter->sg);
		if (--piter->internal.nents == 0)
			return (0);
		if (piter->sg == NULL)
			return (0);
	}
	return (1);
}
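
/*
 * A minimal sketch of driving the newer Linux-style iterator pair by
 * hand; for_each_sg_page() instead uses the older _sg_iter_init()/
 * _sg_iter_next() path with the maxents counter:
 *
 *	struct sg_page_iter piter;
 *	struct page *pg;
 *
 *	__sg_page_iter_start(&piter, sgt->sgl, sgt->orig_nents, 0);
 *	while (__sg_page_iter_next(&piter)) {
 *		pg = sg_page_iter_page(&piter);
 *		// ... per-page work ...
 *	}
 */
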
#define	__sg_page_iter_dma_next(itr) \
	__sg_page_iter_next(&(itr)->base)

static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
    unsigned int nents, unsigned long pgoffset)
{
	if (nents) {
		iter->sg = sgl;
		iter->sg_pgoffset = pgoffset - 1;
		iter->maxents = nents;
		_sg_iter_next(iter);
	} else {
		iter->sg = NULL;
		iter->sg_pgoffset = 0;
		iter->maxents = 0;
	}
}

/*
 * sg_page_iter_dma_address() is implemented as a macro because it
 * needs to accept two distinct structure types that share the same
 * layout (struct sg_page_iter and struct sg_dma_page_iter).  This
 * allows both old and new code to co-exist.  The compile-time assert
 * adds some safety by checking that the structure sizes match.
 */
#define	sg_page_iter_dma_address(spi) ({		\
	struct sg_page_iter *__spi = (void *)(spi);	\
	dma_addr_t __dma_address;			\
	CTASSERT(sizeof(*(spi)) == sizeof(*__spi));	\
	__dma_address = __spi->sg->dma_address +	\
	    (__spi->sg_pgoffset << PAGE_SHIFT);		\
	__dma_address;					\
})
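
/*
 * Usage sketch, pairing the macro with the DMA page iterator for a
 * hypothetical descriptor ring:
 *
 *	struct sg_dma_page_iter diter;
 *	dma_addr_t paddr;
 *
 *	for_each_sg_dma_page(sgt->sgl, &diter, sgt->nents, 0) {
 *		paddr = sg_page_iter_dma_address(&diter);
 *		// ... store paddr into a descriptor ...
 *	}
 *
 * Thanks to the cast and size assert, a plain struct sg_page_iter *
 * is accepted as well.
 */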

static inline struct page *
sg_page_iter_page(struct sg_page_iter *piter)
{
	return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
}

static inline size_t
sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen, off_t skip)
{
	struct sg_page_iter piter;
	struct page *page;
	struct sf_buf *sf;
	size_t len, copied;
	char *p, *b;

	if (buflen == 0)
		return (0);

	b = __DECONST(char *, buf);
	copied = 0;
	sched_pin();
	for_each_sg_page(sgl, &piter, nents, 0) {
		/* Skip to the start. */
		if (piter.sg->length <= skip) {
			skip -= piter.sg->length;
			continue;
		}

		/* See how much to copy. */
		KASSERT(((piter.sg->length - skip) != 0 && (buflen != 0)),
		    ("%s: sg len %u - skip %ju || buflen %zu is 0\n",
		    __func__, piter.sg->length, (uintmax_t)skip, buflen));
		len = min(piter.sg->length - skip, buflen);

		page = sg_page_iter_page(&piter);
		sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
		if (sf == NULL)
			break;
		p = (char *)sf_buf_kva(sf) + piter.sg_pgoffset + skip;
		memcpy(p, b, len);
		sf_buf_free(sf);

		/* We copied, so there is nothing more to skip. */
		skip = 0;
		copied += len;
		/* Either we exactly filled the page, or we are done. */
		buflen -= len;
		if (buflen == 0)
			break;
		b += len;
	}
	sched_unpin();

	return (copied);
}

static inline size_t
sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
    const void *buf, size_t buflen)
{
	return (sg_pcopy_from_buffer(sgl, nents, buf, buflen, 0));
}

static inline size_t
sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    void *buf, size_t buflen, off_t offset)
{
	struct sg_page_iter iter;
	struct scatterlist *sg;
	struct page *page;
	struct sf_buf *sf;
	char *vaddr;
	size_t total = 0;
	size_t len;

	if (!PMAP_HAS_DMAP)
		sched_pin();
	for_each_sg_page(sgl, &iter, nents, 0) {
		sg = iter.sg;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		}
		len = ulmin(buflen, sg->length - offset);
		if (len == 0)
			break;

		page = sg_page_iter_page(&iter);
		if (!PMAP_HAS_DMAP) {
			sf = sf_buf_alloc(page, SFB_CPUPRIVATE | SFB_NOWAIT);
			if (sf == NULL)
				break;
			vaddr = (char *)sf_buf_kva(sf);
		} else
			vaddr = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(page));
		memcpy(buf, vaddr + sg->offset + offset, len);
		if (!PMAP_HAS_DMAP)
			sf_buf_free(sf);

		/* start at beginning of next page */
		offset = 0;

		/* advance buffer */
		buf = (char *)buf + len;
		buflen -= len;
		total += len;
	}
	if (!PMAP_HAS_DMAP)
		sched_unpin();
	return (total);
}
#endif					/* _LINUXKPI_LINUX_SCATTERLIST_H_ */