/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
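
/*
 * Usage sketch (illustrative only, not part of this file): walking a
 * possibly chained list entry by entry.  The function and variable names
 * below are hypothetical; most callers would use the for_each_sg()
 * helper macro, which is built on exactly this sg_next() pattern.
 *
 *	static void example_dump_sgl(struct scatterlist *sgl)
 *	{
 *		struct scatterlist *sg;
 *
 *		for (sg = sgl; sg; sg = sg_next(sg))
 *			pr_info("sg: length %u offset %u\n",
 *				sg->length, sg->offset);
 *	}
 */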

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used sparingly: it (currently) scans the entire list
 *   to find the last entry.
 *
 *   Note that the @sgl pointer passed in need not be the first one,
 *   the important bit is that @nents denotes the number of entries that
 *   exist from @sgl.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);
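
/*
 * Usage sketch (illustrative only): fetching the final entry, e.g. to
 * inspect its length after a list has been built.  "sgl" and "nents"
 * are assumed to describe an already initialized scatterlist.
 *
 *	struct scatterlist *last = sg_last(sgl, nents);
 *
 *	pr_debug("last entry holds %u bytes\n", last->length);
 */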

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);
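
/*
 * Usage sketch (illustrative only): initializing a small on-stack table
 * and pointing its entries at pages.  The array size and the page
 * variables are hypothetical.
 *
 *	struct scatterlist sgl[2];
 *
 *	sg_init_table(sgl, ARRAY_SIZE(sgl));
 *	sg_set_page(&sgl[0], page0, PAGE_SIZE, 0);
 *	sg_set_page(&sgl[1], page1, len1, 0);
 */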

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
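
/*
 * Usage sketch (illustrative only): wrapping a kmalloc()ed buffer in a
 * one-entry list, as is common before handing it to a DMA mapping API.
 * Note that the buffer must be virt_to_page()-compatible (sg_set_buf()
 * relies on that), so vmalloc() memory is not acceptable here.
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	if (buf)
 *		sg_init_one(&sg, buf, len);
 */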

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC)
		return (struct scatterlist *) __get_free_page(gfp_mask);
	else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC)
		free_page((unsigned long) sg);
	else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously allocated sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The allocated sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function allocates and initializes @table so that it holds
 *   @nents entries.  The allocator is defined to return scatterlist
 *   chunks of maximum size @max_ents.  Thus if @nents is bigger than
 *   @max_ents, the scatterlists will be chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-zero (i.e. on failure), the caller must
 *   call __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg))
			return -ENOMEM;

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * only really needed for mempool backed sg allocations (like
		 * SCSI), a possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
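
/*
 * Usage sketch (illustrative only): pairing __sg_alloc_table() with a
 * custom allocator.  The allocator below is a hypothetical stand-in;
 * SCSI, for example, plugs mempool backed alloc/free functions in here.
 * Per the note above, a failed allocation still requires the caller to
 * run __sg_free_table() with the matching free function and max_ents.
 *
 *	static struct scatterlist *my_alloc(unsigned int nents, gfp_t gfp)
 *	{
 *		return kmalloc(nents * sizeof(struct scatterlist), gfp);
 *	}
 *
 *	static void my_free(struct scatterlist *sg, unsigned int nents)
 *	{
 *		kfree(sg);
 *	}
 *
 *	err = __sg_alloc_table(&table, nents, SG_MAX_SINGLE_ALLOC,
 *			       GFP_KERNEL, my_alloc);
 *	if (err)
 *		__sg_free_table(&table, SG_MAX_SINGLE_ALLOC, my_free);
 */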

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
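
/*
 * Usage sketch (illustrative only): the common alloc/populate/free
 * cycle.  "pages" and "npages" are hypothetical inputs.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	int i, err;
 *
 *	err = sg_alloc_table(&table, npages, GFP_KERNEL);
 *	if (err)
 *		return err;
 *
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *
 *	...
 *	sg_free_table(&table);
 */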

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags (SG_MITER_*); at least one of
 *         SG_MITER_TO_SG or SG_MITER_FROM_SG must be set
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been
 *   started using sg_miter_start().  On successful return,
 *   @miter->page, @miter->addr and @miter->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
 *   @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
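
/*
 * Usage sketch (illustrative only): reading an sg list chunk by chunk
 * with the mapping iterator.  "sgl", "nents" and do_something() are
 * hypothetical.  Each successful sg_miter_next() maps one piece and
 * exposes it via miter.addr and miter.length.  The SG_MITER_ATOMIC
 * variant would additionally require IRQs disabled across the loop;
 * see sg_copy_buffer() below for that pattern.
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter))
 *		do_something(miter.addr, miter.length);
 *	sg_miter_stop(&miter);
 */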

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources
 *   (kmap) need to be released during iteration.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC is set.  Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 The linear buffer to copy to or from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from the sg list
 *			 to @buf, 0 == from @buf to the sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
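
/*
 * Usage sketch (illustrative only): bouncing data through a linear
 * buffer.  "sgl", "nents" and "len" are hypothetical.  Both helpers
 * return the number of bytes actually copied, which may be less than
 * the buffer length if the sg list holds less data.
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	size_t copied;
 *
 *	if (buf) {
 *		copied = sg_copy_to_buffer(sgl, nents, buf, len);
 *		...
 *		copied = sg_copy_from_buffer(sgl, nents, buf, len);
 *	}
 */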