xref: /linux/lib/sg_pool.c (revision 78beef629fd95be4ed853b2d37b832f766bd96ca)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/slab.h>

#define SG_MEMPOOL_NR		ARRAY_SIZE(sg_pools)
#define SG_MEMPOOL_SIZE		2

struct sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { .size = x, .name = "sgpool-" __stringify(x) }
#if (SG_CHUNK_SIZE < 32)
#error SG_CHUNK_SIZE is too small (must be 32 or greater)
#endif
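/*
 * Each entry below describes one pool of scatterlist chunks: a slab cache
 * for arrays of .size entries plus a mempool that keeps at least
 * SG_MEMPOOL_SIZE such arrays preallocated.  The chunk sizes double from
 * 8 upwards, with SG_CHUNK_SIZE as the final and largest pool.
 */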
static struct sg_pool sg_pools[] = {
	SP(8),
	SP(16),
#if (SG_CHUNK_SIZE > 32)
	SP(32),
#if (SG_CHUNK_SIZE > 64)
	SP(64),
#if (SG_CHUNK_SIZE > 128)
	SP(128),
#if (SG_CHUNK_SIZE > 256)
#error SG_CHUNK_SIZE is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SG_CHUNK_SIZE)
};
#undef SP

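/*
 * Map a scatterlist length onto the index of the smallest pool that can
 * hold it: 1-8 entries map to sg_pools[0] (sgpool-8), 9-16 to index 1,
 * 17-32 to index 2, and so on, since get_count_order() gives the order of
 * nents rounded up to the next power of two.
 */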
static inline unsigned int sg_pool_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SG_CHUNK_SIZE);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}

static void sg_pool_free(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_pool *sgp;

	sgp = sg_pools + sg_pool_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct sg_pool *sgp;

	sgp = sg_pools + sg_pool_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

/**
 * sg_free_table_chained - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @nents_first_chunk: size of the first_chunk SGL passed to
 *		sg_alloc_table_chained
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    sg_alloc_table_chained().
 *
 *    @nents_first_chunk must be the same value that was passed to
 *    sg_alloc_table_chained().
 *
 **/
void sg_free_table_chained(struct sg_table *table,
		unsigned nents_first_chunk)
{
	if (table->orig_nents <= nents_first_chunk)
		return;

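	/*
	 * A one-entry first chunk is never used on the allocation side
	 * (sg_alloc_table_chained() drops it), so tell __sg_free_table()
	 * that there was no first chunk at all.
	 */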
	if (nents_first_chunk == 1)
		nents_first_chunk = 0;

	__sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
}
EXPORT_SYMBOL_GPL(sg_free_table_chained);

/**
 * sg_alloc_table_chained - Allocate and chain SGLs in an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @first_chunk: first SGL
 * @nents_first_chunk: number of sg entries in the @first_chunk SGL
 *
 *  Description:
 *    Allocate and chain SGLs in an sg table. If @nents is larger than
 *    @nents_first_chunk a chained sg table will be set up. @first_chunk is
 *    ignored if @nents_first_chunk <= 1, because a single-entry first chunk
 *    could only hold a chain link, while the user expects it to hold a
 *    real entry.
 *
 **/
int sg_alloc_table_chained(struct sg_table *table, int nents,
		struct scatterlist *first_chunk, unsigned nents_first_chunk)
{
	int ret;

	BUG_ON(!nents);

	if (first_chunk && nents_first_chunk) {
		if (nents <= nents_first_chunk) {
			table->nents = table->orig_nents = nents;
			sg_init_table(table->sgl, nents);
			return 0;
		}
	}

	/* The user expects the first SGL to hold a real entry, not just a chain link */
	if (nents_first_chunk <= 1) {
		first_chunk = NULL;
		nents_first_chunk = 0;
	}

	ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
			       first_chunk, nents_first_chunk,
			       GFP_ATOMIC, sg_pool_alloc);
	if (unlikely(ret))
		sg_free_table_chained(table, nents_first_chunk);
	return ret;
}
EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
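
/*
 * Illustrative usage sketch (not part of this file): a caller typically
 * embeds a small inline SGL in its own request structure and passes the
 * same nents_first_chunk value to the alloc and free calls.  The names
 * nr_ents, inline_sgl and MY_INLINE_SGS below are made up for the example.
 *
 *	struct scatterlist inline_sgl[MY_INLINE_SGS];
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_chained(&sgt, nr_ents, inline_sgl,
 *				     MY_INLINE_SGS);
 *	if (ret)
 *		return ret;
 *
 *	... fill and DMA-map sgt.sgl, issue the I/O ...
 *
 *	sg_free_table_chained(&sgt, MY_INLINE_SGS);
 */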

static __init int sg_pool_init(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SG_POOL: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
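	/*
	 * Entries that were never (fully) initialised still have NULL
	 * pool/slab pointers here; both mempool_destroy() and
	 * kmem_cache_destroy() accept NULL, so the loop is safe for them.
	 */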
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;

		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}

	return -ENOMEM;
}

static __exit void sg_pool_exit(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

module_init(sg_pool_init);
module_exit(sg_pool_exit);