xref: /freebsd/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c (revision 3f0efe05432b1633991114ca4ca330102a561959)
1 /*
2  *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3  *  Copyright (C) 2007 The Regents of the University of California.
4  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5  *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6  *  UCRL-CODE-235197
7  *
8  *  This file is part of the SPL, Solaris Porting Layer.
9  *
10  *  The SPL is free software; you can redistribute it and/or modify it
11  *  under the terms of the GNU General Public License as published by the
12  *  Free Software Foundation; either version 2 of the License, or (at your
13  *  option) any later version.
14  *
15  *  The SPL is distributed in the hope that it will be useful, but WITHOUT
16  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18  *  for more details.
19  *
20  *  You should have received a copy of the GNU General Public License along
21  *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
22  */
23 
24 #define	SPL_KMEM_CACHE_IMPLEMENTING
25 
26 #include <linux/percpu_compat.h>
27 #include <sys/kmem.h>
28 #include <sys/kmem_cache.h>
29 #include <sys/taskq.h>
30 #include <sys/timer.h>
31 #include <sys/vmem.h>
32 #include <sys/wait.h>
33 #include <sys/string.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/prefetch.h>
37 
38 /*
39  * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
40  * with smp_mb__{before,after}_atomic() because they were redundant. This is
41  * only used inside our SLAB allocator, so we implement an internal wrapper
42  * here to give us smp_mb__{before,after}_atomic() on older kernels.
43  */
44 #ifndef smp_mb__before_atomic
45 #define	smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
46 #endif
47 
48 #ifndef smp_mb__after_atomic
49 #define	smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
50 #endif
51 
52 /* BEGIN CSTYLED */
53 /*
54  * Cache magazines are an optimization designed to minimize the cost of
55  * allocating memory.  They do this by keeping a per-cpu cache of recently
56  * freed objects, which can then be reallocated without taking a lock. This
57  * can improve performance on highly contended caches.  However, because
58  * objects in magazines will prevent otherwise empty slabs from being
59  * immediately released, this may not be ideal for low-memory machines.
60  *
61  * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
62  * magazine size.  When this value is set to 0 the magazine size will be
63  * automatically determined based on the object size.  Otherwise magazines
64  * will be limited to 2-256 objects per magazine (i.e., per CPU).  Magazines
65  * may never be entirely disabled in this implementation.
66  */
67 static unsigned int spl_kmem_cache_magazine_size = 0;
68 module_param(spl_kmem_cache_magazine_size, uint, 0444);
69 MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
70 	"Default magazine size (2-256), set automatically (0)");
71 
72 static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
73 module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
74 MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
75 
76 static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
77 module_param(spl_kmem_cache_max_size, uint, 0644);
78 MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
79 
80 /*
81  * For small objects the Linux slab allocator should be used to make the most
82  * efficient use of the memory.  However, large objects are not supported by
83  * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
84  * of 16K was determined to be optimal for architectures using 4K pages and
85  * to also work well on architectures using larger 64K page sizes.
86  */
87 static unsigned int spl_kmem_cache_slab_limit =
88     SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
89 module_param(spl_kmem_cache_slab_limit, uint, 0644);
90 MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
91 	"Objects less than N bytes use the Linux slab");
92 
93 /*
94  * The number of threads available to allocate new slabs for caches.  This
95  * should not need to be tuned but it is available for performance analysis.
96  */
97 static unsigned int spl_kmem_cache_kmem_threads = 4;
98 module_param(spl_kmem_cache_kmem_threads, uint, 0444);
99 MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
100 	"Number of spl_kmem_cache threads");
101 /* END CSTYLED */
102 
103 /*
104  * Slab allocation interfaces
105  *
106  * While the Linux slab implementation was inspired by the Solaris
107  * implementation, I cannot use it to emulate the Solaris APIs.  I
108  * require two features which are not provided by the Linux slab.
109  *
110  * 1) Constructors AND destructors.  Recent versions of the Linux
111  *    kernel have removed support for destructors.  This is a deal
112  *    breaker for the SPL which contains particularly expensive
113  *    initializers for mutex's, condition variables, etc.  We also
114  *    require a minimal level of cleanup for these data types unlike
115  *    many Linux data types which do not need to be explicitly destroyed.
116  *
117  * 2) Virtual address space backed slab.  Callers of the Solaris slab
118  *    expect it to work well for both small and very large allocations.
119  *    Because of memory fragmentation the Linux slab which is backed
120  *    by kmalloc'ed memory performs very badly when confronted with
121  *    large numbers of large allocations.  Basing the slab on the
122  *    virtual address space removes the need for contiguous pages
123  *    and greatly improves performance for large allocations.
124  *
125  * For these reasons, the SPL has its own slab implementation with
126  * the needed features.  It is not as highly optimized as either the
127  * Solaris or Linux slabs, but it should get me most of what is
128  * needed until it can be optimized or obsoleted by another approach.
129  *
130  * One serious concern I do have about this method is the relatively
131  * small virtual address space on 32-bit arches.  This will seriously
132  * constrain the size of the slab caches and their performance.
133  */
134 
135 struct list_head spl_kmem_cache_list;   /* List of caches */
136 struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
137 static taskq_t *spl_kmem_cache_taskq;   /* Task queue for aging / reclaim */
138 
139 static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
140 
141 static void *
142 kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
143 {
144 	gfp_t lflags = kmem_flags_convert(flags);
145 	void *ptr;
146 
147 	if (skc->skc_flags & KMC_RECLAIMABLE)
148 		lflags |= __GFP_RECLAIMABLE;
149 	ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
150 
151 	/* Resulting allocated memory will be page aligned */
152 	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
153 
154 	return (ptr);
155 }
156 
157 static void
158 kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
159 {
160 	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
161 
162 	/*
163 	 * The Linux direct reclaim path uses this out of band value to
164 	 * determine if forward progress is being made.  Normally this is
165 	 * incremented by kmem_freepages() which is part of the various
166 	 * Linux slab implementations.  However, since we are using none
167 	 * of that infrastructure we are responsible for incrementing it.
168 	 */
169 	if (current->reclaim_state)
170 #ifdef	HAVE_RECLAIM_STATE_RECLAIMED
171 		current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
172 #else
173 		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
174 #endif
175 	vfree(ptr);
176 }
177 
178 /*
179  * Required space for each aligned sks.
180  */
181 static inline uint32_t
182 spl_sks_size(spl_kmem_cache_t *skc)
183 {
184 	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
185 	    skc->skc_obj_align, uint32_t));
186 }
187 
188 /*
189  * Required space for each aligned object.
190  */
191 static inline uint32_t
192 spl_obj_size(spl_kmem_cache_t *skc)
193 {
194 	uint32_t align = skc->skc_obj_align;
195 
196 	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
197 	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
198 }
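
/*
 * Worked example (illustrative numbers only): with skc_obj_size = 1000,
 * skc_obj_align = 32, and a hypothetical 32-byte spl_kmem_obj_t, the
 * per-object footprint is P2ROUNDUP(1000, 32) + P2ROUNDUP(32, 32) =
 * 1024 + 32 = 1056 bytes, and spl_sko_from_obj() below would locate
 * the spl_kmem_obj_t at obj + 1024.
 */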
199 
200 uint64_t
201 spl_kmem_cache_inuse(kmem_cache_t *cache)
202 {
203 	return (cache->skc_obj_total);
204 }
205 EXPORT_SYMBOL(spl_kmem_cache_inuse);
206 
207 uint64_t
208 spl_kmem_cache_entry_size(kmem_cache_t *cache)
209 {
210 	return (cache->skc_obj_size);
211 }
212 EXPORT_SYMBOL(spl_kmem_cache_entry_size);
213 
214 /*
215  * Look up the spl_kmem_obj_t for an object given that object.
216  */
217 static inline spl_kmem_obj_t *
218 spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
219 {
220 	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
221 	    skc->skc_obj_align, uint32_t));
222 }
223 
224 /*
225  * It's important that we pack the spl_kmem_obj_t structure and the
226  * actual objects into one large address space to minimize the number
227  * of calls to the allocator.  It is far better to do a few large
228  * allocations and then subdivide them ourselves.  Which allocator
229  * we use requires balancing a few trade-offs.
230  *
231  * For small objects we use kmem_alloc() because as long as you are
232  * only requesting a small number of pages (ideally just one) its cheap.
233  * However, when you start requesting multiple pages with kmem_alloc()
234  * it gets increasingly expensive since it requires contiguous pages.
235  * For this reason we shift to vmem_alloc() for slabs of large objects
236  * which removes the need for contiguous pages.  We do not use
237  * vmem_alloc() in all cases because there is significant locking
238  * overhead in __get_vm_area_node().  This function takes a single
239  * global lock when acquiring an available virtual address range which
240  * serializes all vmem_alloc()'s for all slab caches.  Using slightly
241  * different allocation functions for small and large objects should
242  * give us the best of both worlds.
243  *
244  * +------------------------+
245  * | spl_kmem_slab_t --+-+  |
246  * | skc_obj_size    <-+ |  |
247  * | spl_kmem_obj_t      |  |
248  * | skc_obj_size    <---+  |
249  * | spl_kmem_obj_t      |  |
250  * | ...                 v  |
251  * +------------------------+
252  */
253 static spl_kmem_slab_t *
254 spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
255 {
256 	spl_kmem_slab_t *sks;
257 	void *base;
258 	uint32_t obj_size;
259 
260 	base = kv_alloc(skc, skc->skc_slab_size, flags);
261 	if (base == NULL)
262 		return (NULL);
263 
264 	sks = (spl_kmem_slab_t *)base;
265 	sks->sks_magic = SKS_MAGIC;
266 	sks->sks_objs = skc->skc_slab_objs;
267 	sks->sks_age = jiffies;
268 	sks->sks_cache = skc;
269 	INIT_LIST_HEAD(&sks->sks_list);
270 	INIT_LIST_HEAD(&sks->sks_free_list);
271 	sks->sks_ref = 0;
272 	obj_size = spl_obj_size(skc);
273 
274 	for (int i = 0; i < sks->sks_objs; i++) {
275 		void *obj = base + spl_sks_size(skc) + (i * obj_size);
276 
277 		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
278 		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
279 		sko->sko_addr = obj;
280 		sko->sko_magic = SKO_MAGIC;
281 		sko->sko_slab = sks;
282 		INIT_LIST_HEAD(&sko->sko_list);
283 		list_add_tail(&sko->sko_list, &sks->sks_free_list);
284 	}
285 
286 	return (sks);
287 }
288 
289 /*
290  * Remove a slab from the complete or partial list.  This must be called
291  * with 'skc->skc_lock' held, but the actual free must be performed
292  * outside the lock to prevent deadlocking on vmem addresses.
293  */
294 static void
295 spl_slab_free(spl_kmem_slab_t *sks,
296     struct list_head *sks_list, struct list_head *sko_list)
297 {
298 	spl_kmem_cache_t *skc;
299 
300 	ASSERT(sks->sks_magic == SKS_MAGIC);
301 	ASSERT(sks->sks_ref == 0);
302 
303 	skc = sks->sks_cache;
304 	ASSERT(skc->skc_magic == SKC_MAGIC);
305 
306 	/*
307 	 * Update slab/objects counters in the cache, then remove the
308 	 * slab from the skc->skc_partial_list.  Finally add the slab
309 	 * and all its objects in to the private work lists where the
310 	 * destructors will be called and the memory freed to the system.
311 	 */
312 	skc->skc_obj_total -= sks->sks_objs;
313 	skc->skc_slab_total--;
314 	list_del(&sks->sks_list);
315 	list_add(&sks->sks_list, sks_list);
316 	list_splice_init(&sks->sks_free_list, sko_list);
317 }
318 
319 /*
320  * Reclaim empty slabs at the end of the partial list.
321  */
322 static void
323 spl_slab_reclaim(spl_kmem_cache_t *skc)
324 {
325 	spl_kmem_slab_t *sks = NULL, *m = NULL;
326 	spl_kmem_obj_t *sko = NULL, *n = NULL;
327 	LIST_HEAD(sks_list);
328 	LIST_HEAD(sko_list);
329 
330 	/*
331 	 * Empty slabs and objects must be moved to a private list so they
332 	 * can be safely freed outside the spin lock.  All empty slabs are
333 	 * at the end of skc->skc_partial_list, therefore once a non-empty
334 	 * slab is found we can stop scanning.
335 	 */
336 	spin_lock(&skc->skc_lock);
337 	list_for_each_entry_safe_reverse(sks, m,
338 	    &skc->skc_partial_list, sks_list) {
339 
340 		if (sks->sks_ref > 0)
341 			break;
342 
343 		spl_slab_free(sks, &sks_list, &sko_list);
344 	}
345 	spin_unlock(&skc->skc_lock);
346 
347 	/*
348 	 * The following two loops ensure all the object destructors are run,
349 	 * and the slabs themselves are freed.  This is all done outside the
350 	 * skc->skc_lock since this allows the destructor to sleep, and
351 	 * allows us to perform a conditional reschedule when freeing a
352 	 * large number of objects and slabs back to the system.
353 	 */
354 
355 	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
356 		ASSERT(sko->sko_magic == SKO_MAGIC);
357 	}
358 
359 	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
360 		ASSERT(sks->sks_magic == SKS_MAGIC);
361 		kv_free(skc, sks, skc->skc_slab_size);
362 	}
363 }
364 
365 static spl_kmem_emergency_t *
366 spl_emergency_search(struct rb_root *root, void *obj)
367 {
368 	struct rb_node *node = root->rb_node;
369 	spl_kmem_emergency_t *ske;
370 	unsigned long address = (unsigned long)obj;
371 
372 	while (node) {
373 		ske = container_of(node, spl_kmem_emergency_t, ske_node);
374 
375 		if (address < ske->ske_obj)
376 			node = node->rb_left;
377 		else if (address > ske->ske_obj)
378 			node = node->rb_right;
379 		else
380 			return (ske);
381 	}
382 
383 	return (NULL);
384 }
385 
386 static int
387 spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
388 {
389 	struct rb_node **new = &(root->rb_node), *parent = NULL;
390 	spl_kmem_emergency_t *ske_tmp;
391 	unsigned long address = ske->ske_obj;
392 
393 	while (*new) {
394 		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
395 
396 		parent = *new;
397 		if (address < ske_tmp->ske_obj)
398 			new = &((*new)->rb_left);
399 		else if (address > ske_tmp->ske_obj)
400 			new = &((*new)->rb_right);
401 		else
402 			return (0);
403 	}
404 
405 	rb_link_node(&ske->ske_node, parent, new);
406 	rb_insert_color(&ske->ske_node, root);
407 
408 	return (1);
409 }
410 
411 /*
412  * Allocate a single emergency object and track it in a red black tree.
413  */
414 static int
415 spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
416 {
417 	gfp_t lflags = kmem_flags_convert(flags);
418 	spl_kmem_emergency_t *ske;
419 	int order = get_order(skc->skc_obj_size);
420 	int empty;
421 
422 	/* Last chance: use a partial slab if one now exists */
423 	spin_lock(&skc->skc_lock);
424 	empty = list_empty(&skc->skc_partial_list);
425 	spin_unlock(&skc->skc_lock);
426 	if (!empty)
427 		return (-EEXIST);
428 
429 	if (skc->skc_flags & KMC_RECLAIMABLE)
430 		lflags |= __GFP_RECLAIMABLE;
431 	ske = kmalloc(sizeof (*ske), lflags);
432 	if (ske == NULL)
433 		return (-ENOMEM);
434 
435 	ske->ske_obj = __get_free_pages(lflags, order);
436 	if (ske->ske_obj == 0) {
437 		kfree(ske);
438 		return (-ENOMEM);
439 	}
440 
441 	spin_lock(&skc->skc_lock);
442 	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
443 	if (likely(empty)) {
444 		skc->skc_obj_total++;
445 		skc->skc_obj_emergency++;
446 		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
447 			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
448 	}
449 	spin_unlock(&skc->skc_lock);
450 
451 	if (unlikely(!empty)) {
452 		free_pages(ske->ske_obj, order);
453 		kfree(ske);
454 		return (-EINVAL);
455 	}
456 
457 	*obj = (void *)ske->ske_obj;
458 
459 	return (0);
460 }
461 
462 /*
463  * Locate the passed object in the red black tree and free it.
464  */
465 static int
466 spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
467 {
468 	spl_kmem_emergency_t *ske;
469 	int order = get_order(skc->skc_obj_size);
470 
471 	spin_lock(&skc->skc_lock);
472 	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
473 	if (ske) {
474 		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
475 		skc->skc_obj_emergency--;
476 		skc->skc_obj_total--;
477 	}
478 	spin_unlock(&skc->skc_lock);
479 
480 	if (ske == NULL)
481 		return (-ENOENT);
482 
483 	free_pages(ske->ske_obj, order);
484 	kfree(ske);
485 
486 	return (0);
487 }
488 
489 /*
490  * Release objects from the per-cpu magazine back to their slab.  The flush
491  * argument contains the max number of entries to remove from the magazine.
492  */
493 static void
494 spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
495 {
496 	spin_lock(&skc->skc_lock);
497 
498 	ASSERT(skc->skc_magic == SKC_MAGIC);
499 	ASSERT(skm->skm_magic == SKM_MAGIC);
500 
501 	int count = MIN(flush, skm->skm_avail);
502 	for (int i = 0; i < count; i++)
503 		spl_cache_shrink(skc, skm->skm_objs[i]);
504 
505 	skm->skm_avail -= count;
506 	memmove(skm->skm_objs, &(skm->skm_objs[count]),
507 	    sizeof (void *) * skm->skm_avail);
508 
509 	spin_unlock(&skc->skc_lock);
510 }
511 
512 /*
513  * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
514  * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
515  * for very small objects we may end up with more than this so as not
516  * to waste space in the minimal allocation of a single page.
517  */
518 static int
519 spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
520 {
521 	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;
522 
523 	sks_size = spl_sks_size(skc);
524 	obj_size = spl_obj_size(skc);
525 	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
526 	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
527 
528 	if (tgt_size <= max_size) {
529 		tgt_objs = (tgt_size - sks_size) / obj_size;
530 	} else {
531 		tgt_objs = (max_size - sks_size) / obj_size;
532 		tgt_size = (tgt_objs * obj_size) + sks_size;
533 	}
534 
535 	if (tgt_objs == 0)
536 		return (-ENOSPC);
537 
538 	*objs = tgt_objs;
539 	*size = tgt_size;
540 
541 	return (0);
542 }
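
/*
 * Worked example (illustrative, assuming spl_kmem_cache_obj_per_slab = 8
 * and spl_kmem_cache_max_size = 32, plausible defaults at the time of
 * writing): for a 128KiB per-object footprint, tgt_size = 8 * 128KiB +
 * sks_size, comfortably under the 32MB cap, so the slab holds all 8
 * objects.  Only when 8 * obj_size + sks_size would exceed max_size is
 * tgt_objs scaled down to whatever fits.
 */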
543 
544 /*
545  * Make a guess at reasonable per-cpu magazine size based on the size of
546  * each object and the cost of caching N of them in each magazine.  Long
547  * term this should really adapt based on an observed usage heuristic.
548  */
549 static int
550 spl_magazine_size(spl_kmem_cache_t *skc)
551 {
552 	uint32_t obj_size = spl_obj_size(skc);
553 	int size;
554 
555 	if (spl_kmem_cache_magazine_size > 0)
556 		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));
557 
558 	/* Per-magazine sizes below assume a 4KiB page size */
559 	if (obj_size > (PAGE_SIZE * 256))
560 		size = 4;  /* Minimum 4MiB per-magazine */
561 	else if (obj_size > (PAGE_SIZE * 32))
562 		size = 16; /* Minimum 2MiB per-magazine */
563 	else if (obj_size > (PAGE_SIZE))
564 		size = 64; /* Minimum 256KiB per-magazine */
565 	else if (obj_size > (PAGE_SIZE / 4))
566 		size = 128; /* Minimum 128KiB per-magazine */
567 	else
568 		size = 256;
569 
570 	return (size);
571 }
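
/*
 * Worked example (illustrative): on a machine with 4KiB pages, a 16KiB
 * object falls in the (PAGE_SIZE, PAGE_SIZE * 32] tier above and gets a
 * 64-object magazine, i.e. up to 64 * 16KiB = 1MiB of cached objects
 * per CPU for that cache.
 */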
572 
573 /*
574  * Allocate a per-cpu magazine to associate with a specific core.
575  */
576 static spl_kmem_magazine_t *
577 spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
578 {
579 	spl_kmem_magazine_t *skm;
580 	int size = sizeof (spl_kmem_magazine_t) +
581 	    sizeof (void *) * skc->skc_mag_size;
582 
583 	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
584 	if (skm) {
585 		skm->skm_magic = SKM_MAGIC;
586 		skm->skm_avail = 0;
587 		skm->skm_size = skc->skc_mag_size;
588 		skm->skm_refill = skc->skc_mag_refill;
589 		skm->skm_cache = skc;
590 		skm->skm_cpu = cpu;
591 	}
592 
593 	return (skm);
594 }
595 
596 /*
597  * Free a per-cpu magazine associated with a specific core.
598  */
599 static void
600 spl_magazine_free(spl_kmem_magazine_t *skm)
601 {
602 	ASSERT(skm->skm_magic == SKM_MAGIC);
603 	ASSERT(skm->skm_avail == 0);
604 	kfree(skm);
605 }
606 
607 /*
608  * Create all per-cpu magazines of reasonable sizes.
609  */
610 static int
611 spl_magazine_create(spl_kmem_cache_t *skc)
612 {
613 	int i = 0;
614 
615 	ASSERT((skc->skc_flags & KMC_SLAB) == 0);
616 
617 	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
618 	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
619 	skc->skc_mag_size = spl_magazine_size(skc);
620 	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
621 
622 	for_each_possible_cpu(i) {
623 		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
624 		if (!skc->skc_mag[i]) {
625 			for (i--; i >= 0; i--)
626 				spl_magazine_free(skc->skc_mag[i]);
627 
628 			kfree(skc->skc_mag);
629 			return (-ENOMEM);
630 		}
631 	}
632 
633 	return (0);
634 }
635 
636 /*
637  * Destroy all per-cpu magazines.
638  */
639 static void
640 spl_magazine_destroy(spl_kmem_cache_t *skc)
641 {
642 	spl_kmem_magazine_t *skm;
643 	int i = 0;
644 
645 	ASSERT((skc->skc_flags & KMC_SLAB) == 0);
646 
647 	for_each_possible_cpu(i) {
648 		skm = skc->skc_mag[i];
649 		spl_cache_flush(skc, skm, skm->skm_avail);
650 		spl_magazine_free(skm);
651 	}
652 
653 	kfree(skc->skc_mag);
654 }
655 
656 /*
657  * Create an object cache based on the following arguments:
658  * name		cache name
659  * size		cache object size
660  * align	cache object alignment
661  * ctor		cache object constructor
662  * dtor		cache object destructor
663  * reclaim	cache object reclaim
664  * priv		cache private data for ctor/dtor/reclaim
665  * vmp		unused, must be NULL
666  * flags
667  *	KMC_KVMEM       Force kvmem backed SPL cache
668  *	KMC_SLAB        Force Linux slab backed cache
669  *	KMC_NODEBUG	Disable debugging (unsupported)
670  *	KMC_RECLAIMABLE	Memory can be freed under pressure
671  */
672 spl_kmem_cache_t *
673 spl_kmem_cache_create(const char *name, size_t size, size_t align,
674     spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
675     void *priv, void *vmp, int flags)
676 {
677 	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
678 	spl_kmem_cache_t *skc;
679 	int rc;
680 
681 	/*
682 	 * Unsupported flags
683 	 */
684 	ASSERT(vmp == NULL);
685 	ASSERT(reclaim == NULL);
686 
687 	might_sleep();
688 
689 	skc = kzalloc(sizeof (*skc), lflags);
690 	if (skc == NULL)
691 		return (NULL);
692 
693 	skc->skc_magic = SKC_MAGIC;
694 	skc->skc_name_size = strlen(name) + 1;
695 	skc->skc_name = kmalloc(skc->skc_name_size, lflags);
696 	if (skc->skc_name == NULL) {
697 		kfree(skc);
698 		return (NULL);
699 	}
700 	strlcpy(skc->skc_name, name, skc->skc_name_size);
701 
702 	skc->skc_ctor = ctor;
703 	skc->skc_dtor = dtor;
704 	skc->skc_private = priv;
705 	skc->skc_vmp = vmp;
706 	skc->skc_linux_cache = NULL;
707 	skc->skc_flags = flags;
708 	skc->skc_obj_size = size;
709 	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
710 	atomic_set(&skc->skc_ref, 0);
711 
712 	INIT_LIST_HEAD(&skc->skc_list);
713 	INIT_LIST_HEAD(&skc->skc_complete_list);
714 	INIT_LIST_HEAD(&skc->skc_partial_list);
715 	skc->skc_emergency_tree = RB_ROOT;
716 	spin_lock_init(&skc->skc_lock);
717 	init_waitqueue_head(&skc->skc_waitq);
718 	skc->skc_slab_fail = 0;
719 	skc->skc_slab_create = 0;
720 	skc->skc_slab_destroy = 0;
721 	skc->skc_slab_total = 0;
722 	skc->skc_slab_alloc = 0;
723 	skc->skc_slab_max = 0;
724 	skc->skc_obj_total = 0;
725 	skc->skc_obj_alloc = 0;
726 	skc->skc_obj_max = 0;
727 	skc->skc_obj_deadlock = 0;
728 	skc->skc_obj_emergency = 0;
729 	skc->skc_obj_emergency_max = 0;
730 
731 	rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
732 	    GFP_KERNEL);
733 	if (rc != 0) {
734 		kfree(skc);
735 		return (NULL);
736 	}
737 
738 	/*
739 	 * Verify the requested alignment restriction is sane.
740 	 */
741 	if (align) {
742 		VERIFY(ISP2(align));
743 		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
744 		VERIFY3U(align, <=, PAGE_SIZE);
745 		skc->skc_obj_align = align;
746 	}
747 
748 	/*
749 	 * When no specific type of slab is requested (kmem, vmem, or
750 	 * linux slab), select a cache type based on the object size
751 	 * and default tunables.
752 	 */
753 	if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
754 		if (spl_kmem_cache_slab_limit &&
755 		    size <= (size_t)spl_kmem_cache_slab_limit) {
756 			/*
757 			 * Objects smaller than spl_kmem_cache_slab_limit can
758 			 * use the Linux slab for better space-efficiency.
759 			 */
760 			skc->skc_flags |= KMC_SLAB;
761 		} else {
762 			/*
763 			 * All other objects are considered large and are
764 			 * placed on kvmem backed slabs.
765 			 */
766 			skc->skc_flags |= KMC_KVMEM;
767 		}
768 	}
769 
770 	/*
771 	 * Given the type of slab allocate the required resources.
772 	 */
773 	if (skc->skc_flags & KMC_KVMEM) {
774 		rc = spl_slab_size(skc,
775 		    &skc->skc_slab_objs, &skc->skc_slab_size);
776 		if (rc)
777 			goto out;
778 
779 		rc = spl_magazine_create(skc);
780 		if (rc)
781 			goto out;
782 	} else {
783 		unsigned long slabflags = 0;
784 
785 		if (size > spl_kmem_cache_slab_limit)
786 			goto out;
787 
788 		if (skc->skc_flags & KMC_RECLAIMABLE)
789 			slabflags |= SLAB_RECLAIM_ACCOUNT;
790 
791 #if defined(SLAB_USERCOPY)
792 		/*
793 		 * Required for PAX-enabled kernels if the slab is to be
794 		 * used for copying between user and kernel space.
795 		 */
796 		slabflags |= SLAB_USERCOPY;
797 #endif
798 
799 #if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
800 		/*
801 		 * Newer grsec patchset uses kmem_cache_create_usercopy()
802 		 * instead of SLAB_USERCOPY flag
803 		 */
804 		skc->skc_linux_cache = kmem_cache_create_usercopy(
805 		    skc->skc_name, size, align, slabflags, 0, size, NULL);
806 #else
807 		skc->skc_linux_cache = kmem_cache_create(
808 		    skc->skc_name, size, align, slabflags, NULL);
809 #endif
810 		if (skc->skc_linux_cache == NULL)
811 			goto out;
812 	}
813 
814 	down_write(&spl_kmem_cache_sem);
815 	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
816 	up_write(&spl_kmem_cache_sem);
817 
818 	return (skc);
819 out:
820 	kfree(skc->skc_name);
821 	percpu_counter_destroy(&skc->skc_linux_alloc);
822 	kfree(skc);
823 	return (NULL);
824 }
825 EXPORT_SYMBOL(spl_kmem_cache_create);
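
/*
 * Usage sketch (hypothetical consumer code, not part of this module; the
 * example_* names are invented for illustration).  It creates a cache of
 * mutex-bearing objects, allocates and frees one, then destroys the cache:
 *
 *	typedef struct example_obj {
 *		kmutex_t eo_lock;
 *	} example_obj_t;
 *
 *	static int
 *	example_ctor(void *buf, void *priv, int kmflags)
 *	{
 *		example_obj_t *eo = buf;
 *		mutex_init(&eo->eo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (0);
 *	}
 *
 *	static void
 *	example_dtor(void *buf, void *priv)
 *	{
 *		example_obj_t *eo = buf;
 *		mutex_destroy(&eo->eo_lock);
 *	}
 *
 *	spl_kmem_cache_t *cache = spl_kmem_cache_create("example_cache",
 *	    sizeof (example_obj_t), 0, example_ctor, example_dtor,
 *	    NULL, NULL, NULL, 0);
 *	example_obj_t *eo = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, eo);
 *	spl_kmem_cache_destroy(cache);
 */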
826 
827 /*
828  * Register a move callback for cache defragmentation.
829  * XXX: Unimplemented but harmless to stub out for now.
830  */
831 void
832 spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
833     kmem_cbrc_t (move)(void *, void *, size_t, void *))
834 {
835 	ASSERT(move != NULL);
836 }
837 EXPORT_SYMBOL(spl_kmem_cache_set_move);
838 
839 /*
840  * Destroy a cache and all objects associated with the cache.
841  */
842 void
843 spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
844 {
845 	DECLARE_WAIT_QUEUE_HEAD(wq);
846 	taskqid_t id;
847 
848 	ASSERT(skc->skc_magic == SKC_MAGIC);
849 	ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));
850 
851 	down_write(&spl_kmem_cache_sem);
852 	list_del_init(&skc->skc_list);
853 	up_write(&spl_kmem_cache_sem);
854 
855 	/* Cancel and wait for any pending delayed tasks */
856 	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
857 
858 	spin_lock(&skc->skc_lock);
859 	id = skc->skc_taskqid;
860 	spin_unlock(&skc->skc_lock);
861 
862 	taskq_cancel_id(spl_kmem_cache_taskq, id);
863 
864 	/*
865 	 * Wait until all current callers complete; this is mainly
866 	 * to catch the case where a low memory situation triggers a
867 	 * cache reaping action which races with this destroy.
868 	 */
869 	wait_event(wq, atomic_read(&skc->skc_ref) == 0);
870 
871 	if (skc->skc_flags & KMC_KVMEM) {
872 		spl_magazine_destroy(skc);
873 		spl_slab_reclaim(skc);
874 	} else {
875 		ASSERT(skc->skc_flags & KMC_SLAB);
876 		kmem_cache_destroy(skc->skc_linux_cache);
877 	}
878 
879 	spin_lock(&skc->skc_lock);
880 
881 	/*
882 	 * Validate there are no objects in use and free all the
883 	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
884 	 */
885 	ASSERT3U(skc->skc_slab_alloc, ==, 0);
886 	ASSERT3U(skc->skc_obj_alloc, ==, 0);
887 	ASSERT3U(skc->skc_slab_total, ==, 0);
888 	ASSERT3U(skc->skc_obj_total, ==, 0);
889 	ASSERT3U(skc->skc_obj_emergency, ==, 0);
890 	ASSERT(list_empty(&skc->skc_complete_list));
891 
892 	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
893 	percpu_counter_destroy(&skc->skc_linux_alloc);
894 
895 	spin_unlock(&skc->skc_lock);
896 
897 	kfree(skc->skc_name);
898 	kfree(skc);
899 }
900 EXPORT_SYMBOL(spl_kmem_cache_destroy);
901 
902 /*
903  * Allocate an object from a slab attached to the cache.  This is used to
904  * repopulate the per-cpu magazine caches in batches when they run low.
905  */
906 static void *
907 spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
908 {
909 	spl_kmem_obj_t *sko;
910 
911 	ASSERT(skc->skc_magic == SKC_MAGIC);
912 	ASSERT(sks->sks_magic == SKS_MAGIC);
913 
914 	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
915 	ASSERT(sko->sko_magic == SKO_MAGIC);
916 	ASSERT(sko->sko_addr != NULL);
917 
918 	/* Remove from sks_free_list */
919 	list_del_init(&sko->sko_list);
920 
921 	sks->sks_age = jiffies;
922 	sks->sks_ref++;
923 	skc->skc_obj_alloc++;
924 
925 	/* Track max obj usage statistics */
926 	if (skc->skc_obj_alloc > skc->skc_obj_max)
927 		skc->skc_obj_max = skc->skc_obj_alloc;
928 
929 	/* Track max slab usage statistics */
930 	if (sks->sks_ref == 1) {
931 		skc->skc_slab_alloc++;
932 
933 		if (skc->skc_slab_alloc > skc->skc_slab_max)
934 			skc->skc_slab_max = skc->skc_slab_alloc;
935 	}
936 
937 	return (sko->sko_addr);
938 }
939 
940 /*
941  * Generic slab allocation function to be run by the global work queues.
942  * It is responsible for allocating a new slab, linking it in to the list
943  * of partial slabs, and then waking any waiters.
944  */
945 static int
946 __spl_cache_grow(spl_kmem_cache_t *skc, int flags)
947 {
948 	spl_kmem_slab_t *sks;
949 
950 	fstrans_cookie_t cookie = spl_fstrans_mark();
951 	sks = spl_slab_alloc(skc, flags);
952 	spl_fstrans_unmark(cookie);
953 
954 	spin_lock(&skc->skc_lock);
955 	if (sks) {
956 		skc->skc_slab_total++;
957 		skc->skc_obj_total += sks->sks_objs;
958 		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
959 
960 		smp_mb__before_atomic();
961 		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
962 		smp_mb__after_atomic();
963 	}
964 	spin_unlock(&skc->skc_lock);
965 
966 	return (sks == NULL ? -ENOMEM : 0);
967 }
968 
969 static void
970 spl_cache_grow_work(void *data)
971 {
972 	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
973 	spl_kmem_cache_t *skc = ska->ska_cache;
974 
975 	int error = __spl_cache_grow(skc, ska->ska_flags);
976 
977 	atomic_dec(&skc->skc_ref);
978 	smp_mb__before_atomic();
979 	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
980 	smp_mb__after_atomic();
981 	if (error == 0)
982 		wake_up_all(&skc->skc_waitq);
983 
984 	kfree(ska);
985 }
986 
987 /*
988  * Returns non-zero when a new slab should be available.
989  */
990 static int
991 spl_cache_grow_wait(spl_kmem_cache_t *skc)
992 {
993 	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
994 }
995 
996 /*
997  * No available objects on any slabs, create a new slab.  Note that this
998  * functionality is disabled for KMC_SLAB caches which are backed by the
999  * Linux slab.
1000  */
1001 static int
1002 spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
1003 {
1004 	int remaining, rc = 0;
1005 
1006 	ASSERT0(flags & ~KM_PUBLIC_MASK);
1007 	ASSERT(skc->skc_magic == SKC_MAGIC);
1008 	ASSERT((skc->skc_flags & KMC_SLAB) == 0);
1009 
1010 	*obj = NULL;
1011 
1012 	/*
1013 	 * Since we can't sleep, attempt an emergency allocation to satisfy
1014 	 * the request.  The only alternative is to fail the allocation, but
1015 	 * it's preferable to try.  The use of KM_NOSLEEP is expected to be rare.
1016 	 */
1017 	if (flags & KM_NOSLEEP)
1018 		return (spl_emergency_alloc(skc, flags, obj));
1019 
1020 	might_sleep();
1021 
1022 	/*
1023 	 * Before allocating a new slab wait for any reaping to complete and
1024 	 * then return so the local magazine can be rechecked for new objects.
1025 	 */
1026 	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
1027 		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
1028 		    TASK_UNINTERRUPTIBLE);
1029 		return (rc ? rc : -EAGAIN);
1030 	}
1031 
1032 	/*
1033 	 * Note: It would be nice to reduce the overhead of context switches
1034 	 * and improve NUMA locality by trying to allocate a new slab in the
1035 	 * current process context with KM_NOSLEEP flag.
1036 	 *
1037 	 * However, this can't be applied to vmem/kvmem due to a bug that
1038 	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
1039 	 */
1040 
1041 	/*
1042 	 * This is handled by dispatching a work request to the global work
1043 	 * queue.  This allows us to asynchronously allocate a new slab while
1044 	 * retaining the ability to safely fall back to smaller synchronous
1045 	 * allocations to ensure forward progress is always maintained.
1046 	 */
1047 	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
1048 		spl_kmem_alloc_t *ska;
1049 
1050 		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
1051 		if (ska == NULL) {
1052 			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
1053 			smp_mb__after_atomic();
1054 			wake_up_all(&skc->skc_waitq);
1055 			return (-ENOMEM);
1056 		}
1057 
1058 		atomic_inc(&skc->skc_ref);
1059 		ska->ska_cache = skc;
1060 		ska->ska_flags = flags;
1061 		taskq_init_ent(&ska->ska_tqe);
1062 		taskq_dispatch_ent(spl_kmem_cache_taskq,
1063 		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
1064 	}
1065 
1066 	/*
1067 	 * The goal here is to only detect the rare case where a virtual slab
1068 	 * allocation has deadlocked.  We must be careful to minimize the use
1069 	 * of emergency objects which are more expensive to track.  Therefore,
1070 	 * we set a very long timeout for the asynchronous allocation and if
1071 	 * the timeout is reached the cache is flagged as deadlocked.  From
1072 	 * this point only new emergency objects will be allocated until the
1073 	 * asynchronous allocation completes and clears the deadlocked flag.
1074 	 */
1075 	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
1076 		rc = spl_emergency_alloc(skc, flags, obj);
1077 	} else {
1078 		remaining = wait_event_timeout(skc->skc_waitq,
1079 		    spl_cache_grow_wait(skc), HZ / 10);
1080 
1081 		if (!remaining) {
1082 			spin_lock(&skc->skc_lock);
1083 			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
1084 				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
1085 				skc->skc_obj_deadlock++;
1086 			}
1087 			spin_unlock(&skc->skc_lock);
1088 		}
1089 
1090 		rc = -ENOMEM;
1091 	}
1092 
1093 	return (rc);
1094 }
1095 
1096 /*
1097  * Refill a per-cpu magazine with objects from the slabs for this cache.
1098  * Ideally the magazine can be repopulated using existing objects which have
1099  * been released; however, if we are unable to locate enough free objects, new
1100  * slabs of objects will be created.  On success NULL is returned, otherwise
1101  * the address of a single emergency object is returned for use by the caller.
1102  */
1103 static void *
1104 spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
1105 {
1106 	spl_kmem_slab_t *sks;
1107 	int count = 0, rc, refill;
1108 	void *obj = NULL;
1109 
1110 	ASSERT(skc->skc_magic == SKC_MAGIC);
1111 	ASSERT(skm->skm_magic == SKM_MAGIC);
1112 
1113 	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
1114 	spin_lock(&skc->skc_lock);
1115 
1116 	while (refill > 0) {
1117 		/* No slabs available, we may need to grow the cache */
1118 		if (list_empty(&skc->skc_partial_list)) {
1119 			spin_unlock(&skc->skc_lock);
1120 
1121 			local_irq_enable();
1122 			rc = spl_cache_grow(skc, flags, &obj);
1123 			local_irq_disable();
1124 
1125 			/* Emergency object for immediate use by caller */
1126 			if (rc == 0 && obj != NULL)
1127 				return (obj);
1128 
1129 			if (rc)
1130 				goto out;
1131 
1132 			/* Rescheduled to a different CPU, skm is not local */
1133 			if (skm != skc->skc_mag[smp_processor_id()])
1134 				goto out;
1135 
1136 			/*
1137 			 * Potentially rescheduled to the same CPU but
1138 			 * allocations may have occurred from this CPU while
1139 			 * we were sleeping so recalculate max refill.
1140 			 */
1141 			refill = MIN(refill, skm->skm_size - skm->skm_avail);
1142 
1143 			spin_lock(&skc->skc_lock);
1144 			continue;
1145 		}
1146 
1147 		/* Grab the next available slab */
1148 		sks = list_entry((&skc->skc_partial_list)->next,
1149 		    spl_kmem_slab_t, sks_list);
1150 		ASSERT(sks->sks_magic == SKS_MAGIC);
1151 		ASSERT(sks->sks_ref < sks->sks_objs);
1152 		ASSERT(!list_empty(&sks->sks_free_list));
1153 
1154 		/*
1155 		 * Consume as many objects as needed to refill the requested
1156 		 * cache.  We must also be careful not to overfill it.
1157 		 */
1158 		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
1159 		    ++count) {
1160 			ASSERT(skm->skm_avail < skm->skm_size);
1161 			ASSERT(count < skm->skm_size);
1162 			skm->skm_objs[skm->skm_avail++] =
1163 			    spl_cache_obj(skc, sks);
1164 		}
1165 
1166 		/* Move slab to skc_complete_list when full */
1167 		if (sks->sks_ref == sks->sks_objs) {
1168 			list_del(&sks->sks_list);
1169 			list_add(&sks->sks_list, &skc->skc_complete_list);
1170 		}
1171 	}
1172 
1173 	spin_unlock(&skc->skc_lock);
1174 out:
1175 	return (NULL);
1176 }
1177 
1178 /*
1179  * Release an object back to the slab from which it came.
1180  */
1181 static void
1182 spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
1183 {
1184 	spl_kmem_slab_t *sks = NULL;
1185 	spl_kmem_obj_t *sko = NULL;
1186 
1187 	ASSERT(skc->skc_magic == SKC_MAGIC);
1188 
1189 	sko = spl_sko_from_obj(skc, obj);
1190 	ASSERT(sko->sko_magic == SKO_MAGIC);
1191 	sks = sko->sko_slab;
1192 	ASSERT(sks->sks_magic == SKS_MAGIC);
1193 	ASSERT(sks->sks_cache == skc);
1194 	list_add(&sko->sko_list, &sks->sks_free_list);
1195 
1196 	sks->sks_age = jiffies;
1197 	sks->sks_ref--;
1198 	skc->skc_obj_alloc--;
1199 
1200 	/*
1201 	 * Move slab to skc_partial_list when no longer full.  Slabs
1202 	 * are added to the head to keep the partial list in quasi-full
1203 	 * sorted order.  Fuller at the head, emptier at the tail.
1204 	 */
1205 	if (sks->sks_ref == (sks->sks_objs - 1)) {
1206 		list_del(&sks->sks_list);
1207 		list_add(&sks->sks_list, &skc->skc_partial_list);
1208 	}
1209 
1210 	/*
1211 	 * Move empty slabs to the end of the partial list so
1212 	 * they can be easily found and freed during reclamation.
1213 	 */
1214 	if (sks->sks_ref == 0) {
1215 		list_del(&sks->sks_list);
1216 		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
1217 		skc->skc_slab_alloc--;
1218 	}
1219 }
1220 
1221 /*
1222  * Allocate an object from the per-cpu magazine, or if the magazine
1223  * is empty directly allocate from a slab and repopulate the magazine.
1224  */
1225 void *
1226 spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
1227 {
1228 	spl_kmem_magazine_t *skm;
1229 	void *obj = NULL;
1230 
1231 	ASSERT0(flags & ~KM_PUBLIC_MASK);
1232 	ASSERT(skc->skc_magic == SKC_MAGIC);
1233 	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1234 
1235 	/*
1236 	 * Allocate directly from a Linux slab.  All optimizations are left
1237 	 * to the underlying cache; we only need to guarantee that KM_SLEEP
1238 	 * callers will never fail.
1239 	 */
1240 	if (skc->skc_flags & KMC_SLAB) {
1241 		struct kmem_cache *slc = skc->skc_linux_cache;
1242 		do {
1243 			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
1244 		} while ((obj == NULL) && !(flags & KM_NOSLEEP));
1245 
1246 		if (obj != NULL) {
1247 			/*
1248 			 * Even though we leave everything up to the
1249 			 * underlying cache, we still keep track of
1250 			 * how many objects we've allocated in it for
1251 			 * better debuggability.
1252 			 */
1253 			percpu_counter_inc(&skc->skc_linux_alloc);
1254 		}
1255 		goto ret;
1256 	}
1257 
1258 	local_irq_disable();
1259 
1260 restart:
1261 	/*
1262 	 * Safe to update per-cpu structure without lock, but
1263 	 * in the restart case we must be careful to reacquire
1264 	 * the local magazine since this may have changed
1265 	 * when we need to grow the cache.
1266 	 */
1267 	skm = skc->skc_mag[smp_processor_id()];
1268 	ASSERT(skm->skm_magic == SKM_MAGIC);
1269 
1270 	if (likely(skm->skm_avail)) {
1271 		/* Object available in CPU cache, use it */
1272 		obj = skm->skm_objs[--skm->skm_avail];
1273 	} else {
1274 		obj = spl_cache_refill(skc, skm, flags);
1275 		if ((obj == NULL) && !(flags & KM_NOSLEEP))
1276 			goto restart;
1277 
1278 		local_irq_enable();
1279 		goto ret;
1280 	}
1281 
1282 	local_irq_enable();
1283 	ASSERT(obj);
1284 	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
1285 
1286 ret:
1287 	/* Pre-emptively migrate object to CPU L1 cache */
1288 	if (obj) {
1289 		if (obj && skc->skc_ctor)
1290 			skc->skc_ctor(obj, skc->skc_private, flags);
1291 		else
1292 			prefetchw(obj);
1293 	}
1294 
1295 	return (obj);
1296 }
1297 EXPORT_SYMBOL(spl_kmem_cache_alloc);
1298 
1299 /*
1300  * Free an object back to the local per-cpu magazine, there is no
1301  * guarantee that this is the same magazine the object was originally
1302  * allocated from.  We may need to flush entire magazines back
1303  * to the slabs to make space.
1304  */
1305 void
1306 spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
1307 {
1308 	spl_kmem_magazine_t *skm;
1309 	unsigned long flags;
1310 	int do_reclaim = 0;
1311 	int do_emergency = 0;
1312 
1313 	ASSERT(skc->skc_magic == SKC_MAGIC);
1314 	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1315 
1316 	/*
1317 	 * Run the destructor
1318 	 */
1319 	if (skc->skc_dtor)
1320 		skc->skc_dtor(obj, skc->skc_private);
1321 
1322 	/*
1323 	 * Free the object back to the underlying Linux slab.
1324 	 */
1325 	if (skc->skc_flags & KMC_SLAB) {
1326 		kmem_cache_free(skc->skc_linux_cache, obj);
1327 		percpu_counter_dec(&skc->skc_linux_alloc);
1328 		return;
1329 	}
1330 
1331 	/*
1332 	 * While a cache has outstanding emergency objects all freed objects
1333 	 * must be checked.  However, since emergency objects will never use
1334 	 * a virtual address these objects can be safely excluded as an
1335 	 * optimization.
1336 	 */
1337 	if (!is_vmalloc_addr(obj)) {
1338 		spin_lock(&skc->skc_lock);
1339 		do_emergency = (skc->skc_obj_emergency > 0);
1340 		spin_unlock(&skc->skc_lock);
1341 
1342 		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
1343 			return;
1344 	}
1345 
1346 	local_irq_save(flags);
1347 
1348 	/*
1349 	 * Safe to update the per-cpu structure without a lock, but
1350 	 * since no remote memory allocation tracking is performed
1351 	 * it is entirely possible to allocate an object from one
1352 	 * CPU cache and return it to another.
1353 	 */
1354 	skm = skc->skc_mag[smp_processor_id()];
1355 	ASSERT(skm->skm_magic == SKM_MAGIC);
1356 
1357 	/*
1358 	 * Per-CPU cache full, flush it to make space for this object,
1359 	 * this may result in an empty slab which can be reclaimed once
1360 	 * interrupts are re-enabled.
1361 	 */
1362 	if (unlikely(skm->skm_avail >= skm->skm_size)) {
1363 		spl_cache_flush(skc, skm, skm->skm_refill);
1364 		do_reclaim = 1;
1365 	}
1366 
1367 	/* Available space in cache, use it */
1368 	skm->skm_objs[skm->skm_avail++] = obj;
1369 
1370 	local_irq_restore(flags);
1371 
1372 	if (do_reclaim)
1373 		spl_slab_reclaim(skc);
1374 }
1375 EXPORT_SYMBOL(spl_kmem_cache_free);
1376 
1377 /*
1378  * Depending on how many and which objects are released it may simply
1379  * repopulate the local magazine which will then need to age-out.  Objects
1380  * which cannot fit in the magazine will be released back to their slabs
1381  * which will also need to age out before being released.  This is all just
1382  * best effort and we do not want to thrash creating and destroying slabs.
1383  */
1384 void
1385 spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
1386 {
1387 	ASSERT(skc->skc_magic == SKC_MAGIC);
1388 	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1389 
1390 	if (skc->skc_flags & KMC_SLAB)
1391 		return;
1392 
1393 	atomic_inc(&skc->skc_ref);
1394 
1395 	/*
1396 	 * Prevent concurrent cache reaping when contended.
1397 	 */
1398 	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
1399 		goto out;
1400 
1401 	/* Reclaim from the magazine and free all now empty slabs. */
1402 	unsigned long irq_flags;
1403 	local_irq_save(irq_flags);
1404 	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
1405 	spl_cache_flush(skc, skm, skm->skm_avail);
1406 	local_irq_restore(irq_flags);
1407 
1408 	spl_slab_reclaim(skc);
1409 	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
1410 	smp_mb__after_atomic();
1411 	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
1412 out:
1413 	atomic_dec(&skc->skc_ref);
1414 }
1415 EXPORT_SYMBOL(spl_kmem_cache_reap_now);
1416 
1417 /*
1418  * This is stubbed out for code consistency with other platforms.  There
1419  * is existing logic to prevent concurrent reaping so while this is ugly
1420  * it should do no harm.
1421  */
1422 int
1423 spl_kmem_cache_reap_active(void)
1424 {
1425 	return (0);
1426 }
1427 EXPORT_SYMBOL(spl_kmem_cache_reap_active);
1428 
1429 /*
1430  * Reap all free slabs from all registered caches.
1431  */
1432 void
1433 spl_kmem_reap(void)
1434 {
1435 	spl_kmem_cache_t *skc = NULL;
1436 
1437 	down_read(&spl_kmem_cache_sem);
1438 	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
1439 		spl_kmem_cache_reap_now(skc);
1440 	}
1441 	up_read(&spl_kmem_cache_sem);
1442 }
1443 EXPORT_SYMBOL(spl_kmem_reap);
1444 
1445 int
1446 spl_kmem_cache_init(void)
1447 {
1448 	init_rwsem(&spl_kmem_cache_sem);
1449 	INIT_LIST_HEAD(&spl_kmem_cache_list);
1450 	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
1451 	    spl_kmem_cache_kmem_threads, maxclsyspri,
1452 	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
1453 	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
1454 
1455 	if (spl_kmem_cache_taskq == NULL)
1456 		return (-ENOMEM);
1457 
1458 	return (0);
1459 }
1460 
1461 void
1462 spl_kmem_cache_fini(void)
1463 {
1464 	taskq_destroy(spl_kmem_cache_taskq);
1465 }
1466