xref: /linux/mm/slub.c (revision d04baa157d1b35cbd27c87b4a13111d9675b61f3)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks or atomic operations
6  * and only uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter
9  * (C) 2011 Linux Foundation, Christoph Lameter
10  */
11 
12 #include <linux/mm.h>
13 #include <linux/swap.h> /* struct reclaim_state */
14 #include <linux/module.h>
15 #include <linux/bit_spinlock.h>
16 #include <linux/interrupt.h>
17 #include <linux/bitops.h>
18 #include <linux/slab.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/kmemcheck.h>
22 #include <linux/cpu.h>
23 #include <linux/cpuset.h>
24 #include <linux/mempolicy.h>
25 #include <linux/ctype.h>
26 #include <linux/debugobjects.h>
27 #include <linux/kallsyms.h>
28 #include <linux/memory.h>
29 #include <linux/math64.h>
30 #include <linux/fault-inject.h>
31 #include <linux/stacktrace.h>
32 
33 #include <trace/events/kmem.h>
34 
35 /*
36  * Lock order:
37  *   1. slub_lock (Global Semaphore)
38  *   2. node->list_lock
39  *   3. slab_lock(page) (Only on some arches and for debugging)
40  *
41  *   slub_lock
42  *
43  *   The role of the slub_lock is to protect the list of all the slabs
44  *   and to synchronize major metadata changes to slab cache structures.
45  *
46  *   The slab_lock is only used for debugging and on arches that do not
47  *   have the ability to do a cmpxchg_double. It only protects the second
48  * double word in the page struct, meaning:
49  *	A. page->freelist	-> List of free objects in a page
50  *	B. page->counters	-> Counters of objects
51  *	C. page->frozen		-> frozen state
52  *
53  *   If a slab is frozen then it is exempt from list management. It is not
54  *   on any list. The processor that froze the slab is the one who can
55  *   perform list operations on the page. Other processors may put objects
56  *   onto the freelist but the processor that froze the slab is the only
57  *   one that can retrieve the objects from the page's freelist.
58  *
59  *   The list_lock protects the partial and full lists on each node and
60  *   the partial slab counter. If taken then no new slabs may be added or
61  *   removed from the lists, nor may the number of partial slabs be modified.
62  *   (Note that the total number of slabs is an atomic value that may be
63  *   modified without taking the list lock).
64  *
65  *   The list_lock is a centralized lock and thus we avoid taking it as
66  *   much as possible. As long as SLUB does not have to handle partial
67  *   slabs, operations can continue without any centralized lock. F.e.
68  *   allocating a long series of objects that fill up slabs does not require
69  *   the list lock.
70  *   Interrupts are disabled during allocation and deallocation in order to
71  *   make the slab allocator safe to use in the context of an irq. In addition
72  *   interrupts are disabled to ensure that the processor does not change
73  *   while handling per_cpu slabs, due to kernel preemption.
74  *
75  * SLUB assigns one slab for allocation to each processor.
76  * Allocations only occur from these slabs called cpu slabs.
77  *
78  * Slabs with free elements are kept on a partial list and during regular
79  * operations no list for full slabs is used. If an object in a full slab is
80  * freed then the slab will show up again on the partial lists.
81  * We track full slabs for debugging purposes though because otherwise we
82  * cannot scan all objects.
83  *
84  * Slabs are freed when they become empty. Teardown and setup is
85  * minimal so we rely on the page allocators per cpu caches for
86  * fast frees and allocs.
87  *
88  * Overloading of page flags that are otherwise used for LRU management.
89  *
90  * PageActive 		The slab is frozen and exempt from list processing.
91  * 			This means that the slab is dedicated to a purpose
92  * 			such as satisfying allocations for a specific
93  * 			processor. Objects may be freed in the slab while
94  * 			it is frozen but slab_free will then skip the usual
95  * 			list operations. It is up to the processor holding
96  * 			the slab to integrate the slab into the slab lists
97  * 			when the slab is no longer needed.
98  *
99  * 			One use of this flag is to mark slabs that are
100  * 			used for allocations. Then such a slab becomes a cpu
101  * 			slab. The cpu slab may be equipped with an additional
102  * 			freelist that allows lockless access to
103  * 			free objects in addition to the regular freelist
104  * 			that requires the slab lock.
105  *
106  * PageError		Slab requires special handling due to debug
107  * 			options set. This moves	slab handling out of
108  * 			the fast path and disables lockless freelists.
109  */
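/*
 * Hedged sketch, not compiled into the allocator: the frozen-slab rule
 * above means only the processor that froze a slab moves it between
 * lists. A free path that finds the slab unfrozen does roughly the
 * following (the real logic lives in __slab_free() and
 * deactivate_slab() further down):
 *
 *	if (!page->frozen) {
 *		spin_lock(&n->list_lock);
 *		add_partial(n, page, DEACTIVATE_TO_TAIL);
 *		spin_unlock(&n->list_lock);
 *	}
 */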
110 
111 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
112 		SLAB_TRACE | SLAB_DEBUG_FREE)
113 
114 static inline int kmem_cache_debug(struct kmem_cache *s)
115 {
116 #ifdef CONFIG_SLUB_DEBUG
117 	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
118 #else
119 	return 0;
120 #endif
121 }
122 
123 /*
124  * Issues still to be resolved:
125  *
126  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
127  *
128  * - Variable sizing of the per node arrays
129  */
130 
131 /* Enable to test recovery from slab corruption on boot */
132 #undef SLUB_RESILIENCY_TEST
133 
134 /* Enable to log cmpxchg failures */
135 #undef SLUB_DEBUG_CMPXCHG
136 
137 /*
138  * Minimum number of partial slabs. These will be left on the partial
139  * lists even if they are empty. kmem_cache_shrink may reclaim them.
140  */
141 #define MIN_PARTIAL 5
142 
143 /*
144  * Maximum number of desirable partial slabs.
145  * The existence of more partial slabs makes kmem_cache_shrink
146  * sort the partial list by the number of objects in the slabs.
147  */
148 #define MAX_PARTIAL 10
149 
150 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
151 				SLAB_POISON | SLAB_STORE_USER)
152 
153 /*
154  * Debugging flags that require metadata to be stored in the slab.  These get
155  * disabled when slub_debug=O is used and a cache's min order increases with
156  * metadata.
157  */
158 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
159 
160 /*
161  * Set of flags that will prevent slab merging
162  */
163 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
164 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
165 		SLAB_FAILSLAB)
166 
167 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
168 		SLAB_CACHE_DMA | SLAB_NOTRACK)
169 
170 #define OO_SHIFT	16
171 #define OO_MASK		((1 << OO_SHIFT) - 1)
172 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
173 
174 /* Internal SLUB flags */
175 #define __OBJECT_POISON		0x80000000UL /* Poison object */
176 #define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */
177 
178 static int kmem_size = sizeof(struct kmem_cache);
179 
180 #ifdef CONFIG_SMP
181 static struct notifier_block slab_notifier;
182 #endif
183 
184 static enum {
185 	DOWN,		/* No slab functionality available */
186 	PARTIAL,	/* Kmem_cache_node works */
187 	UP,		/* Everything works but does not show up in sysfs */
188 	SYSFS		/* Sysfs up */
189 } slab_state = DOWN;
190 
191 /* A list of all slab caches on the system */
192 static DECLARE_RWSEM(slub_lock);
193 static LIST_HEAD(slab_caches);
194 
195 /*
196  * Tracking user of a slab.
197  */
198 #define TRACK_ADDRS_COUNT 16
199 struct track {
200 	unsigned long addr;	/* Called from address */
201 #ifdef CONFIG_STACKTRACE
202 	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
203 #endif
204 	int cpu;		/* Was running on cpu */
205 	int pid;		/* Pid context */
206 	unsigned long when;	/* When did the operation occur */
207 };
208 
209 enum track_item { TRACK_ALLOC, TRACK_FREE };
210 
211 #ifdef CONFIG_SYSFS
212 static int sysfs_slab_add(struct kmem_cache *);
213 static int sysfs_slab_alias(struct kmem_cache *, const char *);
214 static void sysfs_slab_remove(struct kmem_cache *);
215 
216 #else
217 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
218 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
219 							{ return 0; }
220 static inline void sysfs_slab_remove(struct kmem_cache *s)
221 {
222 	kfree(s->name);
223 	kfree(s);
224 }
225 
226 #endif
227 
228 static inline void stat(const struct kmem_cache *s, enum stat_item si)
229 {
230 #ifdef CONFIG_SLUB_STATS
231 	__this_cpu_inc(s->cpu_slab->stat[si]);
232 #endif
233 }
234 
235 /********************************************************************
236  * 			Core slab cache functions
237  *******************************************************************/
238 
239 int slab_is_available(void)
240 {
241 	return slab_state >= UP;
242 }
243 
244 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
245 {
246 	return s->node[node];
247 }
248 
249 /* Verify that a pointer has an address that is valid within a slab page */
250 static inline int check_valid_pointer(struct kmem_cache *s,
251 				struct page *page, const void *object)
252 {
253 	void *base;
254 
255 	if (!object)
256 		return 1;
257 
258 	base = page_address(page);
259 	if (object < base || object >= base + page->objects * s->size ||
260 		(object - base) % s->size) {
261 		return 0;
262 	}
263 
264 	return 1;
265 }
266 
267 static inline void *get_freepointer(struct kmem_cache *s, void *object)
268 {
269 	return *(void **)(object + s->offset);
270 }
271 
272 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
273 {
274 	void *p;
275 
276 #ifdef CONFIG_DEBUG_PAGEALLOC
277 	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
278 #else
279 	p = get_freepointer(s, object);
280 #endif
281 	return p;
282 }
283 
284 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
285 {
286 	*(void **)(object + s->offset) = fp;
287 }
288 
289 /* Loop over all objects in a slab */
290 #define for_each_object(__p, __s, __addr, __objects) \
291 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
292 			__p += (__s)->size)
293 
294 /* Determine object index from a given position */
295 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
296 {
297 	return (p - addr) / s->size;
298 }
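/*
 * Worked example with assumed numbers: for a cache with s->size == 128,
 * an object sitting at addr + 384 gets slab_index() == 384 / 128 == 3,
 * i.e. bit 3 in the object bitmaps built by get_map() below.
 */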
299 
300 static inline size_t slab_ksize(const struct kmem_cache *s)
301 {
302 #ifdef CONFIG_SLUB_DEBUG
303 	/*
304 	 * Debugging requires use of the padding between object
305 	 * and whatever may come after it.
306 	 */
307 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
308 		return s->objsize;
309 
310 #endif
311 	/*
312 	 * If we have the need to store the freelist pointer
313 	 * back there or track user information then we can
314 	 * only use the space before that information.
315 	 */
316 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
317 		return s->inuse;
318 	/*
319 	 * Else we can use all the padding etc for the allocation
320 	 */
321 	return s->size;
322 }
323 
324 static inline int order_objects(int order, unsigned long size, int reserved)
325 {
326 	return ((PAGE_SIZE << order) - reserved) / size;
327 }
328 
329 static inline struct kmem_cache_order_objects oo_make(int order,
330 		unsigned long size, int reserved)
331 {
332 	struct kmem_cache_order_objects x = {
333 		(order << OO_SHIFT) + order_objects(order, size, reserved)
334 	};
335 
336 	return x;
337 }
338 
339 static inline int oo_order(struct kmem_cache_order_objects x)
340 {
341 	return x.x >> OO_SHIFT;
342 }
343 
344 static inline int oo_objects(struct kmem_cache_order_objects x)
345 {
346 	return x.x & OO_MASK;
347 }
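/*
 * Worked example (assuming PAGE_SIZE == 4096): oo_make(2, 256, 0) packs
 * order 2 together with order_objects(2, 256, 0) == 16384 / 256 == 64
 * objects into x.x == (2 << OO_SHIFT) + 64 == 0x20040, from which
 * oo_order() recovers 2 and oo_objects() recovers 64.
 */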
348 
349 /*
350  * Per slab locking using the pagelock
351  */
352 static __always_inline void slab_lock(struct page *page)
353 {
354 	bit_spin_lock(PG_locked, &page->flags);
355 }
356 
357 static __always_inline void slab_unlock(struct page *page)
358 {
359 	__bit_spin_unlock(PG_locked, &page->flags);
360 }
361 
362 /* Interrupts must be disabled (for the fallback code to work right) */
363 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
364 		void *freelist_old, unsigned long counters_old,
365 		void *freelist_new, unsigned long counters_new,
366 		const char *n)
367 {
368 	VM_BUG_ON(!irqs_disabled());
369 #ifdef CONFIG_CMPXCHG_DOUBLE
370 	if (s->flags & __CMPXCHG_DOUBLE) {
371 		if (cmpxchg_double(&page->freelist, &page->counters,
372 			freelist_old, counters_old,
373 			freelist_new, counters_new))
374 			return 1;
375 	} else
376 #endif
377 	{
378 		slab_lock(page);
379 		if (page->freelist == freelist_old && page->counters == counters_old) {
380 			page->freelist = freelist_new;
381 			page->counters = counters_new;
382 			slab_unlock(page);
383 			return 1;
384 		}
385 		slab_unlock(page);
386 	}
387 
388 	cpu_relax();
389 	stat(s, CMPXCHG_DOUBLE_FAIL);
390 
391 #ifdef SLUB_DEBUG_CMPXCHG
392 	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
393 #endif
394 
395 	return 0;
396 }
397 
398 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
399 		void *freelist_old, unsigned long counters_old,
400 		void *freelist_new, unsigned long counters_new,
401 		const char *n)
402 {
403 #ifdef CONFIG_CMPXCHG_DOUBLE
404 	if (s->flags & __CMPXCHG_DOUBLE) {
405 		if (cmpxchg_double(&page->freelist, &page->counters,
406 			freelist_old, counters_old,
407 			freelist_new, counters_new))
408 			return 1;
409 	} else
410 #endif
411 	{
412 		unsigned long flags;
413 
414 		local_irq_save(flags);
415 		slab_lock(page);
416 		if (page->freelist == freelist_old && page->counters == counters_old) {
417 			page->freelist = freelist_new;
418 			page->counters = counters_new;
419 			slab_unlock(page);
420 			local_irq_restore(flags);
421 			return 1;
422 		}
423 		slab_unlock(page);
424 		local_irq_restore(flags);
425 	}
426 
427 	cpu_relax();
428 	stat(s, CMPXCHG_DOUBLE_FAIL);
429 
430 #ifdef SLUB_DEBUG_CMPXCHG
431 	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
432 #endif
433 
434 	return 0;
435 }
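/*
 * Sketch of the usual caller pattern for the helpers above (hedged;
 * the real users are acquire_slab(), deactivate_slab() and
 * __slab_free()): snapshot freelist and counters, derive the new
 * values and retry until no other processor raced with us.
 *
 *	do {
 *		prior = page->freelist;
 *		counters = page->counters;
 *		new.counters = counters;
 *		new.frozen = 1;
 *	} while (!cmpxchg_double_slab(s, page,
 *			prior, counters,
 *			prior, new.counters,
 *			"freeze example"));
 */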
436 
437 #ifdef CONFIG_SLUB_DEBUG
438 /*
439  * Determine a map of objects in use on a page.
440  *
441  * Node listlock must be held to guarantee that the page does
442  * not vanish from under us.
443  */
444 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
445 {
446 	void *p;
447 	void *addr = page_address(page);
448 
449 	for (p = page->freelist; p; p = get_freepointer(s, p))
450 		set_bit(slab_index(p, s, addr), map);
451 }
452 
453 /*
454  * Debug settings:
455  */
456 #ifdef CONFIG_SLUB_DEBUG_ON
457 static int slub_debug = DEBUG_DEFAULT_FLAGS;
458 #else
459 static int slub_debug;
460 #endif
461 
462 static char *slub_debug_slabs;
463 static int disable_higher_order_debug;
464 
465 /*
466  * Object debugging
467  */
468 static void print_section(char *text, u8 *addr, unsigned int length)
469 {
470 	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
471 			length, 1);
472 }
473 
474 static struct track *get_track(struct kmem_cache *s, void *object,
475 	enum track_item alloc)
476 {
477 	struct track *p;
478 
479 	if (s->offset)
480 		p = object + s->offset + sizeof(void *);
481 	else
482 		p = object + s->inuse;
483 
484 	return p + alloc;
485 }
486 
487 static void set_track(struct kmem_cache *s, void *object,
488 			enum track_item alloc, unsigned long addr)
489 {
490 	struct track *p = get_track(s, object, alloc);
491 
492 	if (addr) {
493 #ifdef CONFIG_STACKTRACE
494 		struct stack_trace trace;
495 		int i;
496 
497 		trace.nr_entries = 0;
498 		trace.max_entries = TRACK_ADDRS_COUNT;
499 		trace.entries = p->addrs;
500 		trace.skip = 3;
501 		save_stack_trace(&trace);
502 
503 		/* See rant in lockdep.c */
504 		if (trace.nr_entries != 0 &&
505 		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
506 			trace.nr_entries--;
507 
508 		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
509 			p->addrs[i] = 0;
510 #endif
511 		p->addr = addr;
512 		p->cpu = smp_processor_id();
513 		p->pid = current->pid;
514 		p->when = jiffies;
515 	} else
516 		memset(p, 0, sizeof(struct track));
517 }
518 
519 static void init_tracking(struct kmem_cache *s, void *object)
520 {
521 	if (!(s->flags & SLAB_STORE_USER))
522 		return;
523 
524 	set_track(s, object, TRACK_FREE, 0UL);
525 	set_track(s, object, TRACK_ALLOC, 0UL);
526 }
527 
528 static void print_track(const char *s, struct track *t)
529 {
530 	if (!t->addr)
531 		return;
532 
533 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
534 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
535 #ifdef CONFIG_STACKTRACE
536 	{
537 		int i;
538 		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
539 			if (t->addrs[i])
540 				printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
541 			else
542 				break;
543 	}
544 #endif
545 }
546 
547 static void print_tracking(struct kmem_cache *s, void *object)
548 {
549 	if (!(s->flags & SLAB_STORE_USER))
550 		return;
551 
552 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
553 	print_track("Freed", get_track(s, object, TRACK_FREE));
554 }
555 
556 static void print_page_info(struct page *page)
557 {
558 	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
559 		page, page->objects, page->inuse, page->freelist, page->flags);
560 
561 }
562 
563 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
564 {
565 	va_list args;
566 	char buf[100];
567 
568 	va_start(args, fmt);
569 	vsnprintf(buf, sizeof(buf), fmt, args);
570 	va_end(args);
571 	printk(KERN_ERR "========================================"
572 			"=====================================\n");
573 	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
574 	printk(KERN_ERR "----------------------------------------"
575 			"-------------------------------------\n\n");
576 }
577 
578 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
579 {
580 	va_list args;
581 	char buf[100];
582 
583 	va_start(args, fmt);
584 	vsnprintf(buf, sizeof(buf), fmt, args);
585 	va_end(args);
586 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
587 }
588 
589 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
590 {
591 	unsigned int off;	/* Offset of last byte */
592 	u8 *addr = page_address(page);
593 
594 	print_tracking(s, p);
595 
596 	print_page_info(page);
597 
598 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
599 			p, p - addr, get_freepointer(s, p));
600 
601 	if (p > addr + 16)
602 		print_section("Bytes b4 ", p - 16, 16);
603 
604 	print_section("Object ", p, min_t(unsigned long, s->objsize,
605 				PAGE_SIZE));
606 	if (s->flags & SLAB_RED_ZONE)
607 		print_section("Redzone ", p + s->objsize,
608 			s->inuse - s->objsize);
609 
610 	if (s->offset)
611 		off = s->offset + sizeof(void *);
612 	else
613 		off = s->inuse;
614 
615 	if (s->flags & SLAB_STORE_USER)
616 		off += 2 * sizeof(struct track);
617 
618 	if (off != s->size)
619 		/* Beginning of the filler is the free pointer */
620 		print_section("Padding ", p + off, s->size - off);
621 
622 	dump_stack();
623 }
624 
625 static void object_err(struct kmem_cache *s, struct page *page,
626 			u8 *object, char *reason)
627 {
628 	slab_bug(s, "%s", reason);
629 	print_trailer(s, page, object);
630 }
631 
632 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
633 {
634 	va_list args;
635 	char buf[100];
636 
637 	va_start(args, fmt);
638 	vsnprintf(buf, sizeof(buf), fmt, args);
639 	va_end(args);
640 	slab_bug(s, "%s", buf);
641 	print_page_info(page);
642 	dump_stack();
643 }
644 
645 static void init_object(struct kmem_cache *s, void *object, u8 val)
646 {
647 	u8 *p = object;
648 
649 	if (s->flags & __OBJECT_POISON) {
650 		memset(p, POISON_FREE, s->objsize - 1);
651 		p[s->objsize - 1] = POISON_END;
652 	}
653 
654 	if (s->flags & SLAB_RED_ZONE)
655 		memset(p + s->objsize, val, s->inuse - s->objsize);
656 }
657 
658 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
659 						void *from, void *to)
660 {
661 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
662 	memset(from, data, to - from);
663 }
664 
665 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
666 			u8 *object, char *what,
667 			u8 *start, unsigned int value, unsigned int bytes)
668 {
669 	u8 *fault;
670 	u8 *end;
671 
672 	fault = memchr_inv(start, value, bytes);
673 	if (!fault)
674 		return 1;
675 
676 	end = start + bytes;
677 	while (end > fault && end[-1] == value)
678 		end--;
679 
680 	slab_bug(s, "%s overwritten", what);
681 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
682 					fault, end - 1, fault[0], value);
683 	print_trailer(s, page, object);
684 
685 	restore_bytes(s, what, value, fault, end);
686 	return 0;
687 }
688 
689 /*
690  * Object layout:
691  *
692  * object address
693  * 	Bytes of the object to be managed.
694  * 	If the freepointer may overlay the object then the free
695  * 	pointer is the first word of the object.
696  *
697  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
698  * 	0xa5 (POISON_END)
699  *
700  * object + s->objsize
701  * 	Padding to reach word boundary. This is also used for Redzoning.
702  * 	Padding is extended by another word if Redzoning is enabled and
703  * 	objsize == inuse.
704  *
705  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
706  * 	0xcc (RED_ACTIVE) for objects in use.
707  *
708  * object + s->inuse
709  * 	Meta data starts here.
710  *
711  * 	A. Free pointer (if we cannot overwrite object on free)
712  * 	B. Tracking data for SLAB_STORE_USER
713  * 	C. Padding to reach required alignment boundary or at minimum
714  * 		one word if debugging is on to be able to detect writes
715  * 		before the word boundary.
716  *
717  *	Padding is done using 0x5a (POISON_INUSE)
718  *
719  * object + s->size
720  * 	Nothing is used beyond s->size.
721  *
722  * If slabcaches are merged then the objsize and inuse boundaries are mostly
723  * ignored, and therefore no slab options that rely on these boundaries
724  * may be used with merged slabcaches.
725  */
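/*
 * Worked example of the layout above (hypothetical cache on a 64-bit
 * kernel booted with slub_debug=ZUP, objsize == 20): bytes 0-19 hold
 * the object and are poisoned with 0x6b/0xa5 while free, bytes 20-23
 * pad to the word boundary and carry the red zone, the free pointer is
 * stored at s->inuse because it may not overlay a poisoned object, two
 * struct track records (TRACK_ALLOC/TRACK_FREE) follow, and anything
 * left up to s->size is 0x5a (POISON_INUSE) padding.
 */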
726 
727 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
728 {
729 	unsigned long off = s->inuse;	/* The end of info */
730 
731 	if (s->offset)
732 		/* Freepointer is placed after the object. */
733 		off += sizeof(void *);
734 
735 	if (s->flags & SLAB_STORE_USER)
736 		/* We also have user information there */
737 		off += 2 * sizeof(struct track);
738 
739 	if (s->size == off)
740 		return 1;
741 
742 	return check_bytes_and_report(s, page, p, "Object padding",
743 				p + off, POISON_INUSE, s->size - off);
744 }
745 
746 /* Check the pad bytes at the end of a slab page */
747 static int slab_pad_check(struct kmem_cache *s, struct page *page)
748 {
749 	u8 *start;
750 	u8 *fault;
751 	u8 *end;
752 	int length;
753 	int remainder;
754 
755 	if (!(s->flags & SLAB_POISON))
756 		return 1;
757 
758 	start = page_address(page);
759 	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
760 	end = start + length;
761 	remainder = length % s->size;
762 	if (!remainder)
763 		return 1;
764 
765 	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
766 	if (!fault)
767 		return 1;
768 	while (end > fault && end[-1] == POISON_INUSE)
769 		end--;
770 
771 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
772 	print_section("Padding ", end - remainder, remainder);
773 
774 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
775 	return 0;
776 }
777 
778 static int check_object(struct kmem_cache *s, struct page *page,
779 					void *object, u8 val)
780 {
781 	u8 *p = object;
782 	u8 *endobject = object + s->objsize;
783 
784 	if (s->flags & SLAB_RED_ZONE) {
785 		if (!check_bytes_and_report(s, page, object, "Redzone",
786 			endobject, val, s->inuse - s->objsize))
787 			return 0;
788 	} else {
789 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
790 			check_bytes_and_report(s, page, p, "Alignment padding",
791 				endobject, POISON_INUSE, s->inuse - s->objsize);
792 		}
793 	}
794 
795 	if (s->flags & SLAB_POISON) {
796 		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
797 			(!check_bytes_and_report(s, page, p, "Poison", p,
798 					POISON_FREE, s->objsize - 1) ||
799 			 !check_bytes_and_report(s, page, p, "Poison",
800 				p + s->objsize - 1, POISON_END, 1)))
801 			return 0;
802 		/*
803 		 * check_pad_bytes cleans up on its own.
804 		 */
805 		check_pad_bytes(s, page, p);
806 	}
807 
808 	if (!s->offset && val == SLUB_RED_ACTIVE)
809 		/*
810 		 * Object and freepointer overlap. Cannot check
811 		 * freepointer while object is allocated.
812 		 */
813 		return 1;
814 
815 	/* Check free pointer validity */
816 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
817 		object_err(s, page, p, "Freepointer corrupt");
818 		/*
819 		 * No choice but to zap it and thus lose the remainder
820 		 * of the free objects in this slab. May cause
821 		 * another error because the object count is now wrong.
822 		 */
823 		set_freepointer(s, p, NULL);
824 		return 0;
825 	}
826 	return 1;
827 }
828 
829 static int check_slab(struct kmem_cache *s, struct page *page)
830 {
831 	int maxobj;
832 
833 	VM_BUG_ON(!irqs_disabled());
834 
835 	if (!PageSlab(page)) {
836 		slab_err(s, page, "Not a valid slab page");
837 		return 0;
838 	}
839 
840 	maxobj = order_objects(compound_order(page), s->size, s->reserved);
841 	if (page->objects > maxobj) {
842 		slab_err(s, page, "objects %u > max %u",
843 			page->objects, maxobj);
844 		return 0;
845 	}
846 	if (page->inuse > page->objects) {
847 		slab_err(s, page, "inuse %u > max %u",
848 			page->inuse, page->objects);
849 		return 0;
850 	}
851 	/* Slab_pad_check fixes things up after itself */
852 	slab_pad_check(s, page);
853 	return 1;
854 }
855 
856 /*
857  * Determine if a certain object on a page is on the freelist. Must hold the
858  * slab lock to guarantee that the chains are in a consistent state.
859  */
860 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
861 {
862 	int nr = 0;
863 	void *fp;
864 	void *object = NULL;
865 	unsigned long max_objects;
866 
867 	fp = page->freelist;
868 	while (fp && nr <= page->objects) {
869 		if (fp == search)
870 			return 1;
871 		if (!check_valid_pointer(s, page, fp)) {
872 			if (object) {
873 				object_err(s, page, object,
874 					"Freechain corrupt");
875 				set_freepointer(s, object, NULL);
876 				break;
877 			} else {
878 				slab_err(s, page, "Freepointer corrupt");
879 				page->freelist = NULL;
880 				page->inuse = page->objects;
881 				slab_fix(s, "Freelist cleared");
882 				return 0;
883 			}
884 			break;
885 		}
886 		object = fp;
887 		fp = get_freepointer(s, object);
888 		nr++;
889 	}
890 
891 	max_objects = order_objects(compound_order(page), s->size, s->reserved);
892 	if (max_objects > MAX_OBJS_PER_PAGE)
893 		max_objects = MAX_OBJS_PER_PAGE;
894 
895 	if (page->objects != max_objects) {
896 		slab_err(s, page, "Wrong number of objects. Found %d but "
897 			"should be %d", page->objects, max_objects);
898 		page->objects = max_objects;
899 		slab_fix(s, "Number of objects adjusted.");
900 	}
901 	if (page->inuse != page->objects - nr) {
902 		slab_err(s, page, "Wrong object count. Counter is %d but "
903 			"counted were %d", page->inuse, page->objects - nr);
904 		page->inuse = page->objects - nr;
905 		slab_fix(s, "Object count adjusted.");
906 	}
907 	return search == NULL;
908 }
909 
910 static void trace(struct kmem_cache *s, struct page *page, void *object,
911 								int alloc)
912 {
913 	if (s->flags & SLAB_TRACE) {
914 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
915 			s->name,
916 			alloc ? "alloc" : "free",
917 			object, page->inuse,
918 			page->freelist);
919 
920 		if (!alloc)
921 			print_section("Object ", (void *)object, s->objsize);
922 
923 		dump_stack();
924 	}
925 }
926 
927 /*
928  * Hooks for other subsystems that check memory allocations. In a typical
929  * production configuration these hooks all should produce no code at all.
930  */
931 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
932 {
933 	flags &= gfp_allowed_mask;
934 	lockdep_trace_alloc(flags);
935 	might_sleep_if(flags & __GFP_WAIT);
936 
937 	return should_failslab(s->objsize, flags, s->flags);
938 }
939 
940 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
941 {
942 	flags &= gfp_allowed_mask;
943 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
944 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
945 }
946 
947 static inline void slab_free_hook(struct kmem_cache *s, void *x)
948 {
949 	kmemleak_free_recursive(x, s->flags);
950 
951 	/*
952 	 * Trouble is that we may no longer disable interrupts in the fast path,
953 	 * so in order to make the debug calls that expect irqs to be
954 	 * disabled we need to disable interrupts temporarily.
955 	 */
956 #if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
957 	{
958 		unsigned long flags;
959 
960 		local_irq_save(flags);
961 		kmemcheck_slab_free(s, x, s->objsize);
962 		debug_check_no_locks_freed(x, s->objsize);
963 		local_irq_restore(flags);
964 	}
965 #endif
966 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
967 		debug_check_no_obj_freed(x, s->objsize);
968 }
969 
970 /*
971  * Tracking of fully allocated slabs for debugging purposes.
972  *
973  * list_lock must be held.
974  */
975 static void add_full(struct kmem_cache *s,
976 	struct kmem_cache_node *n, struct page *page)
977 {
978 	if (!(s->flags & SLAB_STORE_USER))
979 		return;
980 
981 	list_add(&page->lru, &n->full);
982 }
983 
984 /*
985  * list_lock must be held.
986  */
987 static void remove_full(struct kmem_cache *s, struct page *page)
988 {
989 	if (!(s->flags & SLAB_STORE_USER))
990 		return;
991 
992 	list_del(&page->lru);
993 }
994 
995 /* Tracking of the number of slabs for debugging purposes */
996 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
997 {
998 	struct kmem_cache_node *n = get_node(s, node);
999 
1000 	return atomic_long_read(&n->nr_slabs);
1001 }
1002 
1003 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1004 {
1005 	return atomic_long_read(&n->nr_slabs);
1006 }
1007 
1008 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1009 {
1010 	struct kmem_cache_node *n = get_node(s, node);
1011 
1012 	/*
1013 	 * May be called early in order to allocate a slab for the
1014 	 * kmem_cache_node structure. Solve the chicken-egg
1015 	 * dilemma by deferring the increment of the count during
1016 	 * bootstrap (see early_kmem_cache_node_alloc).
1017 	 */
1018 	if (n) {
1019 		atomic_long_inc(&n->nr_slabs);
1020 		atomic_long_add(objects, &n->total_objects);
1021 	}
1022 }
1023 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1024 {
1025 	struct kmem_cache_node *n = get_node(s, node);
1026 
1027 	atomic_long_dec(&n->nr_slabs);
1028 	atomic_long_sub(objects, &n->total_objects);
1029 }
1030 
1031 /* Object debug checks for alloc/free paths */
1032 static void setup_object_debug(struct kmem_cache *s, struct page *page,
1033 								void *object)
1034 {
1035 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1036 		return;
1037 
1038 	init_object(s, object, SLUB_RED_INACTIVE);
1039 	init_tracking(s, object);
1040 }
1041 
1042 static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
1043 					void *object, unsigned long addr)
1044 {
1045 	if (!check_slab(s, page))
1046 		goto bad;
1047 
1048 	if (!check_valid_pointer(s, page, object)) {
1049 		object_err(s, page, object, "Freelist Pointer check fails");
1050 		goto bad;
1051 	}
1052 
1053 	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1054 		goto bad;
1055 
1056 	/* Success. Perform special debug activities for allocs */
1057 	if (s->flags & SLAB_STORE_USER)
1058 		set_track(s, object, TRACK_ALLOC, addr);
1059 	trace(s, page, object, 1);
1060 	init_object(s, object, SLUB_RED_ACTIVE);
1061 	return 1;
1062 
1063 bad:
1064 	if (PageSlab(page)) {
1065 		/*
1066 		 * If this is a slab page then let's do the best we can
1067 		 * to avoid issues in the future. Marking all objects
1068 		 * as used avoids touching the remaining objects.
1069 		 */
1070 		slab_fix(s, "Marking all objects used");
1071 		page->inuse = page->objects;
1072 		page->freelist = NULL;
1073 	}
1074 	return 0;
1075 }
1076 
1077 static noinline int free_debug_processing(struct kmem_cache *s,
1078 		 struct page *page, void *object, unsigned long addr)
1079 {
1080 	unsigned long flags;
1081 	int rc = 0;
1082 
1083 	local_irq_save(flags);
1084 	slab_lock(page);
1085 
1086 	if (!check_slab(s, page))
1087 		goto fail;
1088 
1089 	if (!check_valid_pointer(s, page, object)) {
1090 		slab_err(s, page, "Invalid object pointer 0x%p", object);
1091 		goto fail;
1092 	}
1093 
1094 	if (on_freelist(s, page, object)) {
1095 		object_err(s, page, object, "Object already free");
1096 		goto fail;
1097 	}
1098 
1099 	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1100 		goto out;
1101 
1102 	if (unlikely(s != page->slab)) {
1103 		if (!PageSlab(page)) {
1104 			slab_err(s, page, "Attempt to free object(0x%p) "
1105 				"outside of slab", object);
1106 		} else if (!page->slab) {
1107 			printk(KERN_ERR
1108 				"SLUB <none>: no slab for object 0x%p.\n",
1109 						object);
1110 			dump_stack();
1111 		} else
1112 			object_err(s, page, object,
1113 					"page slab pointer corrupt.");
1114 		goto fail;
1115 	}
1116 
1117 	if (s->flags & SLAB_STORE_USER)
1118 		set_track(s, object, TRACK_FREE, addr);
1119 	trace(s, page, object, 0);
1120 	init_object(s, object, SLUB_RED_INACTIVE);
1121 	rc = 1;
1122 out:
1123 	slab_unlock(page);
1124 	local_irq_restore(flags);
1125 	return rc;
1126 
1127 fail:
1128 	slab_fix(s, "Object at 0x%p not freed", object);
1129 	goto out;
1130 }
1131 
1132 static int __init setup_slub_debug(char *str)
1133 {
1134 	slub_debug = DEBUG_DEFAULT_FLAGS;
1135 	if (*str++ != '=' || !*str)
1136 		/*
1137 		 * No options specified. Switch on full debugging.
1138 		 */
1139 		goto out;
1140 
1141 	if (*str == ',')
1142 		/*
1143 		 * No options but restriction on slabs. This means full
1144 		 * debugging for slabs matching a pattern.
1145 		 */
1146 		goto check_slabs;
1147 
1148 	if (tolower(*str) == 'o') {
1149 		/*
1150 		 * Avoid enabling debugging on caches if its minimum order
1151 		 * would increase as a result.
1152 		 */
1153 		disable_higher_order_debug = 1;
1154 		goto out;
1155 	}
1156 
1157 	slub_debug = 0;
1158 	if (*str == '-')
1159 		/*
1160 		 * Switch off all debugging measures.
1161 		 */
1162 		goto out;
1163 
1164 	/*
1165 	 * Determine which debug features should be switched on
1166 	 */
1167 	for (; *str && *str != ','; str++) {
1168 		switch (tolower(*str)) {
1169 		case 'f':
1170 			slub_debug |= SLAB_DEBUG_FREE;
1171 			break;
1172 		case 'z':
1173 			slub_debug |= SLAB_RED_ZONE;
1174 			break;
1175 		case 'p':
1176 			slub_debug |= SLAB_POISON;
1177 			break;
1178 		case 'u':
1179 			slub_debug |= SLAB_STORE_USER;
1180 			break;
1181 		case 't':
1182 			slub_debug |= SLAB_TRACE;
1183 			break;
1184 		case 'a':
1185 			slub_debug |= SLAB_FAILSLAB;
1186 			break;
1187 		default:
1188 			printk(KERN_ERR "slub_debug option '%c' "
1189 				"unknown. skipped\n", *str);
1190 		}
1191 	}
1192 
1193 check_slabs:
1194 	if (*str == ',')
1195 		slub_debug_slabs = str + 1;
1196 out:
1197 	return 1;
1198 }
1199 
1200 __setup("slub_debug", setup_slub_debug);
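/*
 * Example boot parameters accepted by the parser above (a hedged
 * summary; Documentation/vm/slub.txt has the full list):
 *
 *	slub_debug		enable DEBUG_DEFAULT_FLAGS for all caches
 *	slub_debug=FZP		sanity checks, red zoning and poisoning
 *	slub_debug=,dentry	full debugging, but only for the dentry cache
 *	slub_debug=O		default debugging, except for caches whose
 *				minimum order would grow due to the metadata
 */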
1201 
1202 static unsigned long kmem_cache_flags(unsigned long objsize,
1203 	unsigned long flags, const char *name,
1204 	void (*ctor)(void *))
1205 {
1206 	/*
1207 	 * Enable debugging if selected on the kernel commandline.
1208 	 */
1209 	if (slub_debug && (!slub_debug_slabs ||
1210 		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1211 		flags |= slub_debug;
1212 
1213 	return flags;
1214 }
1215 #else
1216 static inline void setup_object_debug(struct kmem_cache *s,
1217 			struct page *page, void *object) {}
1218 
1219 static inline int alloc_debug_processing(struct kmem_cache *s,
1220 	struct page *page, void *object, unsigned long addr) { return 0; }
1221 
1222 static inline int free_debug_processing(struct kmem_cache *s,
1223 	struct page *page, void *object, unsigned long addr) { return 0; }
1224 
1225 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1226 			{ return 1; }
1227 static inline int check_object(struct kmem_cache *s, struct page *page,
1228 			void *object, u8 val) { return 1; }
1229 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1230 					struct page *page) {}
1231 static inline void remove_full(struct kmem_cache *s, struct page *page) {}
1232 static inline unsigned long kmem_cache_flags(unsigned long objsize,
1233 	unsigned long flags, const char *name,
1234 	void (*ctor)(void *))
1235 {
1236 	return flags;
1237 }
1238 #define slub_debug 0
1239 
1240 #define disable_higher_order_debug 0
1241 
1242 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1243 							{ return 0; }
1244 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1245 							{ return 0; }
1246 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1247 							int objects) {}
1248 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1249 							int objects) {}
1250 
1251 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1252 							{ return 0; }
1253 
1254 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1255 		void *object) {}
1256 
1257 static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1258 
1259 #endif /* CONFIG_SLUB_DEBUG */
1260 
1261 /*
1262  * Slab allocation and freeing
1263  */
1264 static inline struct page *alloc_slab_page(gfp_t flags, int node,
1265 					struct kmem_cache_order_objects oo)
1266 {
1267 	int order = oo_order(oo);
1268 
1269 	flags |= __GFP_NOTRACK;
1270 
1271 	if (node == NUMA_NO_NODE)
1272 		return alloc_pages(flags, order);
1273 	else
1274 		return alloc_pages_exact_node(node, flags, order);
1275 }
1276 
1277 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1278 {
1279 	struct page *page;
1280 	struct kmem_cache_order_objects oo = s->oo;
1281 	gfp_t alloc_gfp;
1282 
1283 	flags &= gfp_allowed_mask;
1284 
1285 	if (flags & __GFP_WAIT)
1286 		local_irq_enable();
1287 
1288 	flags |= s->allocflags;
1289 
1290 	/*
1291 	 * Let the initial higher-order allocation fail under memory pressure
1292 	 * so we fall-back to the minimum order allocation.
1293 	 */
1294 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1295 
1296 	page = alloc_slab_page(alloc_gfp, node, oo);
1297 	if (unlikely(!page)) {
1298 		oo = s->min;
1299 		/*
1300 		 * Allocation may have failed due to fragmentation.
1301 		 * Try a lower order alloc if possible
1302 		 */
1303 		page = alloc_slab_page(flags, node, oo);
1304 
1305 		if (page)
1306 			stat(s, ORDER_FALLBACK);
1307 	}
1308 
1309 	if (flags & __GFP_WAIT)
1310 		local_irq_disable();
1311 
1312 	if (!page)
1313 		return NULL;
1314 
1315 	if (kmemcheck_enabled
1316 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1317 		int pages = 1 << oo_order(oo);
1318 
1319 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1320 
1321 		/*
1322 		 * Objects from caches that have a constructor don't get
1323 		 * cleared when they're allocated, so we need to do it here.
1324 		 */
1325 		if (s->ctor)
1326 			kmemcheck_mark_uninitialized_pages(page, pages);
1327 		else
1328 			kmemcheck_mark_unallocated_pages(page, pages);
1329 	}
1330 
1331 	page->objects = oo_objects(oo);
1332 	mod_zone_page_state(page_zone(page),
1333 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1334 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1335 		1 << oo_order(oo));
1336 
1337 	return page;
1338 }
1339 
1340 static void setup_object(struct kmem_cache *s, struct page *page,
1341 				void *object)
1342 {
1343 	setup_object_debug(s, page, object);
1344 	if (unlikely(s->ctor))
1345 		s->ctor(object);
1346 }
1347 
1348 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1349 {
1350 	struct page *page;
1351 	void *start;
1352 	void *last;
1353 	void *p;
1354 
1355 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1356 
1357 	page = allocate_slab(s,
1358 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1359 	if (!page)
1360 		goto out;
1361 
1362 	inc_slabs_node(s, page_to_nid(page), page->objects);
1363 	page->slab = s;
1364 	page->flags |= 1 << PG_slab;
1365 
1366 	start = page_address(page);
1367 
1368 	if (unlikely(s->flags & SLAB_POISON))
1369 		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1370 
1371 	last = start;
1372 	for_each_object(p, s, start, page->objects) {
1373 		setup_object(s, page, last);
1374 		set_freepointer(s, last, p);
1375 		last = p;
1376 	}
1377 	setup_object(s, page, last);
1378 	set_freepointer(s, last, NULL);
1379 
1380 	page->freelist = start;
1381 	page->inuse = page->objects;
1382 	page->frozen = 1;
1383 out:
1384 	return page;
1385 }
1386 
1387 static void __free_slab(struct kmem_cache *s, struct page *page)
1388 {
1389 	int order = compound_order(page);
1390 	int pages = 1 << order;
1391 
1392 	if (kmem_cache_debug(s)) {
1393 		void *p;
1394 
1395 		slab_pad_check(s, page);
1396 		for_each_object(p, s, page_address(page),
1397 						page->objects)
1398 			check_object(s, page, p, SLUB_RED_INACTIVE);
1399 	}
1400 
1401 	kmemcheck_free_shadow(page, compound_order(page));
1402 
1403 	mod_zone_page_state(page_zone(page),
1404 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1405 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1406 		-pages);
1407 
1408 	__ClearPageSlab(page);
1409 	reset_page_mapcount(page);
1410 	if (current->reclaim_state)
1411 		current->reclaim_state->reclaimed_slab += pages;
1412 	__free_pages(page, order);
1413 }
1414 
1415 #define need_reserve_slab_rcu						\
1416 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1417 
1418 static void rcu_free_slab(struct rcu_head *h)
1419 {
1420 	struct page *page;
1421 
1422 	if (need_reserve_slab_rcu)
1423 		page = virt_to_head_page(h);
1424 	else
1425 		page = container_of((struct list_head *)h, struct page, lru);
1426 
1427 	__free_slab(page->slab, page);
1428 }
1429 
1430 static void free_slab(struct kmem_cache *s, struct page *page)
1431 {
1432 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1433 		struct rcu_head *head;
1434 
1435 		if (need_reserve_slab_rcu) {
1436 			int order = compound_order(page);
1437 			int offset = (PAGE_SIZE << order) - s->reserved;
1438 
1439 			VM_BUG_ON(s->reserved != sizeof(*head));
1440 			head = page_address(page) + offset;
1441 		} else {
1442 			/*
1443 			 * RCU free overloads the RCU head over the LRU
1444 			 */
1445 			head = (void *)&page->lru;
1446 		}
1447 
1448 		call_rcu(head, rcu_free_slab);
1449 	} else
1450 		__free_slab(s, page);
1451 }
1452 
1453 static void discard_slab(struct kmem_cache *s, struct page *page)
1454 {
1455 	dec_slabs_node(s, page_to_nid(page), page->objects);
1456 	free_slab(s, page);
1457 }
1458 
1459 /*
1460  * Management of partially allocated slabs.
1461  *
1462  * list_lock must be held.
1463  */
1464 static inline void add_partial(struct kmem_cache_node *n,
1465 				struct page *page, int tail)
1466 {
1467 	n->nr_partial++;
1468 	if (tail == DEACTIVATE_TO_TAIL)
1469 		list_add_tail(&page->lru, &n->partial);
1470 	else
1471 		list_add(&page->lru, &n->partial);
1472 }
1473 
1474 /*
1475  * list_lock must be held.
1476  */
1477 static inline void remove_partial(struct kmem_cache_node *n,
1478 					struct page *page)
1479 {
1480 	list_del(&page->lru);
1481 	n->nr_partial--;
1482 }
1483 
1484 /*
1485  * Lock slab, remove from the partial list and put the object into the
1486  * per cpu freelist.
1487  *
1488  * Returns a list of objects or NULL if it fails.
1489  *
1490  * Must hold list_lock.
1491  */
1492 static inline void *acquire_slab(struct kmem_cache *s,
1493 		struct kmem_cache_node *n, struct page *page,
1494 		int mode)
1495 {
1496 	void *freelist;
1497 	unsigned long counters;
1498 	struct page new;
1499 
1500 	/*
1501 	 * Zap the freelist and set the frozen bit.
1502 	 * The old freelist is the list of objects for the
1503 	 * per cpu allocation list.
1504 	 */
1505 	do {
1506 		freelist = page->freelist;
1507 		counters = page->counters;
1508 		new.counters = counters;
1509 		if (mode)
1510 			new.inuse = page->objects;
1511 
1512 		VM_BUG_ON(new.frozen);
1513 		new.frozen = 1;
1514 
1515 	} while (!__cmpxchg_double_slab(s, page,
1516 			freelist, counters,
1517 			NULL, new.counters,
1518 			"lock and freeze"));
1519 
1520 	remove_partial(n, page);
1521 	return freelist;
1522 }
1523 
1524 static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1525 
1526 /*
1527  * Try to allocate a partial slab from a specific node.
1528  */
1529 static void *get_partial_node(struct kmem_cache *s,
1530 		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
1531 {
1532 	struct page *page, *page2;
1533 	void *object = NULL;
1534 
1535 	/*
1536 	 * Racy check. If we mistakenly see no partial slabs then we
1537 	 * just allocate an empty slab. If we mistakenly try to get a
1538 	 * partial slab and there is none available then get_partial_node()
1539 	 * will return NULL.
1540 	 */
1541 	if (!n || !n->nr_partial)
1542 		return NULL;
1543 
1544 	spin_lock(&n->list_lock);
1545 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
1546 		void *t = acquire_slab(s, n, page, object == NULL);
1547 		int available;
1548 
1549 		if (!t)
1550 			break;
1551 
1552 		if (!object) {
1553 			c->page = page;
1554 			c->node = page_to_nid(page);
1555 			stat(s, ALLOC_FROM_PARTIAL);
1556 			object = t;
1557 			available =  page->objects - page->inuse;
1558 		} else {
1559 			page->freelist = t;
1560 			available = put_cpu_partial(s, page, 0);
1561 		}
1562 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
1563 			break;
1564 
1565 	}
1566 	spin_unlock(&n->list_lock);
1567 	return object;
1568 }
1569 
1570 /*
1571  * Get a page from somewhere. Search in increasing NUMA distances.
1572  */
1573 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
1574 		struct kmem_cache_cpu *c)
1575 {
1576 #ifdef CONFIG_NUMA
1577 	struct zonelist *zonelist;
1578 	struct zoneref *z;
1579 	struct zone *zone;
1580 	enum zone_type high_zoneidx = gfp_zone(flags);
1581 	void *object;
1582 
1583 	/*
1584 	 * The defrag ratio allows a configuration of the tradeoffs between
1585 	 * inter node defragmentation and node local allocations. A lower
1586 	 * defrag_ratio increases the tendency to do local allocations
1587 	 * instead of attempting to obtain partial slabs from other nodes.
1588 	 *
1589 	 * If the defrag_ratio is set to 0 then kmalloc() always
1590 	 * returns node local objects. If the ratio is higher then kmalloc()
1591 	 * may return off node objects because partial slabs are obtained
1592 	 * from other nodes and filled up.
1593 	 *
1594 	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1595 	 * defrag_ratio = 1000) then almost every allocation will
1596 	 * first attempt to defrag slab caches on other nodes. This means
1597 	 * scanning over all nodes to look for partial slabs which may be
1598 	 * expensive if we do it every time we are trying to find a slab
1599 	 * with available objects.
1600 	 */
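	/*
	 * Worked example with an assumed sysfs value: writing 50 to the
	 * defrag_ratio file mentioned above stores 500 here, so the check
	 * below skips the remote search whenever get_cycles() % 1024
	 * exceeds 500, i.e. for roughly half of the allocations that miss
	 * locally.
	 */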
1601 	if (!s->remote_node_defrag_ratio ||
1602 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1603 		return NULL;
1604 
1605 	get_mems_allowed();
1606 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1607 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1608 		struct kmem_cache_node *n;
1609 
1610 		n = get_node(s, zone_to_nid(zone));
1611 
1612 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1613 				n->nr_partial > s->min_partial) {
1614 			object = get_partial_node(s, n, c);
1615 			if (object) {
1616 				put_mems_allowed();
1617 				return object;
1618 			}
1619 		}
1620 	}
1621 	put_mems_allowed();
1622 #endif
1623 	return NULL;
1624 }
1625 
1626 /*
1627  * Get a partial page, lock it and return it.
1628  */
1629 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1630 		struct kmem_cache_cpu *c)
1631 {
1632 	void *object;
1633 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
1634 
1635 	object = get_partial_node(s, get_node(s, searchnode), c);
1636 	if (object || node != NUMA_NO_NODE)
1637 		return object;
1638 
1639 	return get_any_partial(s, flags, c);
1640 }
1641 
1642 #ifdef CONFIG_PREEMPT
1643 /*
1644  * Calculate the next globally unique transaction for disambiguation
1645  * during cmpxchg. The transactions start with the cpu number and are then
1646  * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
1647  */
1648 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1649 #else
1650 /*
1651  * No preemption supported therefore also no need to check for
1652  * different cpus.
1653  */
1654 #define TID_STEP 1
1655 #endif
1656 
1657 static inline unsigned long next_tid(unsigned long tid)
1658 {
1659 	return tid + TID_STEP;
1660 }
1661 
1662 static inline unsigned int tid_to_cpu(unsigned long tid)
1663 {
1664 	return tid % TID_STEP;
1665 }
1666 
1667 static inline unsigned long tid_to_event(unsigned long tid)
1668 {
1669 	return tid / TID_STEP;
1670 }
1671 
1672 static inline unsigned int init_tid(int cpu)
1673 {
1674 	return cpu;
1675 }
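/*
 * Worked example assuming CONFIG_PREEMPT and CONFIG_NR_CPUS == 6, so
 * TID_STEP == 8: cpu 2 starts at tid 2 and then walks 10, 18, 26, ...;
 * tid_to_cpu(18) == 2 and tid_to_event(18) == 2.
 */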
1676 
1677 static inline void note_cmpxchg_failure(const char *n,
1678 		const struct kmem_cache *s, unsigned long tid)
1679 {
1680 #ifdef SLUB_DEBUG_CMPXCHG
1681 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1682 
1683 	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1684 
1685 #ifdef CONFIG_PREEMPT
1686 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1687 		printk("due to cpu change %d -> %d\n",
1688 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
1689 	else
1690 #endif
1691 	if (tid_to_event(tid) != tid_to_event(actual_tid))
1692 		printk("due to cpu running other code. Event %ld->%ld\n",
1693 			tid_to_event(tid), tid_to_event(actual_tid));
1694 	else
1695 		printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1696 			actual_tid, tid, next_tid(tid));
1697 #endif
1698 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1699 }
1700 
1701 void init_kmem_cache_cpus(struct kmem_cache *s)
1702 {
1703 	int cpu;
1704 
1705 	for_each_possible_cpu(cpu)
1706 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1707 }
1708 
1709 /*
1710  * Remove the cpu slab
1711  */
1712 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1713 {
1714 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
1715 	struct page *page = c->page;
1716 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1717 	int lock = 0;
1718 	enum slab_modes l = M_NONE, m = M_NONE;
1719 	void *freelist;
1720 	void *nextfree;
1721 	int tail = DEACTIVATE_TO_HEAD;
1722 	struct page new;
1723 	struct page old;
1724 
1725 	if (page->freelist) {
1726 		stat(s, DEACTIVATE_REMOTE_FREES);
1727 		tail = DEACTIVATE_TO_TAIL;
1728 	}
1729 
1730 	c->tid = next_tid(c->tid);
1731 	c->page = NULL;
1732 	freelist = c->freelist;
1733 	c->freelist = NULL;
1734 
1735 	/*
1736 	 * Stage one: Free all available per cpu objects back
1737 	 * to the page freelist while it is still frozen. Leave the
1738 	 * last one.
1739 	 *
1740 	 * There is no need to take the list->lock because the page
1741 	 * is still frozen.
1742 	 */
1743 	while (freelist && (nextfree = get_freepointer(s, freelist))) {
1744 		void *prior;
1745 		unsigned long counters;
1746 
1747 		do {
1748 			prior = page->freelist;
1749 			counters = page->counters;
1750 			set_freepointer(s, freelist, prior);
1751 			new.counters = counters;
1752 			new.inuse--;
1753 			VM_BUG_ON(!new.frozen);
1754 
1755 		} while (!__cmpxchg_double_slab(s, page,
1756 			prior, counters,
1757 			freelist, new.counters,
1758 			"drain percpu freelist"));
1759 
1760 		freelist = nextfree;
1761 	}
1762 
1763 	/*
1764 	 * Stage two: Ensure that the page is unfrozen while the
1765 	 * list presence reflects the actual number of objects
1766 	 * during unfreeze.
1767 	 *
1768 	 * We setup the list membership and then perform a cmpxchg
1769 	 * with the count. If there is a mismatch then the page
1770 	 * is not unfrozen but the page is on the wrong list.
1771 	 *
1772 	 * Then we restart the process which may have to remove
1773 	 * the page from the list that we just put it on again
1774 	 * because the number of objects in the slab may have
1775 	 * changed.
1776 	 */
1777 redo:
1778 
1779 	old.freelist = page->freelist;
1780 	old.counters = page->counters;
1781 	VM_BUG_ON(!old.frozen);
1782 
1783 	/* Determine target state of the slab */
1784 	new.counters = old.counters;
1785 	if (freelist) {
1786 		new.inuse--;
1787 		set_freepointer(s, freelist, old.freelist);
1788 		new.freelist = freelist;
1789 	} else
1790 		new.freelist = old.freelist;
1791 
1792 	new.frozen = 0;
1793 
1794 	if (!new.inuse && n->nr_partial > s->min_partial)
1795 		m = M_FREE;
1796 	else if (new.freelist) {
1797 		m = M_PARTIAL;
1798 		if (!lock) {
1799 			lock = 1;
1800 			/*
1801 			 * Taking the spinlock removes the possibility
1802 			 * that acquire_slab() will see a slab page that
1803 			 * is frozen
1804 			 */
1805 			spin_lock(&n->list_lock);
1806 		}
1807 	} else {
1808 		m = M_FULL;
1809 		if (kmem_cache_debug(s) && !lock) {
1810 			lock = 1;
1811 			/*
1812 			 * This also ensures that the scanning of full
1813 			 * slabs from diagnostic functions will not see
1814 			 * any frozen slabs.
1815 			 */
1816 			spin_lock(&n->list_lock);
1817 		}
1818 	}
1819 
1820 	if (l != m) {
1821 
1822 		if (l == M_PARTIAL)
1823 
1824 			remove_partial(n, page);
1825 
1826 		else if (l == M_FULL)
1827 
1828 			remove_full(s, page);
1829 
1830 		if (m == M_PARTIAL) {
1831 
1832 			add_partial(n, page, tail);
1833 			stat(s, tail);
1834 
1835 		} else if (m == M_FULL) {
1836 
1837 			stat(s, DEACTIVATE_FULL);
1838 			add_full(s, n, page);
1839 
1840 		}
1841 	}
1842 
1843 	l = m;
1844 	if (!__cmpxchg_double_slab(s, page,
1845 				old.freelist, old.counters,
1846 				new.freelist, new.counters,
1847 				"unfreezing slab"))
1848 		goto redo;
1849 
1850 	if (lock)
1851 		spin_unlock(&n->list_lock);
1852 
1853 	if (m == M_FREE) {
1854 		stat(s, DEACTIVATE_EMPTY);
1855 		discard_slab(s, page);
1856 		stat(s, FREE_SLAB);
1857 	}
1858 }
1859 
1860 /* Unfreeze all the cpu partial slabs */
1861 static void unfreeze_partials(struct kmem_cache *s)
1862 {
1863 	struct kmem_cache_node *n = NULL;
1864 	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
1865 	struct page *page, *discard_page = NULL;
1866 
1867 	while ((page = c->partial)) {
1868 		enum slab_modes { M_PARTIAL, M_FREE };
1869 		enum slab_modes l, m;
1870 		struct page new;
1871 		struct page old;
1872 
1873 		c->partial = page->next;
1874 		l = M_FREE;
1875 
1876 		do {
1877 
1878 			old.freelist = page->freelist;
1879 			old.counters = page->counters;
1880 			VM_BUG_ON(!old.frozen);
1881 
1882 			new.counters = old.counters;
1883 			new.freelist = old.freelist;
1884 
1885 			new.frozen = 0;
1886 
1887 			if (!new.inuse && (!n || n->nr_partial > s->min_partial))
1888 				m = M_FREE;
1889 			else {
1890 				struct kmem_cache_node *n2 = get_node(s,
1891 							page_to_nid(page));
1892 
1893 				m = M_PARTIAL;
1894 				if (n != n2) {
1895 					if (n)
1896 						spin_unlock(&n->list_lock);
1897 
1898 					n = n2;
1899 					spin_lock(&n->list_lock);
1900 				}
1901 			}
1902 
1903 			if (l != m) {
1904 				if (l == M_PARTIAL)
1905 					remove_partial(n, page);
1906 				else
1907 					add_partial(n, page,
1908 						DEACTIVATE_TO_TAIL);
1909 
1910 				l = m;
1911 			}
1912 
1913 		} while (!cmpxchg_double_slab(s, page,
1914 				old.freelist, old.counters,
1915 				new.freelist, new.counters,
1916 				"unfreezing slab"));
1917 
1918 		if (m == M_FREE) {
1919 			page->next = discard_page;
1920 			discard_page = page;
1921 		}
1922 	}
1923 
1924 	if (n)
1925 		spin_unlock(&n->list_lock);
1926 
1927 	while (discard_page) {
1928 		page = discard_page;
1929 		discard_page = discard_page->next;
1930 
1931 		stat(s, DEACTIVATE_EMPTY);
1932 		discard_slab(s, page);
1933 		stat(s, FREE_SLAB);
1934 	}
1935 }
1936 
1937 /*
1938  * Put a page that was just frozen (in __slab_free) into a partial page
1939  * slot if available. This is done without disabling interrupts and without
1940  * disabling preemption. The cmpxchg is racy and may put the partial page
1941  * onto a random cpu's partial slot.
1942  *
1943  * If we did not find a slot then simply move all the partials to the
1944  * per node partial list.
1945  */
1946 int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1947 {
1948 	struct page *oldpage;
1949 	int pages;
1950 	int pobjects;
1951 
1952 	do {
1953 		pages = 0;
1954 		pobjects = 0;
1955 		oldpage = this_cpu_read(s->cpu_slab->partial);
1956 
1957 		if (oldpage) {
1958 			pobjects = oldpage->pobjects;
1959 			pages = oldpage->pages;
1960 			if (drain && pobjects > s->cpu_partial) {
1961 				unsigned long flags;
1962 				/*
1963 				 * partial array is full. Move the existing
1964 				 * set to the per node partial list.
1965 				 */
1966 				local_irq_save(flags);
1967 				unfreeze_partials(s);
1968 				local_irq_restore(flags);
1969 				pobjects = 0;
1970 				pages = 0;
1971 			}
1972 		}
1973 
1974 		pages++;
1975 		pobjects += page->objects - page->inuse;
1976 
1977 		page->pages = pages;
1978 		page->pobjects = pobjects;
1979 		page->next = oldpage;
1980 
1981 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
1982 	stat(s, CPU_PARTIAL_FREE);
1983 	return pobjects;
1984 }
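
/*
 * Illustrative sketch of the accounting above (not part of the
 * allocator): assume slabs of this cache hold 32 objects and the page
 * being stashed still has 20 of them in use.  Then
 *
 *	pobjects += page->objects - page->inuse;	-> pobjects += 12
 *
 * Once enough partially free slabs accumulate for pobjects to exceed
 * s->cpu_partial, the next call with drain set moves the whole stash to
 * the per node partial lists via unfreeze_partials().
 */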
1985 
1986 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1987 {
1988 	stat(s, CPUSLAB_FLUSH);
1989 	deactivate_slab(s, c);
1990 }
1991 
1992 /*
1993  * Flush cpu slab.
1994  *
1995  * Called from IPI handler with interrupts disabled.
1996  */
1997 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1998 {
1999 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2000 
2001 	if (likely(c)) {
2002 		if (c->page)
2003 			flush_slab(s, c);
2004 
2005 		unfreeze_partials(s);
2006 	}
2007 }
2008 
2009 static void flush_cpu_slab(void *d)
2010 {
2011 	struct kmem_cache *s = d;
2012 
2013 	__flush_cpu_slab(s, smp_processor_id());
2014 }
2015 
2016 static void flush_all(struct kmem_cache *s)
2017 {
2018 	on_each_cpu(flush_cpu_slab, s, 1);
2019 }
2020 
2021 /*
2022  * Check if the objects in a per cpu structure fit numa
2023  * locality expectations.
2024  */
2025 static inline int node_match(struct kmem_cache_cpu *c, int node)
2026 {
2027 #ifdef CONFIG_NUMA
2028 	if (node != NUMA_NO_NODE && c->node != node)
2029 		return 0;
2030 #endif
2031 	return 1;
2032 }
2033 
2034 static int count_free(struct page *page)
2035 {
2036 	return page->objects - page->inuse;
2037 }
2038 
2039 static unsigned long count_partial(struct kmem_cache_node *n,
2040 					int (*get_count)(struct page *))
2041 {
2042 	unsigned long flags;
2043 	unsigned long x = 0;
2044 	struct page *page;
2045 
2046 	spin_lock_irqsave(&n->list_lock, flags);
2047 	list_for_each_entry(page, &n->partial, lru)
2048 		x += get_count(page);
2049 	spin_unlock_irqrestore(&n->list_lock, flags);
2050 	return x;
2051 }
2052 
2053 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2054 {
2055 #ifdef CONFIG_SLUB_DEBUG
2056 	return atomic_long_read(&n->total_objects);
2057 #else
2058 	return 0;
2059 #endif
2060 }
2061 
2062 static noinline void
2063 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2064 {
2065 	int node;
2066 
2067 	printk(KERN_WARNING
2068 		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2069 		nid, gfpflags);
2070 	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
2071 		"default order: %d, min order: %d\n", s->name, s->objsize,
2072 		s->size, oo_order(s->oo), oo_order(s->min));
2073 
2074 	if (oo_order(s->min) > get_order(s->objsize))
2075 		printk(KERN_WARNING "  %s debugging increased min order, use "
2076 		       "slub_debug=O to disable.\n", s->name);
2077 
2078 	for_each_online_node(node) {
2079 		struct kmem_cache_node *n = get_node(s, node);
2080 		unsigned long nr_slabs;
2081 		unsigned long nr_objs;
2082 		unsigned long nr_free;
2083 
2084 		if (!n)
2085 			continue;
2086 
2087 		nr_free  = count_partial(n, count_free);
2088 		nr_slabs = node_nr_slabs(n);
2089 		nr_objs  = node_nr_objs(n);
2090 
2091 		printk(KERN_WARNING
2092 			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2093 			node, nr_slabs, nr_objs, nr_free);
2094 	}
2095 }
2096 
2097 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2098 			int node, struct kmem_cache_cpu **pc)
2099 {
2100 	void *object;
2101 	struct kmem_cache_cpu *c;
2102 	struct page *page = new_slab(s, flags, node);
2103 
2104 	if (page) {
2105 		c = __this_cpu_ptr(s->cpu_slab);
2106 		if (c->page)
2107 			flush_slab(s, c);
2108 
2109 		/*
2110 		 * No other reference to the page yet so we can
2111 		 * muck around with it freely without cmpxchg
2112 		 */
2113 		object = page->freelist;
2114 		page->freelist = NULL;
2115 
2116 		stat(s, ALLOC_SLAB);
2117 		c->node = page_to_nid(page);
2118 		c->page = page;
2119 		*pc = c;
2120 	} else
2121 		object = NULL;
2122 
2123 	return object;
2124 }
2125 
2126 /*
2127  * Slow path. The lockless freelist is empty or we need to perform
2128  * debugging duties.
2129  *
2130  * Processing is still very fast if new objects have been freed to the
2131  * regular freelist. In that case we simply take over the regular freelist
2132  * as the lockless freelist and zap the regular freelist.
2133  *
2134  * If that is not working then we fall back to the partial lists. We take the
2135  * first element of the freelist as the object to allocate now and move the
2136  * rest of the freelist to the lockless freelist.
2137  *
2138  * And if we were unable to get a new slab from the partial slab lists then
2139  * we need to allocate a new slab. This is the slowest path since it involves
2140  * a call to the page allocator and the setup of a new slab.
2141  */
2142 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2143 			  unsigned long addr, struct kmem_cache_cpu *c)
2144 {
2145 	void **object;
2146 	unsigned long flags;
2147 	struct page new;
2148 	unsigned long counters;
2149 
2150 	local_irq_save(flags);
2151 #ifdef CONFIG_PREEMPT
2152 	/*
2153 	 * We may have been preempted and rescheduled on a different
2154 	 * cpu before disabling interrupts. Need to reload cpu area
2155 	 * pointer.
2156 	 */
2157 	c = this_cpu_ptr(s->cpu_slab);
2158 #endif
2159 
2160 	if (!c->page)
2161 		goto new_slab;
2162 redo:
2163 	if (unlikely(!node_match(c, node))) {
2164 		stat(s, ALLOC_NODE_MISMATCH);
2165 		deactivate_slab(s, c);
2166 		goto new_slab;
2167 	}
2168 
2169 	stat(s, ALLOC_SLOWPATH);
2170 
2171 	do {
2172 		object = c->page->freelist;
2173 		counters = c->page->counters;
2174 		new.counters = counters;
2175 		VM_BUG_ON(!new.frozen);
2176 
2177 		/*
2178 		 * If there is no object left then we use this loop to
2179 		 * deactivate the slab which is simple since no objects
2180 		 * are left in the slab and therefore we do not need to
2181 		 * put the page back onto the partial list.
2182 		 *
2183 		 * If there are objects left then we retrieve them
2184 		 * and use them to refill the per cpu queue.
2185 		 */
2186 
2187 		new.inuse = c->page->objects;
2188 		new.frozen = object != NULL;
2189 
2190 	} while (!__cmpxchg_double_slab(s, c->page,
2191 			object, counters,
2192 			NULL, new.counters,
2193 			"__slab_alloc"));
2194 
2195 	if (!object) {
2196 		c->page = NULL;
2197 		stat(s, DEACTIVATE_BYPASS);
2198 		goto new_slab;
2199 	}
2200 
2201 	stat(s, ALLOC_REFILL);
2202 
2203 load_freelist:
2204 	c->freelist = get_freepointer(s, object);
2205 	c->tid = next_tid(c->tid);
2206 	local_irq_restore(flags);
2207 	return object;
2208 
2209 new_slab:
2210 
2211 	if (c->partial) {
2212 		c->page = c->partial;
2213 		c->partial = c->page->next;
2214 		c->node = page_to_nid(c->page);
2215 		stat(s, CPU_PARTIAL_ALLOC);
2216 		c->freelist = NULL;
2217 		goto redo;
2218 	}
2219 
2220 	/* Then do expensive stuff like retrieving pages from the partial lists */
2221 	object = get_partial(s, gfpflags, node, c);
2222 
2223 	if (unlikely(!object)) {
2224 
2225 		object = new_slab_objects(s, gfpflags, node, &c);
2226 
2227 		if (unlikely(!object)) {
2228 			if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2229 				slab_out_of_memory(s, gfpflags, node);
2230 
2231 			local_irq_restore(flags);
2232 			return NULL;
2233 		}
2234 	}
2235 
2236 	if (likely(!kmem_cache_debug(s)))
2237 		goto load_freelist;
2238 
2239 	/* Only entered in the debug case */
2240 	if (!alloc_debug_processing(s, c->page, object, addr))
2241 		goto new_slab;	/* Slab failed checks. Next slab needed */
2242 
2243 	c->freelist = get_freepointer(s, object);
2244 	deactivate_slab(s, c);
2245 	c->node = NUMA_NO_NODE;
2246 	local_irq_restore(flags);
2247 	return object;
2248 }
2249 
2250 /*
2251  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2252  * have the fastpath folded into their functions. So no function call
2253  * overhead for requests that can be satisfied on the fastpath.
2254  *
2255  * The fastpath works by first checking if the lockless freelist can be used.
2256  * If not then __slab_alloc is called for slow processing.
2257  *
2258  * Otherwise we can simply pick the next object from the lockless free list.
2259  */
2260 static __always_inline void *slab_alloc(struct kmem_cache *s,
2261 		gfp_t gfpflags, int node, unsigned long addr)
2262 {
2263 	void **object;
2264 	struct kmem_cache_cpu *c;
2265 	unsigned long tid;
2266 
2267 	if (slab_pre_alloc_hook(s, gfpflags))
2268 		return NULL;
2269 
2270 redo:
2271 
2272 	/*
2273 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2274 	 * enabled. We may switch back and forth between cpus while
2275 	 * reading from one cpu area. That does not matter as long
2276 	 * as we end up on the original cpu again when doing the cmpxchg.
2277 	 */
2278 	c = __this_cpu_ptr(s->cpu_slab);
2279 
2280 	/*
2281 	 * The transaction ids are globally unique per cpu and per operation on
2282 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
2283 	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
2284 	 * linked list in between.
2285 	 */
2286 	tid = c->tid;
2287 	barrier();
2288 
2289 	object = c->freelist;
2290 	if (unlikely(!object || !node_match(c, node)))
2291 
2292 		object = __slab_alloc(s, gfpflags, node, addr, c);
2293 
2294 	else {
2295 		/*
2296 		 * The cmpxchg will only match if there was no additional
2297 		 * operation and if we are on the right processor.
2298 		 *
2299 		 * The cmpxchg does the following atomically (without lock semantics!)
2300 		 * 1. Relocate first pointer to the current per cpu area.
2301 		 * 2. Verify that tid and freelist have not been changed.
2302 		 * 3. If they were not changed, replace tid and freelist.
2303 		 *
2304 		 * Since this is without lock semantics the protection is only against
2305 		 * code executing on this cpu *not* from access by other cpus.
2306 		 */
2307 		if (unlikely(!this_cpu_cmpxchg_double(
2308 				s->cpu_slab->freelist, s->cpu_slab->tid,
2309 				object, tid,
2310 				get_freepointer_safe(s, object), next_tid(tid)))) {
2311 
2312 			note_cmpxchg_failure("slab_alloc", s, tid);
2313 			goto redo;
2314 		}
2315 		stat(s, ALLOC_FASTPATH);
2316 	}
2317 
2318 	if (unlikely(gfpflags & __GFP_ZERO) && object)
2319 		memset(object, 0, s->objsize);
2320 
2321 	slab_post_alloc_hook(s, gfpflags, object);
2322 
2323 	return object;
2324 }
2325 
2326 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2327 {
2328 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2329 
2330 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
2331 
2332 	return ret;
2333 }
2334 EXPORT_SYMBOL(kmem_cache_alloc);
2335 
2336 #ifdef CONFIG_TRACING
2337 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2338 {
2339 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2340 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2341 	return ret;
2342 }
2343 EXPORT_SYMBOL(kmem_cache_alloc_trace);
2344 
2345 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
2346 {
2347 	void *ret = kmalloc_order(size, flags, order);
2348 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
2349 	return ret;
2350 }
2351 EXPORT_SYMBOL(kmalloc_order_trace);
2352 #endif
2353 
2354 #ifdef CONFIG_NUMA
2355 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2356 {
2357 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2358 
2359 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2360 				    s->objsize, s->size, gfpflags, node);
2361 
2362 	return ret;
2363 }
2364 EXPORT_SYMBOL(kmem_cache_alloc_node);
2365 
2366 #ifdef CONFIG_TRACING
2367 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2368 				    gfp_t gfpflags,
2369 				    int node, size_t size)
2370 {
2371 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2372 
2373 	trace_kmalloc_node(_RET_IP_, ret,
2374 			   size, s->size, gfpflags, node);
2375 	return ret;
2376 }
2377 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2378 #endif
2379 #endif
2380 
2381 /*
2382  * Slow path handling. This may still be called frequently since objects
2383  * have a longer lifetime than the cpu slabs in most processing loads.
2384  *
2385  * So we still attempt to reduce cache line usage. Just take the slab
2386  * lock and free the item. If there is no additional partial page
2387  * handling required then we can return immediately.
2388  */
2389 static void __slab_free(struct kmem_cache *s, struct page *page,
2390 			void *x, unsigned long addr)
2391 {
2392 	void *prior;
2393 	void **object = (void *)x;
2394 	int was_frozen;
2395 	int inuse;
2396 	struct page new;
2397 	unsigned long counters;
2398 	struct kmem_cache_node *n = NULL;
2399 	unsigned long uninitialized_var(flags);
2400 
2401 	stat(s, FREE_SLOWPATH);
2402 
2403 	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
2404 		return;
2405 
2406 	do {
2407 		prior = page->freelist;
2408 		counters = page->counters;
2409 		set_freepointer(s, object, prior);
2410 		new.counters = counters;
2411 		was_frozen = new.frozen;
2412 		new.inuse--;
2413 		if ((!new.inuse || !prior) && !was_frozen && !n) {
2414 
2415 			if (!kmem_cache_debug(s) && !prior)
2416 
2417 				/*
2418 				 * Slab was on no list before and will be partially empty
2419 				 * We can defer the list move and instead freeze it.
2420 				 */
2421 				new.frozen = 1;
2422 
2423 			else { /* Needs to be taken off a list */
2424 
2425 				n = get_node(s, page_to_nid(page));
2426 				/*
2427 				 * Speculatively acquire the list_lock.
2428 				 * If the cmpxchg does not succeed then we may
2429 				 * drop the list_lock without any processing.
2430 				 *
2431 				 * Otherwise the list_lock will synchronize with
2432 				 * other processors updating the list of slabs.
2433 				 */
2434 				spin_lock_irqsave(&n->list_lock, flags);
2435 
2436 			}
2437 		}
2438 		inuse = new.inuse;
2439 
2440 	} while (!cmpxchg_double_slab(s, page,
2441 		prior, counters,
2442 		object, new.counters,
2443 		"__slab_free"));
2444 
2445 	if (likely(!n)) {
2446 
2447 		/*
2448 		 * If we just froze the page then put it onto the
2449 		 * per cpu partial list.
2450 		 */
2451 		if (new.frozen && !was_frozen)
2452 			put_cpu_partial(s, page, 1);
2453 
2454 		/*
2455 		 * The list lock was not taken therefore no list
2456 		 * activity can be necessary.
2457 		 */
2458 		if (was_frozen)
2459 			stat(s, FREE_FROZEN);
2460 		return;
2461 	}
2462 
2463 	/*
2464 	 * was_frozen may have been set after we acquired the list_lock in
2465 	 * an earlier loop. So we need to check it here again.
2466 	 */
2467 	if (was_frozen)
2468 		stat(s, FREE_FROZEN);
2469 	else {
2470 		if (unlikely(!inuse && n->nr_partial > s->min_partial))
2471 			goto slab_empty;
2472 
2473 		/*
2474 		 * Objects left in the slab. If it was not on the partial list before
2475 		 * then add it.
2476 		 */
2477 		if (unlikely(!prior)) {
2478 			remove_full(s, page);
2479 			add_partial(n, page, DEACTIVATE_TO_TAIL);
2480 			stat(s, FREE_ADD_PARTIAL);
2481 		}
2482 	}
2483 	spin_unlock_irqrestore(&n->list_lock, flags);
2484 	return;
2485 
2486 slab_empty:
2487 	if (prior) {
2488 		/*
2489 		 * Slab on the partial list.
2490 		 */
2491 		remove_partial(n, page);
2492 		stat(s, FREE_REMOVE_PARTIAL);
2493 	} else
2494 		/* Slab must be on the full list */
2495 		remove_full(s, page);
2496 
2497 	spin_unlock_irqrestore(&n->list_lock, flags);
2498 	stat(s, FREE_SLAB);
2499 	discard_slab(s, page);
2500 }
2501 
2502 /*
2503  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2504  * can perform fastpath freeing without additional function calls.
2505  *
2506  * The fastpath is only possible if we are freeing to the current cpu slab
2507  * of this processor. This is typically the case if we have just allocated
2508  * the item before.
2509  *
2510  * If fastpath is not possible then fall back to __slab_free where we deal
2511  * with all sorts of special processing.
2512  */
2513 static __always_inline void slab_free(struct kmem_cache *s,
2514 			struct page *page, void *x, unsigned long addr)
2515 {
2516 	void **object = (void *)x;
2517 	struct kmem_cache_cpu *c;
2518 	unsigned long tid;
2519 
2520 	slab_free_hook(s, x);
2521 
2522 redo:
2523 	/*
2524 	 * Determine the current cpu's per cpu slab.
2525 	 * The cpu may change afterward. However that does not matter since
2526 	 * data is retrieved via this pointer. If we are on the same cpu
2527 	 * during the cmpxchg then the free will succeed.
2528 	 */
2529 	c = __this_cpu_ptr(s->cpu_slab);
2530 
2531 	tid = c->tid;
2532 	barrier();
2533 
2534 	if (likely(page == c->page)) {
2535 		set_freepointer(s, object, c->freelist);
2536 
2537 		if (unlikely(!this_cpu_cmpxchg_double(
2538 				s->cpu_slab->freelist, s->cpu_slab->tid,
2539 				c->freelist, tid,
2540 				object, next_tid(tid)))) {
2541 
2542 			note_cmpxchg_failure("slab_free", s, tid);
2543 			goto redo;
2544 		}
2545 		stat(s, FREE_FASTPATH);
2546 	} else
2547 		__slab_free(s, page, x, addr);
2548 
2549 }
2550 
2551 void kmem_cache_free(struct kmem_cache *s, void *x)
2552 {
2553 	struct page *page;
2554 
2555 	page = virt_to_head_page(x);
2556 
2557 	slab_free(s, page, x, _RET_IP_);
2558 
2559 	trace_kmem_cache_free(_RET_IP_, x);
2560 }
2561 EXPORT_SYMBOL(kmem_cache_free);
2562 
2563 /*
2564  * Object placement in a slab is made very easy because we always start at
2565  * offset 0. If we tune the size of the object to the alignment then we can
2566  * get the required alignment by putting one properly sized object after
2567  * another.
2568  *
2569  * Notice that the allocation order determines the sizes of the per cpu
2570  * caches. Each processor always has one slab available for allocations.
2571  * Increasing the allocation order reduces the number of times that slabs
2572  * must be moved on and off the partial lists and is therefore a factor in
2573  * locking overhead.
2574  */
2575 
2576 /*
2577  * Minimum / Maximum order of slab pages. This influences locking overhead
2578  * and slab fragmentation. A higher order reduces the number of partial slabs
2579  * and increases the number of allocations possible without having to
2580  * take the list_lock.
2581  */
2582 static int slub_min_order;
2583 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2584 static int slub_min_objects;
2585 
2586 /*
2587  * Merge control. If this is set then no merging of slab caches will occur.
2588  * (Could be removed. This was introduced to pacify the merge skeptics.)
2589  */
2590 static int slub_nomerge;
2591 
2592 /*
2593  * Calculate the order of allocation given a slab object size.
2594  *
2595  * The order of allocation has significant impact on performance and other
2596  * system components. Generally order 0 allocations should be preferred since
2597  * order 0 does not cause fragmentation in the page allocator. Larger objects
2598  * be problematic to put into order 0 slabs because there may be too much
2599  * can be problematic to put into order 0 slabs because there may be too much
2600  * would be wasted.
2601  *
2602  * In order to reach satisfactory performance we must ensure that a minimum
2603  * number of objects is in one slab. Otherwise we may generate too much
2604  * activity on the partial lists which requires taking the list_lock. This is
2605  * less a concern for large slabs though which are rarely used.
2606  *
2607  * slub_max_order specifies the order where we begin to stop considering the
2608  * number of objects in a slab as critical. If we reach slub_max_order then
2609  * we try to keep the page order as low as possible. So we accept more waste
2610  * of space in favor of a small page order.
2611  *
2612  * Higher order allocations also allow the placement of more objects in a
2613  * slab and thereby reduce object handling overhead. If the user has
2614  * requested a higher minimum order then we start with that one instead of
2615  * the smallest order which will fit the object.
2616  */
2617 static inline int slab_order(int size, int min_objects,
2618 				int max_order, int fract_leftover, int reserved)
2619 {
2620 	int order;
2621 	int rem;
2622 	int min_order = slub_min_order;
2623 
2624 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
2625 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
2626 
2627 	for (order = max(min_order,
2628 				fls(min_objects * size - 1) - PAGE_SHIFT);
2629 			order <= max_order; order++) {
2630 
2631 		unsigned long slab_size = PAGE_SIZE << order;
2632 
2633 		if (slab_size < min_objects * size + reserved)
2634 			continue;
2635 
2636 		rem = (slab_size - reserved) % size;
2637 
2638 		if (rem <= slab_size / fract_leftover)
2639 			break;
2640 
2641 	}
2642 
2643 	return order;
2644 }
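
/*
 * Worked example (illustrative numbers only): with 4KiB pages,
 * size = 700, reserved = 0, min_objects = 8 and fract_leftover = 16 the
 * loop above starts at order 1, since eight 700 byte objects do not fit
 * in one page.  An order 1 slab has 8192 bytes, holds 8192 / 700 = 11
 * objects and leaves 8192 % 700 = 492 bytes over; 492 <= 8192 / 16, so
 * order 1 is accepted and returned.
 */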
2645 
2646 static inline int calculate_order(int size, int reserved)
2647 {
2648 	int order;
2649 	int min_objects;
2650 	int fraction;
2651 	int max_objects;
2652 
2653 	/*
2654 	 * Attempt to find best configuration for a slab. This
2655 	 * works by first attempting to generate a layout with
2656 	 * the best configuration and backing off gradually.
2657 	 *
2658 	 * First we reduce the acceptable waste in a slab. Then
2659 	 * we reduce the minimum objects required in a slab.
2660 	 */
2661 	min_objects = slub_min_objects;
2662 	if (!min_objects)
2663 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
2664 	max_objects = order_objects(slub_max_order, size, reserved);
2665 	min_objects = min(min_objects, max_objects);
2666 
2667 	while (min_objects > 1) {
2668 		fraction = 16;
2669 		while (fraction >= 4) {
2670 			order = slab_order(size, min_objects,
2671 					slub_max_order, fraction, reserved);
2672 			if (order <= slub_max_order)
2673 				return order;
2674 			fraction /= 2;
2675 		}
2676 		min_objects--;
2677 	}
2678 
2679 	/*
2680 	 * We were unable to place multiple objects in a slab. Now
2681 	 * let's see if we can place a single object there.
2682 	 */
2683 	order = slab_order(size, 1, slub_max_order, 1, reserved);
2684 	if (order <= slub_max_order)
2685 		return order;
2686 
2687 	/*
2688 	 * Doh this slab cannot be placed using slub_max_order.
2689 	 */
2690 	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
2691 	if (order < MAX_ORDER)
2692 		return order;
2693 	return -ENOSYS;
2694 }
2695 
2696 /*
2697  * Figure out what the alignment of the objects will be.
2698  */
2699 static unsigned long calculate_alignment(unsigned long flags,
2700 		unsigned long align, unsigned long size)
2701 {
2702 	/*
2703 	 * If the user wants hardware cache aligned objects then follow that
2704 	 * suggestion if the object is sufficiently large.
2705 	 *
2706 	 * The hardware cache alignment cannot override the specified
2707 	 * alignment though. If that is greater then use it.
2708 	 */
2709 	if (flags & SLAB_HWCACHE_ALIGN) {
2710 		unsigned long ralign = cache_line_size();
2711 		while (size <= ralign / 2)
2712 			ralign /= 2;
2713 		align = max(align, ralign);
2714 	}
2715 
2716 	if (align < ARCH_SLAB_MINALIGN)
2717 		align = ARCH_SLAB_MINALIGN;
2718 
2719 	return ALIGN(align, sizeof(void *));
2720 }
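
/*
 * Example (illustrative, assuming a 64 byte cache line): a 24 byte
 * object created with SLAB_HWCACHE_ALIGN does not get full cache line
 * alignment.  The loop above halves ralign while the object fits into
 * half of it: 24 <= 64 / 2 so ralign becomes 32, 24 > 32 / 2 so it
 * stops, and the object is 32 byte aligned unless the caller asked for
 * more.
 */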
2721 
2722 static void
2723 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
2724 {
2725 	n->nr_partial = 0;
2726 	spin_lock_init(&n->list_lock);
2727 	INIT_LIST_HEAD(&n->partial);
2728 #ifdef CONFIG_SLUB_DEBUG
2729 	atomic_long_set(&n->nr_slabs, 0);
2730 	atomic_long_set(&n->total_objects, 0);
2731 	INIT_LIST_HEAD(&n->full);
2732 #endif
2733 }
2734 
2735 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2736 {
2737 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2738 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2739 
2740 	/*
2741 	 * Must align to double word boundary for the double cmpxchg
2742 	 * instructions to work; see __pcpu_double_call_return_bool().
2743 	 */
2744 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2745 				     2 * sizeof(void *));
2746 
2747 	if (!s->cpu_slab)
2748 		return 0;
2749 
2750 	init_kmem_cache_cpus(s);
2751 
2752 	return 1;
2753 }
2754 
2755 static struct kmem_cache *kmem_cache_node;
2756 
2757 /*
2758  * No kmalloc_node yet so do it by hand. We know that this is the first
2759  * slab on the node for this slabcache. There are no concurrent accesses
2760  * possible.
2761  *
2762  * Note that this function only works on the kmalloc_node_cache
2763  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2764  * memory on a fresh node that has no slab structures yet.
2765  */
2766 static void early_kmem_cache_node_alloc(int node)
2767 {
2768 	struct page *page;
2769 	struct kmem_cache_node *n;
2770 
2771 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
2772 
2773 	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
2774 
2775 	BUG_ON(!page);
2776 	if (page_to_nid(page) != node) {
2777 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2778 				"node %d\n", node);
2779 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2780 				"in order to be able to continue\n");
2781 	}
2782 
2783 	n = page->freelist;
2784 	BUG_ON(!n);
2785 	page->freelist = get_freepointer(kmem_cache_node, n);
2786 	page->inuse = 1;
2787 	page->frozen = 0;
2788 	kmem_cache_node->node[node] = n;
2789 #ifdef CONFIG_SLUB_DEBUG
2790 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2791 	init_tracking(kmem_cache_node, n);
2792 #endif
2793 	init_kmem_cache_node(n, kmem_cache_node);
2794 	inc_slabs_node(kmem_cache_node, node, page->objects);
2795 
2796 	add_partial(n, page, DEACTIVATE_TO_HEAD);
2797 }
2798 
2799 static void free_kmem_cache_nodes(struct kmem_cache *s)
2800 {
2801 	int node;
2802 
2803 	for_each_node_state(node, N_NORMAL_MEMORY) {
2804 		struct kmem_cache_node *n = s->node[node];
2805 
2806 		if (n)
2807 			kmem_cache_free(kmem_cache_node, n);
2808 
2809 		s->node[node] = NULL;
2810 	}
2811 }
2812 
2813 static int init_kmem_cache_nodes(struct kmem_cache *s)
2814 {
2815 	int node;
2816 
2817 	for_each_node_state(node, N_NORMAL_MEMORY) {
2818 		struct kmem_cache_node *n;
2819 
2820 		if (slab_state == DOWN) {
2821 			early_kmem_cache_node_alloc(node);
2822 			continue;
2823 		}
2824 		n = kmem_cache_alloc_node(kmem_cache_node,
2825 						GFP_KERNEL, node);
2826 
2827 		if (!n) {
2828 			free_kmem_cache_nodes(s);
2829 			return 0;
2830 		}
2831 
2832 		s->node[node] = n;
2833 		init_kmem_cache_node(n, s);
2834 	}
2835 	return 1;
2836 }
2837 
2838 static void set_min_partial(struct kmem_cache *s, unsigned long min)
2839 {
2840 	if (min < MIN_PARTIAL)
2841 		min = MIN_PARTIAL;
2842 	else if (min > MAX_PARTIAL)
2843 		min = MAX_PARTIAL;
2844 	s->min_partial = min;
2845 }
2846 
2847 /*
2848  * calculate_sizes() determines the order and the distribution of data within
2849  * a slab object.
2850  */
2851 static int calculate_sizes(struct kmem_cache *s, int forced_order)
2852 {
2853 	unsigned long flags = s->flags;
2854 	unsigned long size = s->objsize;
2855 	unsigned long align = s->align;
2856 	int order;
2857 
2858 	/*
2859 	 * Round up object size to the next word boundary. We can only
2860 	 * place the free pointer at word boundaries and this determines
2861 	 * the possible location of the free pointer.
2862 	 */
2863 	size = ALIGN(size, sizeof(void *));
2864 
2865 #ifdef CONFIG_SLUB_DEBUG
2866 	/*
2867 	 * Determine if we can poison the object itself. If the user of
2868 	 * the slab may touch the object after free or before allocation
2869 	 * then we should never poison the object itself.
2870 	 */
2871 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2872 			!s->ctor)
2873 		s->flags |= __OBJECT_POISON;
2874 	else
2875 		s->flags &= ~__OBJECT_POISON;
2876 
2877 
2878 	/*
2879 	 * If we are Redzoning then check if there is some space between the
2880 	 * end of the object and the free pointer. If not then add an
2881 	 * additional word to have some bytes to store Redzone information.
2882 	 */
2883 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2884 		size += sizeof(void *);
2885 #endif
2886 
2887 	/*
2888 	 * With that we have determined the number of bytes in actual use
2889 	 * by the object. This is the potential offset to the free pointer.
2890 	 */
2891 	s->inuse = size;
2892 
2893 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2894 		s->ctor)) {
2895 		/*
2896 		 * Relocate free pointer after the object if it is not
2897 		 * permitted to overwrite the first word of the object on
2898 		 * kmem_cache_free.
2899 		 *
2900 		 * This is the case if we do RCU, have a constructor or
2901 		 * destructor or are poisoning the objects.
2902 		 */
2903 		s->offset = size;
2904 		size += sizeof(void *);
2905 	}
2906 
2907 #ifdef CONFIG_SLUB_DEBUG
2908 	if (flags & SLAB_STORE_USER)
2909 		/*
2910 		 * Need to store information about allocs and frees after
2911 		 * the object.
2912 		 */
2913 		size += 2 * sizeof(struct track);
2914 
2915 	if (flags & SLAB_RED_ZONE)
2916 		/*
2917 		 * Add some empty padding so that we can catch
2918 		 * overwrites from earlier objects rather than let
2919 		 * tracking information or the free pointer be
2920 		 * corrupted if a user writes before the start
2921 		 * of the object.
2922 		 */
2923 		size += sizeof(void *);
2924 #endif
2925 
2926 	/*
2927 	 * Determine the alignment based on various parameters that the
2928 	 * user specified and the dynamic determination of cache line size
2929 	 * on bootup.
2930 	 */
2931 	align = calculate_alignment(flags, align, s->objsize);
2932 	s->align = align;
2933 
2934 	/*
2935 	 * SLUB stores one object immediately after another beginning from
2936 	 * offset 0. In order to align the objects we have to simply size
2937 	 * each object to conform to the alignment.
2938 	 */
2939 	size = ALIGN(size, align);
2940 	s->size = size;
2941 	if (forced_order >= 0)
2942 		order = forced_order;
2943 	else
2944 		order = calculate_order(size, s->reserved);
2945 
2946 	if (order < 0)
2947 		return 0;
2948 
2949 	s->allocflags = 0;
2950 	if (order)
2951 		s->allocflags |= __GFP_COMP;
2952 
2953 	if (s->flags & SLAB_CACHE_DMA)
2954 		s->allocflags |= SLUB_DMA;
2955 
2956 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2957 		s->allocflags |= __GFP_RECLAIMABLE;
2958 
2959 	/*
2960 	 * Determine the number of objects per slab
2961 	 */
2962 	s->oo = oo_make(order, size, s->reserved);
2963 	s->min = oo_make(get_order(size), size, s->reserved);
2964 	if (oo_objects(s->oo) > oo_objects(s->max))
2965 		s->max = s->oo;
2966 
2967 	return !!oo_objects(s->oo);
2968 
2969 }
2970 
2971 static int kmem_cache_open(struct kmem_cache *s,
2972 		const char *name, size_t size,
2973 		size_t align, unsigned long flags,
2974 		void (*ctor)(void *))
2975 {
2976 	memset(s, 0, kmem_size);
2977 	s->name = name;
2978 	s->ctor = ctor;
2979 	s->objsize = size;
2980 	s->align = align;
2981 	s->flags = kmem_cache_flags(size, flags, name, ctor);
2982 	s->reserved = 0;
2983 
2984 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
2985 		s->reserved = sizeof(struct rcu_head);
2986 
2987 	if (!calculate_sizes(s, -1))
2988 		goto error;
2989 	if (disable_higher_order_debug) {
2990 		/*
2991 		 * Disable debugging flags that store metadata if the min slab
2992 		 * order increased.
2993 		 */
2994 		if (get_order(s->size) > get_order(s->objsize)) {
2995 			s->flags &= ~DEBUG_METADATA_FLAGS;
2996 			s->offset = 0;
2997 			if (!calculate_sizes(s, -1))
2998 				goto error;
2999 		}
3000 	}
3001 
3002 #ifdef CONFIG_CMPXCHG_DOUBLE
3003 	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3004 		/* Enable fast mode */
3005 		s->flags |= __CMPXCHG_DOUBLE;
3006 #endif
3007 
3008 	/*
3009 	 * The larger the object size is, the more pages we want on the partial
3010 	 * list to avoid pounding the page allocator excessively.
3011 	 */
3012 	set_min_partial(s, ilog2(s->size) / 2);
3013 
3014 	/*
3015 	 * cpu_partial determines the maximum number of objects kept in the
3016 	 * per cpu partial lists of a processor.
3017 	 *
3018 	 * Per cpu partial lists mainly contain slabs that just have one
3019 	 * object freed. If they are used for allocation then they can be
3020 	 * filled up again with minimal effort. The slab will never hit the
3021 	 * per node partial lists and therefore no locking will be required.
3022 	 *
3023 	 * This setting also determines
3024 	 *
3025 	 * A) The number of objects from per cpu partial slabs dumped to the
3026 	 *    per node list when we reach the limit.
3027 	 * B) The number of objects in cpu partial slabs to extract from the
3028 	 *    per node list when we run out of per cpu objects. We only fetch 50%
3029 	 *    to keep some capacity around for frees.
3030 	 */
3031 	if (s->size >= PAGE_SIZE)
3032 		s->cpu_partial = 2;
3033 	else if (s->size >= 1024)
3034 		s->cpu_partial = 6;
3035 	else if (s->size >= 256)
3036 		s->cpu_partial = 13;
3037 	else
3038 		s->cpu_partial = 30;
3039 
3040 	s->refcount = 1;
3041 #ifdef CONFIG_NUMA
3042 	s->remote_node_defrag_ratio = 1000;
3043 #endif
3044 	if (!init_kmem_cache_nodes(s))
3045 		goto error;
3046 
3047 	if (alloc_kmem_cache_cpus(s))
3048 		return 1;
3049 
3050 	free_kmem_cache_nodes(s);
3051 error:
3052 	if (flags & SLAB_PANIC)
3053 		panic("Cannot create slab %s size=%lu realsize=%u "
3054 			"order=%u offset=%u flags=%lx\n",
3055 			s->name, (unsigned long)size, s->size, oo_order(s->oo),
3056 			s->offset, flags);
3057 	return 0;
3058 }
3059 
3060 /*
3061  * Determine the size of a slab object
3062  */
3063 unsigned int kmem_cache_size(struct kmem_cache *s)
3064 {
3065 	return s->objsize;
3066 }
3067 EXPORT_SYMBOL(kmem_cache_size);
3068 
3069 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3070 							const char *text)
3071 {
3072 #ifdef CONFIG_SLUB_DEBUG
3073 	void *addr = page_address(page);
3074 	void *p;
3075 	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3076 				     sizeof(long), GFP_ATOMIC);
3077 	if (!map)
3078 		return;
3079 	slab_err(s, page, "%s", text);
3080 	slab_lock(page);
3081 
3082 	get_map(s, page, map);
3083 	for_each_object(p, s, addr, page->objects) {
3084 
3085 		if (!test_bit(slab_index(p, s, addr), map)) {
3086 			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3087 							p, p - addr);
3088 			print_tracking(s, p);
3089 		}
3090 	}
3091 	slab_unlock(page);
3092 	kfree(map);
3093 #endif
3094 }
3095 
3096 /*
3097  * Attempt to free all partial slabs on a node.
3098  * This is called from kmem_cache_close(). We must be the last thread
3099  * using the cache and therefore we do not need to lock anymore.
3100  */
3101 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3102 {
3103 	struct page *page, *h;
3104 
3105 	list_for_each_entry_safe(page, h, &n->partial, lru) {
3106 		if (!page->inuse) {
3107 			remove_partial(n, page);
3108 			discard_slab(s, page);
3109 		} else {
3110 			list_slab_objects(s, page,
3111 				"Objects remaining on kmem_cache_close()");
3112 		}
3113 	}
3114 }
3115 
3116 /*
3117  * Release all resources used by a slab cache.
3118  */
3119 static inline int kmem_cache_close(struct kmem_cache *s)
3120 {
3121 	int node;
3122 
3123 	flush_all(s);
3124 	free_percpu(s->cpu_slab);
3125 	/* Attempt to free all objects */
3126 	for_each_node_state(node, N_NORMAL_MEMORY) {
3127 		struct kmem_cache_node *n = get_node(s, node);
3128 
3129 		free_partial(s, n);
3130 		if (n->nr_partial || slabs_node(s, node))
3131 			return 1;
3132 	}
3133 	free_kmem_cache_nodes(s);
3134 	return 0;
3135 }
3136 
3137 /*
3138  * Close a cache and release the kmem_cache structure
3139  * (must be used for caches created using kmem_cache_create)
3140  */
3141 void kmem_cache_destroy(struct kmem_cache *s)
3142 {
3143 	down_write(&slub_lock);
3144 	s->refcount--;
3145 	if (!s->refcount) {
3146 		list_del(&s->list);
3147 		up_write(&slub_lock);
3148 		if (kmem_cache_close(s)) {
3149 			printk(KERN_ERR "SLUB %s: %s called for cache that "
3150 				"still has objects.\n", s->name, __func__);
3151 			dump_stack();
3152 		}
3153 		if (s->flags & SLAB_DESTROY_BY_RCU)
3154 			rcu_barrier();
3155 		sysfs_slab_remove(s);
3156 	} else
3157 		up_write(&slub_lock);
3158 }
3159 EXPORT_SYMBOL(kmem_cache_destroy);
3160 
3161 /********************************************************************
3162  *		Kmalloc subsystem
3163  *******************************************************************/
3164 
3165 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
3166 EXPORT_SYMBOL(kmalloc_caches);
3167 
3168 static struct kmem_cache *kmem_cache;
3169 
3170 #ifdef CONFIG_ZONE_DMA
3171 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
3172 #endif
3173 
3174 static int __init setup_slub_min_order(char *str)
3175 {
3176 	get_option(&str, &slub_min_order);
3177 
3178 	return 1;
3179 }
3180 
3181 __setup("slub_min_order=", setup_slub_min_order);
3182 
3183 static int __init setup_slub_max_order(char *str)
3184 {
3185 	get_option(&str, &slub_max_order);
3186 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
3187 
3188 	return 1;
3189 }
3190 
3191 __setup("slub_max_order=", setup_slub_max_order);
3192 
3193 static int __init setup_slub_min_objects(char *str)
3194 {
3195 	get_option(&str, &slub_min_objects);
3196 
3197 	return 1;
3198 }
3199 
3200 __setup("slub_min_objects=", setup_slub_min_objects);
3201 
3202 static int __init setup_slub_nomerge(char *str)
3203 {
3204 	slub_nomerge = 1;
3205 	return 1;
3206 }
3207 
3208 __setup("slub_nomerge", setup_slub_nomerge);
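
/*
 * The four parameters above are kernel boot parameters.  As an
 * illustration (arbitrary values), booting with
 *
 *	slub_min_objects=16 slub_max_order=1 slub_nomerge
 *
 * raises the minimum number of objects per slab, caps slab pages at
 * order 1 and disables merging of compatible caches.
 */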
3209 
3210 static struct kmem_cache *__init create_kmalloc_cache(const char *name,
3211 						int size, unsigned int flags)
3212 {
3213 	struct kmem_cache *s;
3214 
3215 	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3216 
3217 	/*
3218 	 * This function is called with IRQs disabled during early-boot on
3219 	 * single CPU so there's no need to take slub_lock here.
3220 	 */
3221 	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
3222 								flags, NULL))
3223 		goto panic;
3224 
3225 	list_add(&s->list, &slab_caches);
3226 	return s;
3227 
3228 panic:
3229 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
3230 	return NULL;
3231 }
3232 
3233 /*
3234  * Conversion table for small slab sizes / 8 to the index in the
3235  * kmalloc array. This is necessary for slabs < 192 since we have non power
3236  * of two cache sizes there. The size of larger slabs can be determined using
3237  * fls.
3238  */
3239 static s8 size_index[24] = {
3240 	3,	/* 8 */
3241 	4,	/* 16 */
3242 	5,	/* 24 */
3243 	5,	/* 32 */
3244 	6,	/* 40 */
3245 	6,	/* 48 */
3246 	6,	/* 56 */
3247 	6,	/* 64 */
3248 	1,	/* 72 */
3249 	1,	/* 80 */
3250 	1,	/* 88 */
3251 	1,	/* 96 */
3252 	7,	/* 104 */
3253 	7,	/* 112 */
3254 	7,	/* 120 */
3255 	7,	/* 128 */
3256 	2,	/* 136 */
3257 	2,	/* 144 */
3258 	2,	/* 152 */
3259 	2,	/* 160 */
3260 	2,	/* 168 */
3261 	2,	/* 176 */
3262 	2,	/* 184 */
3263 	2	/* 192 */
3264 };
3265 
3266 static inline int size_index_elem(size_t bytes)
3267 {
3268 	return (bytes - 1) / 8;
3269 }
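
/*
 * Example lookup (illustrative): kmalloc(100) computes
 * size_index_elem(100) = (100 - 1) / 8 = 12 and size_index[12] = 7, so
 * the request is served from the kmalloc-128 cache.  Sizes above 192
 * bytes instead use fls(size - 1) in get_slab() below.
 */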
3270 
3271 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
3272 {
3273 	int index;
3274 
3275 	if (size <= 192) {
3276 		if (!size)
3277 			return ZERO_SIZE_PTR;
3278 
3279 		index = size_index[size_index_elem(size)];
3280 	} else
3281 		index = fls(size - 1);
3282 
3283 #ifdef CONFIG_ZONE_DMA
3284 	if (unlikely((flags & SLUB_DMA)))
3285 		return kmalloc_dma_caches[index];
3286 
3287 #endif
3288 	return kmalloc_caches[index];
3289 }
3290 
3291 void *__kmalloc(size_t size, gfp_t flags)
3292 {
3293 	struct kmem_cache *s;
3294 	void *ret;
3295 
3296 	if (unlikely(size > SLUB_MAX_SIZE))
3297 		return kmalloc_large(size, flags);
3298 
3299 	s = get_slab(size, flags);
3300 
3301 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3302 		return s;
3303 
3304 	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
3305 
3306 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3307 
3308 	return ret;
3309 }
3310 EXPORT_SYMBOL(__kmalloc);
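
/*
 * Typical use from elsewhere in the kernel (sketch only; struct foo is
 * a made-up example type and error handling beyond the NULL check is
 * the caller's business):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 *
 * Requests larger than SLUB_MAX_SIZE never reach the kmalloc caches and
 * are handed to the page allocator through kmalloc_large() instead.
 */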
3311 
3312 #ifdef CONFIG_NUMA
3313 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3314 {
3315 	struct page *page;
3316 	void *ptr = NULL;
3317 
3318 	flags |= __GFP_COMP | __GFP_NOTRACK;
3319 	page = alloc_pages_node(node, flags, get_order(size));
3320 	if (page)
3321 		ptr = page_address(page);
3322 
3323 	kmemleak_alloc(ptr, size, 1, flags);
3324 	return ptr;
3325 }
3326 
3327 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3328 {
3329 	struct kmem_cache *s;
3330 	void *ret;
3331 
3332 	if (unlikely(size > SLUB_MAX_SIZE)) {
3333 		ret = kmalloc_large_node(size, flags, node);
3334 
3335 		trace_kmalloc_node(_RET_IP_, ret,
3336 				   size, PAGE_SIZE << get_order(size),
3337 				   flags, node);
3338 
3339 		return ret;
3340 	}
3341 
3342 	s = get_slab(size, flags);
3343 
3344 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3345 		return s;
3346 
3347 	ret = slab_alloc(s, flags, node, _RET_IP_);
3348 
3349 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3350 
3351 	return ret;
3352 }
3353 EXPORT_SYMBOL(__kmalloc_node);
3354 #endif
3355 
3356 size_t ksize(const void *object)
3357 {
3358 	struct page *page;
3359 
3360 	if (unlikely(object == ZERO_SIZE_PTR))
3361 		return 0;
3362 
3363 	page = virt_to_head_page(object);
3364 
3365 	if (unlikely(!PageSlab(page))) {
3366 		WARN_ON(!PageCompound(page));
3367 		return PAGE_SIZE << compound_order(page);
3368 	}
3369 
3370 	return slab_ksize(page->slab);
3371 }
3372 EXPORT_SYMBOL(ksize);
3373 
3374 #ifdef CONFIG_SLUB_DEBUG
3375 bool verify_mem_not_deleted(const void *x)
3376 {
3377 	struct page *page;
3378 	void *object = (void *)x;
3379 	unsigned long flags;
3380 	bool rv;
3381 
3382 	if (unlikely(ZERO_OR_NULL_PTR(x)))
3383 		return false;
3384 
3385 	local_irq_save(flags);
3386 
3387 	page = virt_to_head_page(x);
3388 	if (unlikely(!PageSlab(page))) {
3389 		/* maybe it was from stack? */
3390 		rv = true;
3391 		goto out_unlock;
3392 	}
3393 
3394 	slab_lock(page);
3395 	if (on_freelist(page->slab, page, object)) {
3396 		object_err(page->slab, page, object, "Object is on free-list");
3397 		rv = false;
3398 	} else {
3399 		rv = true;
3400 	}
3401 	slab_unlock(page);
3402 
3403 out_unlock:
3404 	local_irq_restore(flags);
3405 	return rv;
3406 }
3407 EXPORT_SYMBOL(verify_mem_not_deleted);
3408 #endif
3409 
3410 void kfree(const void *x)
3411 {
3412 	struct page *page;
3413 	void *object = (void *)x;
3414 
3415 	trace_kfree(_RET_IP_, x);
3416 
3417 	if (unlikely(ZERO_OR_NULL_PTR(x)))
3418 		return;
3419 
3420 	page = virt_to_head_page(x);
3421 	if (unlikely(!PageSlab(page))) {
3422 		BUG_ON(!PageCompound(page));
3423 		kmemleak_free(x);
3424 		put_page(page);
3425 		return;
3426 	}
3427 	slab_free(page->slab, page, object, _RET_IP_);
3428 }
3429 EXPORT_SYMBOL(kfree);
3430 
3431 /*
3432  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3433  * the remaining slabs by the number of items in use. The slabs with the
3434  * most items in use come first. New allocations will then fill those up
3435  * and thus they can be removed from the partial lists.
3436  *
3437  * The slabs with the least items are placed last. This results in them
3438  * being allocated from last, increasing the chance that the last objects
3439  * are freed in them.
3440  */
3441 int kmem_cache_shrink(struct kmem_cache *s)
3442 {
3443 	int node;
3444 	int i;
3445 	struct kmem_cache_node *n;
3446 	struct page *page;
3447 	struct page *t;
3448 	int objects = oo_objects(s->max);
3449 	struct list_head *slabs_by_inuse =
3450 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
3451 	unsigned long flags;
3452 
3453 	if (!slabs_by_inuse)
3454 		return -ENOMEM;
3455 
3456 	flush_all(s);
3457 	for_each_node_state(node, N_NORMAL_MEMORY) {
3458 		n = get_node(s, node);
3459 
3460 		if (!n->nr_partial)
3461 			continue;
3462 
3463 		for (i = 0; i < objects; i++)
3464 			INIT_LIST_HEAD(slabs_by_inuse + i);
3465 
3466 		spin_lock_irqsave(&n->list_lock, flags);
3467 
3468 		/*
3469 		 * Build lists indexed by the items in use in each slab.
3470 		 *
3471 		 * Note that concurrent frees may occur while we hold the
3472 		 * list_lock. page->inuse here is the upper limit.
3473 		 */
3474 		list_for_each_entry_safe(page, t, &n->partial, lru) {
3475 			list_move(&page->lru, slabs_by_inuse + page->inuse);
3476 			if (!page->inuse)
3477 				n->nr_partial--;
3478 		}
3479 
3480 		/*
3481 		 * Rebuild the partial list with the slabs filled up most
3482 		 * first and the least used slabs at the end.
3483 		 */
3484 		for (i = objects - 1; i > 0; i--)
3485 			list_splice(slabs_by_inuse + i, n->partial.prev);
3486 
3487 		spin_unlock_irqrestore(&n->list_lock, flags);
3488 
3489 		/* Release empty slabs */
3490 		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3491 			discard_slab(s, page);
3492 	}
3493 
3494 	kfree(slabs_by_inuse);
3495 	return 0;
3496 }
3497 EXPORT_SYMBOL(kmem_cache_shrink);
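
/*
 * Sketch of the effect (illustrative): with slabs that hold up to 8
 * objects, a partial list containing slabs with 1, 7, 0 and 4 objects
 * in use is rebuilt above as 7, 4, 1 and the empty slab is discarded.
 * A cache owner can trigger this directly, e.g.
 *
 *	kmem_cache_shrink(foo_cache);
 *
 * where foo_cache stands for whatever cache the caller created earlier.
 */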
3498 
3499 #if defined(CONFIG_MEMORY_HOTPLUG)
3500 static int slab_mem_going_offline_callback(void *arg)
3501 {
3502 	struct kmem_cache *s;
3503 
3504 	down_read(&slub_lock);
3505 	list_for_each_entry(s, &slab_caches, list)
3506 		kmem_cache_shrink(s);
3507 	up_read(&slub_lock);
3508 
3509 	return 0;
3510 }
3511 
3512 static void slab_mem_offline_callback(void *arg)
3513 {
3514 	struct kmem_cache_node *n;
3515 	struct kmem_cache *s;
3516 	struct memory_notify *marg = arg;
3517 	int offline_node;
3518 
3519 	offline_node = marg->status_change_nid;
3520 
3521 	/*
3522 	 * If the node still has available memory, we still need its
3523 	 * kmem_cache_node structure, so there is nothing to do here.
3524 	 */
3525 	if (offline_node < 0)
3526 		return;
3527 
3528 	down_read(&slub_lock);
3529 	list_for_each_entry(s, &slab_caches, list) {
3530 		n = get_node(s, offline_node);
3531 		if (n) {
3532 			/*
3533 			 * if n->nr_slabs > 0, slabs still exist on the node
3534 			 * that is going down. We were unable to free them,
3535 			 * and offline_pages() function shouldn't call this
3536 			 * callback. So, we must fail.
3537 			 */
3538 			BUG_ON(slabs_node(s, offline_node));
3539 
3540 			s->node[offline_node] = NULL;
3541 			kmem_cache_free(kmem_cache_node, n);
3542 		}
3543 	}
3544 	up_read(&slub_lock);
3545 }
3546 
3547 static int slab_mem_going_online_callback(void *arg)
3548 {
3549 	struct kmem_cache_node *n;
3550 	struct kmem_cache *s;
3551 	struct memory_notify *marg = arg;
3552 	int nid = marg->status_change_nid;
3553 	int ret = 0;
3554 
3555 	/*
3556 	 * If the node's memory is already available, then kmem_cache_node is
3557 	 * already created. Nothing to do.
3558 	 */
3559 	if (nid < 0)
3560 		return 0;
3561 
3562 	/*
3563 	 * We are bringing a node online. No memory is available yet. We must
3564 	 * allocate a kmem_cache_node structure in order to bring the node
3565 	 * online.
3566 	 */
3567 	down_read(&slub_lock);
3568 	list_for_each_entry(s, &slab_caches, list) {
3569 		/*
3570 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
3571 		 *      since memory is not yet available from the node that
3572 		 *      is brought up.
3573 		 */
3574 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
3575 		if (!n) {
3576 			ret = -ENOMEM;
3577 			goto out;
3578 		}
3579 		init_kmem_cache_node(n, s);
3580 		s->node[nid] = n;
3581 	}
3582 out:
3583 	up_read(&slub_lock);
3584 	return ret;
3585 }
3586 
3587 static int slab_memory_callback(struct notifier_block *self,
3588 				unsigned long action, void *arg)
3589 {
3590 	int ret = 0;
3591 
3592 	switch (action) {
3593 	case MEM_GOING_ONLINE:
3594 		ret = slab_mem_going_online_callback(arg);
3595 		break;
3596 	case MEM_GOING_OFFLINE:
3597 		ret = slab_mem_going_offline_callback(arg);
3598 		break;
3599 	case MEM_OFFLINE:
3600 	case MEM_CANCEL_ONLINE:
3601 		slab_mem_offline_callback(arg);
3602 		break;
3603 	case MEM_ONLINE:
3604 	case MEM_CANCEL_OFFLINE:
3605 		break;
3606 	}
3607 	if (ret)
3608 		ret = notifier_from_errno(ret);
3609 	else
3610 		ret = NOTIFY_OK;
3611 	return ret;
3612 }
3613 
3614 #endif /* CONFIG_MEMORY_HOTPLUG */
3615 
3616 /********************************************************************
3617  *			Basic setup of slabs
3618  *******************************************************************/
3619 
3620 /*
3621  * Used for early kmem_cache structures that were allocated using
3622  * the page allocator
3623  */
3624 
3625 static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
3626 {
3627 	int node;
3628 
3629 	list_add(&s->list, &slab_caches);
3630 	s->refcount = -1;
3631 
3632 	for_each_node_state(node, N_NORMAL_MEMORY) {
3633 		struct kmem_cache_node *n = get_node(s, node);
3634 		struct page *p;
3635 
3636 		if (n) {
3637 			list_for_each_entry(p, &n->partial, lru)
3638 				p->slab = s;
3639 
3640 #ifdef CONFIG_SLUB_DEBUG
3641 			list_for_each_entry(p, &n->full, lru)
3642 				p->slab = s;
3643 #endif
3644 		}
3645 	}
3646 }
3647 
3648 void __init kmem_cache_init(void)
3649 {
3650 	int i;
3651 	int caches = 0;
3652 	struct kmem_cache *temp_kmem_cache;
3653 	int order;
3654 	struct kmem_cache *temp_kmem_cache_node;
3655 	unsigned long kmalloc_size;
3656 
3657 	kmem_size = offsetof(struct kmem_cache, node) +
3658 				nr_node_ids * sizeof(struct kmem_cache_node *);
3659 
3660 	/* Allocate two kmem_caches from the page allocator */
3661 	kmalloc_size = ALIGN(kmem_size, cache_line_size());
3662 	order = get_order(2 * kmalloc_size);
3663 	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
3664 
3665 	/*
3666 	 * Must first have the slab cache available for the allocations of the
3667 	 * struct kmem_cache_node's. There is special bootstrap code in
3668 	 * kmem_cache_open for slab_state == DOWN.
3669 	 */
3670 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
3671 
3672 	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
3673 		sizeof(struct kmem_cache_node),
3674 		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3675 
3676 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3677 
3678 	/* Able to allocate the per node structures */
3679 	slab_state = PARTIAL;
3680 
3681 	temp_kmem_cache = kmem_cache;
3682 	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
3683 		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3684 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3685 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
3686 
3687 	/*
3688 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
3689 	 * kmem_cache_node is separately allocated so no need to
3690 	 * update any list pointers.
3691 	 */
3692 	temp_kmem_cache_node = kmem_cache_node;
3693 
3694 	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3695 	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
3696 
3697 	kmem_cache_bootstrap_fixup(kmem_cache_node);
3698 
3699 	caches++;
3700 	kmem_cache_bootstrap_fixup(kmem_cache);
3701 	caches++;
3702 	/* Free temporary boot structure */
3703 	free_pages((unsigned long)temp_kmem_cache, order);
3704 
3705 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
3706 
3707 	/*
3708 	 * Patch up the size_index table if we have strange large alignment
3709 	 * requirements for the kmalloc array. This is only the case for
3710 	 * MIPS it seems. The standard arches will not generate any code here.
3711 	 *
3712 	 * Largest permitted alignment is 256 bytes due to the way we
3713 	 * handle the index determination for the smaller caches.
3714 	 *
3715 	 * Make sure that nothing crazy happens if someone starts tinkering
3716 	 * around with ARCH_KMALLOC_MINALIGN
3717 	 */
3718 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3719 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3720 
3721 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3722 		int elem = size_index_elem(i);
3723 		if (elem >= ARRAY_SIZE(size_index))
3724 			break;
3725 		size_index[elem] = KMALLOC_SHIFT_LOW;
3726 	}
3727 
3728 	if (KMALLOC_MIN_SIZE == 64) {
3729 		/*
3730 		 * The 96 byte size cache is not used if the alignment
3731 		 * is 64 byte.
3732 		 */
3733 		for (i = 64 + 8; i <= 96; i += 8)
3734 			size_index[size_index_elem(i)] = 7;
3735 	} else if (KMALLOC_MIN_SIZE == 128) {
3736 		/*
3737 		 * The 192 byte sized cache is not used if the alignment
3738 		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3739 		 * instead.
3740 		 */
3741 		for (i = 128 + 8; i <= 192; i += 8)
3742 			size_index[size_index_elem(i)] = 8;
3743 	}
3744 
3745 	/* Caches that are not power-of-two sized */
3746 	if (KMALLOC_MIN_SIZE <= 32) {
3747 		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3748 		caches++;
3749 	}
3750 
3751 	if (KMALLOC_MIN_SIZE <= 64) {
3752 		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3753 		caches++;
3754 	}
3755 
3756 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3757 		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3758 		caches++;
3759 	}
3760 
3761 	slab_state = UP;
3762 
3763 	/* Provide the correct kmalloc names now that the caches are up */
3764 	if (KMALLOC_MIN_SIZE <= 32) {
3765 		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3766 		BUG_ON(!kmalloc_caches[1]->name);
3767 	}
3768 
3769 	if (KMALLOC_MIN_SIZE <= 64) {
3770 		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3771 		BUG_ON(!kmalloc_caches[2]->name);
3772 	}
3773 
3774 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3775 		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3776 
3777 		BUG_ON(!s);
3778 		kmalloc_caches[i]->name = s;
3779 	}
3780 
3781 #ifdef CONFIG_SMP
3782 	register_cpu_notifier(&slab_notifier);
3783 #endif
3784 
3785 #ifdef CONFIG_ZONE_DMA
3786 	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3787 		struct kmem_cache *s = kmalloc_caches[i];
3788 
3789 		if (s && s->size) {
3790 			char *name = kasprintf(GFP_NOWAIT,
3791 				 "dma-kmalloc-%d", s->objsize);
3792 
3793 			BUG_ON(!name);
3794 			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
3795 				s->objsize, SLAB_CACHE_DMA);
3796 		}
3797 	}
3798 #endif
3799 	printk(KERN_INFO
3800 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3801 		" CPUs=%d, Nodes=%d\n",
3802 		caches, cache_line_size(),
3803 		slub_min_order, slub_max_order, slub_min_objects,
3804 		nr_cpu_ids, nr_node_ids);
3805 }
3806 
3807 void __init kmem_cache_init_late(void)
3808 {
3809 }
3810 
3811 /*
3812  * Find a mergeable slab cache
3813  */
3814 static int slab_unmergeable(struct kmem_cache *s)
3815 {
3816 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3817 		return 1;
3818 
3819 	if (s->ctor)
3820 		return 1;
3821 
3822 	/*
3823 	 * We may have set a slab to be unmergeable during bootstrap.
3824 	 */
3825 	if (s->refcount < 0)
3826 		return 1;
3827 
3828 	return 0;
3829 }
3830 
3831 static struct kmem_cache *find_mergeable(size_t size,
3832 		size_t align, unsigned long flags, const char *name,
3833 		void (*ctor)(void *))
3834 {
3835 	struct kmem_cache *s;
3836 
3837 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3838 		return NULL;
3839 
3840 	if (ctor)
3841 		return NULL;
3842 
3843 	size = ALIGN(size, sizeof(void *));
3844 	align = calculate_alignment(flags, align, size);
3845 	size = ALIGN(size, align);
3846 	flags = kmem_cache_flags(size, flags, name, NULL);
3847 
3848 	list_for_each_entry(s, &slab_caches, list) {
3849 		if (slab_unmergeable(s))
3850 			continue;
3851 
3852 		if (size > s->size)
3853 			continue;
3854 
3855 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3856 				continue;
3857 		/*
3858 		 * Check if alignment is compatible.
3859 		 * Courtesy of Adrian Drzewiecki
3860 		 */
3861 		if ((s->size & ~(align - 1)) != s->size)
3862 			continue;
3863 
3864 		if (s->size - size >= sizeof(void *))
3865 			continue;
3866 
3867 		return s;
3868 	}
3869 	return NULL;
3870 }
3871 
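/*
 * Typical usage (hypothetical caller, names are made up):
 *
 *	cachep = kmem_cache_create("my_cache", sizeof(struct my_obj),
 *				   0, SLAB_HWCACHE_ALIGN, NULL);
 *
 * An existing compatible cache may be returned instead of a newly
 * created one (see find_mergeable() above).
 */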
3872 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3873 		size_t align, unsigned long flags, void (*ctor)(void *))
3874 {
3875 	struct kmem_cache *s;
3876 	char *n;
3877 
3878 	if (WARN_ON(!name))
3879 		return NULL;
3880 
3881 	down_write(&slub_lock);
3882 	s = find_mergeable(size, align, flags, name, ctor);
3883 	if (s) {
3884 		s->refcount++;
3885 		/*
3886 		 * Adjust the object sizes so that we clear
3887 		 * the complete object on kzalloc.
3888 		 */
3889 		s->objsize = max(s->objsize, (int)size);
3890 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3891 
3892 		if (sysfs_slab_alias(s, name)) {
3893 			s->refcount--;
3894 			goto err;
3895 		}
3896 		up_write(&slub_lock);
3897 		return s;
3898 	}
3899 
3900 	n = kstrdup(name, GFP_KERNEL);
3901 	if (!n)
3902 		goto err;
3903 
3904 	s = kmalloc(kmem_size, GFP_KERNEL);
3905 	if (s) {
3906 		if (kmem_cache_open(s, n,
3907 				size, align, flags, ctor)) {
3908 			list_add(&s->list, &slab_caches);
3909 			if (sysfs_slab_add(s)) {
3910 				list_del(&s->list);
3911 				kfree(n);
3912 				kfree(s);
3913 				goto err;
3914 			}
3915 			up_write(&slub_lock);
3916 			return s;
3917 		}
3918 		kfree(n);
3919 		kfree(s);
3920 	}
3921 err:
3922 	up_write(&slub_lock);
3923 
3924 	if (flags & SLAB_PANIC)
3925 		panic("Cannot create slabcache %s\n", name);
3926 	else
3927 		s = NULL;
3928 	return s;
3929 }
3930 EXPORT_SYMBOL(kmem_cache_create);
3931 
3932 #ifdef CONFIG_SMP
3933 /*
3934  * Use the cpu notifier to insure that the cpu slabs are flushed when
3935  * necessary.
3936  */
3937 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3938 		unsigned long action, void *hcpu)
3939 {
3940 	long cpu = (long)hcpu;
3941 	struct kmem_cache *s;
3942 	unsigned long flags;
3943 
3944 	switch (action) {
3945 	case CPU_UP_CANCELED:
3946 	case CPU_UP_CANCELED_FROZEN:
3947 	case CPU_DEAD:
3948 	case CPU_DEAD_FROZEN:
3949 		down_read(&slub_lock);
3950 		list_for_each_entry(s, &slab_caches, list) {
3951 			local_irq_save(flags);
3952 			__flush_cpu_slab(s, cpu);
3953 			local_irq_restore(flags);
3954 		}
3955 		up_read(&slub_lock);
3956 		break;
3957 	default:
3958 		break;
3959 	}
3960 	return NOTIFY_OK;
3961 }
3962 
3963 static struct notifier_block __cpuinitdata slab_notifier = {
3964 	.notifier_call = slab_cpuup_callback
3965 };
3966 
3967 #endif
3968 
3969 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3970 {
3971 	struct kmem_cache *s;
3972 	void *ret;
3973 
3974 	if (unlikely(size > SLUB_MAX_SIZE))
3975 		return kmalloc_large(size, gfpflags);
3976 
3977 	s = get_slab(size, gfpflags);
3978 
3979 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3980 		return s;
3981 
3982 	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
3983 
3984 	/* Honor the call site pointer we received. */
3985 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
3986 
3987 	return ret;
3988 }
3989 
3990 #ifdef CONFIG_NUMA
3991 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3992 					int node, unsigned long caller)
3993 {
3994 	struct kmem_cache *s;
3995 	void *ret;
3996 
3997 	if (unlikely(size > SLUB_MAX_SIZE)) {
3998 		ret = kmalloc_large_node(size, gfpflags, node);
3999 
4000 		trace_kmalloc_node(caller, ret,
4001 				   size, PAGE_SIZE << get_order(size),
4002 				   gfpflags, node);
4003 
4004 		return ret;
4005 	}
4006 
4007 	s = get_slab(size, gfpflags);
4008 
4009 	if (unlikely(ZERO_OR_NULL_PTR(s)))
4010 		return s;
4011 
4012 	ret = slab_alloc(s, gfpflags, node, caller);
4013 
4014 	/* Honor the call site pointer we received. */
4015 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4016 
4017 	return ret;
4018 }
4019 #endif
4020 
4021 #ifdef CONFIG_SYSFS
4022 static int count_inuse(struct page *page)
4023 {
4024 	return page->inuse;
4025 }
4026 
4027 static int count_total(struct page *page)
4028 {
4029 	return page->objects;
4030 }
4031 #endif
4032 
4033 #ifdef CONFIG_SLUB_DEBUG
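/*
 * Check every object in a slab: objects still on the freelist must pass
 * the checks for a free object (SLUB_RED_INACTIVE), all others the
 * checks for an allocated object (SLUB_RED_ACTIVE).
 */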
4034 static int validate_slab(struct kmem_cache *s, struct page *page,
4035 						unsigned long *map)
4036 {
4037 	void *p;
4038 	void *addr = page_address(page);
4039 
4040 	if (!check_slab(s, page) ||
4041 			!on_freelist(s, page, NULL))
4042 		return 0;
4043 
4044 	/* Now we know that a valid freelist exists */
4045 	bitmap_zero(map, page->objects);
4046 
4047 	get_map(s, page, map);
4048 	for_each_object(p, s, addr, page->objects) {
4049 		if (test_bit(slab_index(p, s, addr), map))
4050 			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4051 				return 0;
4052 	}
4053 
4054 	for_each_object(p, s, addr, page->objects)
4055 		if (!test_bit(slab_index(p, s, addr), map))
4056 			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4057 				return 0;
4058 	return 1;
4059 }
4060 
4061 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4062 						unsigned long *map)
4063 {
4064 	slab_lock(page);
4065 	validate_slab(s, page, map);
4066 	slab_unlock(page);
4067 }
4068 
4069 static int validate_slab_node(struct kmem_cache *s,
4070 		struct kmem_cache_node *n, unsigned long *map)
4071 {
4072 	unsigned long count = 0;
4073 	struct page *page;
4074 	unsigned long flags;
4075 
4076 	spin_lock_irqsave(&n->list_lock, flags);
4077 
4078 	list_for_each_entry(page, &n->partial, lru) {
4079 		validate_slab_slab(s, page, map);
4080 		count++;
4081 	}
4082 	if (count != n->nr_partial)
4083 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
4084 			"counter=%ld\n", s->name, count, n->nr_partial);
4085 
4086 	if (!(s->flags & SLAB_STORE_USER))
4087 		goto out;
4088 
4089 	list_for_each_entry(page, &n->full, lru) {
4090 		validate_slab_slab(s, page, map);
4091 		count++;
4092 	}
4093 	if (count != atomic_long_read(&n->nr_slabs))
4094 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
4095 			"counter=%ld\n", s->name, count,
4096 			atomic_long_read(&n->nr_slabs));
4097 
4098 out:
4099 	spin_unlock_irqrestore(&n->list_lock, flags);
4100 	return count;
4101 }
4102 
4103 static long validate_slab_cache(struct kmem_cache *s)
4104 {
4105 	int node;
4106 	unsigned long count = 0;
4107 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4108 				sizeof(unsigned long), GFP_KERNEL);
4109 
4110 	if (!map)
4111 		return -ENOMEM;
4112 
4113 	flush_all(s);
4114 	for_each_node_state(node, N_NORMAL_MEMORY) {
4115 		struct kmem_cache_node *n = get_node(s, node);
4116 
4117 		count += validate_slab_node(s, n, map);
4118 	}
4119 	kfree(map);
4120 	return count;
4121 }
4122 /*
4123  * Generate lists of code addresses where slabcache objects are allocated
4124  * and freed.
4125  */
4126 
4127 struct location {
4128 	unsigned long count;
4129 	unsigned long addr;
4130 	long long sum_time;
4131 	long min_time;
4132 	long max_time;
4133 	long min_pid;
4134 	long max_pid;
4135 	DECLARE_BITMAP(cpus, NR_CPUS);
4136 	nodemask_t nodes;
4137 };
4138 
4139 struct loc_track {
4140 	unsigned long max;
4141 	unsigned long count;
4142 	struct location *loc;
4143 };
4144 
4145 static void free_loc_track(struct loc_track *t)
4146 {
4147 	if (t->max)
4148 		free_pages((unsigned long)t->loc,
4149 			get_order(sizeof(struct location) * t->max));
4150 }
4151 
4152 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4153 {
4154 	struct location *l;
4155 	int order;
4156 
4157 	order = get_order(sizeof(struct location) * max);
4158 
4159 	l = (void *)__get_free_pages(flags, order);
4160 	if (!l)
4161 		return 0;
4162 
4163 	if (t->count) {
4164 		memcpy(l, t->loc, sizeof(struct location) * t->count);
4165 		free_loc_track(t);
4166 	}
4167 	t->max = max;
4168 	t->loc = l;
4169 	return 1;
4170 }
4171 
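/*
 * Merge one alloc/free track into the location list. The list is kept
 * sorted by caller address: a binary search either finds an existing
 * entry to update or the position at which to insert a new one, growing
 * the backing store when it becomes full.
 */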
4172 static int add_location(struct loc_track *t, struct kmem_cache *s,
4173 				const struct track *track)
4174 {
4175 	long start, end, pos;
4176 	struct location *l;
4177 	unsigned long caddr;
4178 	unsigned long age = jiffies - track->when;
4179 
4180 	start = -1;
4181 	end = t->count;
4182 
4183 	for ( ; ; ) {
4184 		pos = start + (end - start + 1) / 2;
4185 
4186 		/*
4187 		 * There is nothing at "end". If we end up there
4188 		 * we need to insert the new element before "end".
4189 		 */
4190 		if (pos == end)
4191 			break;
4192 
4193 		caddr = t->loc[pos].addr;
4194 		if (track->addr == caddr) {
4195 
4196 			l = &t->loc[pos];
4197 			l->count++;
4198 			if (track->when) {
4199 				l->sum_time += age;
4200 				if (age < l->min_time)
4201 					l->min_time = age;
4202 				if (age > l->max_time)
4203 					l->max_time = age;
4204 
4205 				if (track->pid < l->min_pid)
4206 					l->min_pid = track->pid;
4207 				if (track->pid > l->max_pid)
4208 					l->max_pid = track->pid;
4209 
4210 				cpumask_set_cpu(track->cpu,
4211 						to_cpumask(l->cpus));
4212 			}
4213 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4214 			return 1;
4215 		}
4216 
4217 		if (track->addr < caddr)
4218 			end = pos;
4219 		else
4220 			start = pos;
4221 	}
4222 
4223 	/*
4224 	 * Not found. Insert new tracking element.
4225 	 */
4226 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4227 		return 0;
4228 
4229 	l = t->loc + pos;
4230 	if (pos < t->count)
4231 		memmove(l + 1, l,
4232 			(t->count - pos) * sizeof(struct location));
4233 	t->count++;
4234 	l->count = 1;
4235 	l->addr = track->addr;
4236 	l->sum_time = age;
4237 	l->min_time = age;
4238 	l->max_time = age;
4239 	l->min_pid = track->pid;
4240 	l->max_pid = track->pid;
4241 	cpumask_clear(to_cpumask(l->cpus));
4242 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4243 	nodes_clear(l->nodes);
4244 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4245 	return 1;
4246 }
4247 
4248 static void process_slab(struct loc_track *t, struct kmem_cache *s,
4249 		struct page *page, enum track_item alloc,
4250 		unsigned long *map)
4251 {
4252 	void *addr = page_address(page);
4253 	void *p;
4254 
4255 	bitmap_zero(map, page->objects);
4256 	get_map(s, page, map);
4257 
4258 	for_each_object(p, s, addr, page->objects)
4259 		if (!test_bit(slab_index(p, s, addr), map))
4260 			add_location(t, s, get_track(s, p, alloc));
4261 }
4262 
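/*
 * Back end for the alloc_calls/free_calls sysfs files: emit one line per
 * call site of the form
 *	<count> <symbol> age=<min>/<avg>/<max> pid=<min>-<max> cpus=... nodes=...
 * The age and pid ranges collapse to a single value when min == max, and
 * the cpu/node lists are only printed when they carry information.
 */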
4263 static int list_locations(struct kmem_cache *s, char *buf,
4264 					enum track_item alloc)
4265 {
4266 	int len = 0;
4267 	unsigned long i;
4268 	struct loc_track t = { 0, 0, NULL };
4269 	int node;
4270 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4271 				     sizeof(unsigned long), GFP_KERNEL);
4272 
4273 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4274 				     GFP_TEMPORARY)) {
4275 		kfree(map);
4276 		return sprintf(buf, "Out of memory\n");
4277 	}
4278 	/* Push back cpu slabs */
4279 	flush_all(s);
4280 
4281 	for_each_node_state(node, N_NORMAL_MEMORY) {
4282 		struct kmem_cache_node *n = get_node(s, node);
4283 		unsigned long flags;
4284 		struct page *page;
4285 
4286 		if (!atomic_long_read(&n->nr_slabs))
4287 			continue;
4288 
4289 		spin_lock_irqsave(&n->list_lock, flags);
4290 		list_for_each_entry(page, &n->partial, lru)
4291 			process_slab(&t, s, page, alloc, map);
4292 		list_for_each_entry(page, &n->full, lru)
4293 			process_slab(&t, s, page, alloc, map);
4294 		spin_unlock_irqrestore(&n->list_lock, flags);
4295 	}
4296 
4297 	for (i = 0; i < t.count; i++) {
4298 		struct location *l = &t.loc[i];
4299 
4300 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4301 			break;
4302 		len += sprintf(buf + len, "%7ld ", l->count);
4303 
4304 		if (l->addr)
4305 			len += sprintf(buf + len, "%pS", (void *)l->addr);
4306 		else
4307 			len += sprintf(buf + len, "<not-available>");
4308 
4309 		if (l->sum_time != l->min_time) {
4310 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4311 				l->min_time,
4312 				(long)div_u64(l->sum_time, l->count),
4313 				l->max_time);
4314 		} else
4315 			len += sprintf(buf + len, " age=%ld",
4316 				l->min_time);
4317 
4318 		if (l->min_pid != l->max_pid)
4319 			len += sprintf(buf + len, " pid=%ld-%ld",
4320 				l->min_pid, l->max_pid);
4321 		else
4322 			len += sprintf(buf + len, " pid=%ld",
4323 				l->min_pid);
4324 
4325 		if (num_online_cpus() > 1 &&
4326 				!cpumask_empty(to_cpumask(l->cpus)) &&
4327 				len < PAGE_SIZE - 60) {
4328 			len += sprintf(buf + len, " cpus=");
4329 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
4330 						 to_cpumask(l->cpus));
4331 		}
4332 
4333 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4334 				len < PAGE_SIZE - 60) {
4335 			len += sprintf(buf + len, " nodes=");
4336 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
4337 					l->nodes);
4338 		}
4339 
4340 		len += sprintf(buf + len, "\n");
4341 	}
4342 
4343 	free_loc_track(&t);
4344 	kfree(map);
4345 	if (!t.count)
4346 		len += sprintf(buf, "No data\n");
4347 	return len;
4348 }
4349 #endif
4350 
4351 #ifdef SLUB_RESILIENCY_TEST
4352 static void resiliency_test(void)
4353 {
4354 	u8 *p;
4355 
4356 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
4357 
4358 	printk(KERN_ERR "SLUB resiliency testing\n");
4359 	printk(KERN_ERR "-----------------------\n");
4360 	printk(KERN_ERR "A. Corruption after allocation\n");
4361 
4362 	p = kzalloc(16, GFP_KERNEL);
4363 	p[16] = 0x12;
4364 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4365 			" 0x12->0x%p\n\n", p + 16);
4366 
4367 	validate_slab_cache(kmalloc_caches[4]);
4368 
4369 	/* Hmmm... The next two are dangerous */
4370 	p = kzalloc(32, GFP_KERNEL);
4371 	p[32 + sizeof(void *)] = 0x34;
4372 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4373 			" 0x34 -> -0x%p\n", p);
4374 	printk(KERN_ERR
4375 		"If allocated object is overwritten then not detectable\n\n");
4376 
4377 	validate_slab_cache(kmalloc_caches[5]);
4378 	p = kzalloc(64, GFP_KERNEL);
4379 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4380 	*p = 0x56;
4381 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4382 									p);
4383 	printk(KERN_ERR
4384 		"If allocated object is overwritten then not detectable\n\n");
4385 	validate_slab_cache(kmalloc_caches[6]);
4386 
4387 	printk(KERN_ERR "\nB. Corruption after free\n");
4388 	p = kzalloc(128, GFP_KERNEL);
4389 	kfree(p);
4390 	*p = 0x78;
4391 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4392 	validate_slab_cache(kmalloc_caches[7]);
4393 
4394 	p = kzalloc(256, GFP_KERNEL);
4395 	kfree(p);
4396 	p[50] = 0x9a;
4397 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4398 			p);
4399 	validate_slab_cache(kmalloc_caches[8]);
4400 
4401 	p = kzalloc(512, GFP_KERNEL);
4402 	kfree(p);
4403 	p[512] = 0xab;
4404 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4405 	validate_slab_cache(kmalloc_caches[9]);
4406 }
4407 #else
4408 #ifdef CONFIG_SYSFS
4409 static void resiliency_test(void) {};
4410 #endif
4411 #endif
4412 
4413 #ifdef CONFIG_SYSFS
4414 enum slab_stat_type {
4415 	SL_ALL,			/* All slabs */
4416 	SL_PARTIAL,		/* Only partially allocated slabs */
4417 	SL_CPU,			/* Only slabs used for cpu caches */
4418 	SL_OBJECTS,		/* Determine allocated objects not slabs */
4419 	SL_TOTAL		/* Determine object capacity not slabs */
4420 };
4421 
4422 #define SO_ALL		(1 << SL_ALL)
4423 #define SO_PARTIAL	(1 << SL_PARTIAL)
4424 #define SO_CPU		(1 << SL_CPU)
4425 #define SO_OBJECTS	(1 << SL_OBJECTS)
4426 #define SO_TOTAL	(1 << SL_TOTAL)
4427 
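/*
 * Common helper for the object and slab counting attributes below. The
 * output is a total followed, on NUMA, by per node breakdowns such as
 * "4096 N0=2048 N1=2048".
 */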
4428 static ssize_t show_slab_objects(struct kmem_cache *s,
4429 			    char *buf, unsigned long flags)
4430 {
4431 	unsigned long total = 0;
4432 	int node;
4433 	int x;
4434 	unsigned long *nodes;
4435 	unsigned long *per_cpu;
4436 
4437 	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4438 	if (!nodes)
4439 		return -ENOMEM;
4440 	per_cpu = nodes + nr_node_ids;
4441 
4442 	if (flags & SO_CPU) {
4443 		int cpu;
4444 
4445 		for_each_possible_cpu(cpu) {
4446 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
4447 			int node = ACCESS_ONCE(c->node);
4448 			struct page *page;
4449 
4450 			if (node < 0)
4451 				continue;
4452 			page = ACCESS_ONCE(c->page);
4453 			if (page) {
4454 				if (flags & SO_TOTAL)
4455 					x = page->objects;
4456 				else if (flags & SO_OBJECTS)
4457 					x = page->inuse;
4458 				else
4459 					x = 1;
4460 
4461 				total += x;
4462 				nodes[node] += x;
4463 			}
4464 			page = c->partial;
4465 
4466 			if (page) {
4467 				x = page->pobjects;
4468 				total += x;
4469 				nodes[node] += x;
4470 			}
4471 			per_cpu[node]++;
4472 		}
4473 	}
4474 
4475 	lock_memory_hotplug();
4476 #ifdef CONFIG_SLUB_DEBUG
4477 	if (flags & SO_ALL) {
4478 		for_each_node_state(node, N_NORMAL_MEMORY) {
4479 			struct kmem_cache_node *n = get_node(s, node);
4480 
4481 			if (flags & SO_TOTAL)
4482 				x = atomic_long_read(&n->total_objects);
4483 			else if (flags & SO_OBJECTS)
4484 				x = atomic_long_read(&n->total_objects) -
4485 					count_partial(n, count_free);
4486 
4487 			else
4488 				x = atomic_long_read(&n->nr_slabs);
4489 			total += x;
4490 			nodes[node] += x;
4491 		}
4492 
4493 	} else
4494 #endif
4495 	if (flags & SO_PARTIAL) {
4496 		for_each_node_state(node, N_NORMAL_MEMORY) {
4497 			struct kmem_cache_node *n = get_node(s, node);
4498 
4499 			if (flags & SO_TOTAL)
4500 				x = count_partial(n, count_total);
4501 			else if (flags & SO_OBJECTS)
4502 				x = count_partial(n, count_inuse);
4503 			else
4504 				x = n->nr_partial;
4505 			total += x;
4506 			nodes[node] += x;
4507 		}
4508 	}
4509 	x = sprintf(buf, "%lu", total);
4510 #ifdef CONFIG_NUMA
4511 	for_each_node_state(node, N_NORMAL_MEMORY)
4512 		if (nodes[node])
4513 			x += sprintf(buf + x, " N%d=%lu",
4514 					node, nodes[node]);
4515 #endif
4516 	unlock_memory_hotplug();
4517 	kfree(nodes);
4518 	return x + sprintf(buf + x, "\n");
4519 }
4520 
4521 #ifdef CONFIG_SLUB_DEBUG
4522 static int any_slab_objects(struct kmem_cache *s)
4523 {
4524 	int node;
4525 
4526 	for_each_online_node(node) {
4527 		struct kmem_cache_node *n = get_node(s, node);
4528 
4529 		if (!n)
4530 			continue;
4531 
4532 		if (atomic_long_read(&n->total_objects))
4533 			return 1;
4534 	}
4535 	return 0;
4536 }
4537 #endif
4538 
4539 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4540 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
4541 
4542 struct slab_attribute {
4543 	struct attribute attr;
4544 	ssize_t (*show)(struct kmem_cache *s, char *buf);
4545 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4546 };
4547 
4548 #define SLAB_ATTR_RO(_name) \
4549 	static struct slab_attribute _name##_attr = \
4550 	__ATTR(_name, 0400, _name##_show, NULL)
4551 
4552 #define SLAB_ATTR(_name) \
4553 	static struct slab_attribute _name##_attr =  \
4554 	__ATTR(_name, 0600, _name##_show, _name##_store)
4555 
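/*
 * The attributes below are exposed as files under /sys/kernel/slab/<cache>/,
 * e.g. reading "order" or writing "1" to "validate" for a given cache.
 * SLAB_ATTR_RO() defines read-only attributes, SLAB_ATTR() writable ones.
 */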
4556 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4557 {
4558 	return sprintf(buf, "%d\n", s->size);
4559 }
4560 SLAB_ATTR_RO(slab_size);
4561 
4562 static ssize_t align_show(struct kmem_cache *s, char *buf)
4563 {
4564 	return sprintf(buf, "%d\n", s->align);
4565 }
4566 SLAB_ATTR_RO(align);
4567 
4568 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4569 {
4570 	return sprintf(buf, "%d\n", s->objsize);
4571 }
4572 SLAB_ATTR_RO(object_size);
4573 
4574 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4575 {
4576 	return sprintf(buf, "%d\n", oo_objects(s->oo));
4577 }
4578 SLAB_ATTR_RO(objs_per_slab);
4579 
4580 static ssize_t order_store(struct kmem_cache *s,
4581 				const char *buf, size_t length)
4582 {
4583 	unsigned long order;
4584 	int err;
4585 
4586 	err = strict_strtoul(buf, 10, &order);
4587 	if (err)
4588 		return err;
4589 
4590 	if (order > slub_max_order || order < slub_min_order)
4591 		return -EINVAL;
4592 
4593 	calculate_sizes(s, order);
4594 	return length;
4595 }
4596 
4597 static ssize_t order_show(struct kmem_cache *s, char *buf)
4598 {
4599 	return sprintf(buf, "%d\n", oo_order(s->oo));
4600 }
4601 SLAB_ATTR(order);
4602 
4603 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4604 {
4605 	return sprintf(buf, "%lu\n", s->min_partial);
4606 }
4607 
4608 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4609 				 size_t length)
4610 {
4611 	unsigned long min;
4612 	int err;
4613 
4614 	err = strict_strtoul(buf, 10, &min);
4615 	if (err)
4616 		return err;
4617 
4618 	set_min_partial(s, min);
4619 	return length;
4620 }
4621 SLAB_ATTR(min_partial);
4622 
4623 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4624 {
4625 	return sprintf(buf, "%u\n", s->cpu_partial);
4626 }
4627 
4628 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4629 				 size_t length)
4630 {
4631 	unsigned long objects;
4632 	int err;
4633 
4634 	err = strict_strtoul(buf, 10, &objects);
4635 	if (err)
4636 		return err;
4637 
4638 	s->cpu_partial = objects;
4639 	flush_all(s);
4640 	return length;
4641 }
4642 SLAB_ATTR(cpu_partial);
4643 
4644 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4645 {
4646 	if (!s->ctor)
4647 		return 0;
4648 	return sprintf(buf, "%pS\n", s->ctor);
4649 }
4650 SLAB_ATTR_RO(ctor);
4651 
4652 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4653 {
4654 	return sprintf(buf, "%d\n", s->refcount - 1);
4655 }
4656 SLAB_ATTR_RO(aliases);
4657 
4658 static ssize_t partial_show(struct kmem_cache *s, char *buf)
4659 {
4660 	return show_slab_objects(s, buf, SO_PARTIAL);
4661 }
4662 SLAB_ATTR_RO(partial);
4663 
4664 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4665 {
4666 	return show_slab_objects(s, buf, SO_CPU);
4667 }
4668 SLAB_ATTR_RO(cpu_slabs);
4669 
4670 static ssize_t objects_show(struct kmem_cache *s, char *buf)
4671 {
4672 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4673 }
4674 SLAB_ATTR_RO(objects);
4675 
4676 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4677 {
4678 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4679 }
4680 SLAB_ATTR_RO(objects_partial);
4681 
4682 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4683 {
4684 	int objects = 0;
4685 	int pages = 0;
4686 	int cpu;
4687 	int len;
4688 
4689 	for_each_online_cpu(cpu) {
4690 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4691 
4692 		if (page) {
4693 			pages += page->pages;
4694 			objects += page->pobjects;
4695 		}
4696 	}
4697 
4698 	len = sprintf(buf, "%d(%d)", objects, pages);
4699 
4700 #ifdef CONFIG_SMP
4701 	for_each_online_cpu(cpu) {
4702 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
4703 
4704 		if (page && len < PAGE_SIZE - 20)
4705 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4706 				page->pobjects, page->pages);
4707 	}
4708 #endif
4709 	return len + sprintf(buf + len, "\n");
4710 }
4711 SLAB_ATTR_RO(slabs_cpu_partial);
4712 
4713 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4714 {
4715 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4716 }
4717 
4718 static ssize_t reclaim_account_store(struct kmem_cache *s,
4719 				const char *buf, size_t length)
4720 {
4721 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4722 	if (buf[0] == '1')
4723 		s->flags |= SLAB_RECLAIM_ACCOUNT;
4724 	return length;
4725 }
4726 SLAB_ATTR(reclaim_account);
4727 
4728 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4729 {
4730 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4731 }
4732 SLAB_ATTR_RO(hwcache_align);
4733 
4734 #ifdef CONFIG_ZONE_DMA
4735 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4736 {
4737 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4738 }
4739 SLAB_ATTR_RO(cache_dma);
4740 #endif
4741 
4742 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4743 {
4744 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4745 }
4746 SLAB_ATTR_RO(destroy_by_rcu);
4747 
4748 static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4749 {
4750 	return sprintf(buf, "%d\n", s->reserved);
4751 }
4752 SLAB_ATTR_RO(reserved);
4753 
4754 #ifdef CONFIG_SLUB_DEBUG
4755 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4756 {
4757 	return show_slab_objects(s, buf, SO_ALL);
4758 }
4759 SLAB_ATTR_RO(slabs);
4760 
4761 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4762 {
4763 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4764 }
4765 SLAB_ATTR_RO(total_objects);
4766 
4767 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4768 {
4769 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4770 }
4771 
4772 static ssize_t sanity_checks_store(struct kmem_cache *s,
4773 				const char *buf, size_t length)
4774 {
4775 	s->flags &= ~SLAB_DEBUG_FREE;
4776 	if (buf[0] == '1') {
4777 		s->flags &= ~__CMPXCHG_DOUBLE;
4778 		s->flags |= SLAB_DEBUG_FREE;
4779 	}
4780 	return length;
4781 }
4782 SLAB_ATTR(sanity_checks);
4783 
4784 static ssize_t trace_show(struct kmem_cache *s, char *buf)
4785 {
4786 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4787 }
4788 
4789 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4790 							size_t length)
4791 {
4792 	s->flags &= ~SLAB_TRACE;
4793 	if (buf[0] == '1') {
4794 		s->flags &= ~__CMPXCHG_DOUBLE;
4795 		s->flags |= SLAB_TRACE;
4796 	}
4797 	return length;
4798 }
4799 SLAB_ATTR(trace);
4800 
4801 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4802 {
4803 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4804 }
4805 
4806 static ssize_t red_zone_store(struct kmem_cache *s,
4807 				const char *buf, size_t length)
4808 {
4809 	if (any_slab_objects(s))
4810 		return -EBUSY;
4811 
4812 	s->flags &= ~SLAB_RED_ZONE;
4813 	if (buf[0] == '1') {
4814 		s->flags &= ~__CMPXCHG_DOUBLE;
4815 		s->flags |= SLAB_RED_ZONE;
4816 	}
4817 	calculate_sizes(s, -1);
4818 	return length;
4819 }
4820 SLAB_ATTR(red_zone);
4821 
4822 static ssize_t poison_show(struct kmem_cache *s, char *buf)
4823 {
4824 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4825 }
4826 
4827 static ssize_t poison_store(struct kmem_cache *s,
4828 				const char *buf, size_t length)
4829 {
4830 	if (any_slab_objects(s))
4831 		return -EBUSY;
4832 
4833 	s->flags &= ~SLAB_POISON;
4834 	if (buf[0] == '1') {
4835 		s->flags &= ~__CMPXCHG_DOUBLE;
4836 		s->flags |= SLAB_POISON;
4837 	}
4838 	calculate_sizes(s, -1);
4839 	return length;
4840 }
4841 SLAB_ATTR(poison);
4842 
4843 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4844 {
4845 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4846 }
4847 
4848 static ssize_t store_user_store(struct kmem_cache *s,
4849 				const char *buf, size_t length)
4850 {
4851 	if (any_slab_objects(s))
4852 		return -EBUSY;
4853 
4854 	s->flags &= ~SLAB_STORE_USER;
4855 	if (buf[0] == '1') {
4856 		s->flags &= ~__CMPXCHG_DOUBLE;
4857 		s->flags |= SLAB_STORE_USER;
4858 	}
4859 	calculate_sizes(s, -1);
4860 	return length;
4861 }
4862 SLAB_ATTR(store_user);
4863 
4864 static ssize_t validate_show(struct kmem_cache *s, char *buf)
4865 {
4866 	return 0;
4867 }
4868 
4869 static ssize_t validate_store(struct kmem_cache *s,
4870 			const char *buf, size_t length)
4871 {
4872 	int ret = -EINVAL;
4873 
4874 	if (buf[0] == '1') {
4875 		ret = validate_slab_cache(s);
4876 		if (ret >= 0)
4877 			ret = length;
4878 	}
4879 	return ret;
4880 }
4881 SLAB_ATTR(validate);
4882 
4883 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4884 {
4885 	if (!(s->flags & SLAB_STORE_USER))
4886 		return -ENOSYS;
4887 	return list_locations(s, buf, TRACK_ALLOC);
4888 }
4889 SLAB_ATTR_RO(alloc_calls);
4890 
4891 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4892 {
4893 	if (!(s->flags & SLAB_STORE_USER))
4894 		return -ENOSYS;
4895 	return list_locations(s, buf, TRACK_FREE);
4896 }
4897 SLAB_ATTR_RO(free_calls);
4898 #endif /* CONFIG_SLUB_DEBUG */
4899 
4900 #ifdef CONFIG_FAILSLAB
4901 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4902 {
4903 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4904 }
4905 
4906 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4907 							size_t length)
4908 {
4909 	s->flags &= ~SLAB_FAILSLAB;
4910 	if (buf[0] == '1')
4911 		s->flags |= SLAB_FAILSLAB;
4912 	return length;
4913 }
4914 SLAB_ATTR(failslab);
4915 #endif
4916 
4917 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4918 {
4919 	return 0;
4920 }
4921 
4922 static ssize_t shrink_store(struct kmem_cache *s,
4923 			const char *buf, size_t length)
4924 {
4925 	if (buf[0] == '1') {
4926 		int rc = kmem_cache_shrink(s);
4927 
4928 		if (rc)
4929 			return rc;
4930 	} else
4931 		return -EINVAL;
4932 	return length;
4933 }
4934 SLAB_ATTR(shrink);
4935 
4936 #ifdef CONFIG_NUMA
4937 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4938 {
4939 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4940 }
4941 
4942 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4943 				const char *buf, size_t length)
4944 {
4945 	unsigned long ratio;
4946 	int err;
4947 
4948 	err = strict_strtoul(buf, 10, &ratio);
4949 	if (err)
4950 		return err;
4951 
4952 	if (ratio <= 100)
4953 		s->remote_node_defrag_ratio = ratio * 10;
4954 
4955 	return length;
4956 }
4957 SLAB_ATTR(remote_node_defrag_ratio);
4958 #endif
4959 
4960 #ifdef CONFIG_SLUB_STATS
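/*
 * Sum up one per cpu statistics counter. The output is the total followed
 * by the non-zero per cpu contributions, e.g. "1234 C0=1000 C3=234".
 */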
4961 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4962 {
4963 	unsigned long sum  = 0;
4964 	int cpu;
4965 	int len;
4966 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4967 
4968 	if (!data)
4969 		return -ENOMEM;
4970 
4971 	for_each_online_cpu(cpu) {
4972 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
4973 
4974 		data[cpu] = x;
4975 		sum += x;
4976 	}
4977 
4978 	len = sprintf(buf, "%lu", sum);
4979 
4980 #ifdef CONFIG_SMP
4981 	for_each_online_cpu(cpu) {
4982 		if (data[cpu] && len < PAGE_SIZE - 20)
4983 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4984 	}
4985 #endif
4986 	kfree(data);
4987 	return len + sprintf(buf + len, "\n");
4988 }
4989 
4990 static void clear_stat(struct kmem_cache *s, enum stat_item si)
4991 {
4992 	int cpu;
4993 
4994 	for_each_online_cpu(cpu)
4995 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
4996 }
4997 
4998 #define STAT_ATTR(si, text) 					\
4999 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5000 {								\
5001 	return show_stat(s, buf, si);				\
5002 }								\
5003 static ssize_t text##_store(struct kmem_cache *s,		\
5004 				const char *buf, size_t length)	\
5005 {								\
5006 	if (buf[0] != '0')					\
5007 		return -EINVAL;					\
5008 	clear_stat(s, si);					\
5009 	return length;						\
5010 }								\
5011 SLAB_ATTR(text);						\
5012 
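/*
 * For example, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) expands to
 * alloc_fastpath_show()/alloc_fastpath_store() plus the alloc_fastpath
 * attribute; reading it reports the counter, writing "0" clears it.
 */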
5013 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5014 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5015 STAT_ATTR(FREE_FASTPATH, free_fastpath);
5016 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5017 STAT_ATTR(FREE_FROZEN, free_frozen);
5018 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5019 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5020 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5021 STAT_ATTR(ALLOC_SLAB, alloc_slab);
5022 STAT_ATTR(ALLOC_REFILL, alloc_refill);
5023 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5024 STAT_ATTR(FREE_SLAB, free_slab);
5025 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5026 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5027 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5028 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5029 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5030 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5031 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5032 STAT_ATTR(ORDER_FALLBACK, order_fallback);
5033 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5034 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5035 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5036 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5037 #endif
5038 
5039 static struct attribute *slab_attrs[] = {
5040 	&slab_size_attr.attr,
5041 	&object_size_attr.attr,
5042 	&objs_per_slab_attr.attr,
5043 	&order_attr.attr,
5044 	&min_partial_attr.attr,
5045 	&cpu_partial_attr.attr,
5046 	&objects_attr.attr,
5047 	&objects_partial_attr.attr,
5048 	&partial_attr.attr,
5049 	&cpu_slabs_attr.attr,
5050 	&ctor_attr.attr,
5051 	&aliases_attr.attr,
5052 	&align_attr.attr,
5053 	&hwcache_align_attr.attr,
5054 	&reclaim_account_attr.attr,
5055 	&destroy_by_rcu_attr.attr,
5056 	&shrink_attr.attr,
5057 	&reserved_attr.attr,
5058 	&slabs_cpu_partial_attr.attr,
5059 #ifdef CONFIG_SLUB_DEBUG
5060 	&total_objects_attr.attr,
5061 	&slabs_attr.attr,
5062 	&sanity_checks_attr.attr,
5063 	&trace_attr.attr,
5064 	&red_zone_attr.attr,
5065 	&poison_attr.attr,
5066 	&store_user_attr.attr,
5067 	&validate_attr.attr,
5068 	&alloc_calls_attr.attr,
5069 	&free_calls_attr.attr,
5070 #endif
5071 #ifdef CONFIG_ZONE_DMA
5072 	&cache_dma_attr.attr,
5073 #endif
5074 #ifdef CONFIG_NUMA
5075 	&remote_node_defrag_ratio_attr.attr,
5076 #endif
5077 #ifdef CONFIG_SLUB_STATS
5078 	&alloc_fastpath_attr.attr,
5079 	&alloc_slowpath_attr.attr,
5080 	&free_fastpath_attr.attr,
5081 	&free_slowpath_attr.attr,
5082 	&free_frozen_attr.attr,
5083 	&free_add_partial_attr.attr,
5084 	&free_remove_partial_attr.attr,
5085 	&alloc_from_partial_attr.attr,
5086 	&alloc_slab_attr.attr,
5087 	&alloc_refill_attr.attr,
5088 	&alloc_node_mismatch_attr.attr,
5089 	&free_slab_attr.attr,
5090 	&cpuslab_flush_attr.attr,
5091 	&deactivate_full_attr.attr,
5092 	&deactivate_empty_attr.attr,
5093 	&deactivate_to_head_attr.attr,
5094 	&deactivate_to_tail_attr.attr,
5095 	&deactivate_remote_frees_attr.attr,
5096 	&deactivate_bypass_attr.attr,
5097 	&order_fallback_attr.attr,
5098 	&cmpxchg_double_fail_attr.attr,
5099 	&cmpxchg_double_cpu_fail_attr.attr,
5100 	&cpu_partial_alloc_attr.attr,
5101 	&cpu_partial_free_attr.attr,
5102 #endif
5103 #ifdef CONFIG_FAILSLAB
5104 	&failslab_attr.attr,
5105 #endif
5106 
5107 	NULL
5108 };
5109 
5110 static struct attribute_group slab_attr_group = {
5111 	.attrs = slab_attrs,
5112 };
5113 
5114 static ssize_t slab_attr_show(struct kobject *kobj,
5115 				struct attribute *attr,
5116 				char *buf)
5117 {
5118 	struct slab_attribute *attribute;
5119 	struct kmem_cache *s;
5120 	int err;
5121 
5122 	attribute = to_slab_attr(attr);
5123 	s = to_slab(kobj);
5124 
5125 	if (!attribute->show)
5126 		return -EIO;
5127 
5128 	err = attribute->show(s, buf);
5129 
5130 	return err;
5131 }
5132 
5133 static ssize_t slab_attr_store(struct kobject *kobj,
5134 				struct attribute *attr,
5135 				const char *buf, size_t len)
5136 {
5137 	struct slab_attribute *attribute;
5138 	struct kmem_cache *s;
5139 	int err;
5140 
5141 	attribute = to_slab_attr(attr);
5142 	s = to_slab(kobj);
5143 
5144 	if (!attribute->store)
5145 		return -EIO;
5146 
5147 	err = attribute->store(s, buf, len);
5148 
5149 	return err;
5150 }
5151 
5152 static void kmem_cache_release(struct kobject *kobj)
5153 {
5154 	struct kmem_cache *s = to_slab(kobj);
5155 
5156 	kfree(s->name);
5157 	kfree(s);
5158 }
5159 
5160 static const struct sysfs_ops slab_sysfs_ops = {
5161 	.show = slab_attr_show,
5162 	.store = slab_attr_store,
5163 };
5164 
5165 static struct kobj_type slab_ktype = {
5166 	.sysfs_ops = &slab_sysfs_ops,
5167 	.release = kmem_cache_release
5168 };
5169 
5170 static int uevent_filter(struct kset *kset, struct kobject *kobj)
5171 {
5172 	struct kobj_type *ktype = get_ktype(kobj);
5173 
5174 	if (ktype == &slab_ktype)
5175 		return 1;
5176 	return 0;
5177 }
5178 
5179 static const struct kset_uevent_ops slab_uevent_ops = {
5180 	.filter = uevent_filter,
5181 };
5182 
5183 static struct kset *slab_kset;
5184 
5185 #define ID_STR_LENGTH 64
5186 
5187 /* Create a unique string id for a slab cache:
5188  *
5189  * Format	:[flags-]size
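 * e.g. a cache of size 128 without SLAB_NOTRACK and no other special
 * flags gets the id ":t-0000128".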
5190  */
5191 static char *create_unique_id(struct kmem_cache *s)
5192 {
5193 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5194 	char *p = name;
5195 
5196 	BUG_ON(!name);
5197 
5198 	*p++ = ':';
5199 	/*
5200 	 * First flags affecting slabcache operations. We will only
5201 	 * get here for aliasable slabs so we do not need to support
5202 	 * too many flags. The flags here must cover all flags that
5203 	 * are matched during merging to guarantee that the id is
5204 	 * unique.
5205 	 */
5206 	if (s->flags & SLAB_CACHE_DMA)
5207 		*p++ = 'd';
5208 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5209 		*p++ = 'a';
5210 	if (s->flags & SLAB_DEBUG_FREE)
5211 		*p++ = 'F';
5212 	if (!(s->flags & SLAB_NOTRACK))
5213 		*p++ = 't';
5214 	if (p != name + 1)
5215 		*p++ = '-';
5216 	p += sprintf(p, "%07d", s->size);
5217 	BUG_ON(p > name + ID_STR_LENGTH - 1);
5218 	return name;
5219 }
5220 
5221 static int sysfs_slab_add(struct kmem_cache *s)
5222 {
5223 	int err;
5224 	const char *name;
5225 	int unmergeable;
5226 
5227 	if (slab_state < SYSFS)
5228 		/* Defer until later */
5229 		return 0;
5230 
5231 	unmergeable = slab_unmergeable(s);
5232 	if (unmergeable) {
5233 		/*
5234 		 * Slabcache can never be merged so we can use the name proper.
5235 		 * This is typically the case for debug situations. In that
5236 		 * case we can catch duplicate names easily.
5237 		 */
5238 		sysfs_remove_link(&slab_kset->kobj, s->name);
5239 		name = s->name;
5240 	} else {
5241 		/*
5242 		 * Create a unique name for the slab as a target
5243 		 * for the symlinks.
5244 		 */
5245 		name = create_unique_id(s);
5246 	}
5247 
5248 	s->kobj.kset = slab_kset;
5249 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
5250 	if (err) {
5251 		kobject_put(&s->kobj);
5252 		return err;
5253 	}
5254 
5255 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5256 	if (err) {
5257 		kobject_del(&s->kobj);
5258 		kobject_put(&s->kobj);
5259 		return err;
5260 	}
5261 	kobject_uevent(&s->kobj, KOBJ_ADD);
5262 	if (!unmergeable) {
5263 		/* Setup first alias */
5264 		sysfs_slab_alias(s, s->name);
5265 		kfree(name);
5266 	}
5267 	return 0;
5268 }
5269 
5270 static void sysfs_slab_remove(struct kmem_cache *s)
5271 {
5272 	if (slab_state < SYSFS)
5273 		/*
5274 		 * Sysfs has not been setup yet so no need to remove the
5275 		 * cache from sysfs.
5276 		 */
5277 		return;
5278 
5279 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
5280 	kobject_del(&s->kobj);
5281 	kobject_put(&s->kobj);
5282 }
5283 
5284 /*
5285  * Need to buffer aliases during bootup until sysfs becomes
5286  * available lest we lose that information.
5287  */
5288 struct saved_alias {
5289 	struct kmem_cache *s;
5290 	const char *name;
5291 	struct saved_alias *next;
5292 };
5293 
5294 static struct saved_alias *alias_list;
5295 
5296 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5297 {
5298 	struct saved_alias *al;
5299 
5300 	if (slab_state == SYSFS) {
5301 		/*
5302 		 * If we have a leftover link then remove it.
5303 		 */
5304 		sysfs_remove_link(&slab_kset->kobj, name);
5305 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5306 	}
5307 
5308 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5309 	if (!al)
5310 		return -ENOMEM;
5311 
5312 	al->s = s;
5313 	al->name = name;
5314 	al->next = alias_list;
5315 	alias_list = al;
5316 	return 0;
5317 }
5318 
5319 static int __init slab_sysfs_init(void)
5320 {
5321 	struct kmem_cache *s;
5322 	int err;
5323 
5324 	down_write(&slub_lock);
5325 
5326 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5327 	if (!slab_kset) {
5328 		up_write(&slub_lock);
5329 		printk(KERN_ERR "Cannot register slab subsystem.\n");
5330 		return -ENOSYS;
5331 	}
5332 
5333 	slab_state = SYSFS;
5334 
5335 	list_for_each_entry(s, &slab_caches, list) {
5336 		err = sysfs_slab_add(s);
5337 		if (err)
5338 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
5339 						" to sysfs\n", s->name);
5340 	}
5341 
5342 	while (alias_list) {
5343 		struct saved_alias *al = alias_list;
5344 
5345 		alias_list = alias_list->next;
5346 		err = sysfs_slab_alias(al->s, al->name);
5347 		if (err)
5348 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
5349 					" %s to sysfs\n", s->name);
5350 		kfree(al);
5351 	}
5352 
5353 	up_write(&slub_lock);
5354 	resiliency_test();
5355 	return 0;
5356 }
5357 
5358 __initcall(slab_sysfs_init);
5359 #endif /* CONFIG_SYSFS */
5360 
5361 /*
5362  * The /proc/slabinfo ABI
5363  */
5364 #ifdef CONFIG_SLABINFO
5365 static void print_slabinfo_header(struct seq_file *m)
5366 {
5367 	seq_puts(m, "slabinfo - version: 2.1\n");
5368 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
5369 		 "<objperslab> <pagesperslab>");
5370 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
5371 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
5372 	seq_putc(m, '\n');
5373 }
5374 
5375 static void *s_start(struct seq_file *m, loff_t *pos)
5376 {
5377 	loff_t n = *pos;
5378 
5379 	down_read(&slub_lock);
5380 	if (!n)
5381 		print_slabinfo_header(m);
5382 
5383 	return seq_list_start(&slab_caches, *pos);
5384 }
5385 
5386 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
5387 {
5388 	return seq_list_next(p, &slab_caches, pos);
5389 }
5390 
5391 static void s_stop(struct seq_file *m, void *p)
5392 {
5393 	up_read(&slub_lock);
5394 }
5395 
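/*
 * Emit one /proc/slabinfo line for a cache. SLUB has no per cache
 * tunables and no shared array caches, so those columns are reported
 * as zero to stay compatible with the SLAB format.
 */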
5396 static int s_show(struct seq_file *m, void *p)
5397 {
5398 	unsigned long nr_partials = 0;
5399 	unsigned long nr_slabs = 0;
5400 	unsigned long nr_inuse = 0;
5401 	unsigned long nr_objs = 0;
5402 	unsigned long nr_free = 0;
5403 	struct kmem_cache *s;
5404 	int node;
5405 
5406 	s = list_entry(p, struct kmem_cache, list);
5407 
5408 	for_each_online_node(node) {
5409 		struct kmem_cache_node *n = get_node(s, node);
5410 
5411 		if (!n)
5412 			continue;
5413 
5414 		nr_partials += n->nr_partial;
5415 		nr_slabs += atomic_long_read(&n->nr_slabs);
5416 		nr_objs += atomic_long_read(&n->total_objects);
5417 		nr_free += count_partial(n, count_free);
5418 	}
5419 
5420 	nr_inuse = nr_objs - nr_free;
5421 
5422 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
5423 		   nr_objs, s->size, oo_objects(s->oo),
5424 		   (1 << oo_order(s->oo)));
5425 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
5426 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
5427 		   0UL);
5428 	seq_putc(m, '\n');
5429 	return 0;
5430 }
5431 
5432 static const struct seq_operations slabinfo_op = {
5433 	.start = s_start,
5434 	.next = s_next,
5435 	.stop = s_stop,
5436 	.show = s_show,
5437 };
5438 
5439 static int slabinfo_open(struct inode *inode, struct file *file)
5440 {
5441 	return seq_open(file, &slabinfo_op);
5442 }
5443 
5444 static const struct file_operations proc_slabinfo_operations = {
5445 	.open		= slabinfo_open,
5446 	.read		= seq_read,
5447 	.llseek		= seq_lseek,
5448 	.release	= seq_release,
5449 };
5450 
5451 static int __init slab_proc_init(void)
5452 {
5453 	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
5454 	return 0;
5455 }
5456 module_init(slab_proc_init);
5457 #endif /* CONFIG_SLABINFO */
5458