xref: /linux/mm/slub.c (revision 5d4a2e29fba5b2bef95b96a46b338ec4d76fa4fd)
1 /*
2  * SLUB: A slab allocator that limits cache line use instead of queuing
3  * objects in per cpu and per node lists.
4  *
5  * The allocator synchronizes using per slab locks and only
6  * uses a centralized lock to manage a pool of partial slabs.
7  *
8  * (C) 2007 SGI, Christoph Lameter
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/swap.h> /* struct reclaim_state */
13 #include <linux/module.h>
14 #include <linux/bit_spinlock.h>
15 #include <linux/interrupt.h>
16 #include <linux/bitops.h>
17 #include <linux/slab.h>
18 #include <linux/proc_fs.h>
19 #include <linux/seq_file.h>
20 #include <linux/kmemtrace.h>
21 #include <linux/kmemcheck.h>
22 #include <linux/cpu.h>
23 #include <linux/cpuset.h>
24 #include <linux/mempolicy.h>
25 #include <linux/ctype.h>
26 #include <linux/debugobjects.h>
27 #include <linux/kallsyms.h>
28 #include <linux/memory.h>
29 #include <linux/math64.h>
30 #include <linux/fault-inject.h>
31 
32 /*
33  * Lock order:
34  *   1. slab_lock(page)
35  *   2. slab->list_lock
36  *
37  *   The slab_lock protects operations on the objects of a particular
38  *   slab and its metadata in the page struct. If the slab lock
39  *   has been taken then no allocations nor frees can be performed
40  *   on the objects in the slab nor can the slab be added or removed
41  *   from the partial or full lists since this would mean modifying
42  *   the page struct of the slab.
43  *
44  *   The list_lock protects the partial and full list on each node and
45  *   the partial slab counter. If taken then no new slabs may be added or
46  *   removed from the lists nor can the number of partial slabs be modified.
47  *   (Note that the total number of slabs is an atomic value that may be
48  *   modified without taking the list lock).
49  *
50  *   The list_lock is a centralized lock and thus we avoid taking it as
51  *   much as possible. As long as SLUB does not have to handle partial
52  *   slabs, operations can continue without any centralized lock. F.e.
53  *   allocating a long series of objects that fill up slabs does not require
54  *   the list lock.
55  *
56  *   The lock order is sometimes inverted when we are trying to get a slab
57  *   off a list. We take the list_lock and then look for a page on the list
58  *   to use. While we do that objects in the slabs may be freed. We can
59  *   only operate on the slab if we have also taken the slab_lock. So we use
60  *   a slab_trylock() on the slab. If trylock was successful then no frees
61  *   can occur anymore and we can use the slab for allocations etc. If the
62  *   slab_trylock() does not succeed then frees are in progress in the slab and
63  *   we must stay away from it for a while since we may cause a bouncing
64  *   cacheline if we try to acquire the lock. So go onto the next slab.
65  *   If all pages are busy then we may allocate a new slab instead of reusing
66  *   a partial slab. A new slab has no one operating on it and thus there is
67  *   no danger of cacheline contention.
68  *
69  *   Interrupts are disabled during allocation and deallocation in order to
70  *   make the slab allocator safe to use in the context of an irq. In addition
71  *   interrupts are disabled to ensure that the processor does not change
72  *   while handling per_cpu slabs, due to kernel preemption.
73  *
74  * SLUB assigns one slab for allocation to each processor.
75  * Allocations only occur from these slabs called cpu slabs.
76  *
77  * Slabs with free elements are kept on a partial list and during regular
78  * operations no list for full slabs is used. If an object in a full slab is
79  * freed then the slab will show up again on the partial lists.
80  * We track full slabs for debugging purposes though because otherwise we
81  * cannot scan all objects.
82  *
83  * Slabs are freed when they become empty. Teardown and setup is
84  * minimal so we rely on the page allocators per cpu caches for
85  * fast frees and allocs.
86  *
87  * Overloading of page flags that are otherwise used for LRU management.
88  *
89  * PageActive 		The slab is frozen and exempt from list processing.
90  * 			This means that the slab is dedicated to a purpose
91  * 			such as satisfying allocations for a specific
92  * 			processor. Objects may be freed in the slab while
93  * 			it is frozen but slab_free will then skip the usual
94  * 			list operations. It is up to the processor holding
95  * 			the slab to integrate the slab into the slab lists
96  * 			when the slab is no longer needed.
97  *
98  * 			One use of this flag is to mark slabs that are
99  * 			used for allocations. Then such a slab becomes a cpu
100  * 			slab. The cpu slab may be equipped with an additional
101  * 			freelist that allows lockless access to
102  * 			free objects in addition to the regular freelist
103  * 			that requires the slab lock.
104  *
105  * PageError		Slab requires special handling due to debug
106  * 			options set. This moves	slab handling out of
107  * 			the fast path and disables lockless freelists.
108  */
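
/*
 * Editorial note: the code below accesses these bits through the
 * PageSlubFrozen() and PageSlubDebug() helpers, which at the time of this
 * writing alias PG_active and PG_error in include/linux/page-flags.h.
 */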
109 
110 #ifdef CONFIG_SLUB_DEBUG
111 #define SLABDEBUG 1
112 #else
113 #define SLABDEBUG 0
114 #endif
115 
116 /*
117  * Issues still to be resolved:
118  *
119  * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
120  *
121  * - Variable sizing of the per node arrays
122  */
123 
124 /* Enable to test recovery from slab corruption on boot */
125 #undef SLUB_RESILIENCY_TEST
126 
127 /*
128  * Minimum number of partial slabs. These will be left on the partial
129  * lists even if they are empty. kmem_cache_shrink may reclaim them.
130  */
131 #define MIN_PARTIAL 5
132 
133 /*
134  * Maximum number of desirable partial slabs.
135  * The existence of more partial slabs makes kmem_cache_shrink
136  * sort the partial list by the number of objects in them.
137  */
138 #define MAX_PARTIAL 10
139 
140 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
141 				SLAB_POISON | SLAB_STORE_USER)
142 
143 /*
144  * Debugging flags that require metadata to be stored in the slab.  These get
145  * disabled when slub_debug=O is used and a cache's min order increases with
146  * metadata.
147  */
148 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
149 
150 /*
151  * Set of flags that will prevent slab merging
152  */
153 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
154 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
155 		SLAB_FAILSLAB)
156 
157 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
158 		SLAB_CACHE_DMA | SLAB_NOTRACK)
159 
160 #define OO_SHIFT	16
161 #define OO_MASK		((1 << OO_SHIFT) - 1)
162 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
163 
164 /* Internal SLUB flags */
165 #define __OBJECT_POISON		0x80000000 /* Poison object */
166 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
167 
168 static int kmem_size = sizeof(struct kmem_cache);
169 
170 #ifdef CONFIG_SMP
171 static struct notifier_block slab_notifier;
172 #endif
173 
174 static enum {
175 	DOWN,		/* No slab functionality available */
176 	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
177 	UP,		/* Everything works but does not show up in sysfs */
178 	SYSFS		/* Sysfs up */
179 } slab_state = DOWN;
180 
181 /* A list of all slab caches on the system */
182 static DECLARE_RWSEM(slub_lock);
183 static LIST_HEAD(slab_caches);
184 
185 /*
186  * Tracking user of a slab.
187  */
188 struct track {
189 	unsigned long addr;	/* Called from address */
190 	int cpu;		/* Was running on cpu */
191 	int pid;		/* Pid context */
192 	unsigned long when;	/* When did the operation occur */
193 };
194 
195 enum track_item { TRACK_ALLOC, TRACK_FREE };
196 
197 #ifdef CONFIG_SLUB_DEBUG
198 static int sysfs_slab_add(struct kmem_cache *);
199 static int sysfs_slab_alias(struct kmem_cache *, const char *);
200 static void sysfs_slab_remove(struct kmem_cache *);
201 
202 #else
203 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
204 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
205 							{ return 0; }
206 static inline void sysfs_slab_remove(struct kmem_cache *s)
207 {
208 	kfree(s);
209 }
210 
211 #endif
212 
213 static inline void stat(struct kmem_cache *s, enum stat_item si)
214 {
215 #ifdef CONFIG_SLUB_STATS
216 	__this_cpu_inc(s->cpu_slab->stat[si]);
217 #endif
218 }
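
/*
 * Editorial note: with CONFIG_SLUB_STATS enabled these per cpu event
 * counters are exposed as files below /sys/kernel/slab/<cache>/, for
 * example alloc_fastpath, alloc_slowpath and free_fastpath. The sysfs
 * glue lives further down in this file.
 */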
219 
220 /********************************************************************
221  * 			Core slab cache functions
222  *******************************************************************/
223 
224 int slab_is_available(void)
225 {
226 	return slab_state >= UP;
227 }
228 
229 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
230 {
231 #ifdef CONFIG_NUMA
232 	return s->node[node];
233 #else
234 	return &s->local_node;
235 #endif
236 }
237 
238 /* Verify that a pointer has an address that is valid within a slab page */
239 static inline int check_valid_pointer(struct kmem_cache *s,
240 				struct page *page, const void *object)
241 {
242 	void *base;
243 
244 	if (!object)
245 		return 1;
246 
247 	base = page_address(page);
248 	if (object < base || object >= base + page->objects * s->size ||
249 		(object - base) % s->size) {
250 		return 0;
251 	}
252 
253 	return 1;
254 }
255 
256 static inline void *get_freepointer(struct kmem_cache *s, void *object)
257 {
258 	return *(void **)(object + s->offset);
259 }
260 
261 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
262 {
263 	*(void **)(object + s->offset) = fp;
264 }
265 
266 /* Loop over all objects in a slab */
267 #define for_each_object(__p, __s, __addr, __objects) \
268 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
269 			__p += (__s)->size)
270 
271 /* Scan freelist */
272 #define for_each_free_object(__p, __s, __free) \
273 	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
274 
275 /* Determine object index from a given position */
276 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
277 {
278 	return (p - addr) / s->size;
279 }
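
/*
 * Illustrative sketch (editorial, not part of the original file): counting
 * the free objects of a slab by walking the freelist that is threaded
 * through the objects themselves via get_freepointer(). The caller is
 * assumed to hold the slab lock so that the chain cannot change underneath
 * us. The function name is hypothetical and the code is compiled out.
 */
#if 0
static int count_free_objects_example(struct kmem_cache *s, struct page *page)
{
	void *p;
	int nr_free = 0;

	/*
	 * page->freelist points at the first free object; each free object
	 * stores the address of the next one at offset s->offset.
	 */
	for_each_free_object(p, s, page->freelist)
		nr_free++;

	return nr_free;
}
#endif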
280 
281 static inline struct kmem_cache_order_objects oo_make(int order,
282 						unsigned long size)
283 {
284 	struct kmem_cache_order_objects x = {
285 		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
286 	};
287 
288 	return x;
289 }
290 
291 static inline int oo_order(struct kmem_cache_order_objects x)
292 {
293 	return x.x >> OO_SHIFT;
294 }
295 
296 static inline int oo_objects(struct kmem_cache_order_objects x)
297 {
298 	return x.x & OO_MASK;
299 }
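
/*
 * Worked example (editorial sketch, assuming 4KiB pages): for a
 * hypothetical cache using 256 byte objects in order-2 slabs,
 * oo_make(2, 256) packs both values into a single word:
 *
 *	x = (2 << OO_SHIFT) + (PAGE_SIZE << 2) / 256
 *	  = 0x20000 + 16384 / 256 = 0x20000 + 64
 *
 * oo_order() then recovers 2 and oo_objects() recovers 64.
 */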
300 
301 #ifdef CONFIG_SLUB_DEBUG
302 /*
303  * Debug settings:
304  */
305 #ifdef CONFIG_SLUB_DEBUG_ON
306 static int slub_debug = DEBUG_DEFAULT_FLAGS;
307 #else
308 static int slub_debug;
309 #endif
310 
311 static char *slub_debug_slabs;
312 static int disable_higher_order_debug;
313 
314 /*
315  * Object debugging
316  */
317 static void print_section(char *text, u8 *addr, unsigned int length)
318 {
319 	int i, offset;
320 	int newline = 1;
321 	char ascii[17];
322 
323 	ascii[16] = 0;
324 
325 	for (i = 0; i < length; i++) {
326 		if (newline) {
327 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
328 			newline = 0;
329 		}
330 		printk(KERN_CONT " %02x", addr[i]);
331 		offset = i % 16;
332 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
333 		if (offset == 15) {
334 			printk(KERN_CONT " %s\n", ascii);
335 			newline = 1;
336 		}
337 	}
338 	if (!newline) {
339 		i %= 16;
340 		while (i < 16) {
341 			printk(KERN_CONT "   ");
342 			ascii[i] = ' ';
343 			i++;
344 		}
345 		printk(KERN_CONT " %s\n", ascii);
346 	}
347 }
348 
349 static struct track *get_track(struct kmem_cache *s, void *object,
350 	enum track_item alloc)
351 {
352 	struct track *p;
353 
354 	if (s->offset)
355 		p = object + s->offset + sizeof(void *);
356 	else
357 		p = object + s->inuse;
358 
359 	return p + alloc;
360 }
361 
362 static void set_track(struct kmem_cache *s, void *object,
363 			enum track_item alloc, unsigned long addr)
364 {
365 	struct track *p = get_track(s, object, alloc);
366 
367 	if (addr) {
368 		p->addr = addr;
369 		p->cpu = smp_processor_id();
370 		p->pid = current->pid;
371 		p->when = jiffies;
372 	} else
373 		memset(p, 0, sizeof(struct track));
374 }
375 
376 static void init_tracking(struct kmem_cache *s, void *object)
377 {
378 	if (!(s->flags & SLAB_STORE_USER))
379 		return;
380 
381 	set_track(s, object, TRACK_FREE, 0UL);
382 	set_track(s, object, TRACK_ALLOC, 0UL);
383 }
384 
385 static void print_track(const char *s, struct track *t)
386 {
387 	if (!t->addr)
388 		return;
389 
390 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
391 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
392 }
393 
394 static void print_tracking(struct kmem_cache *s, void *object)
395 {
396 	if (!(s->flags & SLAB_STORE_USER))
397 		return;
398 
399 	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
400 	print_track("Freed", get_track(s, object, TRACK_FREE));
401 }
402 
403 static void print_page_info(struct page *page)
404 {
405 	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
406 		page, page->objects, page->inuse, page->freelist, page->flags);
407 
408 }
409 
410 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
411 {
412 	va_list args;
413 	char buf[100];
414 
415 	va_start(args, fmt);
416 	vsnprintf(buf, sizeof(buf), fmt, args);
417 	va_end(args);
418 	printk(KERN_ERR "========================================"
419 			"=====================================\n");
420 	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
421 	printk(KERN_ERR "----------------------------------------"
422 			"-------------------------------------\n\n");
423 }
424 
425 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
426 {
427 	va_list args;
428 	char buf[100];
429 
430 	va_start(args, fmt);
431 	vsnprintf(buf, sizeof(buf), fmt, args);
432 	va_end(args);
433 	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
434 }
435 
436 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
437 {
438 	unsigned int off;	/* Offset of last byte */
439 	u8 *addr = page_address(page);
440 
441 	print_tracking(s, p);
442 
443 	print_page_info(page);
444 
445 	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
446 			p, p - addr, get_freepointer(s, p));
447 
448 	if (p > addr + 16)
449 		print_section("Bytes b4", p - 16, 16);
450 
451 	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
452 
453 	if (s->flags & SLAB_RED_ZONE)
454 		print_section("Redzone", p + s->objsize,
455 			s->inuse - s->objsize);
456 
457 	if (s->offset)
458 		off = s->offset + sizeof(void *);
459 	else
460 		off = s->inuse;
461 
462 	if (s->flags & SLAB_STORE_USER)
463 		off += 2 * sizeof(struct track);
464 
465 	if (off != s->size)
466 		/* Beginning of the filler is the free pointer */
467 		print_section("Padding", p + off, s->size - off);
468 
469 	dump_stack();
470 }
471 
472 static void object_err(struct kmem_cache *s, struct page *page,
473 			u8 *object, char *reason)
474 {
475 	slab_bug(s, "%s", reason);
476 	print_trailer(s, page, object);
477 }
478 
479 static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
480 {
481 	va_list args;
482 	char buf[100];
483 
484 	va_start(args, fmt);
485 	vsnprintf(buf, sizeof(buf), fmt, args);
486 	va_end(args);
487 	slab_bug(s, "%s", buf);
488 	print_page_info(page);
489 	dump_stack();
490 }
491 
492 static void init_object(struct kmem_cache *s, void *object, int active)
493 {
494 	u8 *p = object;
495 
496 	if (s->flags & __OBJECT_POISON) {
497 		memset(p, POISON_FREE, s->objsize - 1);
498 		p[s->objsize - 1] = POISON_END;
499 	}
500 
501 	if (s->flags & SLAB_RED_ZONE)
502 		memset(p + s->objsize,
503 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
504 			s->inuse - s->objsize);
505 }
506 
507 static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
508 {
509 	while (bytes) {
510 		if (*start != (u8)value)
511 			return start;
512 		start++;
513 		bytes--;
514 	}
515 	return NULL;
516 }
517 
518 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
519 						void *from, void *to)
520 {
521 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
522 	memset(from, data, to - from);
523 }
524 
525 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
526 			u8 *object, char *what,
527 			u8 *start, unsigned int value, unsigned int bytes)
528 {
529 	u8 *fault;
530 	u8 *end;
531 
532 	fault = check_bytes(start, value, bytes);
533 	if (!fault)
534 		return 1;
535 
536 	end = start + bytes;
537 	while (end > fault && end[-1] == value)
538 		end--;
539 
540 	slab_bug(s, "%s overwritten", what);
541 	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
542 					fault, end - 1, fault[0], value);
543 	print_trailer(s, page, object);
544 
545 	restore_bytes(s, what, value, fault, end);
546 	return 0;
547 }
548 
549 /*
550  * Object layout:
551  *
552  * object address
553  * 	Bytes of the object to be managed.
554  * 	If the freepointer may overlay the object then the free
555  * 	pointer is the first word of the object.
556  *
557  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
558  * 	0xa5 (POISON_END)
559  *
560  * object + s->objsize
561  * 	Padding to reach word boundary. This is also used for Redzoning.
562  * 	Padding is extended by another word if Redzoning is enabled and
563  * 	objsize == inuse.
564  *
565  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
566  * 	0xcc (RED_ACTIVE) for objects in use.
567  *
568  * object + s->inuse
569  * 	Meta data starts here.
570  *
571  * 	A. Free pointer (if we cannot overwrite object on free)
572  * 	B. Tracking data for SLAB_STORE_USER
573  * 	C. Padding to reach required alignment boundary or at minimum
574  * 		one word if debugging is on to be able to detect writes
575  * 		before the word boundary.
576  *
577  *	Padding is done using 0x5a (POISON_INUSE)
578  *
579  * object + s->size
580  * 	Nothing is used beyond s->size.
581  *
582  * If slabcaches are merged then the objsize and inuse boundaries are mostly
583  * ignored. And therefore no slab options that rely on these boundaries
584  * may be used with merged slabcaches.
585  */
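
/*
 * Worked example (editorial sketch): a hypothetical cache on a 64-bit
 * kernel with objsize 24, no constructor and slub_debug=FZPU style flags
 * (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 * would be laid out roughly as:
 *
 *	  0..23		object, poisoned with 0x6b/0xa5 while free
 *	 24..31		red zone word (s->inuse = 32)
 *	 32..39		free pointer; it cannot overlay a poisoned
 *			object, so s->offset = 32
 *	 40..87		two struct track records (TRACK_ALLOC, TRACK_FREE)
 *	 88..95		padding filled with 0x5a, giving s->size = 96
 *
 * The exact numbers depend on calculate_sizes() and the architecture;
 * they are given only to make the layout described above concrete.
 */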
586 
587 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
588 {
589 	unsigned long off = s->inuse;	/* The end of info */
590 
591 	if (s->offset)
592 		/* Freepointer is placed after the object. */
593 		off += sizeof(void *);
594 
595 	if (s->flags & SLAB_STORE_USER)
596 		/* We also have user information there */
597 		off += 2 * sizeof(struct track);
598 
599 	if (s->size == off)
600 		return 1;
601 
602 	return check_bytes_and_report(s, page, p, "Object padding",
603 				p + off, POISON_INUSE, s->size - off);
604 }
605 
606 /* Check the pad bytes at the end of a slab page */
607 static int slab_pad_check(struct kmem_cache *s, struct page *page)
608 {
609 	u8 *start;
610 	u8 *fault;
611 	u8 *end;
612 	int length;
613 	int remainder;
614 
615 	if (!(s->flags & SLAB_POISON))
616 		return 1;
617 
618 	start = page_address(page);
619 	length = (PAGE_SIZE << compound_order(page));
620 	end = start + length;
621 	remainder = length % s->size;
622 	if (!remainder)
623 		return 1;
624 
625 	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
626 	if (!fault)
627 		return 1;
628 	while (end > fault && end[-1] == POISON_INUSE)
629 		end--;
630 
631 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
632 	print_section("Padding", end - remainder, remainder);
633 
634 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
635 	return 0;
636 }
637 
638 static int check_object(struct kmem_cache *s, struct page *page,
639 					void *object, int active)
640 {
641 	u8 *p = object;
642 	u8 *endobject = object + s->objsize;
643 
644 	if (s->flags & SLAB_RED_ZONE) {
645 		unsigned int red =
646 			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
647 
648 		if (!check_bytes_and_report(s, page, object, "Redzone",
649 			endobject, red, s->inuse - s->objsize))
650 			return 0;
651 	} else {
652 		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
653 			check_bytes_and_report(s, page, p, "Alignment padding",
654 				endobject, POISON_INUSE, s->inuse - s->objsize);
655 		}
656 	}
657 
658 	if (s->flags & SLAB_POISON) {
659 		if (!active && (s->flags & __OBJECT_POISON) &&
660 			(!check_bytes_and_report(s, page, p, "Poison", p,
661 					POISON_FREE, s->objsize - 1) ||
662 			 !check_bytes_and_report(s, page, p, "Poison",
663 				p + s->objsize - 1, POISON_END, 1)))
664 			return 0;
665 		/*
666 		 * check_pad_bytes cleans up on its own.
667 		 */
668 		check_pad_bytes(s, page, p);
669 	}
670 
671 	if (!s->offset && active)
672 		/*
673 		 * Object and freepointer overlap. Cannot check
674 		 * freepointer while object is allocated.
675 		 */
676 		return 1;
677 
678 	/* Check free pointer validity */
679 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
680 		object_err(s, page, p, "Freepointer corrupt");
681 		/*
682 		 * No choice but to zap it and thus lose the remainder
683 		 * of the free objects in this slab. May cause
684 		 * another error because the object count is now wrong.
685 		 */
686 		set_freepointer(s, p, NULL);
687 		return 0;
688 	}
689 	return 1;
690 }
691 
692 static int check_slab(struct kmem_cache *s, struct page *page)
693 {
694 	int maxobj;
695 
696 	VM_BUG_ON(!irqs_disabled());
697 
698 	if (!PageSlab(page)) {
699 		slab_err(s, page, "Not a valid slab page");
700 		return 0;
701 	}
702 
703 	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
704 	if (page->objects > maxobj) {
705 		slab_err(s, page, "objects %u > max %u",
706 			page->objects, maxobj);
707 		return 0;
708 	}
709 	if (page->inuse > page->objects) {
710 		slab_err(s, page, "inuse %u > max %u",
711 			page->inuse, page->objects);
712 		return 0;
713 	}
714 	/* Slab_pad_check fixes things up after itself */
715 	slab_pad_check(s, page);
716 	return 1;
717 }
718 
719 /*
720  * Determine if a certain object on a page is on the freelist. Must hold the
721  * slab lock to guarantee that the chains are in a consistent state.
722  */
723 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
724 {
725 	int nr = 0;
726 	void *fp = page->freelist;
727 	void *object = NULL;
728 	unsigned long max_objects;
729 
730 	while (fp && nr <= page->objects) {
731 		if (fp == search)
732 			return 1;
733 		if (!check_valid_pointer(s, page, fp)) {
734 			if (object) {
735 				object_err(s, page, object,
736 					"Freechain corrupt");
737 				set_freepointer(s, object, NULL);
738 				break;
739 			} else {
740 				slab_err(s, page, "Freepointer corrupt");
741 				page->freelist = NULL;
742 				page->inuse = page->objects;
743 				slab_fix(s, "Freelist cleared");
744 				return 0;
745 			}
746 			break;
747 		}
748 		object = fp;
749 		fp = get_freepointer(s, object);
750 		nr++;
751 	}
752 
753 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
754 	if (max_objects > MAX_OBJS_PER_PAGE)
755 		max_objects = MAX_OBJS_PER_PAGE;
756 
757 	if (page->objects != max_objects) {
758 		slab_err(s, page, "Wrong number of objects. Found %d but "
759 			"should be %d", page->objects, max_objects);
760 		page->objects = max_objects;
761 		slab_fix(s, "Number of objects adjusted.");
762 	}
763 	if (page->inuse != page->objects - nr) {
764 		slab_err(s, page, "Wrong object count. Counter is %d but "
765 			"counted were %d", page->inuse, page->objects - nr);
766 		page->inuse = page->objects - nr;
767 		slab_fix(s, "Object count adjusted.");
768 	}
769 	return search == NULL;
770 }
771 
772 static void trace(struct kmem_cache *s, struct page *page, void *object,
773 								int alloc)
774 {
775 	if (s->flags & SLAB_TRACE) {
776 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
777 			s->name,
778 			alloc ? "alloc" : "free",
779 			object, page->inuse,
780 			page->freelist);
781 
782 		if (!alloc)
783 			print_section("Object", (void *)object, s->objsize);
784 
785 		dump_stack();
786 	}
787 }
788 
789 /*
790  * Tracking of fully allocated slabs for debugging purposes.
791  */
792 static void add_full(struct kmem_cache_node *n, struct page *page)
793 {
794 	spin_lock(&n->list_lock);
795 	list_add(&page->lru, &n->full);
796 	spin_unlock(&n->list_lock);
797 }
798 
799 static void remove_full(struct kmem_cache *s, struct page *page)
800 {
801 	struct kmem_cache_node *n;
802 
803 	if (!(s->flags & SLAB_STORE_USER))
804 		return;
805 
806 	n = get_node(s, page_to_nid(page));
807 
808 	spin_lock(&n->list_lock);
809 	list_del(&page->lru);
810 	spin_unlock(&n->list_lock);
811 }
812 
813 /* Tracking of the number of slabs for debugging purposes */
814 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
815 {
816 	struct kmem_cache_node *n = get_node(s, node);
817 
818 	return atomic_long_read(&n->nr_slabs);
819 }
820 
821 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
822 {
823 	return atomic_long_read(&n->nr_slabs);
824 }
825 
826 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
827 {
828 	struct kmem_cache_node *n = get_node(s, node);
829 
830 	/*
831 	 * May be called early in order to allocate a slab for the
832 	 * kmem_cache_node structure. Solve the chicken-egg
833 	 * dilemma by deferring the increment of the count during
834 	 * bootstrap (see early_kmem_cache_node_alloc).
835 	 */
836 	if (!NUMA_BUILD || n) {
837 		atomic_long_inc(&n->nr_slabs);
838 		atomic_long_add(objects, &n->total_objects);
839 	}
840 }
841 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
842 {
843 	struct kmem_cache_node *n = get_node(s, node);
844 
845 	atomic_long_dec(&n->nr_slabs);
846 	atomic_long_sub(objects, &n->total_objects);
847 }
848 
849 /* Object debug checks for alloc/free paths */
850 static void setup_object_debug(struct kmem_cache *s, struct page *page,
851 								void *object)
852 {
853 	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
854 		return;
855 
856 	init_object(s, object, 0);
857 	init_tracking(s, object);
858 }
859 
860 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
861 					void *object, unsigned long addr)
862 {
863 	if (!check_slab(s, page))
864 		goto bad;
865 
866 	if (!on_freelist(s, page, object)) {
867 		object_err(s, page, object, "Object already allocated");
868 		goto bad;
869 	}
870 
871 	if (!check_valid_pointer(s, page, object)) {
872 		object_err(s, page, object, "Freelist Pointer check fails");
873 		goto bad;
874 	}
875 
876 	if (!check_object(s, page, object, 0))
877 		goto bad;
878 
879 	/* Success. Perform special debug activities for allocs */
880 	if (s->flags & SLAB_STORE_USER)
881 		set_track(s, object, TRACK_ALLOC, addr);
882 	trace(s, page, object, 1);
883 	init_object(s, object, 1);
884 	return 1;
885 
886 bad:
887 	if (PageSlab(page)) {
888 		/*
889 		 * If this is a slab page then let's do the best we can
890 		 * to avoid issues in the future. Marking all objects
891 		 * as used avoids touching the remaining objects.
892 		 */
893 		slab_fix(s, "Marking all objects used");
894 		page->inuse = page->objects;
895 		page->freelist = NULL;
896 	}
897 	return 0;
898 }
899 
900 static int free_debug_processing(struct kmem_cache *s, struct page *page,
901 					void *object, unsigned long addr)
902 {
903 	if (!check_slab(s, page))
904 		goto fail;
905 
906 	if (!check_valid_pointer(s, page, object)) {
907 		slab_err(s, page, "Invalid object pointer 0x%p", object);
908 		goto fail;
909 	}
910 
911 	if (on_freelist(s, page, object)) {
912 		object_err(s, page, object, "Object already free");
913 		goto fail;
914 	}
915 
916 	if (!check_object(s, page, object, 1))
917 		return 0;
918 
919 	if (unlikely(s != page->slab)) {
920 		if (!PageSlab(page)) {
921 			slab_err(s, page, "Attempt to free object(0x%p) "
922 				"outside of slab", object);
923 		} else if (!page->slab) {
924 			printk(KERN_ERR
925 				"SLUB <none>: no slab for object 0x%p.\n",
926 						object);
927 			dump_stack();
928 		} else
929 			object_err(s, page, object,
930 					"page slab pointer corrupt.");
931 		goto fail;
932 	}
933 
934 	/* Special debug activities for freeing objects */
935 	if (!PageSlubFrozen(page) && !page->freelist)
936 		remove_full(s, page);
937 	if (s->flags & SLAB_STORE_USER)
938 		set_track(s, object, TRACK_FREE, addr);
939 	trace(s, page, object, 0);
940 	init_object(s, object, 0);
941 	return 1;
942 
943 fail:
944 	slab_fix(s, "Object at 0x%p not freed", object);
945 	return 0;
946 }
947 
948 static int __init setup_slub_debug(char *str)
949 {
950 	slub_debug = DEBUG_DEFAULT_FLAGS;
951 	if (*str++ != '=' || !*str)
952 		/*
953 		 * No options specified. Switch on full debugging.
954 		 */
955 		goto out;
956 
957 	if (*str == ',')
958 		/*
959 		 * No options but restriction on slabs. This means full
960 		 * debugging for slabs matching a pattern.
961 		 */
962 		goto check_slabs;
963 
964 	if (tolower(*str) == 'o') {
965 		/*
966 		 * Avoid enabling debugging on caches whose minimum order
967 		 * would increase as a result.
968 		 */
969 		disable_higher_order_debug = 1;
970 		goto out;
971 	}
972 
973 	slub_debug = 0;
974 	if (*str == '-')
975 		/*
976 		 * Switch off all debugging measures.
977 		 */
978 		goto out;
979 
980 	/*
981 	 * Determine which debug features should be switched on
982 	 */
983 	for (; *str && *str != ','; str++) {
984 		switch (tolower(*str)) {
985 		case 'f':
986 			slub_debug |= SLAB_DEBUG_FREE;
987 			break;
988 		case 'z':
989 			slub_debug |= SLAB_RED_ZONE;
990 			break;
991 		case 'p':
992 			slub_debug |= SLAB_POISON;
993 			break;
994 		case 'u':
995 			slub_debug |= SLAB_STORE_USER;
996 			break;
997 		case 't':
998 			slub_debug |= SLAB_TRACE;
999 			break;
1000 		case 'a':
1001 			slub_debug |= SLAB_FAILSLAB;
1002 			break;
1003 		default:
1004 			printk(KERN_ERR "slub_debug option '%c' "
1005 				"unknown. skipped\n", *str);
1006 		}
1007 	}
1008 
1009 check_slabs:
1010 	if (*str == ',')
1011 		slub_debug_slabs = str + 1;
1012 out:
1013 	return 1;
1014 }
1015 
1016 __setup("slub_debug", setup_slub_debug);
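
/*
 * Example invocations of the boot parameter (editorial note):
 *
 *	slub_debug			full debugging for all caches
 *	slub_debug=FZP			sanity checks, red zoning and
 *					poisoning for all caches
 *	slub_debug=FZP,dentry		the same, but only for caches whose
 *					name starts with "dentry"
 *	slub_debug=O			full debugging, except that the
 *					metadata heavy options are dropped
 *					from caches whose minimum order
 *					would otherwise increase
 *	slub_debug=-			switch all debugging off
 */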
1017 
1018 static unsigned long kmem_cache_flags(unsigned long objsize,
1019 	unsigned long flags, const char *name,
1020 	void (*ctor)(void *))
1021 {
1022 	/*
1023 	 * Enable debugging if selected on the kernel commandline.
1024 	 */
1025 	if (slub_debug && (!slub_debug_slabs ||
1026 		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1027 		flags |= slub_debug;
1028 
1029 	return flags;
1030 }
1031 #else
1032 static inline void setup_object_debug(struct kmem_cache *s,
1033 			struct page *page, void *object) {}
1034 
1035 static inline int alloc_debug_processing(struct kmem_cache *s,
1036 	struct page *page, void *object, unsigned long addr) { return 0; }
1037 
1038 static inline int free_debug_processing(struct kmem_cache *s,
1039 	struct page *page, void *object, unsigned long addr) { return 0; }
1040 
1041 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1042 			{ return 1; }
1043 static inline int check_object(struct kmem_cache *s, struct page *page,
1044 			void *object, int active) { return 1; }
1045 static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1046 static inline unsigned long kmem_cache_flags(unsigned long objsize,
1047 	unsigned long flags, const char *name,
1048 	void (*ctor)(void *))
1049 {
1050 	return flags;
1051 }
1052 #define slub_debug 0
1053 
1054 #define disable_higher_order_debug 0
1055 
1056 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1057 							{ return 0; }
1058 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1059 							{ return 0; }
1060 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1061 							int objects) {}
1062 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1063 							int objects) {}
1064 #endif
1065 
1066 /*
1067  * Slab allocation and freeing
1068  */
1069 static inline struct page *alloc_slab_page(gfp_t flags, int node,
1070 					struct kmem_cache_order_objects oo)
1071 {
1072 	int order = oo_order(oo);
1073 
1074 	flags |= __GFP_NOTRACK;
1075 
1076 	if (node == -1)
1077 		return alloc_pages(flags, order);
1078 	else
1079 		return alloc_pages_exact_node(node, flags, order);
1080 }
1081 
1082 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1083 {
1084 	struct page *page;
1085 	struct kmem_cache_order_objects oo = s->oo;
1086 	gfp_t alloc_gfp;
1087 
1088 	flags |= s->allocflags;
1089 
1090 	/*
1091 	 * Let the initial higher-order allocation fail under memory pressure
1092 	 * so we fall back to the minimum order allocation.
1093 	 */
1094 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1095 
1096 	page = alloc_slab_page(alloc_gfp, node, oo);
1097 	if (unlikely(!page)) {
1098 		oo = s->min;
1099 		/*
1100 		 * Allocation may have failed due to fragmentation.
1101 		 * Try a lower order alloc if possible
1102 		 */
1103 		page = alloc_slab_page(flags, node, oo);
1104 		if (!page)
1105 			return NULL;
1106 
1107 		stat(s, ORDER_FALLBACK);
1108 	}
1109 
1110 	if (kmemcheck_enabled
1111 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1112 		int pages = 1 << oo_order(oo);
1113 
1114 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1115 
1116 		/*
1117 		 * Objects from caches that have a constructor don't get
1118 		 * cleared when they're allocated, so we need to do it here.
1119 		 */
1120 		if (s->ctor)
1121 			kmemcheck_mark_uninitialized_pages(page, pages);
1122 		else
1123 			kmemcheck_mark_unallocated_pages(page, pages);
1124 	}
1125 
1126 	page->objects = oo_objects(oo);
1127 	mod_zone_page_state(page_zone(page),
1128 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1129 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1130 		1 << oo_order(oo));
1131 
1132 	return page;
1133 }
1134 
1135 static void setup_object(struct kmem_cache *s, struct page *page,
1136 				void *object)
1137 {
1138 	setup_object_debug(s, page, object);
1139 	if (unlikely(s->ctor))
1140 		s->ctor(object);
1141 }
1142 
1143 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1144 {
1145 	struct page *page;
1146 	void *start;
1147 	void *last;
1148 	void *p;
1149 
1150 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1151 
1152 	page = allocate_slab(s,
1153 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1154 	if (!page)
1155 		goto out;
1156 
1157 	inc_slabs_node(s, page_to_nid(page), page->objects);
1158 	page->slab = s;
1159 	page->flags |= 1 << PG_slab;
1160 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1161 			SLAB_STORE_USER | SLAB_TRACE))
1162 		__SetPageSlubDebug(page);
1163 
1164 	start = page_address(page);
1165 
1166 	if (unlikely(s->flags & SLAB_POISON))
1167 		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1168 
1169 	last = start;
1170 	for_each_object(p, s, start, page->objects) {
1171 		setup_object(s, page, last);
1172 		set_freepointer(s, last, p);
1173 		last = p;
1174 	}
1175 	setup_object(s, page, last);
1176 	set_freepointer(s, last, NULL);
1177 
1178 	page->freelist = start;
1179 	page->inuse = 0;
1180 out:
1181 	return page;
1182 }
1183 
1184 static void __free_slab(struct kmem_cache *s, struct page *page)
1185 {
1186 	int order = compound_order(page);
1187 	int pages = 1 << order;
1188 
1189 	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
1190 		void *p;
1191 
1192 		slab_pad_check(s, page);
1193 		for_each_object(p, s, page_address(page),
1194 						page->objects)
1195 			check_object(s, page, p, 0);
1196 		__ClearPageSlubDebug(page);
1197 	}
1198 
1199 	kmemcheck_free_shadow(page, compound_order(page));
1200 
1201 	mod_zone_page_state(page_zone(page),
1202 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1203 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1204 		-pages);
1205 
1206 	__ClearPageSlab(page);
1207 	reset_page_mapcount(page);
1208 	if (current->reclaim_state)
1209 		current->reclaim_state->reclaimed_slab += pages;
1210 	__free_pages(page, order);
1211 }
1212 
1213 static void rcu_free_slab(struct rcu_head *h)
1214 {
1215 	struct page *page;
1216 
1217 	page = container_of((struct list_head *)h, struct page, lru);
1218 	__free_slab(page->slab, page);
1219 }
1220 
1221 static void free_slab(struct kmem_cache *s, struct page *page)
1222 {
1223 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1224 		/*
1225 		 * RCU free overloads the RCU head over the LRU
1226 		 */
1227 		struct rcu_head *head = (void *)&page->lru;
1228 
1229 		call_rcu(head, rcu_free_slab);
1230 	} else
1231 		__free_slab(s, page);
1232 }
1233 
1234 static void discard_slab(struct kmem_cache *s, struct page *page)
1235 {
1236 	dec_slabs_node(s, page_to_nid(page), page->objects);
1237 	free_slab(s, page);
1238 }
1239 
1240 /*
1241  * Per slab locking using the pagelock
1242  */
1243 static __always_inline void slab_lock(struct page *page)
1244 {
1245 	bit_spin_lock(PG_locked, &page->flags);
1246 }
1247 
1248 static __always_inline void slab_unlock(struct page *page)
1249 {
1250 	__bit_spin_unlock(PG_locked, &page->flags);
1251 }
1252 
1253 static __always_inline int slab_trylock(struct page *page)
1254 {
1255 	int rc = 1;
1256 
1257 	rc = bit_spin_trylock(PG_locked, &page->flags);
1258 	return rc;
1259 }
1260 
1261 /*
1262  * Management of partially allocated slabs
1263  */
1264 static void add_partial(struct kmem_cache_node *n,
1265 				struct page *page, int tail)
1266 {
1267 	spin_lock(&n->list_lock);
1268 	n->nr_partial++;
1269 	if (tail)
1270 		list_add_tail(&page->lru, &n->partial);
1271 	else
1272 		list_add(&page->lru, &n->partial);
1273 	spin_unlock(&n->list_lock);
1274 }
1275 
1276 static void remove_partial(struct kmem_cache *s, struct page *page)
1277 {
1278 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1279 
1280 	spin_lock(&n->list_lock);
1281 	list_del(&page->lru);
1282 	n->nr_partial--;
1283 	spin_unlock(&n->list_lock);
1284 }
1285 
1286 /*
1287  * Lock slab and remove from the partial list.
1288  *
1289  * Must hold list_lock.
1290  */
1291 static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1292 							struct page *page)
1293 {
1294 	if (slab_trylock(page)) {
1295 		list_del(&page->lru);
1296 		n->nr_partial--;
1297 		__SetPageSlubFrozen(page);
1298 		return 1;
1299 	}
1300 	return 0;
1301 }
1302 
1303 /*
1304  * Try to allocate a partial slab from a specific node.
1305  */
1306 static struct page *get_partial_node(struct kmem_cache_node *n)
1307 {
1308 	struct page *page;
1309 
1310 	/*
1311 	 * Racy check. If we mistakenly see no partial slabs then we
1312 	 * just allocate an empty slab. If we mistakenly try to get a
1313 	 * partial slab and there is none available then get_partial_node()
1314 	 * will return NULL.
1315 	 */
1316 	if (!n || !n->nr_partial)
1317 		return NULL;
1318 
1319 	spin_lock(&n->list_lock);
1320 	list_for_each_entry(page, &n->partial, lru)
1321 		if (lock_and_freeze_slab(n, page))
1322 			goto out;
1323 	page = NULL;
1324 out:
1325 	spin_unlock(&n->list_lock);
1326 	return page;
1327 }
1328 
1329 /*
1330  * Get a page from somewhere. Search in increasing NUMA distances.
1331  */
1332 static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1333 {
1334 #ifdef CONFIG_NUMA
1335 	struct zonelist *zonelist;
1336 	struct zoneref *z;
1337 	struct zone *zone;
1338 	enum zone_type high_zoneidx = gfp_zone(flags);
1339 	struct page *page;
1340 
1341 	/*
1342 	 * The defrag ratio allows a configuration of the tradeoffs between
1343 	 * inter node defragmentation and node local allocations. A lower
1344 	 * defrag_ratio increases the tendency to do local allocations
1345 	 * instead of attempting to obtain partial slabs from other nodes.
1346 	 *
1347 	 * If the defrag_ratio is set to 0 then kmalloc() always
1348 	 * returns node local objects. If the ratio is higher then kmalloc()
1349 	 * may return off node objects because partial slabs are obtained
1350 	 * from other nodes and filled up.
1351 	 *
1352 	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1353 	 * defrag_ratio = 1000) then every (well almost) allocation will
1354 	 * first attempt to defrag slab caches on other nodes. This means
1355 	 * scanning over all nodes to look for partial slabs which may be
1356 	 * expensive if we do it every time we are trying to find a slab
1357 	 * with available objects.
1358 	 */
1359 	if (!s->remote_node_defrag_ratio ||
1360 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1361 		return NULL;
1362 
1363 	get_mems_allowed();
1364 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1365 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1366 		struct kmem_cache_node *n;
1367 
1368 		n = get_node(s, zone_to_nid(zone));
1369 
1370 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1371 				n->nr_partial > s->min_partial) {
1372 			page = get_partial_node(n);
1373 			if (page) {
1374 				put_mems_allowed();
1375 				return page;
1376 			}
1377 		}
1378 	}
1379 	put_mems_allowed();
1380 #endif
1381 	return NULL;
1382 }
1383 
1384 /*
1385  * Get a partial page, lock it and return it.
1386  */
1387 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1388 {
1389 	struct page *page;
1390 	int searchnode = (node == -1) ? numa_node_id() : node;
1391 
1392 	page = get_partial_node(get_node(s, searchnode));
1393 	if (page || (flags & __GFP_THISNODE))
1394 		return page;
1395 
1396 	return get_any_partial(s, flags);
1397 }
1398 
1399 /*
1400  * Move a page back to the lists.
1401  *
1402  * Must be called with the slab lock held.
1403  *
1404  * On exit the slab lock will have been dropped.
1405  */
1406 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1407 {
1408 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1409 
1410 	__ClearPageSlubFrozen(page);
1411 	if (page->inuse) {
1412 
1413 		if (page->freelist) {
1414 			add_partial(n, page, tail);
1415 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1416 		} else {
1417 			stat(s, DEACTIVATE_FULL);
1418 			if (SLABDEBUG && PageSlubDebug(page) &&
1419 						(s->flags & SLAB_STORE_USER))
1420 				add_full(n, page);
1421 		}
1422 		slab_unlock(page);
1423 	} else {
1424 		stat(s, DEACTIVATE_EMPTY);
1425 		if (n->nr_partial < s->min_partial) {
1426 			/*
1427 			 * Adding an empty slab to the partial slabs in order
1428 			 * to avoid page allocator overhead. This slab needs
1429 			 * to come after the other slabs with objects in
1430 			 * so that the others get filled first. That way the
1431 			 * size of the partial list stays small.
1432 			 *
1433 			 * kmem_cache_shrink can reclaim any empty slabs from
1434 			 * the partial list.
1435 			 */
1436 			add_partial(n, page, 1);
1437 			slab_unlock(page);
1438 		} else {
1439 			slab_unlock(page);
1440 			stat(s, FREE_SLAB);
1441 			discard_slab(s, page);
1442 		}
1443 	}
1444 }
1445 
1446 /*
1447  * Remove the cpu slab
1448  */
1449 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1450 {
1451 	struct page *page = c->page;
1452 	int tail = 1;
1453 
1454 	if (page->freelist)
1455 		stat(s, DEACTIVATE_REMOTE_FREES);
1456 	/*
1457 	 * Merge cpu freelist into slab freelist. Typically we get here
1458 	 * because both freelists are empty. So this is unlikely
1459 	 * to occur.
1460 	 */
1461 	while (unlikely(c->freelist)) {
1462 		void **object;
1463 
1464 		tail = 0;	/* Hot objects. Put the slab first */
1465 
1466 		/* Retrieve object from cpu_freelist */
1467 		object = c->freelist;
1468 		c->freelist = get_freepointer(s, c->freelist);
1469 
1470 		/* And put onto the regular freelist */
1471 		set_freepointer(s, object, page->freelist);
1472 		page->freelist = object;
1473 		page->inuse--;
1474 	}
1475 	c->page = NULL;
1476 	unfreeze_slab(s, page, tail);
1477 }
1478 
1479 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1480 {
1481 	stat(s, CPUSLAB_FLUSH);
1482 	slab_lock(c->page);
1483 	deactivate_slab(s, c);
1484 }
1485 
1486 /*
1487  * Flush cpu slab.
1488  *
1489  * Called from IPI handler with interrupts disabled.
1490  */
1491 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1492 {
1493 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
1494 
1495 	if (likely(c && c->page))
1496 		flush_slab(s, c);
1497 }
1498 
1499 static void flush_cpu_slab(void *d)
1500 {
1501 	struct kmem_cache *s = d;
1502 
1503 	__flush_cpu_slab(s, smp_processor_id());
1504 }
1505 
1506 static void flush_all(struct kmem_cache *s)
1507 {
1508 	on_each_cpu(flush_cpu_slab, s, 1);
1509 }
1510 
1511 /*
1512  * Check if the objects in a per cpu structure fit numa
1513  * locality expectations.
1514  */
1515 static inline int node_match(struct kmem_cache_cpu *c, int node)
1516 {
1517 #ifdef CONFIG_NUMA
1518 	if (node != -1 && c->node != node)
1519 		return 0;
1520 #endif
1521 	return 1;
1522 }
1523 
1524 static int count_free(struct page *page)
1525 {
1526 	return page->objects - page->inuse;
1527 }
1528 
1529 static unsigned long count_partial(struct kmem_cache_node *n,
1530 					int (*get_count)(struct page *))
1531 {
1532 	unsigned long flags;
1533 	unsigned long x = 0;
1534 	struct page *page;
1535 
1536 	spin_lock_irqsave(&n->list_lock, flags);
1537 	list_for_each_entry(page, &n->partial, lru)
1538 		x += get_count(page);
1539 	spin_unlock_irqrestore(&n->list_lock, flags);
1540 	return x;
1541 }
1542 
1543 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
1544 {
1545 #ifdef CONFIG_SLUB_DEBUG
1546 	return atomic_long_read(&n->total_objects);
1547 #else
1548 	return 0;
1549 #endif
1550 }
1551 
1552 static noinline void
1553 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
1554 {
1555 	int node;
1556 
1557 	printk(KERN_WARNING
1558 		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1559 		nid, gfpflags);
1560 	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
1561 		"default order: %d, min order: %d\n", s->name, s->objsize,
1562 		s->size, oo_order(s->oo), oo_order(s->min));
1563 
1564 	if (oo_order(s->min) > get_order(s->objsize))
1565 		printk(KERN_WARNING "  %s debugging increased min order, use "
1566 		       "slub_debug=O to disable.\n", s->name);
1567 
1568 	for_each_online_node(node) {
1569 		struct kmem_cache_node *n = get_node(s, node);
1570 		unsigned long nr_slabs;
1571 		unsigned long nr_objs;
1572 		unsigned long nr_free;
1573 
1574 		if (!n)
1575 			continue;
1576 
1577 		nr_free  = count_partial(n, count_free);
1578 		nr_slabs = node_nr_slabs(n);
1579 		nr_objs  = node_nr_objs(n);
1580 
1581 		printk(KERN_WARNING
1582 			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
1583 			node, nr_slabs, nr_objs, nr_free);
1584 	}
1585 }
1586 
1587 /*
1588  * Slow path. The lockless freelist is empty or we need to perform
1589  * debugging duties.
1590  *
1591  * Interrupts are disabled.
1592  *
1593  * Processing is still very fast if new objects have been freed to the
1594  * regular freelist. In that case we simply take over the regular freelist
1595  * as the lockless freelist and zap the regular freelist.
1596  *
1597  * If that is not working then we fall back to the partial lists. We take the
1598  * first element of the freelist as the object to allocate now and move the
1599  * rest of the freelist to the lockless freelist.
1600  *
1601  * And if we were unable to get a new slab from the partial slab lists then
1602  * we need to allocate a new slab. This is the slowest path since it involves
1603  * a call to the page allocator and the setup of a new slab.
1604  */
1605 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1606 			  unsigned long addr, struct kmem_cache_cpu *c)
1607 {
1608 	void **object;
1609 	struct page *new;
1610 
1611 	/* We handle __GFP_ZERO in the caller */
1612 	gfpflags &= ~__GFP_ZERO;
1613 
1614 	if (!c->page)
1615 		goto new_slab;
1616 
1617 	slab_lock(c->page);
1618 	if (unlikely(!node_match(c, node)))
1619 		goto another_slab;
1620 
1621 	stat(s, ALLOC_REFILL);
1622 
1623 load_freelist:
1624 	object = c->page->freelist;
1625 	if (unlikely(!object))
1626 		goto another_slab;
1627 	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
1628 		goto debug;
1629 
1630 	c->freelist = get_freepointer(s, object);
1631 	c->page->inuse = c->page->objects;
1632 	c->page->freelist = NULL;
1633 	c->node = page_to_nid(c->page);
1634 unlock_out:
1635 	slab_unlock(c->page);
1636 	stat(s, ALLOC_SLOWPATH);
1637 	return object;
1638 
1639 another_slab:
1640 	deactivate_slab(s, c);
1641 
1642 new_slab:
1643 	new = get_partial(s, gfpflags, node);
1644 	if (new) {
1645 		c->page = new;
1646 		stat(s, ALLOC_FROM_PARTIAL);
1647 		goto load_freelist;
1648 	}
1649 
1650 	if (gfpflags & __GFP_WAIT)
1651 		local_irq_enable();
1652 
1653 	new = new_slab(s, gfpflags, node);
1654 
1655 	if (gfpflags & __GFP_WAIT)
1656 		local_irq_disable();
1657 
1658 	if (new) {
1659 		c = __this_cpu_ptr(s->cpu_slab);
1660 		stat(s, ALLOC_SLAB);
1661 		if (c->page)
1662 			flush_slab(s, c);
1663 		slab_lock(new);
1664 		__SetPageSlubFrozen(new);
1665 		c->page = new;
1666 		goto load_freelist;
1667 	}
1668 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
1669 		slab_out_of_memory(s, gfpflags, node);
1670 	return NULL;
1671 debug:
1672 	if (!alloc_debug_processing(s, c->page, object, addr))
1673 		goto another_slab;
1674 
1675 	c->page->inuse++;
1676 	c->page->freelist = get_freepointer(s, object);
1677 	c->node = -1;
1678 	goto unlock_out;
1679 }
1680 
1681 /*
1682  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1683  * have the fastpath folded into their functions. So no function call
1684  * overhead for requests that can be satisfied on the fastpath.
1685  *
1686  * The fastpath works by first checking if the lockless freelist can be used.
1687  * If not then __slab_alloc is called for slow processing.
1688  *
1689  * Otherwise we can simply pick the next object from the lockless free list.
1690  */
1691 static __always_inline void *slab_alloc(struct kmem_cache *s,
1692 		gfp_t gfpflags, int node, unsigned long addr)
1693 {
1694 	void **object;
1695 	struct kmem_cache_cpu *c;
1696 	unsigned long flags;
1697 
1698 	gfpflags &= gfp_allowed_mask;
1699 
1700 	lockdep_trace_alloc(gfpflags);
1701 	might_sleep_if(gfpflags & __GFP_WAIT);
1702 
1703 	if (should_failslab(s->objsize, gfpflags, s->flags))
1704 		return NULL;
1705 
1706 	local_irq_save(flags);
1707 	c = __this_cpu_ptr(s->cpu_slab);
1708 	object = c->freelist;
1709 	if (unlikely(!object || !node_match(c, node)))
1710 
1711 		object = __slab_alloc(s, gfpflags, node, addr, c);
1712 
1713 	else {
1714 		c->freelist = get_freepointer(s, object);
1715 		stat(s, ALLOC_FASTPATH);
1716 	}
1717 	local_irq_restore(flags);
1718 
1719 	if (unlikely(gfpflags & __GFP_ZERO) && object)
1720 		memset(object, 0, s->objsize);
1721 
1722 	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
1723 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
1724 
1725 	return object;
1726 }
1727 
1728 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1729 {
1730 	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
1731 
1732 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
1733 
1734 	return ret;
1735 }
1736 EXPORT_SYMBOL(kmem_cache_alloc);
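
/*
 * Illustrative usage sketch (editorial, not part of the original file):
 * a caller creates a cache once and then allocates and frees objects
 * through the fastpath above. The structure and cache names are
 * hypothetical and the code is compiled out.
 */
#if 0
struct my_widget {
	struct list_head list;
	int id;
};

static struct kmem_cache *widget_cachep;

static int __init widget_example_init(void)
{
	struct my_widget *w;

	widget_cachep = kmem_cache_create("my_widget",
				sizeof(struct my_widget), 0,
				SLAB_HWCACHE_ALIGN, NULL);
	if (!widget_cachep)
		return -ENOMEM;

	w = kmem_cache_alloc(widget_cachep, GFP_KERNEL);
	if (w)
		kmem_cache_free(widget_cachep, w);

	kmem_cache_destroy(widget_cachep);
	return 0;
}
#endif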
1737 
1738 #ifdef CONFIG_TRACING
1739 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
1740 {
1741 	return slab_alloc(s, gfpflags, -1, _RET_IP_);
1742 }
1743 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
1744 #endif
1745 
1746 #ifdef CONFIG_NUMA
1747 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1748 {
1749 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
1750 
1751 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
1752 				    s->objsize, s->size, gfpflags, node);
1753 
1754 	return ret;
1755 }
1756 EXPORT_SYMBOL(kmem_cache_alloc_node);
1757 #endif
1758 
1759 #ifdef CONFIG_TRACING
1760 void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
1761 				    gfp_t gfpflags,
1762 				    int node)
1763 {
1764 	return slab_alloc(s, gfpflags, node, _RET_IP_);
1765 }
1766 EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
1767 #endif
1768 
1769 /*
1770  * Slow path handling. This may still be called frequently since objects
1771  * have a longer lifetime than the cpu slabs in most processing loads.
1772  *
1773  * So we still attempt to reduce cache line usage. Just take the slab
1774  * lock and free the item. If there is no additional partial page
1775  * handling required then we can return immediately.
1776  */
1777 static void __slab_free(struct kmem_cache *s, struct page *page,
1778 			void *x, unsigned long addr)
1779 {
1780 	void *prior;
1781 	void **object = (void *)x;
1782 
1783 	stat(s, FREE_SLOWPATH);
1784 	slab_lock(page);
1785 
1786 	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
1787 		goto debug;
1788 
1789 checks_ok:
1790 	prior = page->freelist;
1791 	set_freepointer(s, object, prior);
1792 	page->freelist = object;
1793 	page->inuse--;
1794 
1795 	if (unlikely(PageSlubFrozen(page))) {
1796 		stat(s, FREE_FROZEN);
1797 		goto out_unlock;
1798 	}
1799 
1800 	if (unlikely(!page->inuse))
1801 		goto slab_empty;
1802 
1803 	/*
1804 	 * Objects left in the slab. If it was not on the partial list before
1805 	 * then add it.
1806 	 */
1807 	if (unlikely(!prior)) {
1808 		add_partial(get_node(s, page_to_nid(page)), page, 1);
1809 		stat(s, FREE_ADD_PARTIAL);
1810 	}
1811 
1812 out_unlock:
1813 	slab_unlock(page);
1814 	return;
1815 
1816 slab_empty:
1817 	if (prior) {
1818 		/*
1819 		 * Slab still on the partial list.
1820 		 */
1821 		remove_partial(s, page);
1822 		stat(s, FREE_REMOVE_PARTIAL);
1823 	}
1824 	slab_unlock(page);
1825 	stat(s, FREE_SLAB);
1826 	discard_slab(s, page);
1827 	return;
1828 
1829 debug:
1830 	if (!free_debug_processing(s, page, x, addr))
1831 		goto out_unlock;
1832 	goto checks_ok;
1833 }
1834 
1835 /*
1836  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1837  * can perform fastpath freeing without additional function calls.
1838  *
1839  * The fastpath is only possible if we are freeing to the current cpu slab
1840  * of this processor. This is typically the case if we have just allocated
1841  * the item before.
1842  *
1843  * If fastpath is not possible then fall back to __slab_free where we deal
1844  * with all sorts of special processing.
1845  */
1846 static __always_inline void slab_free(struct kmem_cache *s,
1847 			struct page *page, void *x, unsigned long addr)
1848 {
1849 	void **object = (void *)x;
1850 	struct kmem_cache_cpu *c;
1851 	unsigned long flags;
1852 
1853 	kmemleak_free_recursive(x, s->flags);
1854 	local_irq_save(flags);
1855 	c = __this_cpu_ptr(s->cpu_slab);
1856 	kmemcheck_slab_free(s, object, s->objsize);
1857 	debug_check_no_locks_freed(object, s->objsize);
1858 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1859 		debug_check_no_obj_freed(object, s->objsize);
1860 	if (likely(page == c->page && c->node >= 0)) {
1861 		set_freepointer(s, object, c->freelist);
1862 		c->freelist = object;
1863 		stat(s, FREE_FASTPATH);
1864 	} else
1865 		__slab_free(s, page, x, addr);
1866 
1867 	local_irq_restore(flags);
1868 }
1869 
1870 void kmem_cache_free(struct kmem_cache *s, void *x)
1871 {
1872 	struct page *page;
1873 
1874 	page = virt_to_head_page(x);
1875 
1876 	slab_free(s, page, x, _RET_IP_);
1877 
1878 	trace_kmem_cache_free(_RET_IP_, x);
1879 }
1880 EXPORT_SYMBOL(kmem_cache_free);
1881 
1882 /* Figure out on which slab page the object resides */
1883 static struct page *get_object_page(const void *x)
1884 {
1885 	struct page *page = virt_to_head_page(x);
1886 
1887 	if (!PageSlab(page))
1888 		return NULL;
1889 
1890 	return page;
1891 }
1892 
1893 /*
1894  * Object placement in a slab is made very easy because we always start at
1895  * offset 0. If we tune the size of the object to the alignment then we can
1896  * get the required alignment by putting one properly sized object after
1897  * another.
1898  *
1899  * Notice that the allocation order determines the sizes of the per cpu
1900  * caches. Each processor always has one slab available for allocations.
1901  * Increasing the allocation order reduces the number of times that slabs
1902  * must be moved on and off the partial lists and is therefore a factor in
1903  * locking overhead.
1904  */
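
/*
 * Example (editorial): a cache with s->size = 192, a multiple of a 64 byte
 * cacheline, places objects at offsets 0, 192, 384, ... of the page-aligned
 * slab, all of which are cacheline aligned without any per object padding.
 */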
1905 
1906 /*
1907  * Minimum / Maximum order of slab pages. This influences locking overhead
1908  * and slab fragmentation. A higher order reduces the number of partial slabs
1909  * and increases the number of allocations possible without having to
1910  * take the list_lock.
1911  */
1912 static int slub_min_order;
1913 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
1914 static int slub_min_objects;
1915 
1916 /*
1917  * Merge control. If this is set then no merging of slab caches will occur.
1918  * (Could be removed. This was introduced to pacify the merge skeptics.)
1919  */
1920 static int slub_nomerge;
1921 
1922 /*
1923  * Calculate the order of allocation given a slab object size.
1924  *
1925  * The order of allocation has significant impact on performance and other
1926  * system components. Generally order 0 allocations should be preferred since
1927  * order 0 does not cause fragmentation in the page allocator. Larger objects
1928  * be problematic to put into order 0 slabs because there may be too much
1929  * unused space left. We go to a higher order if more than 1/16th of the slab
1930  * would be wasted.
1931  *
1932  * In order to reach satisfactory performance we must ensure that a minimum
1933  * number of objects is in one slab. Otherwise we may generate too much
1934  * activity on the partial lists which requires taking the list_lock. This is
1935  * less of a concern for large slabs though, which are rarely used.
1936  *
1937  * slub_max_order specifies the order at which we stop considering the
1938  * number of objects in a slab as critical. If we reach slub_max_order then
1939  * we try to keep the page order as low as possible. So we accept more waste
1940  * of space in favor of a small page order.
1941  *
1942  * Higher order allocations also allow the placement of more objects in a
1943  * slab and thereby reduce object handling overhead. If the user has
1944  * requested a higher minimum order then we start with that one instead of
1945  * the smallest order which will fit the object.
1946  */
1947 static inline int slab_order(int size, int min_objects,
1948 				int max_order, int fract_leftover)
1949 {
1950 	int order;
1951 	int rem;
1952 	int min_order = slub_min_order;
1953 
1954 	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
1955 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
1956 
1957 	for (order = max(min_order,
1958 				fls(min_objects * size - 1) - PAGE_SHIFT);
1959 			order <= max_order; order++) {
1960 
1961 		unsigned long slab_size = PAGE_SIZE << order;
1962 
1963 		if (slab_size < min_objects * size)
1964 			continue;
1965 
1966 		rem = slab_size % size;
1967 
1968 		if (rem <= slab_size / fract_leftover)
1969 			break;
1970 
1971 	}
1972 
1973 	return order;
1974 }
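
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * slub_min_order == 0): slab_order(1000, 8, 3, 16) starts at order
 * fls(8 * 1000 - 1) - PAGE_SHIFT = 13 - 12 = 1. An order 1 slab is
 * 8192 bytes, fits the 8 objects and wastes 8192 % 1000 = 192 bytes,
 * which is within 8192 / 16 = 512, so order 1 is returned.
 */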
1975 
1976 static inline int calculate_order(int size)
1977 {
1978 	int order;
1979 	int min_objects;
1980 	int fraction;
1981 	int max_objects;
1982 
1983 	/*
1984 	 * Attempt to find best configuration for a slab. This
1985 	 * works by first attempting to generate a layout with
1986 	 * the best configuration and backing off gradually.
1987 	 *
1988 	 * First we reduce the acceptable waste in a slab. Then
1989 	 * we reduce the minimum objects required in a slab.
1990 	 */
1991 	min_objects = slub_min_objects;
1992 	if (!min_objects)
1993 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
1994 	max_objects = (PAGE_SIZE << slub_max_order)/size;
1995 	min_objects = min(min_objects, max_objects);
1996 
1997 	while (min_objects > 1) {
1998 		fraction = 16;
1999 		while (fraction >= 4) {
2000 			order = slab_order(size, min_objects,
2001 						slub_max_order, fraction);
2002 			if (order <= slub_max_order)
2003 				return order;
2004 			fraction /= 2;
2005 		}
2006 		min_objects--;
2007 	}
2008 
2009 	/*
2010 	 * We were unable to place multiple objects in a slab. Now
2011 	 * let's see if we can place a single object there.
2012 	 */
2013 	order = slab_order(size, 1, slub_max_order, 1);
2014 	if (order <= slub_max_order)
2015 		return order;
2016 
2017 	/*
2018 	 * Doh, this slab cannot be placed using slub_max_order.
2019 	 */
2020 	order = slab_order(size, 1, MAX_ORDER, 1);
2021 	if (order < MAX_ORDER)
2022 		return order;
2023 	return -ENOSYS;
2024 }
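
/*
 * Worked example (illustrative, assuming 4 possible cpus,
 * PAGE_SIZE == 4096 and slub_max_order == 3): for size == 1000,
 * min_objects starts at 4 * (fls(4) + 1) = 16 and max_objects is
 * 32768 / 1000 = 32, so 16 objects are requested.
 * slab_order(1000, 16, 3, 16) settles on order 2: a 16384 byte slab
 * holds 16 objects and wastes 384 bytes, below the 16384 / 16 = 1024
 * limit. Only if no order up to slub_max_order met the waste fraction
 * would fraction and then min_objects be relaxed.
 */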
2025 
2026 /*
2027  * Figure out what the alignment of the objects will be.
2028  */
2029 static unsigned long calculate_alignment(unsigned long flags,
2030 		unsigned long align, unsigned long size)
2031 {
2032 	/*
2033 	 * If the user wants hardware cache aligned objects then follow that
2034 	 * suggestion if the object is sufficiently large.
2035 	 *
2036 	 * The hardware cache alignment cannot override the specified
2037 	 * alignment though. If that is greater then use it.
2038 	 */
2039 	if (flags & SLAB_HWCACHE_ALIGN) {
2040 		unsigned long ralign = cache_line_size();
2041 		while (size <= ralign / 2)
2042 			ralign /= 2;
2043 		align = max(align, ralign);
2044 	}
2045 
2046 	if (align < ARCH_SLAB_MINALIGN)
2047 		align = ARCH_SLAB_MINALIGN;
2048 
2049 	return ALIGN(align, sizeof(void *));
2050 }
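
/*
 * Worked example (illustrative, assuming 64 byte cache lines): for a
 * 24 byte object with SLAB_HWCACHE_ALIGN, ralign is halved while the
 * object still fits into half of it (64 -> 32; 24 > 16 stops the
 * loop), so the object ends up aligned to 32 bytes rather than to a
 * full cache line.
 */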
2051 
2052 static void
2053 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
2054 {
2055 	n->nr_partial = 0;
2056 	spin_lock_init(&n->list_lock);
2057 	INIT_LIST_HEAD(&n->partial);
2058 #ifdef CONFIG_SLUB_DEBUG
2059 	atomic_long_set(&n->nr_slabs, 0);
2060 	atomic_long_set(&n->total_objects, 0);
2061 	INIT_LIST_HEAD(&n->full);
2062 #endif
2063 }
2064 
2065 static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
2066 
2067 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2068 {
2069 	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
2070 		/*
2071 		 * Boot time creation of the kmalloc array. Use static per cpu data
2072 		 * since the per cpu allocator is not available yet.
2073 		 */
2074 		s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
2075 	else
2076 		s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2077 
2078 	if (!s->cpu_slab)
2079 		return 0;
2080 
2081 	return 1;
2082 }
2083 
2084 #ifdef CONFIG_NUMA
2085 /*
2086  * No kmalloc_node yet so do it by hand. We know that this is the first
2087  * slab on the node for this slabcache. There are no concurrent accesses
2088  * possible.
2089  *
2090  * Note that this function is only used when allocating for the
2091  * kmalloc_node_cache itself. It bootstraps memory on a fresh node
2092  * that has no slab structures yet.
2093  */
2094 static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
2095 {
2096 	struct page *page;
2097 	struct kmem_cache_node *n;
2098 	unsigned long flags;
2099 
2100 	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2101 
2102 	page = new_slab(kmalloc_caches, gfpflags, node);
2103 
2104 	BUG_ON(!page);
2105 	if (page_to_nid(page) != node) {
2106 		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2107 				"node %d\n", node);
2108 		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2109 				"in order to be able to continue\n");
2110 	}
2111 
2112 	n = page->freelist;
2113 	BUG_ON(!n);
2114 	page->freelist = get_freepointer(kmalloc_caches, n);
2115 	page->inuse++;
2116 	kmalloc_caches->node[node] = n;
2117 #ifdef CONFIG_SLUB_DEBUG
2118 	init_object(kmalloc_caches, n, 1);
2119 	init_tracking(kmalloc_caches, n);
2120 #endif
2121 	init_kmem_cache_node(n, kmalloc_caches);
2122 	inc_slabs_node(kmalloc_caches, node, page->objects);
2123 
2124 	/*
2125 	 * lockdep requires consistent irq usage for each lock
2126 	 * so even though there cannot be a race this early in
2127 	 * the boot sequence, we still disable irqs.
2128 	 */
2129 	local_irq_save(flags);
2130 	add_partial(n, page, 0);
2131 	local_irq_restore(flags);
2132 }
2133 
2134 static void free_kmem_cache_nodes(struct kmem_cache *s)
2135 {
2136 	int node;
2137 
2138 	for_each_node_state(node, N_NORMAL_MEMORY) {
2139 		struct kmem_cache_node *n = s->node[node];
2140 		if (n)
2141 			kmem_cache_free(kmalloc_caches, n);
2142 		s->node[node] = NULL;
2143 	}
2144 }
2145 
2146 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2147 {
2148 	int node;
2149 
2150 	for_each_node_state(node, N_NORMAL_MEMORY) {
2151 		struct kmem_cache_node *n;
2152 
2153 		if (slab_state == DOWN) {
2154 			early_kmem_cache_node_alloc(gfpflags, node);
2155 			continue;
2156 		}
2157 		n = kmem_cache_alloc_node(kmalloc_caches,
2158 						gfpflags, node);
2159 
2160 		if (!n) {
2161 			free_kmem_cache_nodes(s);
2162 			return 0;
2163 		}
2164 
2165 		s->node[node] = n;
2166 		init_kmem_cache_node(n, s);
2167 	}
2168 	return 1;
2169 }
2170 #else
2171 static void free_kmem_cache_nodes(struct kmem_cache *s)
2172 {
2173 }
2174 
2175 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2176 {
2177 	init_kmem_cache_node(&s->local_node, s);
2178 	return 1;
2179 }
2180 #endif
2181 
2182 static void set_min_partial(struct kmem_cache *s, unsigned long min)
2183 {
2184 	if (min < MIN_PARTIAL)
2185 		min = MIN_PARTIAL;
2186 	else if (min > MAX_PARTIAL)
2187 		min = MAX_PARTIAL;
2188 	s->min_partial = min;
2189 }
2190 
2191 /*
2192  * calculate_sizes() determines the order and the distribution of data within
2193  * a slab object.
2194  */
2195 static int calculate_sizes(struct kmem_cache *s, int forced_order)
2196 {
2197 	unsigned long flags = s->flags;
2198 	unsigned long size = s->objsize;
2199 	unsigned long align = s->align;
2200 	int order;
2201 
2202 	/*
2203 	 * Round up object size to the next word boundary. We can only
2204 	 * place the free pointer at word boundaries and this determines
2205 	 * the possible location of the free pointer.
2206 	 */
2207 	size = ALIGN(size, sizeof(void *));
2208 
2209 #ifdef CONFIG_SLUB_DEBUG
2210 	/*
2211 	 * Determine if we can poison the object itself. If the user of
2212 	 * the slab may touch the object after free or before allocation
2213 	 * then we should never poison the object itself.
2214 	 */
2215 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2216 			!s->ctor)
2217 		s->flags |= __OBJECT_POISON;
2218 	else
2219 		s->flags &= ~__OBJECT_POISON;
2220 
2221 
2222 	/*
2223 	 * If we are Redzoning then check if there is some space between the
2224 	 * end of the object and the free pointer. If not then add an
2225 	 * additional word to have some bytes to store Redzone information.
2226 	 */
2227 	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2228 		size += sizeof(void *);
2229 #endif
2230 
2231 	/*
2232 	 * With that we have determined the number of bytes in actual use
2233 	 * by the object. This is the potential offset to the free pointer.
2234 	 */
2235 	s->inuse = size;
2236 
2237 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2238 		s->ctor)) {
2239 		/*
2240 		 * Relocate free pointer after the object if it is not
2241 		 * permitted to overwrite the first word of the object on
2242 		 * kmem_cache_free.
2243 		 *
2244 		 * This is the case if we do RCU, have a constructor or
2245 		 * destructor or are poisoning the objects.
2246 		 */
2247 		s->offset = size;
2248 		size += sizeof(void *);
2249 	}
2250 
2251 #ifdef CONFIG_SLUB_DEBUG
2252 	if (flags & SLAB_STORE_USER)
2253 		/*
2254 		 * Need to store information about allocs and frees after
2255 		 * the object.
2256 		 */
2257 		size += 2 * sizeof(struct track);
2258 
2259 	if (flags & SLAB_RED_ZONE)
2260 		/*
2261 		 * Add some empty padding so that we can catch
2262 		 * overwrites from earlier objects rather than let
2263 		 * tracking information or the free pointer be
2264 		 * corrupted if a user writes before the start
2265 		 * of the object.
2266 		 */
2267 		size += sizeof(void *);
2268 #endif
2269 
2270 	/*
2271 	 * Determine the alignment based on various parameters that the
2272 	 * user specified and the dynamic determination of cache line size
2273 	 * on bootup.
2274 	 */
2275 	align = calculate_alignment(flags, align, s->objsize);
2276 	s->align = align;
2277 
2278 	/*
2279 	 * SLUB stores one object immediately after another beginning from
2280 	 * offset 0. In order to align the objects we have to simply size
2281 	 * each object to conform to the alignment.
2282 	 */
2283 	size = ALIGN(size, align);
2284 	s->size = size;
2285 	if (forced_order >= 0)
2286 		order = forced_order;
2287 	else
2288 		order = calculate_order(size);
2289 
2290 	if (order < 0)
2291 		return 0;
2292 
2293 	s->allocflags = 0;
2294 	if (order)
2295 		s->allocflags |= __GFP_COMP;
2296 
2297 	if (s->flags & SLAB_CACHE_DMA)
2298 		s->allocflags |= SLUB_DMA;
2299 
2300 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2301 		s->allocflags |= __GFP_RECLAIMABLE;
2302 
2303 	/*
2304 	 * Determine the number of objects per slab
2305 	 */
2306 	s->oo = oo_make(order, size);
2307 	s->min = oo_make(get_order(size), size);
2308 	if (oo_objects(s->oo) > oo_objects(s->max))
2309 		s->max = s->oo;
2310 
2311 	return !!oo_objects(s->oo);
2312 
2313 }
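
/*
 * Resulting object layout for a debug cache (a rough sketch; the exact
 * set of fields depends on the flags and on whether the free pointer
 * had to be relocated):
 *
 *	offset 0	object data (objsize bytes, word aligned)
 *			red zone word ending at s->inuse (SLAB_RED_ZONE)
 *	s->offset	free pointer, if relocated past the object
 *			two struct track entries (SLAB_STORE_USER)
 *			red zone padding word (SLAB_RED_ZONE)
 *	s->size		start of the next object, after alignment
 */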
2314 
2315 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2316 		const char *name, size_t size,
2317 		size_t align, unsigned long flags,
2318 		void (*ctor)(void *))
2319 {
2320 	memset(s, 0, kmem_size);
2321 	s->name = name;
2322 	s->ctor = ctor;
2323 	s->objsize = size;
2324 	s->align = align;
2325 	s->flags = kmem_cache_flags(size, flags, name, ctor);
2326 
2327 	if (!calculate_sizes(s, -1))
2328 		goto error;
2329 	if (disable_higher_order_debug) {
2330 		/*
2331 		 * Disable debugging flags that store metadata if the min slab
2332 		 * order increased.
2333 		 */
2334 		if (get_order(s->size) > get_order(s->objsize)) {
2335 			s->flags &= ~DEBUG_METADATA_FLAGS;
2336 			s->offset = 0;
2337 			if (!calculate_sizes(s, -1))
2338 				goto error;
2339 		}
2340 	}
2341 
2342 	/*
2343 	 * The larger the object size is, the more pages we want on the partial
2344 	 * list to avoid pounding the page allocator excessively.
2345 	 */
2346 	set_min_partial(s, ilog2(s->size));
2347 	s->refcount = 1;
2348 #ifdef CONFIG_NUMA
2349 	s->remote_node_defrag_ratio = 1000;
2350 #endif
2351 	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2352 		goto error;
2353 
2354 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
2355 		return 1;
2356 
2357 	free_kmem_cache_nodes(s);
2358 error:
2359 	if (flags & SLAB_PANIC)
2360 		panic("Cannot create slab %s size=%lu realsize=%u "
2361 			"order=%u offset=%u flags=%lx\n",
2362 			s->name, (unsigned long)size, s->size, oo_order(s->oo),
2363 			s->offset, flags);
2364 	return 0;
2365 }
2366 
2367 /*
2368  * Check if a given pointer is valid
2369  */
2370 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2371 {
2372 	struct page *page;
2373 
2374 	if (!kern_ptr_validate(object, s->size))
2375 		return 0;
2376 
2377 	page = get_object_page(object);
2378 
2379 	if (!page || s != page->slab)
2380 		/* No slab or wrong slab */
2381 		return 0;
2382 
2383 	if (!check_valid_pointer(s, page, object))
2384 		return 0;
2385 
2386 	/*
2387 	 * We could also check if the object is on the slab's freelist.
2388 	 * But this would be too expensive and it seems that the main
2389 	 * purpose of kmem_ptr_validate() is to check if the object belongs
2390 	 * to a certain slab.
2391 	 */
2392 	return 1;
2393 }
2394 EXPORT_SYMBOL(kmem_ptr_validate);
2395 
2396 /*
2397  * Determine the size of a slab object
2398  */
2399 unsigned int kmem_cache_size(struct kmem_cache *s)
2400 {
2401 	return s->objsize;
2402 }
2403 EXPORT_SYMBOL(kmem_cache_size);
2404 
2405 const char *kmem_cache_name(struct kmem_cache *s)
2406 {
2407 	return s->name;
2408 }
2409 EXPORT_SYMBOL(kmem_cache_name);
2410 
2411 static void list_slab_objects(struct kmem_cache *s, struct page *page,
2412 							const char *text)
2413 {
2414 #ifdef CONFIG_SLUB_DEBUG
2415 	void *addr = page_address(page);
2416 	void *p;
2417 	long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long),
2418 			    GFP_ATOMIC);
2419 
2420 	if (!map)
2421 		return;
2422 	slab_err(s, page, "%s", text);
2423 	slab_lock(page);
2424 	for_each_free_object(p, s, page->freelist)
2425 		set_bit(slab_index(p, s, addr), map);
2426 
2427 	for_each_object(p, s, addr, page->objects) {
2428 
2429 		if (!test_bit(slab_index(p, s, addr), map)) {
2430 			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2431 							p, p - addr);
2432 			print_tracking(s, p);
2433 		}
2434 	}
2435 	slab_unlock(page);
2436 	kfree(map);
2437 #endif
2438 }
2439 
2440 /*
2441  * Attempt to free all partial slabs on a node.
2442  */
2443 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
2444 {
2445 	unsigned long flags;
2446 	struct page *page, *h;
2447 
2448 	spin_lock_irqsave(&n->list_lock, flags);
2449 	list_for_each_entry_safe(page, h, &n->partial, lru) {
2450 		if (!page->inuse) {
2451 			list_del(&page->lru);
2452 			discard_slab(s, page);
2453 			n->nr_partial--;
2454 		} else {
2455 			list_slab_objects(s, page,
2456 				"Objects remaining on kmem_cache_close()");
2457 		}
2458 	}
2459 	spin_unlock_irqrestore(&n->list_lock, flags);
2460 }
2461 
2462 /*
2463  * Release all resources used by a slab cache.
2464  */
2465 static inline int kmem_cache_close(struct kmem_cache *s)
2466 {
2467 	int node;
2468 
2469 	flush_all(s);
2470 	free_percpu(s->cpu_slab);
2471 	/* Attempt to free all objects */
2472 	for_each_node_state(node, N_NORMAL_MEMORY) {
2473 		struct kmem_cache_node *n = get_node(s, node);
2474 
2475 		free_partial(s, n);
2476 		if (n->nr_partial || slabs_node(s, node))
2477 			return 1;
2478 	}
2479 	free_kmem_cache_nodes(s);
2480 	return 0;
2481 }
2482 
2483 /*
2484  * Close a cache and release the kmem_cache structure
2485  * (must be used for caches created using kmem_cache_create)
2486  */
2487 void kmem_cache_destroy(struct kmem_cache *s)
2488 {
2489 	down_write(&slub_lock);
2490 	s->refcount--;
2491 	if (!s->refcount) {
2492 		list_del(&s->list);
2493 		up_write(&slub_lock);
2494 		if (kmem_cache_close(s)) {
2495 			printk(KERN_ERR "SLUB %s: %s called for cache that "
2496 				"still has objects.\n", s->name, __func__);
2497 			dump_stack();
2498 		}
2499 		if (s->flags & SLAB_DESTROY_BY_RCU)
2500 			rcu_barrier();
2501 		sysfs_slab_remove(s);
2502 	} else
2503 		up_write(&slub_lock);
2504 }
2505 EXPORT_SYMBOL(kmem_cache_destroy);
2506 
2507 /********************************************************************
2508  *		Kmalloc subsystem
2509  *******************************************************************/
2510 
2511 struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
2512 EXPORT_SYMBOL(kmalloc_caches);
2513 
2514 static int __init setup_slub_min_order(char *str)
2515 {
2516 	get_option(&str, &slub_min_order);
2517 
2518 	return 1;
2519 }
2520 
2521 __setup("slub_min_order=", setup_slub_min_order);
2522 
2523 static int __init setup_slub_max_order(char *str)
2524 {
2525 	get_option(&str, &slub_max_order);
2526 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
2527 
2528 	return 1;
2529 }
2530 
2531 __setup("slub_max_order=", setup_slub_max_order);
2532 
2533 static int __init setup_slub_min_objects(char *str)
2534 {
2535 	get_option(&str, &slub_min_objects);
2536 
2537 	return 1;
2538 }
2539 
2540 __setup("slub_min_objects=", setup_slub_min_objects);
2541 
2542 static int __init setup_slub_nomerge(char *str)
2543 {
2544 	slub_nomerge = 1;
2545 	return 1;
2546 }
2547 
2548 __setup("slub_nomerge", setup_slub_nomerge);
2549 
2550 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2551 		const char *name, int size, gfp_t gfp_flags)
2552 {
2553 	unsigned int flags = 0;
2554 
2555 	if (gfp_flags & SLUB_DMA)
2556 		flags = SLAB_CACHE_DMA;
2557 
2558 	/*
2559 	 * This function is called with IRQs disabled during early-boot on
2560 	 * single CPU so there's no need to take slub_lock here.
2561 	 */
2562 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2563 								flags, NULL))
2564 		goto panic;
2565 
2566 	list_add(&s->list, &slab_caches);
2567 
2568 	if (sysfs_slab_add(s))
2569 		goto panic;
2570 	return s;
2571 
2572 panic:
2573 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2574 }
2575 
2576 #ifdef CONFIG_ZONE_DMA
2577 static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
2578 
2579 static void sysfs_add_func(struct work_struct *w)
2580 {
2581 	struct kmem_cache *s;
2582 
2583 	down_write(&slub_lock);
2584 	list_for_each_entry(s, &slab_caches, list) {
2585 		if (s->flags & __SYSFS_ADD_DEFERRED) {
2586 			s->flags &= ~__SYSFS_ADD_DEFERRED;
2587 			sysfs_slab_add(s);
2588 		}
2589 	}
2590 	up_write(&slub_lock);
2591 }
2592 
2593 static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2594 
2595 static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2596 {
2597 	struct kmem_cache *s;
2598 	char *text;
2599 	size_t realsize;
2600 	unsigned long slabflags;
2601 	int i;
2602 
2603 	s = kmalloc_caches_dma[index];
2604 	if (s)
2605 		return s;
2606 
2607 	/* Dynamically create dma cache */
2608 	if (flags & __GFP_WAIT)
2609 		down_write(&slub_lock);
2610 	else {
2611 		if (!down_write_trylock(&slub_lock))
2612 			goto out;
2613 	}
2614 
2615 	if (kmalloc_caches_dma[index])
2616 		goto unlock_out;
2617 
2618 	realsize = kmalloc_caches[index].objsize;
2619 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2620 			 (unsigned int)realsize);
2621 
2622 	s = NULL;
2623 	for (i = 0; i < KMALLOC_CACHES; i++)
2624 		if (!kmalloc_caches[i].size)
2625 			break;
2626 
2627 	BUG_ON(i >= KMALLOC_CACHES);
2628 	s = kmalloc_caches + i;
2629 
2630 	/*
2631 	 * Must defer sysfs creation to a workqueue because we don't know
2632 	 * what context we are called from. Before sysfs comes up, we don't
2633 	 * need to do anything because our sysfs initcall will start by
2634 	 * adding all existing slabs to sysfs.
2635 	 */
2636 	slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
2637 	if (slab_state >= SYSFS)
2638 		slabflags |= __SYSFS_ADD_DEFERRED;
2639 
2640 	if (!text || !kmem_cache_open(s, flags, text,
2641 			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
2642 		s->size = 0;
2643 		kfree(text);
2644 		goto unlock_out;
2645 	}
2646 
2647 	list_add(&s->list, &slab_caches);
2648 	kmalloc_caches_dma[index] = s;
2649 
2650 	if (slab_state >= SYSFS)
2651 		schedule_work(&sysfs_add_work);
2652 
2653 unlock_out:
2654 	up_write(&slub_lock);
2655 out:
2656 	return kmalloc_caches_dma[index];
2657 }
2658 #endif
2659 
2660 /*
2661  * Conversion table for small slab sizes / 8 to the index in the
2662  * kmalloc array. This is necessary for slabs < 192 since we have non power
2663  * of two cache sizes there. The size of larger slabs can be determined using
2664  * fls.
2665  */
2666 static s8 size_index[24] = {
2667 	3,	/* 8 */
2668 	4,	/* 16 */
2669 	5,	/* 24 */
2670 	5,	/* 32 */
2671 	6,	/* 40 */
2672 	6,	/* 48 */
2673 	6,	/* 56 */
2674 	6,	/* 64 */
2675 	1,	/* 72 */
2676 	1,	/* 80 */
2677 	1,	/* 88 */
2678 	1,	/* 96 */
2679 	7,	/* 104 */
2680 	7,	/* 112 */
2681 	7,	/* 120 */
2682 	7,	/* 128 */
2683 	2,	/* 136 */
2684 	2,	/* 144 */
2685 	2,	/* 152 */
2686 	2,	/* 160 */
2687 	2,	/* 168 */
2688 	2,	/* 176 */
2689 	2,	/* 184 */
2690 	2	/* 192 */
2691 };
2692 
2693 static inline int size_index_elem(size_t bytes)
2694 {
2695 	return (bytes - 1) / 8;
2696 }
2697 
2698 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2699 {
2700 	int index;
2701 
2702 	if (size <= 192) {
2703 		if (!size)
2704 			return ZERO_SIZE_PTR;
2705 
2706 		index = size_index[size_index_elem(size)];
2707 	} else
2708 		index = fls(size - 1);
2709 
2710 #ifdef CONFIG_ZONE_DMA
2711 	if (unlikely((flags & SLUB_DMA)))
2712 		return dma_kmalloc_cache(index, flags);
2713 
2714 #endif
2715 	return &kmalloc_caches[index];
2716 }
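
/*
 * Illustrative lookups (assuming an unpatched size_index table, i.e.
 * KMALLOC_MIN_SIZE <= 8):
 *
 *	kmalloc(100): size_index[(100 - 1) / 8] = size_index[12] = 7,
 *		so the 128 byte cache is used.
 *	kmalloc(300): fls(299) = 9, so the 512 byte cache is used.
 */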
2717 
2718 void *__kmalloc(size_t size, gfp_t flags)
2719 {
2720 	struct kmem_cache *s;
2721 	void *ret;
2722 
2723 	if (unlikely(size > SLUB_MAX_SIZE))
2724 		return kmalloc_large(size, flags);
2725 
2726 	s = get_slab(size, flags);
2727 
2728 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2729 		return s;
2730 
2731 	ret = slab_alloc(s, flags, -1, _RET_IP_);
2732 
2733 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
2734 
2735 	return ret;
2736 }
2737 EXPORT_SYMBOL(__kmalloc);
2738 
2739 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2740 {
2741 	struct page *page;
2742 	void *ptr = NULL;
2743 
2744 	flags |= __GFP_COMP | __GFP_NOTRACK;
2745 	page = alloc_pages_node(node, flags, get_order(size));
2746 	if (page)
2747 		ptr = page_address(page);
2748 
2749 	kmemleak_alloc(ptr, size, 1, flags);
2750 	return ptr;
2751 }
2752 
2753 #ifdef CONFIG_NUMA
2754 void *__kmalloc_node(size_t size, gfp_t flags, int node)
2755 {
2756 	struct kmem_cache *s;
2757 	void *ret;
2758 
2759 	if (unlikely(size > SLUB_MAX_SIZE)) {
2760 		ret = kmalloc_large_node(size, flags, node);
2761 
2762 		trace_kmalloc_node(_RET_IP_, ret,
2763 				   size, PAGE_SIZE << get_order(size),
2764 				   flags, node);
2765 
2766 		return ret;
2767 	}
2768 
2769 	s = get_slab(size, flags);
2770 
2771 	if (unlikely(ZERO_OR_NULL_PTR(s)))
2772 		return s;
2773 
2774 	ret = slab_alloc(s, flags, node, _RET_IP_);
2775 
2776 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
2777 
2778 	return ret;
2779 }
2780 EXPORT_SYMBOL(__kmalloc_node);
2781 #endif
2782 
2783 size_t ksize(const void *object)
2784 {
2785 	struct page *page;
2786 	struct kmem_cache *s;
2787 
2788 	if (unlikely(object == ZERO_SIZE_PTR))
2789 		return 0;
2790 
2791 	page = virt_to_head_page(object);
2792 
2793 	if (unlikely(!PageSlab(page))) {
2794 		WARN_ON(!PageCompound(page));
2795 		return PAGE_SIZE << compound_order(page);
2796 	}
2797 	s = page->slab;
2798 
2799 #ifdef CONFIG_SLUB_DEBUG
2800 	/*
2801 	 * Debugging requires use of the padding between object
2802 	 * and whatever may come after it.
2803 	 */
2804 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2805 		return s->objsize;
2806 
2807 #endif
2808 	/*
2809 	 * If we have the need to store the freelist pointer
2810 	 * back there or track user information then we can
2811 	 * only use the space before that information.
2812 	 */
2813 	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2814 		return s->inuse;
2815 	/*
2816 	 * Else we can use all the padding etc for the allocation
2817 	 */
2818 	return s->size;
2819 }
2820 EXPORT_SYMBOL(ksize);
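
/*
 * Example (illustrative, assuming no debug flags and no RCU): an
 * allocation of 100 bytes is served from the 128 byte kmalloc cache,
 * so ksize() returns s->size == 128 and the caller may legitimately
 * use all 128 bytes:
 *
 *	p = kmalloc(100, GFP_KERNEL);
 *	n = ksize(p);	(n == 128 here)
 */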
2821 
2822 void kfree(const void *x)
2823 {
2824 	struct page *page;
2825 	void *object = (void *)x;
2826 
2827 	trace_kfree(_RET_IP_, x);
2828 
2829 	if (unlikely(ZERO_OR_NULL_PTR(x)))
2830 		return;
2831 
2832 	page = virt_to_head_page(x);
2833 	if (unlikely(!PageSlab(page))) {
2834 		BUG_ON(!PageCompound(page));
2835 		kmemleak_free(x);
2836 		put_page(page);
2837 		return;
2838 	}
2839 	slab_free(page->slab, page, object, _RET_IP_);
2840 }
2841 EXPORT_SYMBOL(kfree);
2842 
2843 /*
2844  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2845  * the remaining slabs by the number of items in use. The slabs with the
2846  * most items in use come first. New allocations will then fill those up
2847  * and thus they can be removed from the partial lists.
2848  *
2849  * The slabs with the least items in use are placed last. This results
2850  * in them being allocated from last, increasing the chance that their
2851  * remaining objects are freed so that the slabs can be discarded.
2852  */
2853 int kmem_cache_shrink(struct kmem_cache *s)
2854 {
2855 	int node;
2856 	int i;
2857 	struct kmem_cache_node *n;
2858 	struct page *page;
2859 	struct page *t;
2860 	int objects = oo_objects(s->max);
2861 	struct list_head *slabs_by_inuse =
2862 		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2863 	unsigned long flags;
2864 
2865 	if (!slabs_by_inuse)
2866 		return -ENOMEM;
2867 
2868 	flush_all(s);
2869 	for_each_node_state(node, N_NORMAL_MEMORY) {
2870 		n = get_node(s, node);
2871 
2872 		if (!n->nr_partial)
2873 			continue;
2874 
2875 		for (i = 0; i < objects; i++)
2876 			INIT_LIST_HEAD(slabs_by_inuse + i);
2877 
2878 		spin_lock_irqsave(&n->list_lock, flags);
2879 
2880 		/*
2881 		 * Build lists indexed by the items in use in each slab.
2882 		 *
2883 		 * Note that concurrent frees may occur while we hold the
2884 		 * list_lock. page->inuse here is the upper limit.
2885 		 */
2886 		list_for_each_entry_safe(page, t, &n->partial, lru) {
2887 			if (!page->inuse && slab_trylock(page)) {
2888 				/*
2889 				 * Must hold slab lock here because slab_free
2890 				 * may have freed the last object and be
2891 				 * waiting to release the slab.
2892 				 */
2893 				list_del(&page->lru);
2894 				n->nr_partial--;
2895 				slab_unlock(page);
2896 				discard_slab(s, page);
2897 			} else {
2898 				list_move(&page->lru,
2899 				slabs_by_inuse + page->inuse);
2900 			}
2901 		}
2902 
2903 		/*
2904 		 * Rebuild the partial list with the slabs filled up most
2905 		 * first and the least used slabs at the end.
2906 		 */
2907 		for (i = objects - 1; i >= 0; i--)
2908 			list_splice(slabs_by_inuse + i, n->partial.prev);
2909 
2910 		spin_unlock_irqrestore(&n->list_lock, flags);
2911 	}
2912 
2913 	kfree(slabs_by_inuse);
2914 	return 0;
2915 }
2916 EXPORT_SYMBOL(kmem_cache_shrink);
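
/*
 * Example (illustrative): a partial list holding slabs with 3, 0, 1
 * and 3 objects in use is rebuilt by kmem_cache_shrink() as 3, 3, 1;
 * the completely free slab is discarded and the nearly empty one ends
 * up at the tail where allocations touch it last.
 */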
2917 
2918 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2919 static int slab_mem_going_offline_callback(void *arg)
2920 {
2921 	struct kmem_cache *s;
2922 
2923 	down_read(&slub_lock);
2924 	list_for_each_entry(s, &slab_caches, list)
2925 		kmem_cache_shrink(s);
2926 	up_read(&slub_lock);
2927 
2928 	return 0;
2929 }
2930 
2931 static void slab_mem_offline_callback(void *arg)
2932 {
2933 	struct kmem_cache_node *n;
2934 	struct kmem_cache *s;
2935 	struct memory_notify *marg = arg;
2936 	int offline_node;
2937 
2938 	offline_node = marg->status_change_nid;
2939 
2940 	/*
2941 	 * If the node still has available memory then we still need the
2942 	 * kmem_cache_node structure for it, so leave it in place.
2943 	 */
2944 	if (offline_node < 0)
2945 		return;
2946 
2947 	down_read(&slub_lock);
2948 	list_for_each_entry(s, &slab_caches, list) {
2949 		n = get_node(s, offline_node);
2950 		if (n) {
2951 			/*
2952 			 * If n->nr_slabs > 0, slabs still exist on the node
2953 			 * that is going down. We were unable to free them,
2954 			 * and offline_pages() should not have called this
2955 			 * callback. So we must fail.
2956 			 */
2957 			BUG_ON(slabs_node(s, offline_node));
2958 
2959 			s->node[offline_node] = NULL;
2960 			kmem_cache_free(kmalloc_caches, n);
2961 		}
2962 	}
2963 	up_read(&slub_lock);
2964 }
2965 
2966 static int slab_mem_going_online_callback(void *arg)
2967 {
2968 	struct kmem_cache_node *n;
2969 	struct kmem_cache *s;
2970 	struct memory_notify *marg = arg;
2971 	int nid = marg->status_change_nid;
2972 	int ret = 0;
2973 
2974 	/*
2975 	 * If the node's memory is already available, then kmem_cache_node is
2976 	 * already created. Nothing to do.
2977 	 */
2978 	if (nid < 0)
2979 		return 0;
2980 
2981 	/*
2982 	 * We are bringing a node online. No memory is available yet. We must
2983 	 * allocate a kmem_cache_node structure in order to bring the node
2984 	 * online.
2985 	 */
2986 	down_read(&slub_lock);
2987 	list_for_each_entry(s, &slab_caches, list) {
2988 		/*
2989 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
2990 		 *      since memory is not yet available from the node that
2991 		 *      is brought up.
2992 		 */
2993 		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2994 		if (!n) {
2995 			ret = -ENOMEM;
2996 			goto out;
2997 		}
2998 		init_kmem_cache_node(n, s);
2999 		s->node[nid] = n;
3000 	}
3001 out:
3002 	up_read(&slub_lock);
3003 	return ret;
3004 }
3005 
3006 static int slab_memory_callback(struct notifier_block *self,
3007 				unsigned long action, void *arg)
3008 {
3009 	int ret = 0;
3010 
3011 	switch (action) {
3012 	case MEM_GOING_ONLINE:
3013 		ret = slab_mem_going_online_callback(arg);
3014 		break;
3015 	case MEM_GOING_OFFLINE:
3016 		ret = slab_mem_going_offline_callback(arg);
3017 		break;
3018 	case MEM_OFFLINE:
3019 	case MEM_CANCEL_ONLINE:
3020 		slab_mem_offline_callback(arg);
3021 		break;
3022 	case MEM_ONLINE:
3023 	case MEM_CANCEL_OFFLINE:
3024 		break;
3025 	}
3026 	if (ret)
3027 		ret = notifier_from_errno(ret);
3028 	else
3029 		ret = NOTIFY_OK;
3030 	return ret;
3031 }
3032 
3033 #endif /* CONFIG_MEMORY_HOTPLUG */
3034 
3035 /********************************************************************
3036  *			Basic setup of slabs
3037  *******************************************************************/
3038 
3039 void __init kmem_cache_init(void)
3040 {
3041 	int i;
3042 	int caches = 0;
3043 
3044 #ifdef CONFIG_NUMA
3045 	/*
3046 	 * Must first have the slab cache available for the allocations of the
3047 	 * struct kmem_cache_node's. There is special bootstrap code in
3048 	 * kmem_cache_open for slab_state == DOWN.
3049 	 */
3050 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
3051 		sizeof(struct kmem_cache_node), GFP_NOWAIT);
3052 	kmalloc_caches[0].refcount = -1;
3053 	caches++;
3054 
3055 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3056 #endif
3057 
3058 	/* Able to allocate the per node structures */
3059 	slab_state = PARTIAL;
3060 
3061 	/* Caches that are not of the two-to-the-power-of size */
3062 	if (KMALLOC_MIN_SIZE <= 32) {
3063 		create_kmalloc_cache(&kmalloc_caches[1],
3064 				"kmalloc-96", 96, GFP_NOWAIT);
3065 		caches++;
3066 	}
3067 	if (KMALLOC_MIN_SIZE <= 64) {
3068 		create_kmalloc_cache(&kmalloc_caches[2],
3069 				"kmalloc-192", 192, GFP_NOWAIT);
3070 		caches++;
3071 	}
3072 
3073 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3074 		create_kmalloc_cache(&kmalloc_caches[i],
3075 			"kmalloc", 1 << i, GFP_NOWAIT);
3076 		caches++;
3077 	}
3078 
3079 
3080 	/*
3081 	 * Patch up the size_index table if we have strange large alignment
3082 	 * requirements for the kmalloc array. This is only the case for
3083 	 * MIPS it seems. The standard arches will not generate any code here.
3084 	 *
3085 	 * Largest permitted alignment is 256 bytes due to the way we
3086 	 * handle the index determination for the smaller caches.
3087 	 *
3088 	 * Make sure that nothing crazy happens if someone starts tinkering
3089 	 * around with ARCH_KMALLOC_MINALIGN
3090 	 */
3091 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3092 		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3093 
3094 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3095 		int elem = size_index_elem(i);
3096 		if (elem >= ARRAY_SIZE(size_index))
3097 			break;
3098 		size_index[elem] = KMALLOC_SHIFT_LOW;
3099 	}
3100 
3101 	if (KMALLOC_MIN_SIZE == 64) {
3102 		/*
3103 		 * The 96 byte sized cache is not used if the alignment
3104 		 * is 64 bytes.
3105 		 */
3106 		for (i = 64 + 8; i <= 96; i += 8)
3107 			size_index[size_index_elem(i)] = 7;
3108 	} else if (KMALLOC_MIN_SIZE == 128) {
3109 		/*
3110 		 * The 192 byte sized cache is not used if the alignment
3111 		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
3112 		 * instead.
3113 		 */
3114 		for (i = 128 + 8; i <= 192; i += 8)
3115 			size_index[size_index_elem(i)] = 8;
3116 	}
3117 
3118 	slab_state = UP;
3119 
3120 	/* Provide the correct kmalloc names now that the caches are up */
3121 	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
3122 		kmalloc_caches[i].name =
3123 			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3124 
3125 #ifdef CONFIG_SMP
3126 	register_cpu_notifier(&slab_notifier);
3127 #endif
3128 #ifdef CONFIG_NUMA
3129 	kmem_size = offsetof(struct kmem_cache, node) +
3130 				nr_node_ids * sizeof(struct kmem_cache_node *);
3131 #else
3132 	kmem_size = sizeof(struct kmem_cache);
3133 #endif
3134 
3135 	printk(KERN_INFO
3136 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3137 		" CPUs=%d, Nodes=%d\n",
3138 		caches, cache_line_size(),
3139 		slub_min_order, slub_max_order, slub_min_objects,
3140 		nr_cpu_ids, nr_node_ids);
3141 }
3142 
3143 void __init kmem_cache_init_late(void)
3144 {
3145 }
3146 
3147 /*
3148  * Find a mergeable slab cache
3149  */
3150 static int slab_unmergeable(struct kmem_cache *s)
3151 {
3152 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3153 		return 1;
3154 
3155 	if (s->ctor)
3156 		return 1;
3157 
3158 	/*
3159 	 * We may have set a slab to be unmergeable during bootstrap.
3160 	 */
3161 	if (s->refcount < 0)
3162 		return 1;
3163 
3164 	return 0;
3165 }
3166 
3167 static struct kmem_cache *find_mergeable(size_t size,
3168 		size_t align, unsigned long flags, const char *name,
3169 		void (*ctor)(void *))
3170 {
3171 	struct kmem_cache *s;
3172 
3173 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3174 		return NULL;
3175 
3176 	if (ctor)
3177 		return NULL;
3178 
3179 	size = ALIGN(size, sizeof(void *));
3180 	align = calculate_alignment(flags, align, size);
3181 	size = ALIGN(size, align);
3182 	flags = kmem_cache_flags(size, flags, name, NULL);
3183 
3184 	list_for_each_entry(s, &slab_caches, list) {
3185 		if (slab_unmergeable(s))
3186 			continue;
3187 
3188 		if (size > s->size)
3189 			continue;
3190 
3191 		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3192 			continue;
3193 		/*
3194 		 * Check if alignment is compatible.
3195 		 * Courtesy of Adrian Drzewiecki
3196 		 */
3197 		if ((s->size & ~(align - 1)) != s->size)
3198 			continue;
3199 
3200 		if (s->size - size >= sizeof(void *))
3201 			continue;
3202 
3203 		return s;
3204 	}
3205 	return NULL;
3206 }
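
/*
 * Example (illustrative, 64 bit kernel): a 60 byte cache request with
 * default flags, no constructor and word alignment is rounded up to
 * size 64 and merges into an existing 64 byte cache: the size
 * difference is below sizeof(void *) and the alignment divides the
 * existing size. A 56 byte request does not merge with it, since
 * 64 - 56 would leave a full word unused.
 */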
3207 
3208 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3209 		size_t align, unsigned long flags, void (*ctor)(void *))
3210 {
3211 	struct kmem_cache *s;
3212 
3213 	if (WARN_ON(!name))
3214 		return NULL;
3215 
3216 	down_write(&slub_lock);
3217 	s = find_mergeable(size, align, flags, name, ctor);
3218 	if (s) {
3219 		s->refcount++;
3220 		/*
3221 		 * Adjust the object sizes so that we clear
3222 		 * the complete object on kzalloc.
3223 		 */
3224 		s->objsize = max(s->objsize, (int)size);
3225 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3226 		up_write(&slub_lock);
3227 
3228 		if (sysfs_slab_alias(s, name)) {
3229 			down_write(&slub_lock);
3230 			s->refcount--;
3231 			up_write(&slub_lock);
3232 			goto err;
3233 		}
3234 		return s;
3235 	}
3236 
3237 	s = kmalloc(kmem_size, GFP_KERNEL);
3238 	if (s) {
3239 		if (kmem_cache_open(s, GFP_KERNEL, name,
3240 				size, align, flags, ctor)) {
3241 			list_add(&s->list, &slab_caches);
3242 			up_write(&slub_lock);
3243 			if (sysfs_slab_add(s)) {
3244 				down_write(&slub_lock);
3245 				list_del(&s->list);
3246 				up_write(&slub_lock);
3247 				kfree(s);
3248 				goto err;
3249 			}
3250 			return s;
3251 		}
3252 		kfree(s);
3253 	}
3254 	up_write(&slub_lock);
3255 
3256 err:
3257 	if (flags & SLAB_PANIC)
3258 		panic("Cannot create slabcache %s\n", name);
3259 	else
3260 		s = NULL;
3261 	return s;
3262 }
3263 EXPORT_SYMBOL(kmem_cache_create);
3264 
3265 #ifdef CONFIG_SMP
3266 /*
3267  * Use the cpu notifier to ensure that the cpu slabs are flushed when
3268  * necessary.
3269  */
3270 static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3271 		unsigned long action, void *hcpu)
3272 {
3273 	long cpu = (long)hcpu;
3274 	struct kmem_cache *s;
3275 	unsigned long flags;
3276 
3277 	switch (action) {
3278 	case CPU_UP_CANCELED:
3279 	case CPU_UP_CANCELED_FROZEN:
3280 	case CPU_DEAD:
3281 	case CPU_DEAD_FROZEN:
3282 		down_read(&slub_lock);
3283 		list_for_each_entry(s, &slab_caches, list) {
3284 			local_irq_save(flags);
3285 			__flush_cpu_slab(s, cpu);
3286 			local_irq_restore(flags);
3287 		}
3288 		up_read(&slub_lock);
3289 		break;
3290 	default:
3291 		break;
3292 	}
3293 	return NOTIFY_OK;
3294 }
3295 
3296 static struct notifier_block __cpuinitdata slab_notifier = {
3297 	.notifier_call = slab_cpuup_callback
3298 };
3299 
3300 #endif
3301 
3302 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3303 {
3304 	struct kmem_cache *s;
3305 	void *ret;
3306 
3307 	if (unlikely(size > SLUB_MAX_SIZE))
3308 		return kmalloc_large(size, gfpflags);
3309 
3310 	s = get_slab(size, gfpflags);
3311 
3312 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3313 		return s;
3314 
3315 	ret = slab_alloc(s, gfpflags, -1, caller);
3316 
3317 	/* Honor the call site pointer we received. */
3318 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
3319 
3320 	return ret;
3321 }
3322 
3323 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3324 					int node, unsigned long caller)
3325 {
3326 	struct kmem_cache *s;
3327 	void *ret;
3328 
3329 	if (unlikely(size > SLUB_MAX_SIZE)) {
3330 		ret = kmalloc_large_node(size, gfpflags, node);
3331 
3332 		trace_kmalloc_node(caller, ret,
3333 				   size, PAGE_SIZE << get_order(size),
3334 				   gfpflags, node);
3335 
3336 		return ret;
3337 	}
3338 
3339 	s = get_slab(size, gfpflags);
3340 
3341 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3342 		return s;
3343 
3344 	ret = slab_alloc(s, gfpflags, node, caller);
3345 
3346 	/* Honor the call site pointer we received. */
3347 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
3348 
3349 	return ret;
3350 }
3351 
3352 #ifdef CONFIG_SLUB_DEBUG
3353 static int count_inuse(struct page *page)
3354 {
3355 	return page->inuse;
3356 }
3357 
3358 static int count_total(struct page *page)
3359 {
3360 	return page->objects;
3361 }
3362 
3363 static int validate_slab(struct kmem_cache *s, struct page *page,
3364 						unsigned long *map)
3365 {
3366 	void *p;
3367 	void *addr = page_address(page);
3368 
3369 	if (!check_slab(s, page) ||
3370 			!on_freelist(s, page, NULL))
3371 		return 0;
3372 
3373 	/* Now we know that a valid freelist exists */
3374 	bitmap_zero(map, page->objects);
3375 
3376 	for_each_free_object(p, s, page->freelist) {
3377 		set_bit(slab_index(p, s, addr), map);
3378 		if (!check_object(s, page, p, 0))
3379 			return 0;
3380 	}
3381 
3382 	for_each_object(p, s, addr, page->objects)
3383 		if (!test_bit(slab_index(p, s, addr), map))
3384 			if (!check_object(s, page, p, 1))
3385 				return 0;
3386 	return 1;
3387 }
3388 
3389 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3390 						unsigned long *map)
3391 {
3392 	if (slab_trylock(page)) {
3393 		validate_slab(s, page, map);
3394 		slab_unlock(page);
3395 	} else
3396 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3397 			s->name, page);
3398 
3399 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
3400 		if (!PageSlubDebug(page))
3401 			printk(KERN_ERR "SLUB %s: SlubDebug not set "
3402 				"on slab 0x%p\n", s->name, page);
3403 	} else {
3404 		if (PageSlubDebug(page))
3405 			printk(KERN_ERR "SLUB %s: SlubDebug set on "
3406 				"slab 0x%p\n", s->name, page);
3407 	}
3408 }
3409 
3410 static int validate_slab_node(struct kmem_cache *s,
3411 		struct kmem_cache_node *n, unsigned long *map)
3412 {
3413 	unsigned long count = 0;
3414 	struct page *page;
3415 	unsigned long flags;
3416 
3417 	spin_lock_irqsave(&n->list_lock, flags);
3418 
3419 	list_for_each_entry(page, &n->partial, lru) {
3420 		validate_slab_slab(s, page, map);
3421 		count++;
3422 	}
3423 	if (count != n->nr_partial)
3424 		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3425 			"counter=%ld\n", s->name, count, n->nr_partial);
3426 
3427 	if (!(s->flags & SLAB_STORE_USER))
3428 		goto out;
3429 
3430 	list_for_each_entry(page, &n->full, lru) {
3431 		validate_slab_slab(s, page, map);
3432 		count++;
3433 	}
3434 	if (count != atomic_long_read(&n->nr_slabs))
3435 		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3436 			"counter=%ld\n", s->name, count,
3437 			atomic_long_read(&n->nr_slabs));
3438 
3439 out:
3440 	spin_unlock_irqrestore(&n->list_lock, flags);
3441 	return count;
3442 }
3443 
3444 static long validate_slab_cache(struct kmem_cache *s)
3445 {
3446 	int node;
3447 	unsigned long count = 0;
3448 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3449 				sizeof(unsigned long), GFP_KERNEL);
3450 
3451 	if (!map)
3452 		return -ENOMEM;
3453 
3454 	flush_all(s);
3455 	for_each_node_state(node, N_NORMAL_MEMORY) {
3456 		struct kmem_cache_node *n = get_node(s, node);
3457 
3458 		count += validate_slab_node(s, n, map);
3459 	}
3460 	kfree(map);
3461 	return count;
3462 }
3463 
3464 #ifdef SLUB_RESILIENCY_TEST
3465 static void resiliency_test(void)
3466 {
3467 	u8 *p;
3468 
3469 	printk(KERN_ERR "SLUB resiliency testing\n");
3470 	printk(KERN_ERR "-----------------------\n");
3471 	printk(KERN_ERR "A. Corruption after allocation\n");
3472 
3473 	p = kzalloc(16, GFP_KERNEL);
3474 	p[16] = 0x12;
3475 	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3476 			" 0x12->0x%p\n\n", p + 16);
3477 
3478 	validate_slab_cache(kmalloc_caches + 4);
3479 
3480 	/* Hmmm... The next two are dangerous */
3481 	p = kzalloc(32, GFP_KERNEL);
3482 	p[32 + sizeof(void *)] = 0x34;
3483 	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3484 			" 0x34 -> 0x%p\n", p);
3485 	printk(KERN_ERR
3486 		"If allocated object is overwritten then not detectable\n\n");
3487 
3488 	validate_slab_cache(kmalloc_caches + 5);
3489 	p = kzalloc(64, GFP_KERNEL);
3490 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3491 	*p = 0x56;
3492 	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3493 									p);
3494 	printk(KERN_ERR
3495 		"If allocated object is overwritten then not detectable\n\n");
3496 	validate_slab_cache(kmalloc_caches + 6);
3497 
3498 	printk(KERN_ERR "\nB. Corruption after free\n");
3499 	p = kzalloc(128, GFP_KERNEL);
3500 	kfree(p);
3501 	*p = 0x78;
3502 	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3503 	validate_slab_cache(kmalloc_caches + 7);
3504 
3505 	p = kzalloc(256, GFP_KERNEL);
3506 	kfree(p);
3507 	p[50] = 0x9a;
3508 	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3509 			p);
3510 	validate_slab_cache(kmalloc_caches + 8);
3511 
3512 	p = kzalloc(512, GFP_KERNEL);
3513 	kfree(p);
3514 	p[512] = 0xab;
3515 	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3516 	validate_slab_cache(kmalloc_caches + 9);
3517 }
3518 #else
3519 static void resiliency_test(void) {}
3520 #endif
3521 
3522 /*
3523  * Generate lists of code addresses where slabcache objects are allocated
3524  * and freed.
3525  */
3526 
3527 struct location {
3528 	unsigned long count;
3529 	unsigned long addr;
3530 	long long sum_time;
3531 	long min_time;
3532 	long max_time;
3533 	long min_pid;
3534 	long max_pid;
3535 	DECLARE_BITMAP(cpus, NR_CPUS);
3536 	nodemask_t nodes;
3537 };
3538 
3539 struct loc_track {
3540 	unsigned long max;
3541 	unsigned long count;
3542 	struct location *loc;
3543 };
3544 
3545 static void free_loc_track(struct loc_track *t)
3546 {
3547 	if (t->max)
3548 		free_pages((unsigned long)t->loc,
3549 			get_order(sizeof(struct location) * t->max));
3550 }
3551 
3552 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3553 {
3554 	struct location *l;
3555 	int order;
3556 
3557 	order = get_order(sizeof(struct location) * max);
3558 
3559 	l = (void *)__get_free_pages(flags, order);
3560 	if (!l)
3561 		return 0;
3562 
3563 	if (t->count) {
3564 		memcpy(l, t->loc, sizeof(struct location) * t->count);
3565 		free_loc_track(t);
3566 	}
3567 	t->max = max;
3568 	t->loc = l;
3569 	return 1;
3570 }
3571 
3572 static int add_location(struct loc_track *t, struct kmem_cache *s,
3573 				const struct track *track)
3574 {
3575 	long start, end, pos;
3576 	struct location *l;
3577 	unsigned long caddr;
3578 	unsigned long age = jiffies - track->when;
3579 
3580 	start = -1;
3581 	end = t->count;
3582 
3583 	for ( ; ; ) {
3584 		pos = start + (end - start + 1) / 2;
3585 
3586 		/*
3587 		 * There is nothing at "end". If we end up there
3588 		 * we need to insert something before end.
3589 		 */
3590 		if (pos == end)
3591 			break;
3592 
3593 		caddr = t->loc[pos].addr;
3594 		if (track->addr == caddr) {
3595 
3596 			l = &t->loc[pos];
3597 			l->count++;
3598 			if (track->when) {
3599 				l->sum_time += age;
3600 				if (age < l->min_time)
3601 					l->min_time = age;
3602 				if (age > l->max_time)
3603 					l->max_time = age;
3604 
3605 				if (track->pid < l->min_pid)
3606 					l->min_pid = track->pid;
3607 				if (track->pid > l->max_pid)
3608 					l->max_pid = track->pid;
3609 
3610 				cpumask_set_cpu(track->cpu,
3611 						to_cpumask(l->cpus));
3612 			}
3613 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3614 			return 1;
3615 		}
3616 
3617 		if (track->addr < caddr)
3618 			end = pos;
3619 		else
3620 			start = pos;
3621 	}
3622 
3623 	/*
3624 	 * Not found. Insert new tracking element.
3625 	 */
3626 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3627 		return 0;
3628 
3629 	l = t->loc + pos;
3630 	if (pos < t->count)
3631 		memmove(l + 1, l,
3632 			(t->count - pos) * sizeof(struct location));
3633 	t->count++;
3634 	l->count = 1;
3635 	l->addr = track->addr;
3636 	l->sum_time = age;
3637 	l->min_time = age;
3638 	l->max_time = age;
3639 	l->min_pid = track->pid;
3640 	l->max_pid = track->pid;
3641 	cpumask_clear(to_cpumask(l->cpus));
3642 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
3643 	nodes_clear(l->nodes);
3644 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3645 	return 1;
3646 }
3647 
3648 static void process_slab(struct loc_track *t, struct kmem_cache *s,
3649 		struct page *page, enum track_item alloc,
3650 		long *map)
3651 {
3652 	void *addr = page_address(page);
3653 	void *p;
3654 
3655 	bitmap_zero(map, page->objects);
3656 	for_each_free_object(p, s, page->freelist)
3657 		set_bit(slab_index(p, s, addr), map);
3658 
3659 	for_each_object(p, s, addr, page->objects)
3660 		if (!test_bit(slab_index(p, s, addr), map))
3661 			add_location(t, s, get_track(s, p, alloc));
3662 }
3663 
3664 static int list_locations(struct kmem_cache *s, char *buf,
3665 					enum track_item alloc)
3666 {
3667 	int len = 0;
3668 	unsigned long i;
3669 	struct loc_track t = { 0, 0, NULL };
3670 	int node;
3671 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3672 				     sizeof(unsigned long), GFP_KERNEL);
3673 
3674 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3675 				     GFP_TEMPORARY)) {
3676 		kfree(map);
3677 		return sprintf(buf, "Out of memory\n");
3678 	}
3679 	/* Push back cpu slabs */
3680 	flush_all(s);
3681 
3682 	for_each_node_state(node, N_NORMAL_MEMORY) {
3683 		struct kmem_cache_node *n = get_node(s, node);
3684 		unsigned long flags;
3685 		struct page *page;
3686 
3687 		if (!atomic_long_read(&n->nr_slabs))
3688 			continue;
3689 
3690 		spin_lock_irqsave(&n->list_lock, flags);
3691 		list_for_each_entry(page, &n->partial, lru)
3692 			process_slab(&t, s, page, alloc, map);
3693 		list_for_each_entry(page, &n->full, lru)
3694 			process_slab(&t, s, page, alloc, map);
3695 		spin_unlock_irqrestore(&n->list_lock, flags);
3696 	}
3697 
3698 	for (i = 0; i < t.count; i++) {
3699 		struct location *l = &t.loc[i];
3700 
3701 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
3702 			break;
3703 		len += sprintf(buf + len, "%7ld ", l->count);
3704 
3705 		if (l->addr)
3706 			len += sprint_symbol(buf + len, (unsigned long)l->addr);
3707 		else
3708 			len += sprintf(buf + len, "<not-available>");
3709 
3710 		if (l->sum_time != l->min_time) {
3711 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3712 				l->min_time,
3713 				(long)div_u64(l->sum_time, l->count),
3714 				l->max_time);
3715 		} else
3716 			len += sprintf(buf + len, " age=%ld",
3717 				l->min_time);
3718 
3719 		if (l->min_pid != l->max_pid)
3720 			len += sprintf(buf + len, " pid=%ld-%ld",
3721 				l->min_pid, l->max_pid);
3722 		else
3723 			len += sprintf(buf + len, " pid=%ld",
3724 				l->min_pid);
3725 
3726 		if (num_online_cpus() > 1 &&
3727 				!cpumask_empty(to_cpumask(l->cpus)) &&
3728 				len < PAGE_SIZE - 60) {
3729 			len += sprintf(buf + len, " cpus=");
3730 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3731 						 to_cpumask(l->cpus));
3732 		}
3733 
3734 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
3735 				len < PAGE_SIZE - 60) {
3736 			len += sprintf(buf + len, " nodes=");
3737 			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3738 					l->nodes);
3739 		}
3740 
3741 		len += sprintf(buf + len, "\n");
3742 	}
3743 
3744 	free_loc_track(&t);
3745 	kfree(map);
3746 	if (!t.count)
3747 		len += sprintf(buf, "No data\n");
3748 	return len;
3749 }
3750 
3751 enum slab_stat_type {
3752 	SL_ALL,			/* All slabs */
3753 	SL_PARTIAL,		/* Only partially allocated slabs */
3754 	SL_CPU,			/* Only slabs used for cpu caches */
3755 	SL_OBJECTS,		/* Determine allocated objects not slabs */
3756 	SL_TOTAL		/* Determine object capacity not slabs */
3757 };
3758 
3759 #define SO_ALL		(1 << SL_ALL)
3760 #define SO_PARTIAL	(1 << SL_PARTIAL)
3761 #define SO_CPU		(1 << SL_CPU)
3762 #define SO_OBJECTS	(1 << SL_OBJECTS)
3763 #define SO_TOTAL	(1 << SL_TOTAL)
3764 
3765 static ssize_t show_slab_objects(struct kmem_cache *s,
3766 			    char *buf, unsigned long flags)
3767 {
3768 	unsigned long total = 0;
3769 	int node;
3770 	int x;
3771 	unsigned long *nodes;
3772 	unsigned long *per_cpu;
3773 
3774 	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3775 	if (!nodes)
3776 		return -ENOMEM;
3777 	per_cpu = nodes + nr_node_ids;
3778 
3779 	if (flags & SO_CPU) {
3780 		int cpu;
3781 
3782 		for_each_possible_cpu(cpu) {
3783 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3784 
3785 			if (!c || c->node < 0)
3786 				continue;
3787 
3788 			if (c->page) {
3789 				if (flags & SO_TOTAL)
3790 					x = c->page->objects;
3791 				else if (flags & SO_OBJECTS)
3792 					x = c->page->inuse;
3793 				else
3794 					x = 1;
3795 
3796 				total += x;
3797 				nodes[c->node] += x;
3798 			}
3799 			per_cpu[c->node]++;
3800 		}
3801 	}
3802 
3803 	if (flags & SO_ALL) {
3804 		for_each_node_state(node, N_NORMAL_MEMORY) {
3805 			struct kmem_cache_node *n = get_node(s, node);
3806 
3807 			if (flags & SO_TOTAL)
3808 				x = atomic_long_read(&n->total_objects);
3809 			else if (flags & SO_OBJECTS)
3810 				x = atomic_long_read(&n->total_objects) -
3811 					count_partial(n, count_free);
3812 			else
3813 				x = atomic_long_read(&n->nr_slabs);
3814 
3815 			total += x;
3816 			nodes[node] += x;
3817 		}
3818 
3819 	} else if (flags & SO_PARTIAL) {
3820 		for_each_node_state(node, N_NORMAL_MEMORY) {
3821 			struct kmem_cache_node *n = get_node(s, node);
3822 
3823 			if (flags & SO_TOTAL)
3824 				x = count_partial(n, count_total);
3825 			else if (flags & SO_OBJECTS)
3826 				x = count_partial(n, count_inuse);
3827 			else
3828 				x = n->nr_partial;
3829 			total += x;
3830 			nodes[node] += x;
3831 		}
3832 	}
3833 	x = sprintf(buf, "%lu", total);
3834 #ifdef CONFIG_NUMA
3835 	for_each_node_state(node, N_NORMAL_MEMORY)
3836 		if (nodes[node])
3837 			x += sprintf(buf + x, " N%d=%lu",
3838 					node, nodes[node]);
3839 #endif
3840 	kfree(nodes);
3841 	return x + sprintf(buf + x, "\n");
3842 }
3843 
3844 static int any_slab_objects(struct kmem_cache *s)
3845 {
3846 	int node;
3847 
3848 	for_each_online_node(node) {
3849 		struct kmem_cache_node *n = get_node(s, node);
3850 
3851 		if (!n)
3852 			continue;
3853 
3854 		if (atomic_long_read(&n->total_objects))
3855 			return 1;
3856 	}
3857 	return 0;
3858 }
3859 
3860 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3861 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
3862 
3863 struct slab_attribute {
3864 	struct attribute attr;
3865 	ssize_t (*show)(struct kmem_cache *s, char *buf);
3866 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3867 };
3868 
3869 #define SLAB_ATTR_RO(_name) \
3870 	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3871 
3872 #define SLAB_ATTR(_name) \
3873 	static struct slab_attribute _name##_attr =  \
3874 	__ATTR(_name, 0644, _name##_show, _name##_store)
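
/*
 * For illustration, SLAB_ATTR(order) expands to
 *
 *	static struct slab_attribute order_attr =
 *		__ATTR(order, 0644, order_show, order_store)
 *
 * wiring the corresponding _show/_store functions below into sysfs.
 */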
3875 
3876 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3877 {
3878 	return sprintf(buf, "%d\n", s->size);
3879 }
3880 SLAB_ATTR_RO(slab_size);
3881 
3882 static ssize_t align_show(struct kmem_cache *s, char *buf)
3883 {
3884 	return sprintf(buf, "%d\n", s->align);
3885 }
3886 SLAB_ATTR_RO(align);
3887 
3888 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3889 {
3890 	return sprintf(buf, "%d\n", s->objsize);
3891 }
3892 SLAB_ATTR_RO(object_size);
3893 
3894 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3895 {
3896 	return sprintf(buf, "%d\n", oo_objects(s->oo));
3897 }
3898 SLAB_ATTR_RO(objs_per_slab);
3899 
3900 static ssize_t order_store(struct kmem_cache *s,
3901 				const char *buf, size_t length)
3902 {
3903 	unsigned long order;
3904 	int err;
3905 
3906 	err = strict_strtoul(buf, 10, &order);
3907 	if (err)
3908 		return err;
3909 
3910 	if (order > slub_max_order || order < slub_min_order)
3911 		return -EINVAL;
3912 
3913 	calculate_sizes(s, order);
3914 	return length;
3915 }
3916 
3917 static ssize_t order_show(struct kmem_cache *s, char *buf)
3918 {
3919 	return sprintf(buf, "%d\n", oo_order(s->oo));
3920 }
3921 SLAB_ATTR(order);
3922 
3923 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
3924 {
3925 	return sprintf(buf, "%lu\n", s->min_partial);
3926 }
3927 
3928 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
3929 				 size_t length)
3930 {
3931 	unsigned long min;
3932 	int err;
3933 
3934 	err = strict_strtoul(buf, 10, &min);
3935 	if (err)
3936 		return err;
3937 
3938 	set_min_partial(s, min);
3939 	return length;
3940 }
3941 SLAB_ATTR(min_partial);
3942 
3943 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3944 {
3945 	if (s->ctor) {
3946 		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3947 
3948 		return n + sprintf(buf + n, "\n");
3949 	}
3950 	return 0;
3951 }
3952 SLAB_ATTR_RO(ctor);
3953 
3954 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3955 {
3956 	return sprintf(buf, "%d\n", s->refcount - 1);
3957 }
3958 SLAB_ATTR_RO(aliases);
3959 
3960 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3961 {
3962 	return show_slab_objects(s, buf, SO_ALL);
3963 }
3964 SLAB_ATTR_RO(slabs);
3965 
3966 static ssize_t partial_show(struct kmem_cache *s, char *buf)
3967 {
3968 	return show_slab_objects(s, buf, SO_PARTIAL);
3969 }
3970 SLAB_ATTR_RO(partial);
3971 
3972 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3973 {
3974 	return show_slab_objects(s, buf, SO_CPU);
3975 }
3976 SLAB_ATTR_RO(cpu_slabs);
3977 
3978 static ssize_t objects_show(struct kmem_cache *s, char *buf)
3979 {
3980 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
3981 }
3982 SLAB_ATTR_RO(objects);
3983 
3984 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
3985 {
3986 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
3987 }
3988 SLAB_ATTR_RO(objects_partial);
3989 
3990 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
3991 {
3992 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
3993 }
3994 SLAB_ATTR_RO(total_objects);
3995 
3996 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3997 {
3998 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3999 }
4000 
4001 static ssize_t sanity_checks_store(struct kmem_cache *s,
4002 				const char *buf, size_t length)
4003 {
4004 	s->flags &= ~SLAB_DEBUG_FREE;
4005 	if (buf[0] == '1')
4006 		s->flags |= SLAB_DEBUG_FREE;
4007 	return length;
4008 }
4009 SLAB_ATTR(sanity_checks);
4010 
4011 static ssize_t trace_show(struct kmem_cache *s, char *buf)
4012 {
4013 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4014 }
4015 
4016 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4017 							size_t length)
4018 {
4019 	s->flags &= ~SLAB_TRACE;
4020 	if (buf[0] == '1')
4021 		s->flags |= SLAB_TRACE;
4022 	return length;
4023 }
4024 SLAB_ATTR(trace);
4025 
4026 #ifdef CONFIG_FAILSLAB
4027 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4028 {
4029 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4030 }
4031 
4032 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4033 							size_t length)
4034 {
4035 	s->flags &= ~SLAB_FAILSLAB;
4036 	if (buf[0] == '1')
4037 		s->flags |= SLAB_FAILSLAB;
4038 	return length;
4039 }
4040 SLAB_ATTR(failslab);
4041 #endif
4042 
4043 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4044 {
4045 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4046 }
4047 
4048 static ssize_t reclaim_account_store(struct kmem_cache *s,
4049 				const char *buf, size_t length)
4050 {
4051 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4052 	if (buf[0] == '1')
4053 		s->flags |= SLAB_RECLAIM_ACCOUNT;
4054 	return length;
4055 }
4056 SLAB_ATTR(reclaim_account);
4057 
4058 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4059 {
4060 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4061 }
4062 SLAB_ATTR_RO(hwcache_align);
4063 
4064 #ifdef CONFIG_ZONE_DMA
4065 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4066 {
4067 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4068 }
4069 SLAB_ATTR_RO(cache_dma);
4070 #endif
4071 
4072 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4073 {
4074 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4075 }
4076 SLAB_ATTR_RO(destroy_by_rcu);
4077 
4078 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4079 {
4080 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4081 }
4082 
4083 static ssize_t red_zone_store(struct kmem_cache *s,
4084 				const char *buf, size_t length)
4085 {
4086 	if (any_slab_objects(s))
4087 		return -EBUSY;
4088 
4089 	s->flags &= ~SLAB_RED_ZONE;
4090 	if (buf[0] == '1')
4091 		s->flags |= SLAB_RED_ZONE;
4092 	calculate_sizes(s, -1);
4093 	return length;
4094 }
4095 SLAB_ATTR(red_zone);
4096 
4097 static ssize_t poison_show(struct kmem_cache *s, char *buf)
4098 {
4099 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4100 }
4101 
4102 static ssize_t poison_store(struct kmem_cache *s,
4103 				const char *buf, size_t length)
4104 {
4105 	if (any_slab_objects(s))
4106 		return -EBUSY;
4107 
4108 	s->flags &= ~SLAB_POISON;
4109 	if (buf[0] == '1')
4110 		s->flags |= SLAB_POISON;
4111 	calculate_sizes(s, -1);
4112 	return length;
4113 }
4114 SLAB_ATTR(poison);
4115 
4116 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4117 {
4118 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4119 }
4120 
4121 static ssize_t store_user_store(struct kmem_cache *s,
4122 				const char *buf, size_t length)
4123 {
4124 	if (any_slab_objects(s))
4125 		return -EBUSY;
4126 
4127 	s->flags &= ~SLAB_STORE_USER;
4128 	if (buf[0] == '1')
4129 		s->flags |= SLAB_STORE_USER;
4130 	calculate_sizes(s, -1);
4131 	return length;
4132 }
4133 SLAB_ATTR(store_user);
4134 
4135 static ssize_t validate_show(struct kmem_cache *s, char *buf)
4136 {
4137 	return 0;
4138 }
4139 
4140 static ssize_t validate_store(struct kmem_cache *s,
4141 			const char *buf, size_t length)
4142 {
4143 	int ret = -EINVAL;
4144 
4145 	if (buf[0] == '1') {
4146 		ret = validate_slab_cache(s);
4147 		if (ret >= 0)
4148 			ret = length;
4149 	}
4150 	return ret;
4151 }
4152 SLAB_ATTR(validate);
4153 
4154 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4155 {
4156 	return 0;
4157 }
4158 
4159 static ssize_t shrink_store(struct kmem_cache *s,
4160 			const char *buf, size_t length)
4161 {
4162 	if (buf[0] == '1') {
4163 		int rc = kmem_cache_shrink(s);
4164 
4165 		if (rc)
4166 			return rc;
4167 	} else
4168 		return -EINVAL;
4169 	return length;
4170 }
4171 SLAB_ATTR(shrink);
4172 
4173 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4174 {
4175 	if (!(s->flags & SLAB_STORE_USER))
4176 		return -ENOSYS;
4177 	return list_locations(s, buf, TRACK_ALLOC);
4178 }
4179 SLAB_ATTR_RO(alloc_calls);
4180 
4181 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4182 {
4183 	if (!(s->flags & SLAB_STORE_USER))
4184 		return -ENOSYS;
4185 	return list_locations(s, buf, TRACK_FREE);
4186 }
4187 SLAB_ATTR_RO(free_calls);
4188 
4189 #ifdef CONFIG_NUMA
4190 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4191 {
4192 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4193 }
4194 
4195 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4196 				const char *buf, size_t length)
4197 {
4198 	unsigned long ratio;
4199 	int err;
4200 
4201 	err = strict_strtoul(buf, 10, &ratio);
4202 	if (err)
4203 		return err;
4204 
4205 	if (ratio <= 100)
4206 		s->remote_node_defrag_ratio = ratio * 10;
4207 
4208 	return length;
4209 }
4210 SLAB_ATTR(remote_node_defrag_ratio);
4211 #endif
4212 
4213 #ifdef CONFIG_SLUB_STATS
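/*
 * Sum one per cpu statistics counter over all online cpus. On SMP the
 * per-cpu breakdown is appended as " C<cpu>=<count>" while space remains.
 */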
4214 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4215 {
4216 	unsigned long sum  = 0;
4217 	int cpu;
4218 	int len;
4219 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4220 
4221 	if (!data)
4222 		return -ENOMEM;
4223 
4224 	for_each_online_cpu(cpu) {
4225 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
4226 
4227 		data[cpu] = x;
4228 		sum += x;
4229 	}
4230 
4231 	len = sprintf(buf, "%lu", sum);
4232 
4233 #ifdef CONFIG_SMP
4234 	for_each_online_cpu(cpu) {
4235 		if (data[cpu] && len < PAGE_SIZE - 20)
4236 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4237 	}
4238 #endif
4239 	kfree(data);
4240 	return len + sprintf(buf + len, "\n");
4241 }
4242 
4243 static void clear_stat(struct kmem_cache *s, enum stat_item si)
4244 {
4245 	int cpu;
4246 
4247 	for_each_online_cpu(cpu)
4248 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
4249 }
4250 
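/*
 * Generate a show/store attribute pair for a stat_item. Reads report the
 * summed counter; only writing "0" is accepted and clears it on every
 * online cpu.
 */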
4251 #define STAT_ATTR(si, text) 					\
4252 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
4253 {								\
4254 	return show_stat(s, buf, si);				\
4255 }								\
4256 static ssize_t text##_store(struct kmem_cache *s,		\
4257 				const char *buf, size_t length)	\
4258 {								\
4259 	if (buf[0] != '0')					\
4260 		return -EINVAL;					\
4261 	clear_stat(s, si);					\
4262 	return length;						\
4263 }								\
4264 SLAB_ATTR(text);
4265 
4266 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4267 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4268 STAT_ATTR(FREE_FASTPATH, free_fastpath);
4269 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4270 STAT_ATTR(FREE_FROZEN, free_frozen);
4271 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4272 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4273 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4274 STAT_ATTR(ALLOC_SLAB, alloc_slab);
4275 STAT_ATTR(ALLOC_REFILL, alloc_refill);
4276 STAT_ATTR(FREE_SLAB, free_slab);
4277 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4278 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4279 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4280 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4281 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4282 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4283 STAT_ATTR(ORDER_FALLBACK, order_fallback);
4284 #endif
4285 
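/* The attributes exported for each cache under /sys/kernel/slab/<cache>/ */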
4286 static struct attribute *slab_attrs[] = {
4287 	&slab_size_attr.attr,
4288 	&object_size_attr.attr,
4289 	&objs_per_slab_attr.attr,
4290 	&order_attr.attr,
4291 	&min_partial_attr.attr,
4292 	&objects_attr.attr,
4293 	&objects_partial_attr.attr,
4294 	&total_objects_attr.attr,
4295 	&slabs_attr.attr,
4296 	&partial_attr.attr,
4297 	&cpu_slabs_attr.attr,
4298 	&ctor_attr.attr,
4299 	&aliases_attr.attr,
4300 	&align_attr.attr,
4301 	&sanity_checks_attr.attr,
4302 	&trace_attr.attr,
4303 	&hwcache_align_attr.attr,
4304 	&reclaim_account_attr.attr,
4305 	&destroy_by_rcu_attr.attr,
4306 	&red_zone_attr.attr,
4307 	&poison_attr.attr,
4308 	&store_user_attr.attr,
4309 	&validate_attr.attr,
4310 	&shrink_attr.attr,
4311 	&alloc_calls_attr.attr,
4312 	&free_calls_attr.attr,
4313 #ifdef CONFIG_ZONE_DMA
4314 	&cache_dma_attr.attr,
4315 #endif
4316 #ifdef CONFIG_NUMA
4317 	&remote_node_defrag_ratio_attr.attr,
4318 #endif
4319 #ifdef CONFIG_SLUB_STATS
4320 	&alloc_fastpath_attr.attr,
4321 	&alloc_slowpath_attr.attr,
4322 	&free_fastpath_attr.attr,
4323 	&free_slowpath_attr.attr,
4324 	&free_frozen_attr.attr,
4325 	&free_add_partial_attr.attr,
4326 	&free_remove_partial_attr.attr,
4327 	&alloc_from_partial_attr.attr,
4328 	&alloc_slab_attr.attr,
4329 	&alloc_refill_attr.attr,
4330 	&free_slab_attr.attr,
4331 	&cpuslab_flush_attr.attr,
4332 	&deactivate_full_attr.attr,
4333 	&deactivate_empty_attr.attr,
4334 	&deactivate_to_head_attr.attr,
4335 	&deactivate_to_tail_attr.attr,
4336 	&deactivate_remote_frees_attr.attr,
4337 	&order_fallback_attr.attr,
4338 #endif
4339 #ifdef CONFIG_FAILSLAB
4340 	&failslab_attr.attr,
4341 #endif
4342 
4343 	NULL
4344 };
4345 
4346 static struct attribute_group slab_attr_group = {
4347 	.attrs = slab_attrs,
4348 };
4349 
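/*
 * Generic sysfs handlers: dispatch reads and writes to the slab
 * attribute's show()/store() methods.
 */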
4350 static ssize_t slab_attr_show(struct kobject *kobj,
4351 				struct attribute *attr,
4352 				char *buf)
4353 {
4354 	struct slab_attribute *attribute;
4355 	struct kmem_cache *s;
4356 	int err;
4357 
4358 	attribute = to_slab_attr(attr);
4359 	s = to_slab(kobj);
4360 
4361 	if (!attribute->show)
4362 		return -EIO;
4363 
4364 	err = attribute->show(s, buf);
4365 
4366 	return err;
4367 }
4368 
4369 static ssize_t slab_attr_store(struct kobject *kobj,
4370 				struct attribute *attr,
4371 				const char *buf, size_t len)
4372 {
4373 	struct slab_attribute *attribute;
4374 	struct kmem_cache *s;
4375 	int err;
4376 
4377 	attribute = to_slab_attr(attr);
4378 	s = to_slab(kobj);
4379 
4380 	if (!attribute->store)
4381 		return -EIO;
4382 
4383 	err = attribute->store(s, buf, len);
4384 
4385 	return err;
4386 }
4387 
4388 static void kmem_cache_release(struct kobject *kobj)
4389 {
4390 	struct kmem_cache *s = to_slab(kobj);
4391 
4392 	kfree(s);
4393 }
4394 
4395 static const struct sysfs_ops slab_sysfs_ops = {
4396 	.show = slab_attr_show,
4397 	.store = slab_attr_store,
4398 };
4399 
4400 static struct kobj_type slab_ktype = {
4401 	.sysfs_ops = &slab_sysfs_ops,
4402 	.release = kmem_cache_release
4403 };
4404 
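/* Emit uevents only for kobjects that represent slab caches. */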
4405 static int uevent_filter(struct kset *kset, struct kobject *kobj)
4406 {
4407 	struct kobj_type *ktype = get_ktype(kobj);
4408 
4409 	if (ktype == &slab_ktype)
4410 		return 1;
4411 	return 0;
4412 }
4413 
4414 static const struct kset_uevent_ops slab_uevent_ops = {
4415 	.filter = uevent_filter,
4416 };
4417 
4418 static struct kset *slab_kset;
4419 
4420 #define ID_STR_LENGTH 64
4421 
4422 /* Create a unique string id for a slab cache:
4423  *
4424  * Format	:[flags-]size
4425  */
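/* e.g. a DMA cache of size 192 with kmemcheck tracking gets ":dt-0000192" */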
4426 static char *create_unique_id(struct kmem_cache *s)
4427 {
4428 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4429 	char *p = name;
4430 
4431 	BUG_ON(!name);
4432 
4433 	*p++ = ':';
4434 	/*
4435 	 * First flags affecting slabcache operations. We will only
4436 	 * get here for aliasable slabs so we do not need to support
4437 	 * too many flags. The flags here must cover all flags that
4438 	 * are matched during merging to guarantee that the id is
4439 	 * unique.
4440 	 */
4441 	if (s->flags & SLAB_CACHE_DMA)
4442 		*p++ = 'd';
4443 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4444 		*p++ = 'a';
4445 	if (s->flags & SLAB_DEBUG_FREE)
4446 		*p++ = 'F';
4447 	if (!(s->flags & SLAB_NOTRACK))
4448 		*p++ = 't';
4449 	if (p != name + 1)
4450 		*p++ = '-';
4451 	p += sprintf(p, "%07d", s->size);
4452 	BUG_ON(p > name + ID_STR_LENGTH - 1);
4453 	return name;
4454 }
4455 
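/*
 * Register a cache in /sys/kernel/slab/. Unmergeable caches keep their
 * real name; mergeable caches are named by a unique id and their real
 * name is added as a symlink alias.
 */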
4456 static int sysfs_slab_add(struct kmem_cache *s)
4457 {
4458 	int err;
4459 	const char *name;
4460 	int unmergeable;
4461 
4462 	if (slab_state < SYSFS)
4463 		/* Defer until later */
4464 		return 0;
4465 
4466 	unmergeable = slab_unmergeable(s);
4467 	if (unmergeable) {
4468 		/*
4469 		 * Slabcache can never be merged so we can use the name proper.
4470 		 * This is typically the case for debug situations. In that
4471 		 * case we can catch duplicate names easily.
4472 		 */
4473 		sysfs_remove_link(&slab_kset->kobj, s->name);
4474 		name = s->name;
4475 	} else {
4476 		/*
4477 		 * Create a unique name for the slab as a target
4478 		 * for the symlinks.
4479 		 */
4480 		name = create_unique_id(s);
4481 	}
4482 
4483 	s->kobj.kset = slab_kset;
4484 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4485 	if (err) {
4486 		kobject_put(&s->kobj);
4487 		return err;
4488 	}
4489 
4490 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4491 	if (err) {
4492 		kobject_del(&s->kobj);
4493 		kobject_put(&s->kobj);
4494 		return err;
4495 	}
4496 	kobject_uevent(&s->kobj, KOBJ_ADD);
4497 	if (!unmergeable) {
4498 		/* Setup first alias */
4499 		sysfs_slab_alias(s, s->name);
4500 		kfree(name);
4501 	}
4502 	return 0;
4503 }
4504 
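/* Tear down the sysfs representation of a cache and drop its kobject. */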
4505 static void sysfs_slab_remove(struct kmem_cache *s)
4506 {
4507 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4508 	kobject_del(&s->kobj);
4509 	kobject_put(&s->kobj);
4510 }
4511 
4512 /*
4513  * Need to buffer aliases during bootup until sysfs becomes
4514  * available lest we lose that information.
4515  */
4516 struct saved_alias {
4517 	struct kmem_cache *s;
4518 	const char *name;
4519 	struct saved_alias *next;
4520 };
4521 
4522 static struct saved_alias *alias_list;
4523 
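/*
 * Create the alias symlink right away if sysfs is up; otherwise queue it
 * on alias_list so slab_sysfs_init() can create it later.
 */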
4524 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4525 {
4526 	struct saved_alias *al;
4527 
4528 	if (slab_state == SYSFS) {
4529 		/*
4530 		 * If we have a leftover link then remove it.
4531 		 */
4532 		sysfs_remove_link(&slab_kset->kobj, name);
4533 		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4534 	}
4535 
4536 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4537 	if (!al)
4538 		return -ENOMEM;
4539 
4540 	al->s = s;
4541 	al->name = name;
4542 	al->next = alias_list;
4543 	alias_list = al;
4544 	return 0;
4545 }
4546 
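/*
 * Late initcall: create the slab kset, register all caches created during
 * boot and flush the queued aliases.
 */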
4547 static int __init slab_sysfs_init(void)
4548 {
4549 	struct kmem_cache *s;
4550 	int err;
4551 
4552 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4553 	if (!slab_kset) {
4554 		printk(KERN_ERR "Cannot register slab subsystem.\n");
4555 		return -ENOSYS;
4556 	}
4557 
4558 	slab_state = SYSFS;
4559 
4560 	list_for_each_entry(s, &slab_caches, list) {
4561 		err = sysfs_slab_add(s);
4562 		if (err)
4563 			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4564 						" to sysfs\n", s->name);
4565 	}
4566 
4567 	while (alias_list) {
4568 		struct saved_alias *al = alias_list;
4569 
4570 		alias_list = alias_list->next;
4571 		err = sysfs_slab_alias(al->s, al->name);
4572 		if (err)
4573 			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4574 					" %s to sysfs\n", al->name);
4575 		kfree(al);
4576 	}
4577 
4578 	resiliency_test();
4579 	return 0;
4580 }
4581 
4582 __initcall(slab_sysfs_init);
4583 #endif
4584 
4585 /*
4586  * The /proc/slabinfo ABI
4587  */
4588 #ifdef CONFIG_SLABINFO
4589 static void print_slabinfo_header(struct seq_file *m)
4590 {
4591 	seq_puts(m, "slabinfo - version: 2.1\n");
4592 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4593 		 "<objperslab> <pagesperslab>");
4594 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4595 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4596 	seq_putc(m, '\n');
4597 }
4598 
4599 static void *s_start(struct seq_file *m, loff_t *pos)
4600 {
4601 	loff_t n = *pos;
4602 
4603 	down_read(&slub_lock);
4604 	if (!n)
4605 		print_slabinfo_header(m);
4606 
4607 	return seq_list_start(&slab_caches, *pos);
4608 }
4609 
4610 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4611 {
4612 	return seq_list_next(p, &slab_caches, pos);
4613 }
4614 
4615 static void s_stop(struct seq_file *m, void *p)
4616 {
4617 	up_read(&slub_lock);
4618 }
4619 
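/*
 * Emit one /proc/slabinfo row. SLUB has no tunables or shared array, so
 * those columns are reported as zero.
 */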
4620 static int s_show(struct seq_file *m, void *p)
4621 {
4622 	unsigned long nr_partials = 0;
4623 	unsigned long nr_slabs = 0;
4624 	unsigned long nr_inuse = 0;
4625 	unsigned long nr_objs = 0;
4626 	unsigned long nr_free = 0;
4627 	struct kmem_cache *s;
4628 	int node;
4629 
4630 	s = list_entry(p, struct kmem_cache, list);
4631 
4632 	for_each_online_node(node) {
4633 		struct kmem_cache_node *n = get_node(s, node);
4634 
4635 		if (!n)
4636 			continue;
4637 
4638 		nr_partials += n->nr_partial;
4639 		nr_slabs += atomic_long_read(&n->nr_slabs);
4640 		nr_objs += atomic_long_read(&n->total_objects);
4641 		nr_free += count_partial(n, count_free);
4642 	}
4643 
4644 	nr_inuse = nr_objs - nr_free;
4645 
4646 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4647 		   nr_objs, s->size, oo_objects(s->oo),
4648 		   (1 << oo_order(s->oo)));
4649 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4650 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4651 		   0UL);
4652 	seq_putc(m, '\n');
4653 	return 0;
4654 }
4655 
4656 static const struct seq_operations slabinfo_op = {
4657 	.start = s_start,
4658 	.next = s_next,
4659 	.stop = s_stop,
4660 	.show = s_show,
4661 };
4662 
4663 static int slabinfo_open(struct inode *inode, struct file *file)
4664 {
4665 	return seq_open(file, &slabinfo_op);
4666 }
4667 
4668 static const struct file_operations proc_slabinfo_operations = {
4669 	.open		= slabinfo_open,
4670 	.read		= seq_read,
4671 	.llseek		= seq_lseek,
4672 	.release	= seq_release,
4673 };
4674 
4675 static int __init slab_proc_init(void)
4676 {
4677 	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
4678 	return 0;
4679 }
4680 module_init(slab_proc_init);
4681 #endif /* CONFIG_SLABINFO */
4682