Lines Matching +full:no +full:- +full:ref +full:- +full:high +full:- +full:z (full-text search result; the matched lines below are from the SLUB allocator, mm/slub.c, each shown with its line number in that file)

1 // SPDX-License-Identifier: GPL-2.0
36 #include <linux/fault-inject.h>
43 #include <kunit/test-bug.h>
54 * 2. node->list_lock (Spinlock)
55 * 3. kmem_cache->cpu_slab->lock (Local lock)
73 * A. slab->freelist -> List of free objects in a slab
74 * B. slab->inuse -> Number of objects in use
75 * C. slab->objects -> Number of objects in slab
76 * D. slab->frozen -> frozen state
97 * - node partial slab: PG_Workingset && !frozen
98 * - cpu partial slab: !PG_Workingset && !frozen
99 * - cpu slab: !PG_Workingset && frozen
100 * - full slab: !PG_Workingset && !frozen
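The four bit patterns above fully determine a slab's state, except that a cpu partial slab and a full slab share !PG_Workingset && !frozen and are told apart only by which list (if any) holds them. A small reading aid in plain C, not part of mm/slub.c, with illustrative names:

#include <stdbool.h>

/* Illustrative classification of the bit patterns listed above. */
enum slab_state { NODE_PARTIAL, CPU_SLAB, CPU_PARTIAL_OR_FULL };

static enum slab_state classify_slab(bool workingset, bool frozen)
{
	/* workingset && frozen does not occur in the table above */
	if (frozen)
		return CPU_SLAB;		/* the active per-CPU slab */
	if (workingset)
		return NODE_PARTIAL;		/* on a node partial list */
	/* !workingset && !frozen: per-CPU partial slab or full slab;
	 * only the containing list (if any) tells them apart. */
	return CPU_PARTIAL_OR_FULL;
}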
105 * the partial slab counter. If taken then no new slabs may be added or
119 * cpu_slab->lock local lock
128 * an in-progress slow path operation. In this case the local lock is always
155 * operations no list for full slabs is used. If an object in a full slab is
164 * slab->frozen The slab is frozen and exempt from list processing.
171 * when the slab is no longer needed.
236 (s->flags & SLAB_KMALLOC));
242 p += s->red_left_pad;
259 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
261 * - Variable sizing of the per node arrays
304 #define OO_MASK ((1 << OO_SHIFT) - 1)
408 * avoid this_cpu_add()'s irq-disable overhead.
410 raw_cpu_inc(s->cpu_slab->stat[si]);
418 raw_cpu_add(s->cpu_slab->stat[si], v);
438 return s->node[node];
470 * with an XOR of the address where the pointer is held and a per-cache
479 encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
492 decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
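The two lines above (479 and 492) are the CONFIG_SLAB_FREELIST_HARDENED obfuscation: the stored freelist link is the real pointer XORed with a per-cache random value and the byte-swapped address of the slot that holds it. A standalone sketch of the same arithmetic, with byte_swap() standing in for the kernel's swab() and a 64-bit pointer size assumed:

#include <stdint.h>

static uintptr_t byte_swap(uintptr_t v)
{
	return __builtin_bswap64(v);	/* assumes 64-bit uintptr_t */
}

/* Store side: obfuscate the next-free pointer before writing it into the slot. */
static uintptr_t encode_free_ptr(uintptr_t ptr, uintptr_t cache_random,
				 uintptr_t slot_addr)
{
	return ptr ^ cache_random ^ byte_swap(slot_addr);
}

/* Load side: XOR is self-inverse, so decoding repeats the same operation. */
static uintptr_t decode_free_ptr(uintptr_t stored, uintptr_t cache_random,
				 uintptr_t slot_addr)
{
	return stored ^ cache_random ^ byte_swap(slot_addr);
}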
505 ptr_addr = (unsigned long)object + s->offset;
513 prefetchw(object + s->offset);
537 freepointer_addr = (unsigned long)object + s->offset;
544 unsigned long freeptr_addr = (unsigned long)object + s->offset;
559 return s->offset >= s->inuse;
569 return s->inuse + sizeof(void *);
571 return s->inuse;
577 __p < (__addr) + (__objects) * (__s)->size; \
578 __p += (__s)->size)
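The for_each_object() fragment above walks a slab by stepping in strides of s->size from the slab's base address. The same loop shape as a plain function (illustrative only, not from the file):

#include <stddef.h>

static void for_each_object_sketch(char *slab_base, unsigned int nr_objects,
				   size_t obj_size, void (*visit)(void *obj))
{
	char *p;

	/* Objects live at fixed obj_size strides starting at the slab base. */
	for (p = slab_base; p < slab_base + nr_objects * obj_size; p += obj_size)
		visit(p);
}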
610 s->cpu_partial = nr_objects;
616 * be half-full.
618 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
619 s->cpu_partial_slabs = nr_slabs;
624 return s->cpu_partial_slabs;
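The sizing at line 618 assumes partial slabs are about half full, so it converts a target number of objects into a slab count via DIV_ROUND_UP(nr_objects * 2, objects per slab). A worked example with assumed numbers (30 queued objects, 16 objects per slab), not values from the file:

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* nr_slabs = DIV_ROUND_UP(30 * 2, 16) = DIV_ROUND_UP(60, 16) = 4,
 * i.e. four half-full slabs hold about 32 >= 30 free objects. */
static unsigned int example_cpu_partial_slabs(void)
{
	unsigned int nr_objects = 30, objs_per_slab = 16;

	return DIV_ROUND_UP(nr_objects * 2, objs_per_slab);
}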
643 bit_spin_lock(PG_locked, &slab->__page_flags);
648 bit_spin_unlock(PG_locked, &slab->__page_flags);
660 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
674 if (slab->freelist == freelist_old &&
675 slab->counters == counters_old) {
676 slab->freelist = freelist_new;
677 slab->counters = counters_new;
702 if (s->flags & __CMPXCHG_DOUBLE) {
716 pr_info("%s %s: cmpxchg double redo ", n, s->name);
729 if (s->flags & __CMPXCHG_DOUBLE) {
747 pr_info("%s %s: cmpxchg double redo ", n, s->name);
789 return s->object_size;
807 bitmap_zero(obj_map, slab->objects);
809 for (p = slab->freelist; p; p = get_freepointer(s, p))
821 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
825 (*(int *)resource->data)++;
837 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
850 if (s->flags & SLAB_RED_ZONE)
851 return s->size - s->red_left_pad;
853 return s->size;
858 if (s->flags & SLAB_RED_ZONE)
859 p -= s->red_left_pad;
910 if (object < base || object >= base + slab->objects * s->size ||
911 (object - base) % s->size) {
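The check above (and the very similar one at lines 5529-5530) accepts an object pointer only if it lies inside the slab and sits at an exact multiple of the object size from the base. A self-contained sketch of that test:

#include <stdbool.h>
#include <stdint.h>

static bool valid_object_pointer(uintptr_t object, uintptr_t base,
				 unsigned int nr_objects, unsigned int obj_size)
{
	if (object < base || object >= base + (uintptr_t)nr_objects * obj_size)
		return false;			/* outside the slab */
	return (object - base) % obj_size == 0;	/* aligned to an object slot */
}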
963 p->handle = handle;
965 p->addr = addr;
966 p->cpu = smp_processor_id();
967 p->pid = current->pid;
968 p->when = jiffies;
983 if (!(s->flags & SLAB_STORE_USER))
994 if (!t->addr)
998 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
1000 handle = READ_ONCE(t->handle);
1011 if (!(s->flags & SLAB_STORE_USER))
1021 slab, slab->objects, slab->inuse, slab->freelist,
1022 &slab->__page_flags);
1027 set_orig_size(s, (void *)object, s->object_size);
1039 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
1040 pr_err("-----------------------------------------------------------------------------\n\n");
1056 pr_err("FIX %s: %pV\n", s->name, &vaf);
1070 p, p - addr, get_freepointer(s, p));
1072 if (s->flags & SLAB_RED_ZONE)
1073 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
1074 s->red_left_pad);
1076 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1079 min_t(unsigned int, s->object_size, PAGE_SIZE));
1080 if (s->flags & SLAB_RED_ZONE)
1081 print_section(KERN_ERR, "Redzone ", p + s->object_size,
1082 s->inuse - s->object_size);
1086 if (s->flags & SLAB_STORE_USER)
1097 size_from_object(s) - off);
1116 if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
1148 unsigned int poison_size = s->object_size;
1150 if (s->flags & SLAB_RED_ZONE) {
1153 * the shadow makes it possible to distinguish uninit-value
1154 * from use-after-free.
1156 memset_no_sanitize_memory(p - s->red_left_pad, val,
1157 s->red_left_pad);
1169 if (s->flags & __OBJECT_POISON) {
1170 memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
1171 memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
1174 if (s->flags & SLAB_RED_ZONE)
1176 s->inuse - poison_size);
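Read together, lines 1148-1176 lay a debug object out as: a left red zone of red_left_pad bytes, the object body filled with POISON_FREE and ended by one POISON_END byte, then a right red zone covering inuse - object_size bytes. A user-space sketch of just the object-body poisoning, using the kernel's poison values (0x6b and 0xa5):

#include <string.h>

#define POISON_FREE	0x6b	/* kernel value for freed object bytes */
#define POISON_END	0xa5	/* kernel value marking the end of the object */

static void poison_object(unsigned char *object, size_t object_size)
{
	memset(object, POISON_FREE, object_size - 1);
	object[object_size - 1] = POISON_END;
}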
1182 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1183 memset(from, data, to - from);
1208 while (end > fault && end[-1] == value)
1209 end--;
1215 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1216 fault, end - 1, fault - addr,
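The loop at line 1208 narrows the reported corruption range: end is walked back over trailing bytes that still hold the expected value, so the message covers fault through end - 1. A sketch of that trimming step:

/* Return one past the last corrupted byte in [fault, end), given that
 * bytes at the tail may still hold the expected fill value. */
static unsigned char *trim_report_end(unsigned char *fault, unsigned char *end,
				      unsigned char expected)
{
	while (end > fault && end[-1] == expected)
		end--;
	return end;
}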
1235 * object + s->object_size
1243 * object + s->inuse
1255 * object + s->size
1256 * Nothing is used beyond s->size.
1259 * ignored. And therefore no slab options that rely on these boundaries
1267 if (s->flags & SLAB_STORE_USER) {
1271 if (s->flags & SLAB_KMALLOC)
1281 p + off, POISON_INUSE, size_from_object(s) - off);
1295 if (!(s->flags & SLAB_POISON))
1301 remainder = length % s->size;
1305 pad = end - remainder;
1311 while (end > fault && end[-1] == POISON_INUSE)
1312 end--;
1314 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1315 fault, end - 1, fault - start);
1325 u8 *endobject = object + s->object_size;
1329 if (s->flags & SLAB_RED_ZONE) {
1331 object - s->red_left_pad, val, s->red_left_pad))
1335 endobject, val, s->inuse - s->object_size))
1341 if (s->object_size > orig_size &&
1344 val, s->object_size - orig_size)) {
1349 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1352 s->inuse - s->object_size))
1357 if (s->flags & SLAB_POISON) {
1358 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1365 if (kasan_meta_size < s->object_size - 1 &&
1368 s->object_size - kasan_meta_size - 1))
1370 if (kasan_meta_size < s->object_size &&
1372 p + s->object_size - 1, POISON_END, 1))
1390 * No choice but to zap it and thus lose the remainder
1415 maxobj = order_objects(slab_order(slab), s->size);
1416 if (slab->objects > maxobj) {
1418 slab->objects, maxobj);
1421 if (slab->inuse > slab->objects) {
1423 slab->inuse, slab->objects);
1442 fp = slab->freelist;
1443 while (fp && nr <= slab->objects) {
1453 slab->freelist = NULL;
1454 slab->inuse = slab->objects;
1465 max_objects = order_objects(slab_order(slab), s->size);
1469 if (slab->objects != max_objects) {
1471 slab->objects, max_objects);
1472 slab->objects = max_objects;
1475 if (slab->inuse != slab->objects - nr) {
1477 slab->inuse, slab->objects - nr);
1478 slab->inuse = slab->objects - nr;
1487 if (s->flags & SLAB_TRACE) {
1489 s->name,
1491 object, slab->inuse,
1492 slab->freelist);
1496 s->object_size);
1508 if (!(s->flags & SLAB_STORE_USER))
1511 lockdep_assert_held(&n->list_lock);
1512 list_add(&slab->slab_list, &n->full);
1517 if (!(s->flags & SLAB_STORE_USER))
1520 lockdep_assert_held(&n->list_lock);
1521 list_del(&slab->slab_list);
1526 return atomic_long_read(&n->nr_slabs);
1533 atomic_long_inc(&n->nr_slabs);
1534 atomic_long_add(objects, &n->total_objects);
1540 atomic_long_dec(&n->nr_slabs);
1541 atomic_long_sub(objects, &n->total_objects);
1585 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1604 slab->inuse = slab->objects;
1605 slab->freelist = NULL;
1626 if (unlikely(s != slab->slab_cache)) {
1630 } else if (!slab->slab_cache) {
1631 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1647 * @slabs: return start of list of slabs, or NULL when there's no list
1648 * @init: assume this is initial parsing and not per-kmem-create parsing
1663 * No options but restriction on slabs. This means full
1674 case '-':
1680 case 'z':
1742 * No options specified. Switch on full debugging.
1765 * long as there is no option specifying flags without a slab list.
1783 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1791 * kmem_cache_flags - apply debugging options to the cache
1797 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1833 end = next_block - 1;
1835 glob = strnchr(iter, end - iter, '*');
1837 cmplen = glob - iter;
1839 cmplen = max_t(size_t, len, (end - iter));
1911 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
1914 WARN_ON(slab_exts[offs].ref.ct);
1915 set_codetag_empty(&slab_exts[offs].ref);
1921 slab->obj_exts = OBJEXTS_ALLOC_FAIL;
1929 * objects with no tag reference. Mark all references in this
1936 set_codetag_empty(&vec[i].ref);
1975 return -ENOMEM;
1982 old_exts = READ_ONCE(slab->obj_exts);
1987 * obj_exts, no synchronization is required and obj_exts can
1990 slab->obj_exts = new_exts;
1992 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
2024 slab->obj_exts = 0;
2034 * inside memcg_slab_post_alloc_hook. No other users for now.
2068 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2078 __func__, s->name))
2097 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
2111 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
2112 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2122 alloc_tag_sub(&obj_exts[off].ref, s->size);
2153 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2202 s = slab->slab_cache;
2254 * production configuration these hooks all should produce no code at all.
2265 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
2267 kmemleak_free_recursive(x, s->flags);
2270 debug_check_no_locks_freed(x, s->object_size);
2272 if (!(s->flags & SLAB_DEBUG_OBJECTS))
2273 debug_check_no_obj_freed(x, s->object_size);
2275 /* Use KCSAN to help debug racy use-after-free. */
2277 __kcsan_check_access(x, s->object_size,
2305 delayed_free->object = x;
2306 call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
2331 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2333 s->size - inuse - rsize);
2382 --(*cnt);
2393 if (unlikely(s->ctor)) {
2395 s->ctor(object);
2421 /* Make the flag visible before any changes to folio->mapping */
2430 /* Pre-initialize the random sequence cache */
2433 unsigned int count = oo_objects(s->oo);
2437 if (s->random_seq)
2443 s->name);
2448 if (s->random_seq) {
2452 s->random_seq[i] *= s->size;
2470 /* Get the next entry on the pre-computed freelist randomized */
2483 idx = s->random_seq[*pos];
2492 /* Shuffle the single linked freelist based on a random pre-computed sequence */
2500 if (slab->objects < 2 || !s->random_seq)
2503 freelist_count = oo_objects(s->oo);
2506 page_limit = slab->objects * s->size;
2512 slab->freelist = cur;
2514 for (idx = 1; idx < slab->objects; idx++) {
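Lines 2430-2514 are the two halves of freelist randomization: a precomputed permutation of object indices is scaled by s->size into byte offsets (line 2452), and those offsets are later used to link the initial freelist in shuffled order (lines 2506-2514). A minimal sketch of the scaling step, assuming the permutation already exists:

/* Turn a permutation of object indices into byte offsets within the slab. */
static void scale_random_sequence(unsigned int *random_seq, unsigned int count,
				  unsigned int obj_size)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		random_seq[i] *= obj_size;
}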
2540 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
2554 -(PAGE_SIZE << order));
2560 struct kmem_cache_order_objects oo = s->oo;
2568 flags |= s->allocflags;
2571 * Let the initial higher-order allocation fail under memory pressure
2572 * so we fall back to the minimum order allocation.
2575 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
2580 oo = s->min;
2592 slab->objects = oo_objects(oo);
2593 slab->inuse = 0;
2594 slab->frozen = 0;
2598 slab->slab_cache = s;
2611 slab->freelist = start;
2612 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
2613 next = p + s->size;
2629 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2642 folio->mapping = NULL;
2648 __free_pages(&folio->page, order);
2655 __free_slab(slab->slab_cache, slab);
2664 for_each_object(p, s, slab_address(slab), slab->objects)
2668 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
2669 call_rcu(&slab->rcu_head, rcu_free_slab);
2676 dec_slabs_node(s, slab_nid(slab), slab->objects);
2682 * the per-node partial list.
2705 n->nr_partial++;
2707 list_add_tail(&slab->slab_list, &n->partial);
2709 list_add(&slab->slab_list, &n->partial);
2716 lockdep_assert_held(&n->list_lock);
2723 lockdep_assert_held(&n->list_lock);
2724 list_del(&slab->slab_list);
2726 n->nr_partial--;
2731 * slab from the n->partial list. Remove only a single object from the slab, do
2740 lockdep_assert_held(&n->list_lock);
2742 object = slab->freelist;
2743 slab->freelist = get_freepointer(s, object);
2744 slab->inuse++;
2751 if (slab->inuse == slab->objects) {
2773 object = slab->freelist;
2774 slab->freelist = get_freepointer(s, object);
2775 slab->inuse = 1;
2785 spin_lock_irqsave(&n->list_lock, flags);
2787 if (slab->inuse == slab->objects)
2792 inc_slabs_node(s, nid, slab->objects);
2793 spin_unlock_irqrestore(&n->list_lock, flags);
2818 * Racy check. If we mistakenly see no partial slabs then we
2823 if (!n || !n->nr_partial)
2826 spin_lock_irqsave(&n->list_lock, flags);
2827 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
2828 if (!pfmemalloc_match(slab, pc->flags))
2833 pc->orig_size);
2836 pc->object = object;
2860 spin_unlock_irqrestore(&n->list_lock, flags);
2872 struct zoneref *z;
2874 enum zone_type highest_zoneidx = gfp_zone(pc->flags);
2896 if (!s->remote_node_defrag_ratio ||
2897 get_cycles() % 1024 > s->remote_node_defrag_ratio)
2902 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
2903 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2908 if (n && cpuset_zone_allowed(zone, pc->flags) &&
2909 n->nr_partial > s->min_partial) {
2914 * here - if mems_allowed was updated in
2941 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
2958 * No preemption is supported, therefore there is also no need to check for
2990 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2992 pr_info("%s %s: cmpxchg redo ", n, s->name);
2996 pr_warn("due to cpu change %d -> %d\n",
3001 pr_warn("due to cpu running other code. Event %ld->%ld\n",
3016 c = per_cpu_ptr(s->cpu_slab, cpu);
3017 local_lock_init(&c->lock);
3018 c->tid = init_tid(cpu);
3039 if (READ_ONCE(slab->freelist)) {
3068 * Stage two: Unfreeze the slab while splicing the per-cpu
3072 old.freelist = READ_ONCE(slab->freelist);
3073 old.counters = READ_ONCE(slab->counters);
3080 new.inuse -= free_delta;
3094 if (!new.inuse && n->nr_partial >= s->min_partial) {
3099 spin_lock_irqsave(&n->list_lock, flags);
3101 spin_unlock_irqrestore(&n->list_lock, flags);
3117 partial_slab = slab->next;
3122 spin_unlock_irqrestore(&n->list_lock, flags);
3125 spin_lock_irqsave(&n->list_lock, flags);
3128 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
3129 slab->next = slab_to_discard;
3138 spin_unlock_irqrestore(&n->list_lock, flags);
3142 slab_to_discard = slab_to_discard->next;
3158 local_lock_irqsave(&s->cpu_slab->lock, flags);
3159 partial_slab = this_cpu_read(s->cpu_slab->partial);
3160 this_cpu_write(s->cpu_slab->partial, NULL);
3161 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3173 c->partial = NULL;
3192 local_lock_irqsave(&s->cpu_slab->lock, flags);
3194 oldslab = this_cpu_read(s->cpu_slab->partial);
3197 if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
3206 slabs = oldslab->slabs;
3212 slab->slabs = slabs;
3213 slab->next = oldslab;
3215 this_cpu_write(s->cpu_slab->partial, slab);
3217 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3239 local_lock_irqsave(&s->cpu_slab->lock, flags);
3241 slab = c->slab;
3242 freelist = c->freelist;
3244 c->slab = NULL;
3245 c->freelist = NULL;
3246 c->tid = next_tid(c->tid);
3248 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3258 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3259 void *freelist = c->freelist;
3260 struct slab *slab = c->slab;
3262 c->slab = NULL;
3263 c->freelist = NULL;
3264 c->tid = next_tid(c->tid);
3293 s = sfw->s;
3294 c = this_cpu_ptr(s->cpu_slab);
3296 if (c->slab)
3304 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3306 return c->slab || slub_percpu_partial(c);
3323 sfw->skip = true;
3326 INIT_WORK(&sfw->work, flush_cpu_slab);
3327 sfw->skip = false;
3328 sfw->s = s;
3329 queue_work_on(cpu, flushwq, &sfw->work);
3334 if (sfw->skip)
3336 flush_work(&sfw->work);
3387 return slab->objects - slab->inuse;
3392 return atomic_long_read(&n->total_objects);
3404 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3409 if (slab->inuse < *bulk_cnt) {
3411 slab->inuse, *bulk_cnt);
3420 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3425 if (s->flags & SLAB_STORE_USER)
3462 spin_lock_irqsave(&n->list_lock, flags);
3463 list_for_each_entry(slab, &n->partial, slab_list)
3465 spin_unlock_irqrestore(&n->list_lock, flags);
3479 spin_lock_irqsave(&n->list_lock, flags);
3480 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
3481 list_for_each_entry(slab, &n->partial, slab_list)
3482 x += slab->objects - slab->inuse;
3491 list_for_each_entry(slab, &n->partial, slab_list) {
3492 x += slab->objects - slab->inuse;
3496 list_for_each_entry_reverse(slab, &n->partial, slab_list) {
3497 x += slab->objects - slab->inuse;
3501 x = mult_frac(x, n->nr_partial, scanned);
3504 spin_unlock_irqrestore(&n->list_lock, flags);
3523 s->name, s->object_size, s->size, oo_order(s->oo),
3524 oo_order(s->min));
3526 if (oo_order(s->min) > get_order(s->object_size))
3528 s->name);
3565 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
3570 * Check the slab->freelist and either transfer the freelist to the
3583 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3586 freelist = slab->freelist;
3587 counters = slab->counters;
3591 new.inuse = slab->objects;
3612 freelist = slab->freelist;
3613 counters = slab->counters;
3618 new.inuse = slab->objects;
3661 slab = READ_ONCE(c->slab);
3664 * if the node is not online or has no normal memory, just
3689 * information when the page leaves the per-cpu allocator
3694 /* must check again c->slab in case we got preempted and it changed */
3695 local_lock_irqsave(&s->cpu_slab->lock, flags);
3696 if (unlikely(slab != c->slab)) {
3697 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3700 freelist = c->freelist;
3707 c->slab = NULL;
3708 c->tid = next_tid(c->tid);
3709 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3718 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3725 VM_BUG_ON(!c->slab->frozen);
3726 c->freelist = get_freepointer(s, freelist);
3727 c->tid = next_tid(c->tid);
3728 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3733 local_lock_irqsave(&s->cpu_slab->lock, flags);
3734 if (slab != c->slab) {
3735 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3738 freelist = c->freelist;
3739 c->slab = NULL;
3740 c->freelist = NULL;
3741 c->tid = next_tid(c->tid);
3742 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3749 local_lock_irqsave(&s->cpu_slab->lock, flags);
3750 if (unlikely(c->slab)) {
3751 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3755 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3765 c->slab = slab;
3772 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3774 slab->next = NULL;
3783 * When a preferred node is indicated but no __GFP_THISNODE
3807 if (s->flags & SLAB_STORE_USER)
3817 slub_put_cpu_ptr(s->cpu_slab);
3819 c = slub_get_cpu_ptr(s->cpu_slab);
3839 if (s->flags & SLAB_STORE_USER)
3846 * No other reference to the slab yet so we can
3849 freelist = slab->freelist;
3850 slab->freelist = NULL;
3851 slab->inuse = slab->objects;
3852 slab->frozen = 1;
3854 inc_slabs_node(s, slab_nid(slab), slab->objects);
3867 local_lock_irqsave(&s->cpu_slab->lock, flags);
3868 if (unlikely(c->slab)) {
3869 void *flush_freelist = c->freelist;
3870 struct slab *flush_slab = c->slab;
3872 c->slab = NULL;
3873 c->freelist = NULL;
3874 c->tid = next_tid(c->tid);
3876 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3884 c->slab = slab;
3905 c = slub_get_cpu_ptr(s->cpu_slab);
3910 slub_put_cpu_ptr(s->cpu_slab);
3936 c = raw_cpu_ptr(s->cpu_slab);
3937 tid = READ_ONCE(c->tid);
3945 * request will fail. In this case, we will retry, so there is no problem.
3952 * occurs on the right processor and that there was no operation on the
3956 object = c->freelist;
3957 slab = c->slab;
3966 * The cmpxchg will only match if there was no additional
4027 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
4049 unsigned int zero_size = s->object_size;
4063 (s->flags & SLAB_KMALLOC))
4070 * cause false-positive reports. This does not lead to a performance
4089 kmemleak_alloc_recursive(p[i], s->object_size, 1,
4090 s->flags, init_flags);
4100 * have the fastpath folded into their functions. So no function call
4130 * @orig_size bytes might be zeroed instead of s->object_size
4142 s->object_size);
4154 s->object_size);
4172 * kmem_cache_alloc_node - Allocate an object on the specified node
4186 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4265 trace_kmalloc(caller, ret, size, s->size, flags, node);
4293 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
4305 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
4323 if (s->flags & SLAB_STORE_USER)
4326 spin_lock_irqsave(&n->list_lock, flags);
4329 void *prior = slab->freelist;
4332 slab->inuse -= cnt;
4334 slab->freelist = head;
4338 * it should be discarded anyway, no matter whether it's on the full or
4341 if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
4359 * Update the counters while still holding n->list_lock to
4362 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
4365 spin_unlock_irqrestore(&n->list_lock, flags);
4378 * lock and free the item. If there is no additional partial slab
4403 spin_unlock_irqrestore(&n->list_lock, flags);
4406 prior = slab->freelist;
4407 counters = slab->counters;
4411 new.inuse -= cnt;
4425 spin_lock_irqsave(&n->list_lock, flags);
4440 * The list lock was not taken therefore no list
4457 * This slab was partially empty but not on the per-node partial list,
4461 spin_unlock_irqrestore(&n->list_lock, flags);
4465 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
4476 spin_unlock_irqrestore(&n->list_lock, flags);
4488 spin_unlock_irqrestore(&n->list_lock, flags);
4524 c = raw_cpu_ptr(s->cpu_slab);
4525 tid = READ_ONCE(c->tid);
4530 if (unlikely(slab != c->slab)) {
4536 freelist = READ_ONCE(c->freelist);
4546 local_lock(&s->cpu_slab->lock);
4547 c = this_cpu_ptr(s->cpu_slab);
4548 if (unlikely(slab != c->slab)) {
4549 local_unlock(&s->cpu_slab->lock);
4552 tid = c->tid;
4553 freelist = c->freelist;
4556 c->freelist = head;
4557 c->tid = next_tid(tid);
4559 local_unlock(&s->cpu_slab->lock);
4612 void *object = delayed_free->object;
4624 s = slab->slab_cache;
4625 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
4648 return slab->slab_cache;
4662 __func__, s->name, cachep->name))
4668 * kmem_cache_free - Deallocate an object
4697 -(PAGE_SIZE << order));
4702 * kfree - free previously allocated memory
4705 * If @object is NULL, no operation is performed.
4726 s = slab->slab_cache;
4760 object = p[--size];
4766 df->slab = NULL;
4770 df->slab = folio_slab(folio);
4771 df->s = df->slab->slab_cache;
4773 df->slab = folio_slab(folio);
4774 df->s = cache_from_obj(s, object); /* Support for memcg */
4778 df->tail = object;
4779 df->freelist = object;
4780 df->cnt = 1;
4785 set_freepointer(df->s, object, NULL);
4789 object = p[--size];
4790 /* df->slab is always set at this point */
4791 if (df->slab == virt_to_slab(object)) {
4793 set_freepointer(df->s, object, df->freelist);
4794 df->freelist = object;
4795 df->cnt++;
4796 same--;
4803 if (!--lookahead)
4867 c = slub_get_cpu_ptr(s->cpu_slab);
4868 local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4871 void *object = kfence_alloc(s, s->object_size, flags);
4878 object = c->freelist;
4881 * We may have removed an object from c->freelist using
4883 * c->tid has not been bumped yet.
4885 * allocating memory, we should bump c->tid now.
4887 c->tid = next_tid(c->tid);
4889 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4892 * Invoking the slow path likely has the side effect
4893 * of re-populating the per-CPU c->freelist
4896 _RET_IP_, c, s->object_size);
4900 c = this_cpu_ptr(s->cpu_slab);
4903 local_lock_irqsave(&s->cpu_slab->lock, irqflags);
4905 continue; /* goto for-loop */
4907 c->freelist = get_freepointer(s, object);
4912 c->tid = next_tid(c->tid);
4913 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
4914 slub_put_cpu_ptr(s->cpu_slab);
4919 slub_put_cpu_ptr(s->cpu_slab);
4931 void *object = kfence_alloc(s, s->object_size, flags);
4939 _RET_IP_, s->object_size);
4976 slab_want_init_on_alloc(flags, s), s->object_size))) {
5067 * onlined. Here we compromise between trying to avoid too high
5083 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
5091 * smallest order from min_objects-derived/slab_min_order up to
5113 return -ENOSYS;
5119 n->nr_partial = 0;
5120 spin_lock_init(&n->list_lock);
5121 INIT_LIST_HEAD(&n->partial);
5123 atomic_long_set(&n->nr_slabs, 0);
5124 atomic_long_set(&n->total_objects, 0);
5125 INIT_LIST_HEAD(&n->full);
5140 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
5143 if (!s->cpu_slab)
5160 * No kmalloc_node yet so do it by hand. We know that this is the first
5161 * slab on the node for this slabcache. There are no concurrent accesses
5166 * memory on a fresh node that has no slab structures yet.
5173 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
5183 n = slab->freelist;
5189 slab->freelist = get_freepointer(kmem_cache_node, n);
5190 slab->inuse = 1;
5191 kmem_cache_node->node[node] = n;
5193 inc_slabs_node(kmem_cache_node, node, slab->objects);
5196 * No locks need to be taken here as it has just been
5197 * initialized and there is no concurrent access.
5208 s->node[node] = NULL;
5217 free_percpu(s->cpu_slab);
5242 s->node[node] = n;
5259 * per node partial lists and therefore no locking will be required.
5267 else if (s->size >= PAGE_SIZE)
5269 else if (s->size >= 1024)
5271 else if (s->size >= 256)
5286 slab_flags_t flags = s->flags;
5287 unsigned int size = s->object_size;
5304 !s->ctor)
5305 s->flags |= __OBJECT_POISON;
5307 s->flags &= ~__OBJECT_POISON;
5315 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
5323 s->inuse = size;
5325 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
5326 (flags & SLAB_POISON) || s->ctor ||
5328 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
5340 * The assumption that s->offset >= s->inuse means free
5342 * freeptr_outside_object() function. If that is no
5345 s->offset = size;
5347 } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
5348 s->offset = args->freeptr_offset;
5355 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
5372 kasan_cache_create(s, &size, &s->flags);
5384 s->red_left_pad = sizeof(void *);
5385 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
5386 size += s->red_left_pad;
5395 size = ALIGN(size, s->align);
5396 s->size = size;
5397 s->reciprocal_size = reciprocal_value(size);
5403 s->allocflags = __GFP_COMP;
5405 if (s->flags & SLAB_CACHE_DMA)
5406 s->allocflags |= GFP_DMA;
5408 if (s->flags & SLAB_CACHE_DMA32)
5409 s->allocflags |= GFP_DMA32;
5411 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5412 s->allocflags |= __GFP_RECLAIMABLE;
5417 s->oo = oo_make(order, size);
5418 s->min = oo_make(get_order(size), size);
5420 return !!oo_objects(s->oo);
5430 slab_err(s, slab, text, s->name);
5435 for_each_object(p, s, addr, slab->objects) {
5440 pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
5459 spin_lock_irq(&n->list_lock);
5460 list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
5461 if (!slab->inuse) {
5463 list_add(&slab->slab_list, &discard);
5469 spin_unlock_irq(&n->list_lock);
5481 if (n->nr_partial || node_nr_slabs(n))
5498 if (n->nr_partial || node_nr_slabs(n))
5512 struct kmem_cache *s = slab->slab_cache;
5515 kpp->kp_ptr = object;
5516 kpp->kp_slab = slab;
5517 kpp->kp_slab_cache = s;
5526 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
5527 objp = base + s->size * objnr;
5528 kpp->kp_objp = objp;
5529 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
5530 || (objp - base) % s->size) ||
5531 !(s->flags & SLAB_STORE_USER))
5536 kpp->kp_ret = (void *)trackp->addr;
5543 handle = READ_ONCE(trackp->handle);
5547 kpp->kp_stack[i] = (void *)entries[i];
5551 handle = READ_ONCE(trackp->handle);
5555 kpp->kp_free_stack[i] = (void *)entries[i];
5624 s = slab->slab_cache;
5633 offset = ptr - kfence_object_start(ptr);
5635 offset = (ptr - slab_address(slab)) % s->size;
5639 if (offset < s->red_left_pad)
5641 s->name, to_user, offset, n);
5642 offset -= s->red_left_pad;
5646 if (offset >= s->useroffset &&
5647 offset - s->useroffset <= s->usersize &&
5648 n <= s->useroffset - offset + s->usersize)
5651 usercopy_abort("SLUB object", s->name, to_user, offset, n);
5683 spin_lock_irqsave(&n->list_lock, flags);
5689 * list_lock. slab->inuse here is the upper limit.
5691 list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
5692 int free = slab->objects - slab->inuse;
5694 /* Do not reread slab->inuse */
5700 if (free == slab->objects) {
5701 list_move(&slab->slab_list, &discard);
5703 n->nr_partial--;
5704 dec_slabs_node(s, node, slab->objects);
5706 list_move(&slab->slab_list, promote + free - 1);
5713 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
5714 list_splice(promote + i, &n->partial);
5716 spin_unlock_irqrestore(&n->list_lock, flags);
5754 offline_node = marg->status_change_nid_normal;
5766 * We no longer free kmem_cache_node structures here, as it would be
5778 int nid = marg->status_change_nid_normal;
5789 * We are bringing a node online. No memory is available yet. We must
5808 ret = -ENOMEM;
5812 s->node[nid] = n;
5867 memcpy(s, static_cache, kmem_cache->object_size);
5878 list_for_each_entry(p, &n->partial, slab_list)
5879 p->slab_cache = s;
5882 list_for_each_entry(p, &n->full, slab_list)
5883 p->slab_cache = s;
5886 list_add(&s->list, &slab_caches);
5940 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
5965 s->refcount++;
5971 s->object_size = max(s->object_size, size);
5972 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
5982 int err = -EINVAL;
5984 s->name = name;
5985 s->size = s->object_size = size;
5987 s->flags = kmem_cache_flags(flags, s->name);
5989 s->random = get_random_long();
5991 s->align = args->align;
5992 s->ctor = args->ctor;
5994 s->useroffset = args->useroffset;
5995 s->usersize = args->usersize;
6005 if (get_order(s->size) > get_order(s->object_size)) {
6006 s->flags &= ~DEBUG_METADATA_FLAGS;
6007 s->offset = 0;
6014 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
6016 s->flags |= __CMPXCHG_DOUBLE;
6024 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
6025 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
6030 s->remote_node_defrag_ratio = 1000;
6033 /* Initialize the pre-computed randomized freelist if slab is up */
6055 if (s->flags & SLAB_STORE_USER)
6067 return slab->inuse;
6072 return slab->objects;
6088 for_each_object(p, s, addr, slab->objects) {
6104 spin_lock_irqsave(&n->list_lock, flags);
6106 list_for_each_entry(slab, &n->partial, slab_list) {
6110 if (count != n->nr_partial) {
6112 s->name, count, n->nr_partial);
6116 if (!(s->flags & SLAB_STORE_USER))
6119 list_for_each_entry(slab, &n->full, slab_list) {
6125 s->name, count, node_nr_slabs(n));
6130 spin_unlock_irqrestore(&n->list_lock, flags);
6141 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
6143 return -ENOMEM;
6186 if (t->max)
6187 free_pages((unsigned long)t->loc,
6188 get_order(sizeof(struct location) * t->max));
6202 if (t->count) {
6203 memcpy(l, t->loc, sizeof(struct location) * t->count);
6206 t->max = max;
6207 t->loc = l;
6218 unsigned long age = jiffies - track->when;
6220 unsigned int waste = s->object_size - orig_size;
6223 handle = READ_ONCE(track->handle);
6225 start = -1;
6226 end = t->count;
6229 pos = start + (end - start + 1) / 2;
6238 l = &t->loc[pos];
6239 caddr = l->addr;
6240 chandle = l->handle;
6241 cwaste = l->waste;
6242 if ((track->addr == caddr) && (handle == chandle) &&
6245 l->count++;
6246 if (track->when) {
6247 l->sum_time += age;
6248 if (age < l->min_time)
6249 l->min_time = age;
6250 if (age > l->max_time)
6251 l->max_time = age;
6253 if (track->pid < l->min_pid)
6254 l->min_pid = track->pid;
6255 if (track->pid > l->max_pid)
6256 l->max_pid = track->pid;
6258 cpumask_set_cpu(track->cpu,
6259 to_cpumask(l->cpus));
6261 node_set(page_to_nid(virt_to_page(track)), l->nodes);
6265 if (track->addr < caddr)
6267 else if (track->addr == caddr && handle < chandle)
6269 else if (track->addr == caddr && handle == chandle &&
6279 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
6282 l = t->loc + pos;
6283 if (pos < t->count)
6285 (t->count - pos) * sizeof(struct location));
6286 t->count++;
6287 l->count = 1;
6288 l->addr = track->addr;
6289 l->sum_time = age;
6290 l->min_time = age;
6291 l->max_time = age;
6292 l->min_pid = track->pid;
6293 l->max_pid = track->pid;
6294 l->handle = handle;
6295 l->waste = waste;
6296 cpumask_clear(to_cpumask(l->cpus));
6297 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
6298 nodes_clear(l->nodes);
6299 node_set(page_to_nid(virt_to_page(track)), l->nodes);
6313 for_each_object(p, s, addr, slab->objects)
6317 s->object_size);
6348 return -ENOMEM;
6354 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
6359 slab = READ_ONCE(c->slab);
6365 x = slab->objects;
6367 x = slab->inuse;
6383 x = data_race(slab->slabs);
6395 * mem_hotplug_lock->slab_mutex->kernfs_mutex
6399 * unplug code doesn't destroy the kmem_cache->node[] data.
6411 x = node_nr_objs(n) - count_partial(n, count_free);
6429 x = n->nr_partial;
6466 return sysfs_emit(buf, "%u\n", s->size);
6472 return sysfs_emit(buf, "%u\n", s->align);
6478 return sysfs_emit(buf, "%u\n", s->object_size);
6484 return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
6490 return sysfs_emit(buf, "%u\n", oo_order(s->oo));
6496 return sysfs_emit(buf, "%lu\n", s->min_partial);
6509 s->min_partial = min;
6518 nr_partial = s->cpu_partial;
6534 return -EINVAL;
6544 if (!s->ctor)
6546 return sysfs_emit(buf, "%pS\n", s->ctor);
6552 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
6585 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6588 slabs += data_race(slab->slabs);
6592 /* Approximate half-full slabs, see slub_set_cpu_partial() */
6593 objects = (slabs * oo_objects(s->oo)) / 2;
6600 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6602 slabs = data_race(slab->slabs);
6603 objects = (slabs * oo_objects(s->oo)) / 2;
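This sysfs reader is the inverse of the slub_set_cpu_partial() sizing at line 618: it only knows how many slabs sit on the per-CPU partial lists, so it estimates the free objects by assuming each slab is half full. A worked example with assumed numbers (6 slabs, 16 objects per slab), not values from the file:

/* (6 * 16) / 2 = 48 estimated free objects on that CPU's partial list. */
static unsigned int estimate_partial_objects(unsigned int slabs,
					     unsigned int objs_per_slab)
{
	return (slabs * objs_per_slab) / 2;
}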
6617 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
6623 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
6630 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
6638 return sysfs_emit(buf, "%u\n", s->usersize);
6645 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
6670 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
6676 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
6682 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
6689 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
6696 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
6709 int ret = -EINVAL;
6725 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
6731 if (s->refcount > 1)
6732 return -EINVAL;
6735 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
6737 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
6755 return -EINVAL;
6763 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
6776 return -ERANGE;
6778 s->remote_node_defrag_ratio = ratio * 10;
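The stored value is the sysfs input times 10 (and is shown divided by 10), and it is compared against get_cycles() % 1024 in the check at lines 2896-2897. A sketch of the resulting decision, with an assumed sysfs write of 20 (stored as 200, allowing a remote partial-list search roughly 20% of the time):

#include <stdbool.h>

/* ratio is the stored value, i.e. the sysfs input scaled by 10. */
static bool attempt_remote_defrag(unsigned long cycles, unsigned int ratio)
{
	if (!ratio || cycles % 1024 > ratio)
		return false;	/* skip searching other nodes' partial lists */
	return true;
}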
6794 return -ENOMEM;
6797 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
6823 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
6835 return -EINVAL; \
6872 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
6881 s->flags &= ~SLAB_SKIP_KFENCE;
6883 s->flags |= SLAB_SKIP_KFENCE;
6885 ret = -EINVAL;
6982 if (!attribute->show)
6983 return -EIO;
6985 return attribute->show(s, buf);
6998 if (!attribute->store)
6999 return -EIO;
7001 return attribute->store(s, buf, len);
7030 * Format :[flags-]size
7038 return ERR_PTR(-ENOMEM);
7048 if (s->flags & SLAB_CACHE_DMA)
7050 if (s->flags & SLAB_CACHE_DMA32)
7052 if (s->flags & SLAB_RECLAIM_ACCOUNT)
7054 if (s->flags & SLAB_CONSISTENCY_CHECKS)
7056 if (s->flags & SLAB_ACCOUNT)
7059 *p++ = '-';
7060 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
7062 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
7064 return ERR_PTR(-EINVAL);
7066 kmsan_unpoison_memory(name, p - name);
7087 sysfs_remove_link(&slab_kset->kobj, s->name);
7088 name = s->name;
7099 s->kobj.kset = kset;
7100 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
7104 err = sysfs_create_group(&s->kobj, &slab_attr_group);
7110 sysfs_slab_alias(s, s->name);
7117 kobject_del(&s->kobj);
7123 kobject_del(&s->kobj);
7128 kobject_put(&s->kobj);
7151 sysfs_remove_link(&slab_kset->kobj, name);
7152 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
7157 return -ENOMEM;
7159 al->s = s;
7160 al->name = name;
7161 al->next = alias_list;
7178 return -ENOMEM;
7187 s->name);
7193 alias_list = alias_list->next;
7194 err = sysfs_slab_alias(al->s, al->name);
7197 al->name);
7210 struct loc_track *t = seq->private;
7214 idx = (unsigned long) t->idx;
7215 if (idx < t->count) {
7216 l = &t->loc[idx];
7218 seq_printf(seq, "%7ld ", l->count);
7220 if (l->addr)
7221 seq_printf(seq, "%pS", (void *)l->addr);
7223 seq_puts(seq, "<not-available>");
7225 if (l->waste)
7227 l->count * l->waste, l->waste);
7229 if (l->sum_time != l->min_time) {
7231 l->min_time, div_u64(l->sum_time, l->count),
7232 l->max_time);
7234 seq_printf(seq, " age=%ld", l->min_time);
7236 if (l->min_pid != l->max_pid)
7237 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
7240 l->min_pid);
7242 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
7244 cpumask_pr_args(to_cpumask(l->cpus)));
7246 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
7248 nodemask_pr_args(&l->nodes));
7256 handle = READ_ONCE(l->handle);
7268 if (!idx && !t->count)
7269 seq_puts(seq, "No data\n");
7280 struct loc_track *t = seq->private;
7282 t->idx = ++(*ppos);
7283 if (*ppos <= t->count)
7294 if (loc1->count > loc2->count)
7295 return -1;
7302 struct loc_track *t = seq->private;
7304 t->idx = *ppos;
7323 struct kmem_cache *s = file_inode(filep)->i_private;
7327 return -ENOMEM;
7329 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
7332 return -ENOMEM;
7335 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
7343 return -ENOMEM;
7353 spin_lock_irqsave(&n->list_lock, flags);
7354 list_for_each_entry(slab, &n->partial, slab_list)
7356 list_for_each_entry(slab, &n->full, slab_list)
7358 spin_unlock_irqrestore(&n->list_lock, flags);
7362 sort_r(t->loc, t->count, sizeof(struct location),
7371 struct seq_file *seq = file->private_data;
7372 struct loc_track *t = seq->private;
7392 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
7403 debugfs_lookup_and_remove(s->name, slab_debugfs_root);
7413 if (s->flags & SLAB_STORE_USER)
7439 sinfo->active_objs = nr_objs - nr_free;
7440 sinfo->num_objs = nr_objs;
7441 sinfo->active_slabs = nr_slabs;
7442 sinfo->num_slabs = nr_slabs;
7443 sinfo->objects_per_slab = oo_objects(s->oo);
7444 sinfo->cache_order = oo_order(s->oo);