slub.c: 1a5ad30b89b4e9fa64f75b941a324396738b7616 (old) → 7c82b3b308f9ca24852e3b0ee963b9eae128b78a (new)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * SLUB: A slab allocator that limits cache line use instead of queuing
  * objects in per cpu and per node lists.
  *
  * The allocator synchronizes using per slab locks or atomic operations
  * and only uses a centralized lock to manage a pool of partial slabs.
  *

--- 5572 unchanged lines hidden ---

@@ -5581,17 +5581,31 @@
 
 #endif /* CONFIG_SLUB_DEBUG */
 
 #ifdef CONFIG_FAILSLAB
 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
 {
         return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
 }
-SLAB_ATTR_RO(failslab);
+
+static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
+                              size_t length)
+{
+        if (s->refcount > 1)
+                return -EINVAL;
+
+        if (buf[0] == '1')
+                WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
+        else
+                WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
+
+        return length;
+}
+SLAB_ATTR(failslab);
 #endif
 
 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
 {
         return 0;
 }
 
 static ssize_t shrink_store(struct kmem_cache *s,

--- 707 unchanged lines hidden ---
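
The hunk above converts the failslab sysfs file from read-only (SLAB_ATTR_RO) to read-write (SLAB_ATTR): failslab_show() still reports whether SLAB_FAILSLAB is set, and the new failslab_store() lets the flag be toggled at runtime, refusing caches whose refcount is greater than 1. Writing "1" sets the bit, anything else clears it, and the update goes through WRITE_ONCE(), presumably because s->flags is also read without locking elsewhere. A minimal userspace sketch of just that toggle logic follows; struct fake_cache, FAKE_SLAB_FAILSLAB and fake_failslab_store() are made-up stand-ins for illustration, not kernel interfaces.

/*
 * Minimal userspace sketch of the toggle logic in the new failslab_store().
 * struct fake_cache, FAKE_SLAB_FAILSLAB and fake_failslab_store() are
 * illustrative stand-ins only; they are not kernel code.
 */
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <sys/types.h>

#define FAKE_SLAB_FAILSLAB (1u << 2)    /* stand-in for the real flag bit */

struct fake_cache {
        unsigned int flags;     /* models s->flags */
        int refcount;           /* models s->refcount */
};

static ssize_t fake_failslab_store(struct fake_cache *s, const char *buf,
                                   size_t length)
{
        /* Caches with more than one user are rejected, mirroring the
         * s->refcount > 1 check in the real failslab_store(). */
        if (s->refcount > 1)
                return -EINVAL;

        /* "1" enables fault injection for the cache, anything else disables it. */
        if (buf[0] == '1')
                s->flags |= FAKE_SLAB_FAILSLAB;
        else
                s->flags &= ~FAKE_SLAB_FAILSLAB;

        /* A sysfs store callback returns the number of bytes it consumed. */
        return (ssize_t)length;
}

int main(void)
{
        struct fake_cache c = { .flags = 0, .refcount = 1 };

        fake_failslab_store(&c, "1", 2);
        assert(c.flags & FAKE_SLAB_FAILSLAB);

        fake_failslab_store(&c, "0", 2);
        assert(!(c.flags & FAKE_SLAB_FAILSLAB));

        c.refcount = 2;
        assert(fake_failslab_store(&c, "1", 2) == -EINVAL);

        return 0;
}

With the read-write attribute registered, writing 1 to /sys/kernel/slab/<cache>/failslab should enable fault injection for that cache at runtime (assuming the standard SLUB sysfs layout).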