slub.c (74ad3cb08b0166776c41a460b70034edb02acb65) vs. slub.c (67f2df3b82d091ed095d0e47e1f3a9d3e18e4e41)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * SLUB: A slab allocator that limits cache line use instead of queuing
4 * objects in per cpu and per node lists.
5 *
6 * The allocator synchronizes using per slab locks or atomic operations
7 * and only uses a centralized lock to manage a pool of partial slabs.
8 *

--- 774 unchanged lines hidden ---

783 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
784 if (!resource)
785 return false;
786
787 (*(int *)resource->data)++;
788 kunit_put_resource(resource);
789 return true;
790}
791
792static bool slab_in_kunit_test(void)
793{
794 struct kunit_resource *resource;
795
796 if (!kunit_get_current_test())
797 return false;
798
799 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
800 if (!resource)
801 return false;
802
803 kunit_put_resource(resource);
804 return true;
805}
791#else
792static inline bool slab_add_kunit_errors(void) { return false; }
808static inline bool slab_in_kunit_test(void) { return false; }
793#endif
794
795static inline unsigned int size_from_object(struct kmem_cache *s)
796{
797 if (s->flags & SLAB_RED_ZONE)
798 return s->size - s->red_left_pad;
799
800 return s->size;

--- 156 unchanged lines hidden ---

957 return;
958
959 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
960 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
961}
962
963static void print_slab_info(const struct slab *slab)
964{
965 struct folio *folio = (struct folio *)slab_folio(slab);
966
967 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
968 slab, slab->objects, slab->inuse, slab->freelist,
969 folio_flags(folio, 0));
983 &slab->__page_flags);
970}
971
972/*
973 * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
974 * family will round up the real request size to these fixed ones, so
975 * there could be an extra area than what is requested. Save the original
976 * request size in the meta data area, for better debug and sanity check.
977 */
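
The comment above explains why the original request size is worth recording: kmalloc() serves a request from the nearest fixed-size cache, so the object can be larger than what was asked for. A minimal user-space sketch of that rounding, with an illustrative power-of-two bucket standing in for the real kmalloc cache table:

#include <stdio.h>
#include <stddef.h>

/* Illustrative bucket selection; the kernel's cache sizes differ in detail. */
static size_t bucket_size(size_t request)
{
        size_t size = 8;

        while (size < request)
                size <<= 1;
        return size;
}

int main(void)
{
        size_t request = 52;
        size_t object = bucket_size(request);

        /*
         * The object really spans 'object' bytes; the trailing
         * 'object - request' bytes are the extra area the comment
         * mentions, and 'request' is what orig_size tracking would
         * save in the metadata for later sanity checks.
         */
        printf("requested %zu, object size %zu, extra %zu\n",
               request, object, object - request);
        return 0;
}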

--- 209 unchanged lines hidden ---

1187
1188 if (slab_add_kunit_errors())
1189 goto skip_bug_print;
1190
1191 slab_bug(s, "%s overwritten", what);
1192 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1193 fault, end - 1, fault - addr,
1194 fault[0], value);
1195 print_trailer(s, slab, object);
1196 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1197
1198skip_bug_print:
1199 restore_bytes(s, what, value, fault, end);
1200 return 0;
1201}
1202
1203/*
1204 * Object layout:

--- 6 unchanged lines hidden ---

1211 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
1212 * 0xa5 (POISON_END)
1213 *
1214 * object + s->object_size
1215 * Padding to reach word boundary. This is also used for Redzoning.
1216 * Padding is extended by another word if Redzoning is enabled and
1217 * object_size == inuse.
1218 *
1219 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
1220 * 0xcc (RED_ACTIVE) for objects in use.
1231 * We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
1232 * 0xcc (SLUB_RED_ACTIVE) for objects in use.
1221 *
1222 * object + s->inuse
1223 * Meta data starts here.
1224 *
1225 * A. Free pointer (if we cannot overwrite object on free)
1226 * B. Tracking data for SLAB_STORE_USER
1227 * C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
1228 * D. Padding to reach required alignment boundary or at minimum

--- 68 unchanged lines hidden ---
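
A toy rendering of the layout described in the comment above. The field names follow the ones used in this file (red_left_pad, object_size, inuse); the concrete numbers are made up solely to show how the regions line up in a debug-enabled cache:

#include <stdio.h>

int main(void)
{
        /* Hypothetical debug cache; all values are illustrative only. */
        unsigned int red_left_pad = 16; /* left redzone before the object */
        unsigned int object_size = 40;  /* payload, poisoned while free */
        unsigned int inuse = 48;        /* payload plus right redzone/word padding */
        unsigned int metadata = 24;     /* free pointer + tracking, made up */

        printf("[%3u..%3u) left redzone (0xbb when free, 0xcc when in use)\n",
               0u, red_left_pad);
        printf("[%3u..%3u) object (POISON_FREE 0x6b, last byte POISON_END 0xa5)\n",
               red_left_pad, red_left_pad + object_size);
        printf("[%3u..%3u) right redzone / padding to a word boundary\n",
               red_left_pad + object_size, red_left_pad + inuse);
        printf("[%3u..%3u) free pointer, tracking and alignment padding\n",
               red_left_pad + inuse, red_left_pad + inuse + metadata);
        return 0;
}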

1297}
1298
1299static int check_object(struct kmem_cache *s, struct slab *slab,
1300 void *object, u8 val)
1301{
1302 u8 *p = object;
1303 u8 *endobject = object + s->object_size;
1304 unsigned int orig_size, kasan_meta_size;
1317 int ret = 1;
1305
1306 if (s->flags & SLAB_RED_ZONE) {
1307 if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1308 object - s->red_left_pad, val, s->red_left_pad))
1309 return 0;
1322 ret = 0;
1310
1311 if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1312 endobject, val, s->inuse - s->object_size))
1313 return 0;
1326 ret = 0;
1314
1315 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1316 orig_size = get_orig_size(s, object);
1317
1318 if (s->object_size > orig_size &&
1319 !check_bytes_and_report(s, slab, object,
1320 "kmalloc Redzone", p + orig_size,
1321 val, s->object_size - orig_size)) {
1322 return 0;
1335 ret = 0;
1323 }
1324 }
1325 } else {
1326 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1327 check_bytes_and_report(s, slab, p, "Alignment padding",
1340 if (!check_bytes_and_report(s, slab, p, "Alignment padding",
1328 endobject, POISON_INUSE,
1329 s->inuse - s->object_size);
1342 s->inuse - s->object_size))
1343 ret = 0;
1330 }
1331 }
1332
1333 if (s->flags & SLAB_POISON) {
1334 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1335 /*
1336 * KASAN can save its free meta data inside of the
1337 * object at offset 0. Thus, skip checking the part of
1338 * the redzone that overlaps with the meta data.
1339 */
1340 kasan_meta_size = kasan_metadata_size(s, true);
1341 if (kasan_meta_size < s->object_size - 1 &&
1342 !check_bytes_and_report(s, slab, p, "Poison",
1343 p + kasan_meta_size, POISON_FREE,
1344 s->object_size - kasan_meta_size - 1))
1345 return 0;
1359 ret = 0;
1346 if (kasan_meta_size < s->object_size &&
1347 !check_bytes_and_report(s, slab, p, "End Poison",
1348 p + s->object_size - 1, POISON_END, 1))
1349 return 0;
1363 ret = 0;
1350 }
1351 /*
1352 * check_pad_bytes cleans up on its own.
1353 */
1354 check_pad_bytes(s, slab, p);
1368 if (!check_pad_bytes(s, slab, p))
1369 ret = 0;
1355 }
1356
1357 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
1358 /*
1359 * Object and freepointer overlap. Cannot check
1360 * freepointer while object is allocated.
1361 */
1362 return 1;
1363
1364 /* Check free pointer validity */
1365 if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
1372 /*
1373 * Cannot check freepointer while object is allocated if
1374 * object and freepointer overlap.
1375 */
1376 if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
1377 !check_valid_pointer(s, slab, get_freepointer(s, p))) {
1366 object_err(s, slab, p, "Freepointer corrupt");
1367 /*
1368 * No choice but to zap it and thus lose the remainder
1369 * of the free objects in this slab. May cause
1370 * another error because the object count is now wrong.
1371 */
1372 set_freepointer(s, p, NULL);
1373 return 0;
1385 ret = 0;
1374 }
1375 return 1;
1387
1388 if (!ret && !slab_in_kunit_test()) {
1389 print_trailer(s, slab, object);
1390 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1391 }
1392
1393 return ret;
1376}
1377
1378static int check_slab(struct kmem_cache *s, struct slab *slab)
1379{
1380 int maxobj;
1381
1382 if (!folio_test_slab(slab_folio(slab))) {
1383 slab_err(s, slab, "Not a valid slab page");

--- 563 unchanged lines hidden ---

1947
1948 return -ENOMEM;
1949 }
1950
1951 new_exts = (unsigned long)vec;
1952#ifdef CONFIG_MEMCG
1953 new_exts |= MEMCG_DATA_OBJEXTS;
1954#endif
1955 old_exts = READ_ONCE(slab->obj_exts);
1973 old_exts = slab->obj_exts;
1956 handle_failed_objexts_alloc(old_exts, vec, objects);
1957 if (new_slab) {
1958 /*
1959 * If the slab is brand new and nobody can yet access its
1960 * obj_exts, no synchronization is required and obj_exts can
1961 * be simply assigned.
1962 */
1963 slab->obj_exts = new_exts;
1964 } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
1965 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
1982 } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
1966 /*
1967 * If the slab is already in use, somebody can allocate and
1968 * assign slabobj_exts in parallel. In this case the existing
1969 * objcg vector should be reused.
1970 */
1971 mark_objexts_empty(vec);
1972 kfree(vec);
1973 return 0;

--- 554 unchanged lines hidden ---
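
The two comments in the hunk above describe a publication rule for slab->obj_exts: a brand-new slab can take a plain store, while a slab that is already visible needs a compare-and-exchange so that a vector installed by a racing allocator is reused instead of overwritten. A stand-alone C11 sketch of that pattern; the fake_slab type and publish_obj_exts() helper are inventions for illustration, not kernel code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a slab carrying an extension-vector pointer. */
struct fake_slab {
        _Atomic(void *) obj_exts;
};

/* Try to publish 'vec'; reuse whatever a racing thread installed first. */
static void *publish_obj_exts(struct fake_slab *slab, void *vec, int new_slab)
{
        void *expected = NULL;

        if (new_slab) {
                /* Nobody else can see the slab yet: a plain store is enough. */
                atomic_store_explicit(&slab->obj_exts, vec, memory_order_relaxed);
                return vec;
        }

        if (atomic_compare_exchange_strong(&slab->obj_exts, &expected, vec))
                return vec;     /* we won the race, our vector is live */

        free(vec);              /* somebody beat us to it: reuse theirs */
        return expected;
}

int main(void)
{
        struct fake_slab slab = { .obj_exts = NULL };
        void *vec = malloc(64);

        printf("installed %p, now in use %p\n", vec,
               publish_obj_exts(&slab, vec, 0));
        return 0;
}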

2528}
2529
2530/*
2531 * SLUB reuses PG_workingset bit to keep track of whether it's on
2532 * the per-node partial list.
2533 */
2534static inline bool slab_test_node_partial(const struct slab *slab)
2535{
2536 return folio_test_workingset((struct folio *)slab_folio(slab));
2553 return folio_test_workingset(slab_folio(slab));
2537}
2538
2539static inline void slab_set_node_partial(struct slab *slab)
2540{
2541 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2542}
2543
2544static inline void slab_clear_node_partial(struct slab *slab)

--- 1504 unchanged lines hidden (view full) ---

4049}
4050EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
4051
4052/*
4053 * To avoid unnecessary overhead, we pass through large allocation requests
4054 * directly to the page allocator. We use __GFP_COMP, because we will need to
4055 * know the allocation order to free the pages properly in kfree.
4056 */
4057static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
4074static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
4058{
4059 struct folio *folio;
4060 void *ptr = NULL;
4061 unsigned int order = get_order(size);
4062
4063 if (unlikely(flags & GFP_SLAB_BUG_MASK))
4064 flags = kmalloc_fix_flags(flags);
4065

--- 8 unchanged lines hidden ---

4074 ptr = kasan_kmalloc_large(ptr, size, flags);
4075 /* As ptr might get tagged, call kmemleak hook after KASAN. */
4076 kmemleak_alloc(ptr, size, 1, flags);
4077 kmsan_kmalloc_large(ptr, size, flags);
4078
4079 return ptr;
4080}
4081
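
As the comment above the large-allocation helper notes, oversized requests skip the slab caches entirely and the compound-page order is what later lets kfree() hand the right number of pages back. A user-space sketch of the size-to-order step; PAGE_SIZE and the cache-size cutoff are illustrative stand-ins for the kernel's constants:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE              4096UL
#define KMALLOC_MAX_CACHE_SIZE (2UL * PAGE_SIZE)    /* assumed cutoff for this sketch */

/* Smallest order such that (PAGE_SIZE << order) covers the request. */
static unsigned int size_to_order(size_t size)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        size_t sizes[] = { 100, 8192, 100000 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                size_t sz = sizes[i];

                if (sz <= KMALLOC_MAX_CACHE_SIZE) {
                        printf("%zu bytes: served from a kmalloc cache\n", sz);
                } else {
                        unsigned int order = size_to_order(sz);

                        printf("%zu bytes: page allocator, order %u (%lu bytes)\n",
                               sz, order, PAGE_SIZE << order);
                }
        }
        return 0;
}
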
4082void *kmalloc_large_noprof(size_t size, gfp_t flags)
4099void *__kmalloc_large_noprof(size_t size, gfp_t flags)
4083{
4084 void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
4101 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
4085
4086 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4087 flags, NUMA_NO_NODE);
4088 return ret;
4089}
4090EXPORT_SYMBOL(kmalloc_large_noprof);
4107EXPORT_SYMBOL(__kmalloc_large_noprof);
4091
4092void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
4109void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
4093{
4094 void *ret = __kmalloc_large_node(size, flags, node);
4111 void *ret = ___kmalloc_large_node(size, flags, node);
4095
4096 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4097 flags, node);
4098 return ret;
4099}
4100EXPORT_SYMBOL(kmalloc_large_node_noprof);
4117EXPORT_SYMBOL(__kmalloc_large_node_noprof);
4101
4102static __always_inline
4103void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
4120void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
4104 unsigned long caller)
4105{
4106 struct kmem_cache *s;
4107 void *ret;
4108
4109 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4110 ret = __kmalloc_large_node(size, flags, node);
4127 ret = __kmalloc_large_node_noprof(size, flags, node);
4111 trace_kmalloc(caller, ret, size,
4112 PAGE_SIZE << get_order(size), flags, node);
4113 return ret;
4114 }
4115
4116 if (unlikely(!size))
4117 return ZERO_SIZE_PTR;
4118
4119 s = kmalloc_slab(size, flags, caller);
4136 s = kmalloc_slab(size, b, flags, caller);
4120
4121 ret = slab_alloc_node(s, NULL, flags, node, caller, size);
4122 ret = kasan_kmalloc(s, ret, size, flags);
4123 trace_kmalloc(caller, ret, size, s->size, flags, node);
4124 return ret;
4125}
4126
4127void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
4143void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
4128{
4129 return __do_kmalloc_node(size, flags, node, _RET_IP_);
4145 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
4130}
4131EXPORT_SYMBOL(__kmalloc_node_noprof);
4132
4133void *__kmalloc_noprof(size_t size, gfp_t flags)
4134{
4135 return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
4151 return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
4136}
4137EXPORT_SYMBOL(__kmalloc_noprof);
4138
4139void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
4140 int node, unsigned long caller)
4155void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
4156 int node, unsigned long caller)
4141{
4142 return __do_kmalloc_node(size, flags, node, caller);
4158 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
4159
4143}
4144EXPORT_SYMBOL(kmalloc_node_track_caller_noprof);
4161EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
4145
4146void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
4163void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
4147{
4148 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
4149 _RET_IP_, size);
4150
4151 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
4152
4153 ret = kasan_kmalloc(s, ret, size, gfpflags);
4154 return ret;
4155}
4156EXPORT_SYMBOL(kmalloc_trace_noprof);
4173EXPORT_SYMBOL(__kmalloc_cache_noprof);
4157
4158void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
4159 int node, size_t size)
4175void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
4176 int node, size_t size)
4160{
4161 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
4162
4163 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
4164
4165 ret = kasan_kmalloc(s, ret, size, gfpflags);
4166 return ret;
4167}
4168EXPORT_SYMBOL(kmalloc_node_trace_noprof);
4185EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
4169
4170static noinline void free_to_partial_list(
4171 struct kmem_cache *s, struct slab *slab,
4172 void *head, void *tail, int bulk_cnt,
4173 unsigned long addr)
4174{
4175 struct kmem_cache_node *n = get_node(s, slab_nid(slab));
4176 struct slab *slab_free = NULL;

--- 968 unchanged lines hidden ---

5145#endif
5146
5147 /*
5148 * With that we have determined the number of bytes in actual use
5149 * by the object and redzoning.
5150 */
5151 s->inuse = size;
5152
5153 if (slub_debug_orig_size(s) ||
5154 (flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
5155 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
5156 s->ctor) {
5170 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
5171 ((flags & SLAB_RED_ZONE) &&
5172 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
5157 /*
5158 * Relocate free pointer after the object if it is not
5159 * permitted to overwrite the first word of the object on
5160 * kmem_cache_free.
5161 *
5162 * This is the case if we do RCU, have a constructor or
5163 * destructor, are poisoning the objects, or are
5164 * redzoning an object smaller than sizeof(void *).
5180 * redzoning an object smaller than sizeof(void *) or are
5181 * redzoning an object with slub_debug_orig_size() enabled,
5182 * in which case the right redzone may be extended.
5165 *
5166 * The assumption that s->offset >= s->inuse means free
5167 * pointer is outside of the object is used in the
5168 * freeptr_outside_object() function. If that is no
5169 * longer true, the function needs to be modified.
5170 */
5171 s->offset = size;
5172 size += sizeof(void *);

--- 2093 unchanged lines hidden ---
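
Both versions of the condition above funnel into the same relocation: the free pointer is appended after the in-use area, so s->offset >= s->inuse holds, which the comment says freeptr_outside_object() relies on. A toy model of that bookkeeping; the structure and numbers are made up, and the overlapping case is deliberately simplified:

#include <stdio.h>

/* Toy stand-in for the few kmem_cache size fields involved here. */
struct toy_cache {
        unsigned int object_size; /* user-visible payload */
        unsigned int inuse;       /* payload rounded up to a word */
        unsigned int offset;      /* where the free pointer is stored */
        unsigned int size;        /* per-object footprint so far */
};

static void place_free_pointer(struct toy_cache *s, int relocate)
{
        unsigned int size = s->inuse;

        if (relocate) {
                /* Append the pointer after the object: offset >= inuse. */
                s->offset = size;
                size += sizeof(void *);
        } else {
                /* Simplified overlap; the kernel picks its own in-object spot. */
                s->offset = 0;
        }
        s->size = size;
}

int main(void)
{
        struct toy_cache overlap = { .object_size = 40, .inuse = 40 };
        struct toy_cache moved = overlap;

        place_free_pointer(&overlap, 0);
        place_free_pointer(&moved, 1);
        printf("overlapping free pointer: offset=%u size=%u\n",
               overlap.offset, overlap.size);
        printf("relocated free pointer:   offset=%u size=%u\n",
               moved.offset, moved.size);
        return 0;
}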