/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
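
/*
 * Illustrative DT shape this walk expects (a hypothetical fragment, not
 * taken from any real .dts): the level 1 cache is described by the CPU
 * node itself, and deeper levels are reached by following
 * "next-level-cache" phandles via of_find_next_cache_node():
 *
 *	cpu@0 {
 *		d-cache-size = <0x8000>;
 *		i-cache-size = <0x8000>;
 *		next-level-cache = <&L2_0>;
 *	};
 *
 *	L2_0: l2-cache {
 *		compatible = "cache";
 *		cache-level = <2>;
 *		cache-size = <0x80000>;
 *	};
 */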

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
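
/*
 * The property names above follow the standard devicetree cache bindings:
 * unified caches use the bare "cache-*" names (slot 0), while Harvard
 * style caches use the "i-cache-*" and "d-cache-*" variants. As a
 * hypothetical example, a data cache leaf described as
 *
 *	d-cache-size = <0x8000>;
 *	d-cache-line-size = <64>;
 *	d-cache-sets = <128>;
 *
 * is parsed by the helpers below through cache_type_info[2].
 */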

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}
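
/*
 * Returning the type directly relies on the enum cache_type encoding in
 * <linux/cacheinfo.h>: CACHE_TYPE_INST is BIT(0) (i.e. 1) and
 * CACHE_TYPE_DATA is BIT(1) (i.e. 2), so both index straight into
 * cache_type_info[] above, while CACHE_TYPE_UNIFIED (BIT(2)) is remapped
 * to slot 0.
 */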

static void cache_size(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *cache_size;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	cache_size = of_get_property(this_leaf->of_node, propname, NULL);
	if (cache_size)
		this_leaf->size = of_read_number(cache_size, 1);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
	const __be32 *line_size;
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		line_size = of_get_property(this_leaf->of_node, propname, NULL);
		if (line_size)
			break;
	}

	if (line_size)
		this_leaf->coherency_line_size = of_read_number(line_size, 1);
}

static void cache_nr_sets(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *nr_sets;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
	if (nr_sets)
		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative (nr_sets == 1), there is no
	 * need to check the other properties.
	 */
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
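
/*
 * Worked example with made-up numbers: a 32 KiB cache (size == 32768)
 * with 128 sets and 64-byte lines gives (32768 / 128) / 64 = 4 ways of
 * associativity.
 */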

static void cache_of_override_properties(unsigned int cpu)
{
	int index;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		cache_size(this_leaf);
		cache_get_line_size(this_leaf);
		cache_nr_sets(this_leaf);
		cache_associativity(this_leaf);
	}
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume a unique, per-CPU level 1 cache and
	 * system-wide shared caches for all other levels. This is used
	 * only if the arch-specific code has not populated shared_cpu_map.
	 */
	return this_leaf->level != 1;
}
#endif

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		/* No cache property/hierarchy support yet in ACPI */
		ret = -ENOTSUPP;
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}
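
/*
 * Note the symmetry above: when leaf 'index' of this CPU and of an online
 * sibling resolve to the same device-tree node, each CPU is added to the
 * other's shared_cpu_map, so the masks converge as CPUs are brought
 * online one at a time.
 */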

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void cache_override_properties(unsigned int cpu)
{
	if (of_have_populated_dt())
		cache_of_override_properties(cpu);
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
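
/*
 * Architectures override the two __weak hooks above. A minimal sketch of
 * the expected contract (hypothetical arch code, not part of this file):
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		// set ci_cacheinfo(cpu)->num_levels and num_leaves from
 *		// hardware registers or firmware tables
 *		return 0;
 *	}
 *
 *	int populate_cache_leaves(unsigned int cpu)
 *	{
 *		// fill each struct cacheinfo in info_list: at least type
 *		// and level; geometry and shared_cpu_map if known
 *		return 0;
 *	}
 */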

static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	cache_override_properties(cpu);
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}
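
/*
 * For reference, show_one(level, level) below expands to roughly:
 *
 *	static ssize_t level_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *		return sprintf(buf, "%u\n", this_leaf->level);
 *	}
 *
 * i.e. the _show handler that DEVICE_ATTR_RO(level) further down pairs
 * with the sysfs attribute.
 */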

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};
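
/*
 * These attributes surface in sysfs roughly as shown below (example
 * paths; the index count and the set of visible files depend on the
 * CPU and on cache_default_attrs_is_visible()):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/level
 *	/sys/devices/system/cpu/cpu0/cache/index0/type
 *	/sys/devices/system/cpu/cpu0/cache/index0/size
 *	/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map
 *	...
 */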

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};
static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Placeholder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}
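
/*
 * Note that cache_private_groups[] above is a single shared array: the
 * first non-NULL group returned by cache_get_priv_group() is cached in
 * slot 1 and reused for every later leaf, which assumes all leaves on
 * the system share the same private attribute group.
 */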

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

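/*
 * CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state, so
 * cacheinfo_cpu_online() runs for every CPU already online when this
 * initcall executes and for each CPU brought online later, while
 * cacheinfo_cpu_pre_down() runs before a CPU goes offline, keeping the
 * sysfs view in step with hotplug.
 */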
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);