// SPDX-License-Identifier: GPL-2.0-only
/*
 * LoongArch cacheinfo support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/cacheinfo.h>
#include <asm/cpu-info.h>

/* Populate the given leaf from cpuinfo and advance the pointer to the next leaf */
#define populate_cache(cache, leaf, c_level, c_type)		\
do {								\
	leaf->type = c_type;					\
	leaf->level = c_level;					\
	leaf->coherency_line_size = c->cache.linesz;		\
	leaf->number_of_sets = c->cache.sets;			\
	leaf->ways_of_associativity = c->cache.ways;		\
	leaf->size = c->cache.linesz * c->cache.sets *		\
		c->cache.ways;					\
	leaf++;							\
} while (0)
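/*
 * For example, populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED)
 * fills the current leaf from c->scache (line size, sets, ways and the
 * derived total size) and leaves this_leaf pointing at the next free
 * entry; the macro expects a local struct cpuinfo_loongarch *c in scope.
 */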
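/*
 * Count cache levels and leaves from the boot-time cpuinfo: a split L1
 * I-cache and D-cache share one level but occupy two leaves, while each
 * of VCache, SCache and TCache, when present, adds one level with a
 * single leaf.
 */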
int init_cache_level(unsigned int cpu)
{
	struct cpuinfo_loongarch *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	int levels = 0, leaves = 0;

	/*
	 * If Dcache is not set, we assume the cache structures
	 * are not properly initialized.
	 */
	if (c->dcache.waysize)
		levels += 1;
	else
		return -ENOENT;

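	/* A split I-cache means two L1 leaves (inst + data), else one unified leaf */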
	leaves += (c->icache.waysize) ? 2 : 1;

	if (c->vcache.waysize) {
		levels++;
		leaves++;
	}

	if (c->scache.waysize) {
		levels++;
		leaves++;
	}

	if (c->tcache.waysize) {
		levels++;
		leaves++;
	}

	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}

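/*
 * Heuristic used by cache_cpumap_setup(): leaves at level 1 or 2
 * (typically the L1 caches and the VCache) are treated as private to
 * each CPU, while any higher-level leaf is assumed to be shared.
 */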
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return !((this_leaf->level == 1) || (this_leaf->level == 2));
}

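/*
 * Populate shared_cpu_map for each leaf of this CPU by comparing it
 * with the same-index leaf of every other online CPU that already has
 * cacheinfo, marking both sides when the leaves are deemed shared.
 */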
static void cache_cpumap_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;

	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}
}

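/*
 * Fill this CPU's info_list in ascending level order: L1 (split or
 * unified), then VCache, SCache and TCache as unified leaves.
 */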
int populate_cache_leaves(unsigned int cpu)
{
	int level = 1;
	struct cpuinfo_loongarch *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;

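	/* Split L1: a data leaf and an instruction leaf at the same level */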
	if (c->icache.waysize) {
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
		populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
	} else {
		populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
	}

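	/* Higher-level caches (VCache/SCache/TCache) are described as unified */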
	if (c->vcache.waysize)
		populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED);

	if (c->scache.waysize)
		populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED);

	if (c->tcache.waysize)
		populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED);

	cache_cpumap_setup(cpu);
	this_cpu_ci->cpu_map_populated = true;

	return 0;
}