xref: /linux/arch/s390/kernel/cache.c (revision 87c9c16317882dd6dbbc07e349bc3223e14f3244)
// SPDX-License-Identifier: GPL-2.0
/*
 * Extract CPU cache information and expose them via sysfs.
 *
 *    Copyright IBM Corp. 2012
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cacheinfo.h>
#include <asm/facility.h>

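/* Scope of one cache level as encoded in a struct cache_info descriptor. */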
enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

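/* Cache type as encoded in a descriptor; SEPARATE means split I/D caches. */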
enum {
	CTYPE_SEPARATE,
	CTYPE_DATA,
	CTYPE_INSTRUCTION,
	CTYPE_UNIFIED,
};

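/* Attribute indications: what the ECAG cache attribute query extracts. */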
enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

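/* Type indication for ECAG; unified and data caches share the value 0. */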
enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};

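/* One byte of topology information per cache level. */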
struct cache_info {
	unsigned char	    : 4;
	unsigned char scope : 2;
	unsigned char type  : 2;
};

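/* The topology query returns a 64-bit word: one descriptor per cache level. */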
#define CACHE_MAX_LEVEL 8
union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

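/* Indexed by the generic enum cache_type values (see <linux/cacheinfo.h>). */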
static const char * const cache_type_string[] = {
	"",
	"Instruction",
	"Data",
	"",
	"Unified",
};

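/* Map the hardware CTYPE_* values to the generic enum cache_type. */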
static const enum cache_type cache_type_map[] = {
	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};

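/*
 * Print one line per cache leaf of an arbitrary online CPU; used for the
 * /proc/cpuinfo output.  The scope is reconstructed from disable_sysfs,
 * which is only set for shared caches.
 */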
void show_cacheinfo(struct seq_file *m)
{
	struct cpu_cacheinfo *this_cpu_ci;
	struct cacheinfo *cache;
	int idx;

	if (!test_facility(34))
		return;
	this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		cache = this_cpu_ci->info_list + idx;
		seq_printf(m, "cache%-11d: ", idx);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ",
			   cache->disable_sysfs ? "Shared" : "Private");
		seq_printf(m, "size=%dK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->coherency_line_size);
		seq_printf(m, "associativity=%d", cache->ways_of_associativity);
		seq_puts(m, "\n");
	}
}

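/*
 * Return the generic cache type of the given level, or CACHE_TYPE_NOCACHE
 * if the level does not exist or its descriptor is reserved.
 */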
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
{
	if (level >= CACHE_MAX_LEVEL)
		return CACHE_TYPE_NOCACHE;
	ci += level;
	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
		return CACHE_TYPE_NOCACHE;
	return cache_type_map[ci->type];
}

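/*
 * Query a cache attribute via the EXTRACT CPU ATTRIBUTE (ECAG) instruction.
 * ai selects the attribute, li the cache level, ti the type indication.
 * Callers check test_facility(34) first since ECAG is not always available.
 */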
static inline unsigned long ecag(int ai, int li, int ti)
{
	return __ecag(ECAG_CACHE_ATTRIBUTE, ai << 4 | li << 1 | ti);
}

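/*
 * Fill one cacheinfo leaf: line size, associativity and total size are
 * queried via ECAG, the number of sets is derived from them.  Leaves of
 * shared (non-private) caches are hidden from sysfs.
 */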
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
			 enum cache_type type, unsigned int level, int cpu)
{
	int ti, num_sets;

	if (type == CACHE_TYPE_INST)
		ti = CACHE_TI_INSTRUCTION;
	else
		ti = CACHE_TI_UNIFIED;
	this_leaf->level = level + 1;
	this_leaf->type = type;
	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
	num_sets = this_leaf->size / this_leaf->coherency_line_size;
	num_sets /= this_leaf->ways_of_associativity;
	this_leaf->number_of_sets = num_sets;
	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (!private)
		this_leaf->disable_sysfs = true;
}

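/*
 * Count cache levels and leaves from the topology word.  A level with
 * separate instruction and data caches contributes two leaves.
 */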
int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int level = 0, leaves = 0;
	union cache_topology ct;
	enum cache_type ctype;

	if (!test_facility(34))
		return -EOPNOTSUPP;
	if (!this_cpu_ci)
		return -EINVAL;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	do {
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_NOCACHE)
			break;
		/* Separate instruction and data caches */
		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
	} while (++level < CACHE_MAX_LEVEL);
	this_cpu_ci->num_levels = level;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}

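/*
 * Initialize one cacheinfo leaf per cache, walking the topology word level
 * by level; levels with separate caches get a data and an instruction leaf.
 */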
int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	unsigned int level, idx, pvt;
	union cache_topology ct;
	enum cache_type ctype;

	if (!test_facility(34))
		return -EOPNOTSUPP;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		if (!this_leaf)
			return -EINVAL;
		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_SEPARATE) {
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
		} else {
			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
		}
	}
	return 0;
}