// SPDX-License-Identifier: GPL-2.0
/*
 * AMD L3 cache_disable_{0,1} sysfs handling
 * Documentation/ABI/testing/sysfs-devices-system-cpu
 */
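/*
 * Usage sketch (paths per the ABI document above; "index3" assumes the L3
 * is the fourth cacheinfo leaf, as it is on AMD):
 *
 *   # cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *   FREE
 *   # echo 42 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *   # cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *   42
 */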

#include <linux/cacheinfo.h>
#include <linux/capability.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include <asm/amd/nb.h>

#include "cpu.h"

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

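	/*
	 * Read the subcache presence bits from the northbridge's PCI config
	 * space (offset 0x1C4 of the misc function; presumably the L3 cache
	 * parameter register described in the BKDG). A set bit below means
	 * the corresponding subcache portion is disabled.
	 */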
	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

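	/*
	 * Each present subcache portion contributes 1024 indices, so the
	 * largest per-subcache count determines the highest usable disable
	 * index: hence the "<< 10" and the "- 1".
	 */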
	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}

/*
 * Check whether a slot used for disabling an L3 index is occupied.
 * @nb:   AMD northbridge containing the L3 cache
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if the slot is in use, a negative value if it is free.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned int slot)
{
	unsigned int reg = 0;

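	/*
	 * Slot 0 lives at offset 0x1BC, slot 1 at 0x1C0. Bits 31:30 flag a
	 * slot that is in use, bits 11:0 hold the index it disables.
	 */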
	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

static ssize_t show_cache_disable(struct cacheinfo *ci, char *buf, unsigned int slot)
{
	struct amd_northbridge *nb = ci->priv;
	int index;

	index = amd_get_l3_disable_slot(nb, slot);
	if (index >= 0)
		return sysfs_emit(buf, "%d\n", index);

	return sysfs_emit(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct cacheinfo *ci = dev_get_drvdata(dev);			\
	return show_cache_disable(ci, buf, slot);			\
}

SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned int slot, unsigned long idx)
{
	int i;

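	/*
	 * Bit 30 appears to arm the disable slot; the disable itself is
	 * presumably committed by the second write with bit 31 set further
	 * down. The BKDG documents the exact bit semantics.
	 */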
	idx |= BIT(30);

	/*
	 * Disable the index in all four subcaches; bits 21:20 of the
	 * register select the subcache.
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the
		 * L3 cache whose indices we are disabling; a wbinvd() on
		 * the current CPU would therefore not be sufficient.
		 */
		wbinvd_on_cpu(cpu);

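		/* the second write, with bit 31 set, presumably commits the disable */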
		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * Disable an L3 cache index by using a disable-slot.
 *
 * @nb:    AMD northbridge containing the L3 cache
 * @cpu:   a CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
				   unsigned int slot, unsigned long index)
{
	int ret = 0;

	/* check whether @slot is already in use */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct cacheinfo *ci, const char *buf,
				   size_t count, unsigned int slot)
{
	struct amd_northbridge *nb = ci->priv;
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

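	/*
	 * Any CPU in shared_cpu_map is on the node that owns this L3, so
	 * the first one will do for the WBINVD in amd_l3_disable_index().
	 */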
	cpu = cpumask_first(&ci->shared_cpu_map);

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warn("L3 slot %d in use/index already disabled!\n",
				slot);
		return err;
	}
	return count;
}

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_store(struct device *dev,			\
			     struct device_attribute *attr,		\
			     const char *buf, size_t count)		\
{									\
	struct cacheinfo *ci = dev_get_drvdata(dev);			\
	return store_cache_disable(ci, buf, count, slot);		\
}

STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

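/*
 * "subcaches" exposes the L3 subcache partitioning mask of this CPU's node
 * as a hex bitmask, read and written via amd_get_subcaches() and
 * amd_set_subcaches().
 */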
static ssize_t subcaches_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct cacheinfo *ci = dev_get_drvdata(dev);
	int cpu = cpumask_first(&ci->shared_cpu_map);

	return sysfs_emit(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t subcaches_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cacheinfo *ci = dev_get_drvdata(dev);
	int cpu = cpumask_first(&ci->shared_cpu_map);
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);

static umode_t cache_private_attrs_is_visible(struct kobject *kobj,
					      struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *ci = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

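	/* ci->priv carries the amd_northbridge pointer only for L3 leaves */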
	if (!ci->priv)
		return 0;

	if ((attr == &dev_attr_subcaches.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return mode;

	if ((attr == &dev_attr_cache_disable_0.attr ||
	     attr == &dev_attr_cache_disable_1.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return mode;

	return 0;
}

static struct attribute_group cache_private_group = {
	.is_visible = cache_private_attrs_is_visible,
};

static void init_amd_l3_attrs(void)
{
	static struct attribute **amd_l3_attrs;
	int n = 1;

	if (amd_l3_attrs) /* already initialized */
		return;

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
	if (!amd_l3_attrs)
		return;

	n = 0;
	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
		amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
	}
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

	cache_private_group.attrs = amd_l3_attrs;
}

const struct attribute_group *cache_get_priv_group(struct cacheinfo *ci)
{
	struct amd_northbridge *nb = ci->priv;

	if (ci->level < 3 || !nb)
		return NULL;

	if (nb->l3_cache.indices)
		init_amd_l3_attrs();

	return &cache_private_group;
}

struct amd_northbridge *amd_init_l3_cache(int index)
{
	struct amd_northbridge *nb;
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return NULL;

	node = topology_amd_node_id(smp_processor_id());
	nb = node_to_amd_nb(node);
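	/*
	 * node_to_amd_nb() returns NULL when no northbridge is present (the
	 * virtualized case mentioned above); compute the index count only
	 * once per node.
	 */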
	if (nb && !nb->l3_cache.indices)
		amd_calc_l3_indices(nb);

	return nb;
}
302