// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
#define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
#define PCI_DEVICE_ID_AMD_MI300_ROOT		0x14f8

#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
#define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4
#define PCI_DEVICE_ID_AMD_MI300_DF_F4		0x152c

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

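/*
 * Bus/device ranges on which northbridge devices can appear.  Each
 * entry is { bus, device base, device limit }: the NB configuration
 * devices sit at devices 0x18-0x1f on bus 0x00 (one per node), and on
 * buses 0xff and 0xfe on some systems.
 */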
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

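/*
 * Walk the global PCI device list, starting after @dev, and return the
 * next device that matches one of @ids, or NULL once the list is
 * exhausted.  pci_get_device() drops the reference on @dev and holds
 * one on the device it returns.
 */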
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

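/*
 * SMN accesses go through an index/data register pair in the root
 * complex's PCI config space: the target SMN address is programmed
 * into register 0x60, and the data is then read from or written to
 * register 0x64.  smn_mutex serializes users of that pair.
 */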
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

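/**
 * amd_smn_read() - Read a register over the System Management Network.
 * @node:	node/northbridge whose root device should be used
 * @address:	SMN register address
 * @value:	filled in with the value read
 *
 * Returns 0 on success, nonzero otherwise.
 *
 * Illustrative use only; the SMN address below is arbitrary:
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, 0x50000, &val))
 *		pr_info("SMN 0x50000: 0x%x\n", val);
 */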
int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

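/**
 * amd_smn_write() - Write a register over the System Management Network.
 * @node:	node/northbridge whose root device should be used
 * @address:	SMN register address
 * @value:	value to write
 *
 * Returns 0 on success, nonzero otherwise.
 */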
int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

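/*
 * Discover and cache the root, misc (DF function 3) and link (DF
 * function 4) PCI devices for each node.  Runs once from
 * init_amd_nbs(); subsequent calls return immediately because
 * amd_northbridges.num is already set.
 */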
static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence: CPUID leaf 0x80000006 EDX
	 * describes the L3 cache, so a zero value means there is none.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor, which as far as anyone can tell are
 * useless for matching anyway.  @device packs the PCI device ID in the
 * upper 16 bits and the vendor ID in the lower 16 bits.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

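/*
 * Derive the MMCONFIG aperture from MSR_FAM10H_MMIO_CONF_BASE: it
 * carries an enable bit, the base address, and the bus-range field,
 * which is the log2 of the number of bus numbers covered.  Each bus
 * number takes 1 MiB (2^20 bytes) of config space, hence the
 * (segn_busn_bits + 20) shift below.
 */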
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume all CPUs from Fam 0x10 onward have MMCONFIG. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

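/*
 * Register 0x1d4 in the link (function 4) device holds a 4-bit
 * subcache enable mask per compute unit; return the mask belonging to
 * @cpu's compute unit.
 */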
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

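/*
 * Remember the contents of the GART cache-flush control word (register
 * 0x9c in the misc device) for every northbridge, so amd_flush_garts()
 * can set the flush bit without disturbing the remaining bits.
 */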
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

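/*
 * Erratum 688 workaround: set bits 3 and 14 in the instruction cache
 * configuration MSR on every CPU.  Only applied when the BIOS has not
 * already done so (bit 2 of D18F4x164 clear).
 */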
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);