// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT		0x1122
#define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
#define PCI_DEVICE_ID_AMD_MI300_ROOT		0x14f8

#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4	0x124c
#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4	0x12bc
#define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4
#define PCI_DEVICE_ID_AMD_MI300_DF_F4		0x152c

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

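/*
 * Bus/device ranges on which northbridge devices may appear, as
 * { bus, device base, device limit } triples.
 */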
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
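
/*
 * Illustrative sketch only (not used by this file): walking all cached
 * northbridges via the accessors above, skipping nodes whose devices
 * were not found.
 */
static void __maybe_unused example_for_each_nb(void)
{
	u16 i;

	for (i = 0; i < amd_nb_num(); i++) {
		struct amd_northbridge *nb = node_to_amd_nb(i);

		if (!nb || !nb->misc)
			continue;

		/* nb->root, nb->misc and nb->link are now safe to use. */
	}
}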

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

/*
 * SMN accesses may fail in ways that are difficult to detect here in the called
 * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
 * their own checking based on what behavior they expect.
 *
 * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
 * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
 * can be checked here, and a proper error code can be returned.
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that this value is correct for
 * the register/fields they need.
 *
 * For SMN writes, success can be determined through a "write and read back"
 * check. However, this is not robust when done here.
 *
 * Possible issues:
 *
 * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
 *    *not* match the write value.
 *
 * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
 *    known here.
 *
 * 3) Bits that are "Reserved / Set to 1". Ditto above.
 *
 * Callers of amd_smn_write() should do the "write and read back" check
 * themselves, if needed.
 *
 * For #1, they can see if their target bits got cleared.
 *
 * For #2 and #3, they can check if their target bits got set as intended.
 *
 * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, the
 * operation is considered a success, and the caller does their own checking.
 */
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
{
	int err = __amd_smn_rw(node, address, value, false);

	if (PCI_POSSIBLE_ERROR(*value)) {
		err = -ENODEV;
		*value = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
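
/*
 * Illustrative sketch only (not used by this file): a caller applying the
 * "write and read back" check described above. The node, address, and bit
 * mask are the caller's to choose; this example sets bits and verifies they
 * stuck, which covers cases #2 and #3 above.
 */
static int __maybe_unused example_smn_set_bits(u16 node, u32 address, u32 bits)
{
	u32 val;
	int err;

	err = amd_smn_read(node, address, &val);
	if (err)
		return err;

	err = amd_smn_write(node, address, val | bits);
	if (err)
		return err;

	/* Read back and confirm the target bits are set. */
	err = amd_smn_read(node, address, &val);
	if (err)
		return err;

	return (val & bits) == bits ? 0 : -EIO;
}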

static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
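		/*
		 * Hypothetical counts: 8 roots with 4 interfaces give
		 * roots_per_misc == 2 and pass the check below; 9 roots
		 * with 4 interfaces leave a remainder and are rejected.
		 */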
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. Skip
		 * the N-1 redundant PCI roots per DF/SMN interface so
		 * that the following DF/SMN interfaces get mapped to
		 * the correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * @device packs a PCI vendor ID in its low 16 bits and a device ID in its
 * high 16 bits. Subdevice/subvendor IDs are ignored; as far as I can figure
 * out they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume all CPUs from Fam10h onward have MMCONFIG. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* MMCONFIG is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

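	/* Each PCI bus decodes 1 MiB (2^20) of MMCONFIG space, hence the "+ 20". */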
	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

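	/* Register 0x1d4 packs four subcache-enable bits per compute unit. */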
	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

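	/*
	 * Cache each node's GART cache-control word; amd_flush_garts()
	 * sets bit 0 of it to trigger a flush.
	 */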
	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed,
	 * but I'm not sure the hardware won't lose flush requests when
	 * another is pending. This whole thing is so expensive anyway
	 * that the extra serialization doesn't matter. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

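	/* Bit 2 set means the workaround is already in effect; nothing to do. */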
	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
593