1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  *         Leo Duran <leo.duran@amd.com>
6  */
7 
8 #define pr_fmt(fmt)     "AMD-Vi: " fmt
9 #define dev_fmt(fmt)    pr_fmt(fmt)
10 
11 #include <linux/pci.h>
12 #include <linux/acpi.h>
13 #include <linux/list.h>
14 #include <linux/bitmap.h>
15 #include <linux/syscore_ops.h>
16 #include <linux/interrupt.h>
17 #include <linux/msi.h>
18 #include <linux/irq.h>
19 #include <linux/amd-iommu.h>
20 #include <linux/export.h>
21 #include <linux/kmemleak.h>
22 #include <linux/cc_platform.h>
23 #include <linux/iopoll.h>
24 #include <asm/pci-direct.h>
25 #include <asm/iommu.h>
26 #include <asm/apic.h>
27 #include <asm/gart.h>
28 #include <asm/x86_init.h>
29 #include <asm/io_apic.h>
30 #include <asm/irq_remapping.h>
31 #include <asm/set_memory.h>
32 #include <asm/sev.h>
33 
34 #include <linux/crash_dump.h>
35 
36 #include "amd_iommu.h"
37 #include "../irq_remapping.h"
38 #include "../iommu-pages.h"
39 
40 /*
41  * definitions for the ACPI scanning code
42  */
43 #define IVRS_HEADER_LENGTH 48
44 
45 #define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
46 #define ACPI_IVMD_TYPE_ALL              0x20
47 #define ACPI_IVMD_TYPE                  0x21
48 #define ACPI_IVMD_TYPE_RANGE            0x22
49 
50 #define IVHD_DEV_ALL                    0x01
51 #define IVHD_DEV_SELECT                 0x02
52 #define IVHD_DEV_SELECT_RANGE_START     0x03
53 #define IVHD_DEV_RANGE_END              0x04
54 #define IVHD_DEV_ALIAS                  0x42
55 #define IVHD_DEV_ALIAS_RANGE            0x43
56 #define IVHD_DEV_EXT_SELECT             0x46
57 #define IVHD_DEV_EXT_SELECT_RANGE       0x47
58 #define IVHD_DEV_SPECIAL		0x48
59 #define IVHD_DEV_ACPI_HID		0xf0
60 
61 #define UID_NOT_PRESENT                 0
62 #define UID_IS_INTEGER                  1
63 #define UID_IS_CHARACTER                2
64 
65 #define IVHD_SPECIAL_IOAPIC		1
66 #define IVHD_SPECIAL_HPET		2
67 
68 #define IVHD_FLAG_HT_TUN_EN_MASK        0x01
69 #define IVHD_FLAG_PASSPW_EN_MASK        0x02
70 #define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
71 #define IVHD_FLAG_ISOC_EN_MASK          0x08
72 
73 #define IVMD_FLAG_EXCL_RANGE            0x08
74 #define IVMD_FLAG_IW                    0x04
75 #define IVMD_FLAG_IR                    0x02
76 #define IVMD_FLAG_UNITY_MAP             0x01
77 
78 #define ACPI_DEVFLAG_INITPASS           0x01
79 #define ACPI_DEVFLAG_EXTINT             0x02
80 #define ACPI_DEVFLAG_NMI                0x04
81 #define ACPI_DEVFLAG_SYSMGT1            0x10
82 #define ACPI_DEVFLAG_SYSMGT2            0x20
83 #define ACPI_DEVFLAG_LINT0              0x40
84 #define ACPI_DEVFLAG_LINT1              0x80
85 #define ACPI_DEVFLAG_ATSDIS             0x10000000
86 
87 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn)	(((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
88 						 | ((dev & 0x1f) << 3) | (fn & 0x7))
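/*
 * For example, IVRS_GET_SBDF_ID(0x0001, 0x3a, 0x00, 0x2) yields 0x00013a02:
 * segment in bits [31:16], bus in [15:8], device in [7:3], function in [2:0].
 */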
89 
90 /*
91  * ACPI table definitions
92  *
93  * These data structures are laid over the table to parse the important values
94  * out of it.
95  */
96 
97 /*
98  * structure describing one IOMMU in the ACPI table. Typically followed by one
99  * or more ivhd_entry structures.
100  */
101 struct ivhd_header {
102 	u8 type;
103 	u8 flags;
104 	u16 length;
105 	u16 devid;
106 	u16 cap_ptr;
107 	u64 mmio_phys;
108 	u16 pci_seg;
109 	u16 info;
110 	u32 efr_attr;
111 
112 	/* Following only valid on IVHD type 11h and 40h */
113 	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
114 	u64 efr_reg2;
115 } __attribute__((packed));
116 
117 /*
118  * A device entry describing which devices a specific IOMMU translates and
119  * which requestor ids they use.
120  */
121 struct ivhd_entry {
122 	u8 type;
123 	u16 devid;
124 	u8 flags;
125 	struct_group(ext_hid,
126 		u32 ext;
127 		u32 hidh;
128 	);
129 	u64 cid;
130 	u8 uidf;
131 	u8 uidl;
132 	u8 uid;
133 } __attribute__((packed));
134 
135 /*
136  * An AMD IOMMU memory definition structure. It defines things like exclusion
137  * ranges for devices and regions that should be unity mapped.
138  */
139 struct ivmd_header {
140 	u8 type;
141 	u8 flags;
142 	u16 length;
143 	u16 devid;
144 	u16 aux;
145 	u16 pci_seg;
146 	u8  resv[6];
147 	u64 range_start;
148 	u64 range_length;
149 } __attribute__((packed));
150 
151 bool amd_iommu_dump;
152 bool amd_iommu_irq_remap __read_mostly;
153 
154 enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
155 /* Guest page table level */
156 int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
157 
158 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
159 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
160 
161 static bool amd_iommu_detected;
162 static bool amd_iommu_disabled __initdata;
163 static bool amd_iommu_force_enable __initdata;
164 static bool amd_iommu_irtcachedis;
165 static int amd_iommu_target_ivhd_type;
166 
167 /* Global EFR and EFR2 registers */
168 u64 amd_iommu_efr;
169 u64 amd_iommu_efr2;
170 
171 /* Is SNP enabled on the system? */
172 bool amd_iommu_snp_en;
173 EXPORT_SYMBOL(amd_iommu_snp_en);
174 
175 LIST_HEAD(amd_iommu_pci_seg_list);	/* list of all PCI segments */
176 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the system */
177 LIST_HEAD(amd_ivhd_dev_flags_list);	/* list of all IVHD device entry settings */
178 
179 /* Number of IOMMUs present in the system */
180 static int amd_iommus_present;
181 
182 /* Do the IOMMUs have a non-present cache? */
183 bool amd_iommu_np_cache __read_mostly;
184 bool amd_iommu_iotlb_sup __read_mostly = true;
185 
186 static bool amd_iommu_pc_present __read_mostly;
187 bool amdr_ivrs_remap_support __read_mostly;
188 
189 bool amd_iommu_force_isolation __read_mostly;
190 
191 unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES;
192 
193 enum iommu_init_state {
194 	IOMMU_START_STATE,
195 	IOMMU_IVRS_DETECTED,
196 	IOMMU_ACPI_FINISHED,
197 	IOMMU_ENABLED,
198 	IOMMU_PCI_INIT,
199 	IOMMU_INTERRUPTS_EN,
200 	IOMMU_INITIALIZED,
201 	IOMMU_NOT_FOUND,
202 	IOMMU_INIT_ERROR,
203 	IOMMU_CMDLINE_DISABLED,
204 };
205 
206 /* Early ioapic and hpet maps from kernel command line */
207 #define EARLY_MAP_SIZE		4
208 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
209 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
210 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
211 
212 static int __initdata early_ioapic_map_size;
213 static int __initdata early_hpet_map_size;
214 static int __initdata early_acpihid_map_size;
215 
216 static bool __initdata cmdline_maps;
217 
218 static enum iommu_init_state init_state = IOMMU_START_STATE;
219 
220 static int amd_iommu_enable_interrupts(void);
221 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
222 
223 static bool amd_iommu_pre_enabled = true;
224 
225 static u32 amd_iommu_ivinfo __initdata;
226 
227 bool translation_pre_enabled(struct amd_iommu *iommu)
228 {
229 	return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
230 }
231 
232 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
233 {
234 	iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
235 }
236 
237 static void init_translation_status(struct amd_iommu *iommu)
238 {
239 	u64 ctrl;
240 
241 	ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
242 	if (ctrl & (1<<CONTROL_IOMMU_EN))
243 		iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
244 }
245 
246 int amd_iommu_get_num_iommus(void)
247 {
248 	return amd_iommus_present;
249 }
250 
251 bool amd_iommu_ht_range_ignore(void)
252 {
253 	return check_feature2(FEATURE_HT_RANGE_IGNORE);
254 }
255 
256 /*
257  * Iterate through all the IOMMUs to compute the common EFR
258  * masks among all IOMMUs and warn if an inconsistency is found.
259  */
260 static __init void get_global_efr(void)
261 {
262 	struct amd_iommu *iommu;
263 
264 	for_each_iommu(iommu) {
265 		u64 tmp = iommu->features;
266 		u64 tmp2 = iommu->features2;
267 
268 		if (list_is_first(&iommu->list, &amd_iommu_list)) {
269 			amd_iommu_efr = tmp;
270 			amd_iommu_efr2 = tmp2;
271 			continue;
272 		}
273 
274 		if (amd_iommu_efr == tmp &&
275 		    amd_iommu_efr2 == tmp2)
276 			continue;
277 
278 		pr_err(FW_BUG
279 		       "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
280 		       tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
281 		       iommu->index, iommu->pci_seg->id,
282 		       PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
283 		       PCI_FUNC(iommu->devid));
284 
285 		amd_iommu_efr &= tmp;
286 		amd_iommu_efr2 &= tmp2;
287 	}
288 
289 	pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
290 }
291 
292 /*
293  * For IVHD type 0x11/0x40, EFR is also available via IVHD.
294  * Default to IVHD EFR since it is available sooner
295  * (i.e. before PCI init).
296  */
297 static void __init early_iommu_features_init(struct amd_iommu *iommu,
298 					     struct ivhd_header *h)
299 {
300 	if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
301 		iommu->features = h->efr_reg;
302 		iommu->features2 = h->efr_reg2;
303 	}
304 	if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
305 		amdr_ivrs_remap_support = true;
306 }
307 
308 /* Access to l1 and l2 indexed register spaces */
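/*
 * These use an indirect access mechanism in PCI config space: the L1
 * registers are reached through an address register at offset 0xf8
 * (bit 31 acts as the write-enable) and a data register at 0xfc; the
 * L2 registers use 0xf0 (bit 8 as write-enable) and 0xf4.
 */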
309 
310 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
311 {
312 	u32 val;
313 
314 	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
315 	pci_read_config_dword(iommu->dev, 0xfc, &val);
316 	return val;
317 }
318 
319 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
320 {
321 	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
322 	pci_write_config_dword(iommu->dev, 0xfc, val);
323 	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
324 }
325 
326 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
327 {
328 	u32 val;
329 
330 	pci_write_config_dword(iommu->dev, 0xf0, address);
331 	pci_read_config_dword(iommu->dev, 0xf4, &val);
332 	return val;
333 }
334 
335 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
336 {
337 	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
338 	pci_write_config_dword(iommu->dev, 0xf4, val);
339 }
340 
341 /****************************************************************************
342  *
343  * AMD IOMMU MMIO register space handling functions
344  *
345  * These functions are used to program the IOMMU device registers in
346  * MMIO space required by this driver.
347  *
348  ****************************************************************************/
349 
350 /*
351  * This function sets the exclusion range in the IOMMU. DMA accesses to the
352  * exclusion range are passed through untranslated
353  */
354 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
355 {
356 	u64 start = iommu->exclusion_start & PAGE_MASK;
357 	u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
358 	u64 entry;
359 
360 	if (!iommu->exclusion_start)
361 		return;
362 
363 	entry = start | MMIO_EXCL_ENABLE_MASK;
364 	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
365 			&entry, sizeof(entry));
366 
367 	entry = limit;
368 	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
369 			&entry, sizeof(entry));
370 }
371 
372 static void iommu_set_cwwb_range(struct amd_iommu *iommu)
373 {
374 	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
375 	u64 entry = start & PM_ADDR_MASK;
376 
377 	if (!check_feature(FEATURE_SNP))
378 		return;
379 
380 	/* Note:
381 	 * Re-purpose Exclusion base/limit registers for Completion wait
382 	 * write-back base/limit.
383 	 */
384 	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
385 		    &entry, sizeof(entry));
386 
387 	/* Note:
388 	 * Default to 4 Kbytes, which can be specified by setting base
389 	 * address equal to the limit address.
390 	 */
391 	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
392 		    &entry, sizeof(entry));
393 }
394 
395 /* Programs the physical address of the device table into the IOMMU hardware */
396 static void iommu_set_device_table(struct amd_iommu *iommu)
397 {
398 	u64 entry;
399 	u32 dev_table_size = iommu->pci_seg->dev_table_size;
400 	void *dev_table = (void *)get_dev_table(iommu);
401 
402 	BUG_ON(iommu->mmio_base == NULL);
403 
404 	entry = iommu_virt_to_phys(dev_table);
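	/* The low bits of the base register encode the table size in 4K pages, minus one. */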
405 	entry |= (dev_table_size >> 12) - 1;
406 	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
407 			&entry, sizeof(entry));
408 }
409 
410 static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift)
411 {
412 	u64 ctrl;
413 
414 	ctrl = readq(iommu->mmio_base +  MMIO_CONTROL_OFFSET);
415 	mask <<= shift;
416 	ctrl &= ~mask;
417 	ctrl |= (val << shift) & mask;
418 	writeq(ctrl, iommu->mmio_base +  MMIO_CONTROL_OFFSET);
419 }
420 
421 /* Generic functions to enable/disable certain features of the IOMMU. */
422 void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
423 {
424 	iommu_feature_set(iommu, 1ULL, 1ULL, bit);
425 }
426 
427 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
428 {
429 	iommu_feature_set(iommu, 0ULL, 1ULL, bit);
430 }
431 
432 /* Function to enable the hardware */
433 static void iommu_enable(struct amd_iommu *iommu)
434 {
435 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
436 }
437 
438 static void iommu_disable(struct amd_iommu *iommu)
439 {
440 	if (!iommu->mmio_base)
441 		return;
442 
443 	/* Disable command buffer */
444 	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
445 
446 	/* Disable event logging and event interrupts */
447 	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
448 	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
449 
450 	/* Disable IOMMU GA_LOG */
451 	iommu_feature_disable(iommu, CONTROL_GALOG_EN);
452 	iommu_feature_disable(iommu, CONTROL_GAINT_EN);
453 
454 	/* Disable IOMMU PPR logging */
455 	iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
456 	iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
457 
458 	/* Disable IOMMU hardware itself */
459 	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
460 
461 	/* Clear IRTE cache disabling bit */
462 	iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
463 }
464 
465 /*
466  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
467  * the system has one.
468  */
469 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
470 {
471 	if (!request_mem_region(address, end, "amd_iommu")) {
472 		pr_err("Can not reserve memory region %llx-%llx for mmio\n",
473 			address, end);
474 		pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
475 		return NULL;
476 	}
477 
478 	return (u8 __iomem *)ioremap(address, end);
479 }
480 
481 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
482 {
483 	if (iommu->mmio_base)
484 		iounmap(iommu->mmio_base);
485 	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
486 }
487 
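/*
 * Fixed IVHD header size: 24 bytes for legacy type 0x10, 40 bytes for
 * types 0x11/0x40, which additionally carry the EFR register image.
 */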
488 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
489 {
490 	u32 size = 0;
491 
492 	switch (h->type) {
493 	case 0x10:
494 		size = 24;
495 		break;
496 	case 0x11:
497 	case 0x40:
498 		size = 40;
499 		break;
500 	}
501 	return size;
502 }
503 
504 /****************************************************************************
505  *
506  * The functions below belong to the first pass of AMD IOMMU ACPI table
507  * parsing. In this pass we try to find out the highest device id this
508  * code has to handle. Based on this information, the size of the shared data
509  * structures is determined later.
510  *
511  ****************************************************************************/
512 
513 /*
514  * This function calculates the length of a given IVHD entry
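 * (entries with type < 0x80 encode their size in the top two type bits,
 *  i.e. 4 << type[7:6] bytes; ACPI_HID entries are variable length)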
515  */
516 static inline int ivhd_entry_length(u8 *ivhd)
517 {
518 	u32 type = ((struct ivhd_entry *)ivhd)->type;
519 
520 	if (type < 0x80) {
521 		return 0x04 << (*ivhd >> 6);
522 	} else if (type == IVHD_DEV_ACPI_HID) {
523 		/* For ACPI_HID, offset 21 is uid len */
524 		return *((u8 *)ivhd + 21) + 22;
525 	}
526 	return 0;
527 }
528 
529 /*
530  * After reading the highest device id from the IOMMU PCI capability header
531  * this function checks whether a higher device id is defined in the ACPI table
532  */
533 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
534 {
535 	u8 *p = (void *)h, *end = (void *)h;
536 	struct ivhd_entry *dev;
537 	int last_devid = -EINVAL;
538 
539 	u32 ivhd_size = get_ivhd_header_size(h);
540 
541 	if (!ivhd_size) {
542 		pr_err("Unsupported IVHD type %#x\n", h->type);
543 		return -EINVAL;
544 	}
545 
546 	p += ivhd_size;
547 	end += h->length;
548 
549 	while (p < end) {
550 		dev = (struct ivhd_entry *)p;
551 		switch (dev->type) {
552 		case IVHD_DEV_ALL:
553 			/* Use maximum BDF value for DEV_ALL */
554 			return 0xffff;
555 		case IVHD_DEV_SELECT:
556 		case IVHD_DEV_RANGE_END:
557 		case IVHD_DEV_ALIAS:
558 		case IVHD_DEV_EXT_SELECT:
559 			/* all the above subfield types refer to device ids */
560 			if (dev->devid > last_devid)
561 				last_devid = dev->devid;
562 			break;
563 		default:
564 			break;
565 		}
566 		p += ivhd_entry_length(p);
567 	}
568 
569 	WARN_ON(p != end);
570 
571 	return last_devid;
572 }
573 
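/* Verify the IVRS table checksum: all bytes of the table must sum to zero (mod 256). */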
574 static int __init check_ivrs_checksum(struct acpi_table_header *table)
575 {
576 	int i;
577 	u8 checksum = 0, *p = (u8 *)table;
578 
579 	for (i = 0; i < table->length; ++i)
580 		checksum += p[i];
581 	if (checksum != 0) {
582 		/* ACPI table corrupt */
583 		pr_err(FW_BUG "IVRS invalid checksum\n");
584 		return -ENODEV;
585 	}
586 
587 	return 0;
588 }
589 
590 /*
591  * Iterate over all IVHD entries in the ACPI table and find the highest device
592  * id which we need to handle. This is the first of three functions which parse
593  * the ACPI table. So we check the checksum here.
594  */
595 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
596 {
597 	u8 *p = (u8 *)table, *end = (u8 *)table;
598 	struct ivhd_header *h;
599 	int last_devid, last_bdf = 0;
600 
601 	p += IVRS_HEADER_LENGTH;
602 
603 	end += table->length;
604 	while (p < end) {
605 		h = (struct ivhd_header *)p;
606 		if (h->pci_seg == pci_seg &&
607 		    h->type == amd_iommu_target_ivhd_type) {
608 			last_devid = find_last_devid_from_ivhd(h);
609 
610 			if (last_devid < 0)
611 				return -EINVAL;
612 			if (last_devid > last_bdf)
613 				last_bdf = last_devid;
614 		}
615 		p += h->length;
616 	}
617 	WARN_ON(p != end);
618 
619 	return last_bdf;
620 }
621 
622 /****************************************************************************
623  *
624  * The following functions belong to the code path which parses the ACPI table
625  * the second time. In this ACPI parsing iteration we allocate IOMMU specific
626  * data structures, initialize the per PCI segment device/alias/rlookup table
627  * and also basically initialize the hardware.
628  *
629  ****************************************************************************/
630 
631 /* Allocate per PCI segment device table */
632 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
633 {
634 	pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
635 						  pci_seg->dev_table_size);
636 	if (!pci_seg->dev_table)
637 		return -ENOMEM;
638 
639 	return 0;
640 }
641 
642 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
643 {
644 	iommu_free_pages(pci_seg->dev_table);
645 	pci_seg->dev_table = NULL;
646 }
647 
648 /* Allocate per PCI segment IOMMU rlookup table. */
649 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
650 {
651 	pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1,
652 					  sizeof(*pci_seg->rlookup_table),
653 					  GFP_KERNEL);
654 	if (pci_seg->rlookup_table == NULL)
655 		return -ENOMEM;
656 
657 	return 0;
658 }
659 
660 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
661 {
662 	kvfree(pci_seg->rlookup_table);
663 	pci_seg->rlookup_table = NULL;
664 }
665 
666 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
667 {
668 	pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1,
669 					     sizeof(*pci_seg->irq_lookup_table),
670 					     GFP_KERNEL);
671 	if (pci_seg->irq_lookup_table == NULL)
672 		return -ENOMEM;
673 
674 	return 0;
675 }
676 
677 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
678 {
679 	kvfree(pci_seg->irq_lookup_table);
680 	pci_seg->irq_lookup_table = NULL;
681 }
682 
683 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
684 {
685 	int i;
686 
687 	pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1,
688 					      sizeof(*pci_seg->alias_table),
689 					      GFP_KERNEL);
690 	if (!pci_seg->alias_table)
691 		return -ENOMEM;
692 
693 	/*
694 	 * Let each alias entry point to itself initially.
695 	 */
696 	for (i = 0; i <= pci_seg->last_bdf; ++i)
697 		pci_seg->alias_table[i] = i;
698 
699 	return 0;
700 }
701 
702 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
703 {
704 	kvfree(pci_seg->alias_table);
705 	pci_seg->alias_table = NULL;
706 }
707 
708 /*
709  * Allocates the command buffer. This buffer is per AMD IOMMU. We can
710  * write commands to that buffer later and the IOMMU will execute them
711  * asynchronously
712  */
713 static int __init alloc_command_buffer(struct amd_iommu *iommu)
714 {
715 	iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
716 
717 	return iommu->cmd_buf ? 0 : -ENOMEM;
718 }
719 
720 /*
721  * Interrupt handler has processed all pending events and adjusted head
722  * and tail pointers. Reset the overflow mask and restart logging.
723  */
724 void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
725 			   u8 cntrl_intr, u8 cntrl_log,
726 			   u32 status_run_mask, u32 status_overflow_mask)
727 {
728 	u32 status;
729 
730 	status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
731 	if (status & status_run_mask)
732 		return;
733 
734 	pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);
735 
736 	iommu_feature_disable(iommu, cntrl_log);
737 	iommu_feature_disable(iommu, cntrl_intr);
738 
739 	writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
740 
741 	iommu_feature_enable(iommu, cntrl_intr);
742 	iommu_feature_enable(iommu, cntrl_log);
743 }
744 
745 /*
746  * This function restarts event logging in case the IOMMU experienced
747  * an event log buffer overflow.
748  */
749 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
750 {
751 	amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
752 			      CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
753 			      MMIO_STATUS_EVT_OVERFLOW_MASK);
754 }
755 
756 /*
757  * This function restarts GA logging in case the IOMMU experienced
758  * a GA log overflow.
759  */
760 void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
761 {
762 	amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
763 			      CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
764 			      MMIO_STATUS_GALOG_OVERFLOW_MASK);
765 }
766 
767 /*
768  * This function resets the command buffer if the IOMMU stopped fetching
769  * commands from it.
770  */
771 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
772 {
773 	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
774 
775 	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
776 	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
777 	iommu->cmd_buf_head = 0;
778 	iommu->cmd_buf_tail = 0;
779 
780 	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
781 }
782 
783 /*
784  * This function writes the command buffer address to the hardware and
785  * enables it.
786  */
787 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
788 {
789 	u64 entry;
790 
791 	BUG_ON(iommu->cmd_buf == NULL);
792 
793 	entry = iommu_virt_to_phys(iommu->cmd_buf);
794 	entry |= MMIO_CMD_SIZE_512;
795 
796 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
797 		    &entry, sizeof(entry));
798 
799 	amd_iommu_reset_cmd_buffer(iommu);
800 }
801 
802 /*
803  * This function disables the command buffer
804  */
805 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
806 {
807 	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
808 }
809 
810 static void __init free_command_buffer(struct amd_iommu *iommu)
811 {
812 	iommu_free_pages(iommu->cmd_buf);
813 }
814 
815 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
816 				  size_t size)
817 {
818 	void *buf;
819 
820 	size = PAGE_ALIGN(size);
821 	buf = iommu_alloc_pages_sz(gfp, size);
822 	if (!buf)
823 		return NULL;
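	/*
	 * If the IOMMU supports SNP, split the kernel mapping of the buffer
	 * down to 4K pages so it is not part of a large-page mapping.
	 */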
824 	if (check_feature(FEATURE_SNP) &&
825 	    set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
826 		iommu_free_pages(buf);
827 		return NULL;
828 	}
829 
830 	return buf;
831 }
832 
833 /* allocates the memory where the IOMMU will log its events to */
834 static int __init alloc_event_buffer(struct amd_iommu *iommu)
835 {
836 	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
837 					      EVT_BUFFER_SIZE);
838 
839 	return iommu->evt_buf ? 0 : -ENOMEM;
840 }
841 
842 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
843 {
844 	u64 entry;
845 
846 	BUG_ON(iommu->evt_buf == NULL);
847 
848 	entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
849 
850 	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
851 		    &entry, sizeof(entry));
852 
853 	/* set head and tail to zero manually */
854 	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
855 	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
856 
857 	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
858 }
859 
860 /*
861  * This function disables the event log buffer
862  */
863 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
864 {
865 	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
866 }
867 
868 static void __init free_event_buffer(struct amd_iommu *iommu)
869 {
870 	iommu_free_pages(iommu->evt_buf);
871 }
872 
873 static void free_ga_log(struct amd_iommu *iommu)
874 {
875 #ifdef CONFIG_IRQ_REMAP
876 	iommu_free_pages(iommu->ga_log);
877 	iommu_free_pages(iommu->ga_log_tail);
878 #endif
879 }
880 
881 #ifdef CONFIG_IRQ_REMAP
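/* Program the GA log base and tail registers, then poll until the GA log is running. */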
882 static int iommu_ga_log_enable(struct amd_iommu *iommu)
883 {
884 	u32 status, i;
885 	u64 entry;
886 
887 	if (!iommu->ga_log)
888 		return -EINVAL;
889 
890 	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
891 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
892 		    &entry, sizeof(entry));
893 	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
894 		 (BIT_ULL(52)-1)) & ~7ULL;
895 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
896 		    &entry, sizeof(entry));
897 	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
898 	writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
899 
900 
901 	iommu_feature_enable(iommu, CONTROL_GAINT_EN);
902 	iommu_feature_enable(iommu, CONTROL_GALOG_EN);
903 
904 	for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
905 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
906 		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
907 			break;
908 		udelay(10);
909 	}
910 
911 	if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
912 		return -EINVAL;
913 
914 	return 0;
915 }
916 
917 static int iommu_init_ga_log(struct amd_iommu *iommu)
918 {
919 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
920 		return 0;
921 
922 	iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
923 	if (!iommu->ga_log)
924 		goto err_out;
925 
926 	iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
927 	if (!iommu->ga_log_tail)
928 		goto err_out;
929 
930 	return 0;
931 err_out:
932 	free_ga_log(iommu);
933 	return -EINVAL;
934 }
935 #endif /* CONFIG_IRQ_REMAP */
936 
937 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
938 {
939 	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
940 
941 	return iommu->cmd_sem ? 0 : -ENOMEM;
942 }
943 
944 static void __init free_cwwb_sem(struct amd_iommu *iommu)
945 {
946 	if (iommu->cmd_sem)
947 		iommu_free_pages((void *)iommu->cmd_sem);
948 }
949 
950 static void iommu_enable_xt(struct amd_iommu *iommu)
951 {
952 #ifdef CONFIG_IRQ_REMAP
953 	/*
954 	 * XT mode (32-bit APIC destination ID) requires
955 	 * GA mode (128-bit IRTE support) as a prerequisite.
956 	 */
957 	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
958 	    amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
959 		iommu_feature_enable(iommu, CONTROL_XT_EN);
960 #endif /* CONFIG_IRQ_REMAP */
961 }
962 
963 static void iommu_enable_gt(struct amd_iommu *iommu)
964 {
965 	if (!check_feature(FEATURE_GT))
966 		return;
967 
968 	iommu_feature_enable(iommu, CONTROL_GT_EN);
969 }
970 
971 /* sets a specific bit in the device table entry. */
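/*
 * The 256-bit DTE is stored as four u64 words: bits [7:6] of the bit number
 * select the word, bits [5:0] the bit within that word.
 */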
972 static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
973 {
974 	int i = (bit >> 6) & 0x03;
975 	int _bit = bit & 0x3f;
976 
977 	dte->data[i] |= (1UL << _bit);
978 }
979 
980 static bool __copy_device_table(struct amd_iommu *iommu)
981 {
982 	u64 int_ctl, int_tab_len, entry = 0;
983 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
984 	struct dev_table_entry *old_devtb = NULL;
985 	u32 lo, hi, devid, old_devtb_size;
986 	phys_addr_t old_devtb_phys;
987 	u16 dom_id, dte_v, irq_v;
988 	u64 tmp;
989 
990 	/* Each IOMMU uses a separate device table of the same size */
991 	lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
992 	hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
993 	entry = (((u64) hi) << 32) + lo;
994 
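	/* The size field in the low bits encodes the table size in 4K pages minus one; convert back to bytes. */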
995 	old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
996 	if (old_devtb_size != pci_seg->dev_table_size) {
997 		pr_err("The device table size of IOMMU:%d is not expected!\n",
998 			iommu->index);
999 		return false;
1000 	}
1001 
1002 	/*
1003 	 * When SME is enabled in the first kernel, the entry includes the
1004 	 * memory encryption mask (sme_me_mask); we must remove the memory
1005 	 * encryption mask to obtain the true physical address in the kdump kernel.
1006 	 */
1007 	old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
1008 
1009 	if (old_devtb_phys >= 0x100000000ULL) {
1010 		pr_err("The address of old device table is above 4G, not trustworthy!\n");
1011 		return false;
1012 	}
1013 	old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
1014 		    ? (__force void *)ioremap_encrypted(old_devtb_phys,
1015 							pci_seg->dev_table_size)
1016 		    : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
1017 
1018 	if (!old_devtb)
1019 		return false;
1020 
1021 	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
1022 		GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
1023 	if (pci_seg->old_dev_tbl_cpy == NULL) {
1024 		pr_err("Failed to allocate memory for copying old device table!\n");
1025 		memunmap(old_devtb);
1026 		return false;
1027 	}
1028 
1029 	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
1030 		pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
1031 		dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
1032 		dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
1033 
1034 		if (dte_v && dom_id) {
1035 			pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
1036 			pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
1037 			/* Reserve the Domain IDs used by previous kernel */
1038 			if (ida_alloc_range(&pdom_ids, dom_id, dom_id, GFP_ATOMIC) != dom_id) {
1039 				pr_err("Failed to reserve domain ID 0x%x\n", dom_id);
1040 				memunmap(old_devtb);
1041 				return false;
1042 			}
1043 			/* If gcr3 table existed, mask it out */
1044 			if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
1045 				tmp = (DTE_GCR3_30_15 | DTE_GCR3_51_31);
1046 				pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
1047 				tmp = (DTE_GCR3_14_12 | DTE_FLAG_GV);
1048 				pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
1049 			}
1050 		}
1051 
1052 		irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
1053 		int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
1054 		int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
1055 		if (irq_v && (int_ctl || int_tab_len)) {
1056 			if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
1057 			    (int_tab_len != DTE_INTTABLEN_512 &&
1058 			     int_tab_len != DTE_INTTABLEN_2K)) {
1059 				pr_err("Wrong old irq remapping flag: %#x\n", devid);
1060 				memunmap(old_devtb);
1061 				return false;
1062 			}
1063 
1064 			pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
1065 		}
1066 	}
1067 	memunmap(old_devtb);
1068 
1069 	return true;
1070 }
1071 
1072 static bool copy_device_table(void)
1073 {
1074 	struct amd_iommu *iommu;
1075 	struct amd_iommu_pci_seg *pci_seg;
1076 
1077 	if (!amd_iommu_pre_enabled)
1078 		return false;
1079 
1080 	pr_warn("Translation is already enabled - trying to copy translation structures\n");
1081 
1082 	/*
1083 	 * All IOMMUs within a PCI segment share a common device table.
1084 	 * Hence copy device table only once per PCI segment.
1085 	 */
1086 	for_each_pci_segment(pci_seg) {
1087 		for_each_iommu(iommu) {
1088 			if (pci_seg->id != iommu->pci_seg->id)
1089 				continue;
1090 			if (!__copy_device_table(iommu))
1091 				return false;
1092 			break;
1093 		}
1094 	}
1095 
1096 	return true;
1097 }
1098 
1099 struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid)
1100 {
1101 	struct ivhd_dte_flags *e;
1102 	unsigned int best_len = UINT_MAX;
1103 	struct dev_table_entry *dte = NULL;
1104 
1105 	for_each_ivhd_dte_flags(e) {
1106 		/*
1107 		 * Need to go through the whole list to find the smallest range
1108 		 * which contains the devid.
1109 		 */
1110 		if ((e->segid == segid) &&
1111 		    (e->devid_first <= devid) && (devid <= e->devid_last)) {
1112 			unsigned int len = e->devid_last - e->devid_first;
1113 
1114 			if (len < best_len) {
1115 				dte = &(e->dte);
1116 				best_len = len;
1117 			}
1118 		}
1119 	}
1120 	return dte;
1121 }
1122 
1123 static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last)
1124 {
1125 	struct ivhd_dte_flags *e;
1126 
1127 	for_each_ivhd_dte_flags(e) {
1128 		if ((e->segid == segid) &&
1129 		    (e->devid_first == first) &&
1130 		    (e->devid_last == last))
1131 			return true;
1132 	}
1133 	return false;
1134 }
1135 
1136 /*
1137  * This function takes the device specific flags read from the ACPI
1138  * table and sets up the device table entry with that information
1139  */
1140 static void __init
1141 set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last,
1142 			      u32 flags, u32 ext_flags)
1143 {
1144 	int i;
1145 	struct dev_table_entry dte = {};
1146 
1147 	/* Parse IVHD DTE setting flags and store information */
1148 	if (flags) {
1149 		struct ivhd_dte_flags *d;
1150 
1151 		if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last))
1152 			return;
1153 
1154 		d = kzalloc(sizeof(struct ivhd_dte_flags), GFP_KERNEL);
1155 		if (!d)
1156 			return;
1157 
1158 		pr_debug("%s: devid range %#x:%#x\n", __func__, first, last);
1159 
1160 		if (flags & ACPI_DEVFLAG_INITPASS)
1161 			set_dte_bit(&dte, DEV_ENTRY_INIT_PASS);
1162 		if (flags & ACPI_DEVFLAG_EXTINT)
1163 			set_dte_bit(&dte, DEV_ENTRY_EINT_PASS);
1164 		if (flags & ACPI_DEVFLAG_NMI)
1165 			set_dte_bit(&dte, DEV_ENTRY_NMI_PASS);
1166 		if (flags & ACPI_DEVFLAG_SYSMGT1)
1167 			set_dte_bit(&dte, DEV_ENTRY_SYSMGT1);
1168 		if (flags & ACPI_DEVFLAG_SYSMGT2)
1169 			set_dte_bit(&dte, DEV_ENTRY_SYSMGT2);
1170 		if (flags & ACPI_DEVFLAG_LINT0)
1171 			set_dte_bit(&dte, DEV_ENTRY_LINT0_PASS);
1172 		if (flags & ACPI_DEVFLAG_LINT1)
1173 			set_dte_bit(&dte, DEV_ENTRY_LINT1_PASS);
1174 
1175 		/* Apply erratum 63, which needs info in initial_dte */
1176 		if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1)
1177 			dte.data[0] |= DTE_FLAG_IW;
1178 
1179 		memcpy(&d->dte, &dte, sizeof(dte));
1180 		d->segid = iommu->pci_seg->id;
1181 		d->devid_first = first;
1182 		d->devid_last = last;
1183 		list_add_tail(&d->list, &amd_ivhd_dev_flags_list);
1184 	}
1185 
1186 	for (i = first; i <= last; i++)  {
1187 		if (flags) {
1188 			struct dev_table_entry *dev_table = get_dev_table(iommu);
1189 
1190 			memcpy(&dev_table[i], &dte, sizeof(dte));
1191 		}
1192 		amd_iommu_set_rlookup_table(iommu, i);
1193 	}
1194 }
1195 
1196 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1197 					   u16 devid, u32 flags, u32 ext_flags)
1198 {
1199 	set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags);
1200 }
1201 
1202 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
1203 {
1204 	struct devid_map *entry;
1205 	struct list_head *list;
1206 
1207 	if (type == IVHD_SPECIAL_IOAPIC)
1208 		list = &ioapic_map;
1209 	else if (type == IVHD_SPECIAL_HPET)
1210 		list = &hpet_map;
1211 	else
1212 		return -EINVAL;
1213 
1214 	list_for_each_entry(entry, list, list) {
1215 		if (!(entry->id == id && entry->cmd_line))
1216 			continue;
1217 
1218 		pr_info("Command-line override present for %s id %d - ignoring\n",
1219 			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1220 
1221 		*devid = entry->devid;
1222 
1223 		return 0;
1224 	}
1225 
1226 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1227 	if (!entry)
1228 		return -ENOMEM;
1229 
1230 	entry->id	= id;
1231 	entry->devid	= *devid;
1232 	entry->cmd_line	= cmd_line;
1233 
1234 	list_add_tail(&entry->list, list);
1235 
1236 	return 0;
1237 }
1238 
1239 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
1240 				      bool cmd_line)
1241 {
1242 	struct acpihid_map_entry *entry;
1243 	struct list_head *list = &acpihid_map;
1244 
1245 	list_for_each_entry(entry, list, list) {
1246 		if (strcmp(entry->hid, hid) ||
1247 		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1248 		    !entry->cmd_line)
1249 			continue;
1250 
1251 		pr_info("Command-line override for hid:%s uid:%s\n",
1252 			hid, uid);
1253 		*devid = entry->devid;
1254 		return 0;
1255 	}
1256 
1257 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1258 	if (!entry)
1259 		return -ENOMEM;
1260 
1261 	memcpy(entry->uid, uid, strlen(uid));
1262 	memcpy(entry->hid, hid, strlen(hid));
1263 	entry->devid = *devid;
1264 	entry->cmd_line	= cmd_line;
1265 	entry->root_devid = (entry->devid & (~0x7));
1266 
1267 	pr_info("%s, add hid:%s, uid:%s, rdevid:%#x\n",
1268 		entry->cmd_line ? "cmd" : "ivrs",
1269 		entry->hid, entry->uid, entry->root_devid);
1270 
1271 	list_add_tail(&entry->list, list);
1272 	return 0;
1273 }
1274 
1275 static int __init add_early_maps(void)
1276 {
1277 	int i, ret;
1278 
1279 	for (i = 0; i < early_ioapic_map_size; ++i) {
1280 		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1281 					 early_ioapic_map[i].id,
1282 					 &early_ioapic_map[i].devid,
1283 					 early_ioapic_map[i].cmd_line);
1284 		if (ret)
1285 			return ret;
1286 	}
1287 
1288 	for (i = 0; i < early_hpet_map_size; ++i) {
1289 		ret = add_special_device(IVHD_SPECIAL_HPET,
1290 					 early_hpet_map[i].id,
1291 					 &early_hpet_map[i].devid,
1292 					 early_hpet_map[i].cmd_line);
1293 		if (ret)
1294 			return ret;
1295 	}
1296 
1297 	for (i = 0; i < early_acpihid_map_size; ++i) {
1298 		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1299 					  early_acpihid_map[i].uid,
1300 					  &early_acpihid_map[i].devid,
1301 					  early_acpihid_map[i].cmd_line);
1302 		if (ret)
1303 			return ret;
1304 	}
1305 
1306 	return 0;
1307 }
1308 
1309 /*
1310  * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1311  * initializes the hardware and our data structures with it.
1312  */
1313 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1314 					struct ivhd_header *h)
1315 {
1316 	u8 *p = (u8 *)h;
1317 	u8 *end = p, flags = 0;
1318 	u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
1319 	u32 dev_i, ext_flags = 0;
1320 	bool alias = false;
1321 	struct ivhd_entry *e;
1322 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1323 	u32 ivhd_size;
1324 	int ret;
1325 
1326 
1327 	ret = add_early_maps();
1328 	if (ret)
1329 		return ret;
1330 
1331 	amd_iommu_apply_ivrs_quirks();
1332 
1333 	/*
1334 	 * First save the recommended feature enable bits from ACPI
1335 	 */
1336 	iommu->acpi_flags = h->flags;
1337 
1338 	/*
1339 	 * Done. Now parse the device entries
1340 	 */
1341 	ivhd_size = get_ivhd_header_size(h);
1342 	if (!ivhd_size) {
1343 		pr_err("Unsupported IVHD type %#x\n", h->type);
1344 		return -EINVAL;
1345 	}
1346 
1347 	p += ivhd_size;
1348 
1349 	end += h->length;
1350 
1351 
1352 	while (p < end) {
1353 		e = (struct ivhd_entry *)p;
1354 		seg_id = pci_seg->id;
1355 
1356 		switch (e->type) {
1357 		case IVHD_DEV_ALL:
1358 
1359 			DUMP_printk("  DEV_ALL\t\t\tsetting: %#02x\n", e->flags);
1360 			set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0);
1361 			break;
1362 		case IVHD_DEV_SELECT:
1363 
1364 			DUMP_printk("  DEV_SELECT\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
1365 				    seg_id, PCI_BUS_NUM(e->devid),
1366 				    PCI_SLOT(e->devid),
1367 				    PCI_FUNC(e->devid),
1368 				    e->flags);
1369 
1370 			devid = e->devid;
1371 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1372 			break;
1373 		case IVHD_DEV_SELECT_RANGE_START:
1374 
1375 			DUMP_printk("  DEV_SELECT_RANGE_START\tdevid: %04x:%02x:%02x.%x flags: %#02x\n",
1376 				    seg_id, PCI_BUS_NUM(e->devid),
1377 				    PCI_SLOT(e->devid),
1378 				    PCI_FUNC(e->devid),
1379 				    e->flags);
1380 
1381 			devid_start = e->devid;
1382 			flags = e->flags;
1383 			ext_flags = 0;
1384 			alias = false;
1385 			break;
1386 		case IVHD_DEV_ALIAS:
1387 
1388 			DUMP_printk("  DEV_ALIAS\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %02x:%02x.%x\n",
1389 				    seg_id, PCI_BUS_NUM(e->devid),
1390 				    PCI_SLOT(e->devid),
1391 				    PCI_FUNC(e->devid),
1392 				    e->flags,
1393 				    PCI_BUS_NUM(e->ext >> 8),
1394 				    PCI_SLOT(e->ext >> 8),
1395 				    PCI_FUNC(e->ext >> 8));
1396 
1397 			devid = e->devid;
1398 			devid_to = e->ext >> 8;
1399 			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
1400 			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1401 			pci_seg->alias_table[devid] = devid_to;
1402 			break;
1403 		case IVHD_DEV_ALIAS_RANGE:
1404 
1405 			DUMP_printk("  DEV_ALIAS_RANGE\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %04x:%02x:%02x.%x\n",
1406 				    seg_id, PCI_BUS_NUM(e->devid),
1407 				    PCI_SLOT(e->devid),
1408 				    PCI_FUNC(e->devid),
1409 				    e->flags,
1410 				    seg_id, PCI_BUS_NUM(e->ext >> 8),
1411 				    PCI_SLOT(e->ext >> 8),
1412 				    PCI_FUNC(e->ext >> 8));
1413 
1414 			devid_start = e->devid;
1415 			flags = e->flags;
1416 			devid_to = e->ext >> 8;
1417 			ext_flags = 0;
1418 			alias = true;
1419 			break;
1420 		case IVHD_DEV_EXT_SELECT:
1421 
1422 			DUMP_printk("  DEV_EXT_SELECT\t\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
1423 				    seg_id, PCI_BUS_NUM(e->devid),
1424 				    PCI_SLOT(e->devid),
1425 				    PCI_FUNC(e->devid),
1426 				    e->flags, e->ext);
1427 
1428 			devid = e->devid;
1429 			set_dev_entry_from_acpi(iommu, devid, e->flags,
1430 						e->ext);
1431 			break;
1432 		case IVHD_DEV_EXT_SELECT_RANGE:
1433 
1434 			DUMP_printk("  DEV_EXT_SELECT_RANGE\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n",
1435 				    seg_id, PCI_BUS_NUM(e->devid),
1436 				    PCI_SLOT(e->devid),
1437 				    PCI_FUNC(e->devid),
1438 				    e->flags, e->ext);
1439 
1440 			devid_start = e->devid;
1441 			flags = e->flags;
1442 			ext_flags = e->ext;
1443 			alias = false;
1444 			break;
1445 		case IVHD_DEV_RANGE_END:
1446 
1447 			DUMP_printk("  DEV_RANGE_END\t\tdevid: %04x:%02x:%02x.%x\n",
1448 				    seg_id, PCI_BUS_NUM(e->devid),
1449 				    PCI_SLOT(e->devid),
1450 				    PCI_FUNC(e->devid));
1451 
1452 			devid = e->devid;
1453 			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1454 				if (alias)
1455 					pci_seg->alias_table[dev_i] = devid_to;
1456 			}
1457 			set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
1458 			set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
1459 			break;
1460 		case IVHD_DEV_SPECIAL: {
1461 			u8 handle, type;
1462 			const char *var;
1463 			u32 devid;
1464 			int ret;
1465 
1466 			handle = e->ext & 0xff;
1467 			devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
1468 			type   = (e->ext >> 24) & 0xff;
1469 
1470 			if (type == IVHD_SPECIAL_IOAPIC)
1471 				var = "IOAPIC";
1472 			else if (type == IVHD_SPECIAL_HPET)
1473 				var = "HPET";
1474 			else
1475 				var = "UNKNOWN";
1476 
1477 			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
1478 				    var, (int)handle,
1479 				    seg_id, PCI_BUS_NUM(devid),
1480 				    PCI_SLOT(devid),
1481 				    PCI_FUNC(devid),
1482 				    e->flags);
1483 
1484 			ret = add_special_device(type, handle, &devid, false);
1485 			if (ret)
1486 				return ret;
1487 
1488 			/*
1489 			 * add_special_device might update the devid in case a
1490 			 * command-line override is present. So call
1491 			 * set_dev_entry_from_acpi after add_special_device.
1492 			 */
1493 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1494 
1495 			break;
1496 		}
1497 		case IVHD_DEV_ACPI_HID: {
1498 			u32 devid;
1499 			u8 hid[ACPIHID_HID_LEN];
1500 			u8 uid[ACPIHID_UID_LEN];
1501 			int ret;
1502 
1503 			if (h->type != 0x40) {
1504 				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1505 				       e->type);
1506 				break;
1507 			}
1508 
1509 			BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
1510 			memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
1511 			hid[ACPIHID_HID_LEN - 1] = '\0';
1512 
1513 			if (!(*hid)) {
1514 				pr_err(FW_BUG "Invalid HID.\n");
1515 				break;
1516 			}
1517 
1518 			uid[0] = '\0';
1519 			switch (e->uidf) {
1520 			case UID_NOT_PRESENT:
1521 
1522 				if (e->uidl != 0)
1523 					pr_warn(FW_BUG "Invalid UID length.\n");
1524 
1525 				break;
1526 			case UID_IS_INTEGER:
1527 
1528 				sprintf(uid, "%d", e->uid);
1529 
1530 				break;
1531 			case UID_IS_CHARACTER:
1532 
1533 				memcpy(uid, &e->uid, e->uidl);
1534 				uid[e->uidl] = '\0';
1535 
1536 				break;
1537 			default:
1538 				break;
1539 			}
1540 
1541 			devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
1542 			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n",
1543 				    hid, uid, seg_id,
1544 				    PCI_BUS_NUM(devid),
1545 				    PCI_SLOT(devid),
1546 				    PCI_FUNC(devid),
1547 				    e->flags);
1548 
1549 			flags = e->flags;
1550 
1551 			ret = add_acpi_hid_device(hid, uid, &devid, false);
1552 			if (ret)
1553 				return ret;
1554 
1555 			/*
1556 			 * add_acpi_hid_device might update the devid in case a
1557 			 * command-line override is present. So call
1558 			 * set_dev_entry_from_acpi after add_acpi_hid_device.
1559 			 */
1560 			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1561 
1562 			break;
1563 		}
1564 		default:
1565 			break;
1566 		}
1567 
1568 		p += ivhd_entry_length(p);
1569 	}
1570 
1571 	return 0;
1572 }
1573 
1574 /* Allocate PCI segment data structure */
1575 static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
1576 					  struct acpi_table_header *ivrs_base)
1577 {
1578 	struct amd_iommu_pci_seg *pci_seg;
1579 	int last_bdf;
1580 
1581 	/*
1582 	 * First parse ACPI tables to find the largest Bus/Dev/Func we need to
1583 	 * handle in this PCI segment. Based on this information, the shared data
1584 	 * structures for the PCI segments in the system will be allocated.
1585 	 */
1586 	last_bdf = find_last_devid_acpi(ivrs_base, id);
1587 	if (last_bdf < 0)
1588 		return NULL;
1589 
1590 	pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
1591 	if (pci_seg == NULL)
1592 		return NULL;
1593 
1594 	pci_seg->last_bdf = last_bdf;
1595 	DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
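	/* One device table entry (DTE) per possible BDF; round the total up to a power of two, at least 4K. */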
1596 	pci_seg->dev_table_size =
1597 		max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
1598 		    SZ_4K);
1599 
1600 	pci_seg->id = id;
1601 	init_llist_head(&pci_seg->dev_data_list);
1602 	INIT_LIST_HEAD(&pci_seg->unity_map);
1603 	list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
1604 
1605 	if (alloc_dev_table(pci_seg))
1606 		return NULL;
1607 	if (alloc_alias_table(pci_seg))
1608 		return NULL;
1609 	if (alloc_rlookup_table(pci_seg))
1610 		return NULL;
1611 
1612 	return pci_seg;
1613 }
1614 
1615 static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
1616 					struct acpi_table_header *ivrs_base)
1617 {
1618 	struct amd_iommu_pci_seg *pci_seg;
1619 
1620 	for_each_pci_segment(pci_seg) {
1621 		if (pci_seg->id == id)
1622 			return pci_seg;
1623 	}
1624 
1625 	return alloc_pci_segment(id, ivrs_base);
1626 }
1627 
1628 static void __init free_pci_segments(void)
1629 {
1630 	struct amd_iommu_pci_seg *pci_seg, *next;
1631 
1632 	for_each_pci_segment_safe(pci_seg, next) {
1633 		list_del(&pci_seg->list);
1634 		free_irq_lookup_table(pci_seg);
1635 		free_rlookup_table(pci_seg);
1636 		free_alias_table(pci_seg);
1637 		free_dev_table(pci_seg);
1638 		kfree(pci_seg);
1639 	}
1640 }
1641 
1642 static void __init free_sysfs(struct amd_iommu *iommu)
1643 {
1644 	if (iommu->iommu.dev) {
1645 		iommu_device_unregister(&iommu->iommu);
1646 		iommu_device_sysfs_remove(&iommu->iommu);
1647 	}
1648 }
1649 
1650 static void __init free_iommu_one(struct amd_iommu *iommu)
1651 {
1652 	free_sysfs(iommu);
1653 	free_cwwb_sem(iommu);
1654 	free_command_buffer(iommu);
1655 	free_event_buffer(iommu);
1656 	amd_iommu_free_ppr_log(iommu);
1657 	free_ga_log(iommu);
1658 	iommu_unmap_mmio_space(iommu);
1659 	amd_iommu_iopf_uninit(iommu);
1660 }
1661 
1662 static void __init free_iommu_all(void)
1663 {
1664 	struct amd_iommu *iommu, *next;
1665 
1666 	for_each_iommu_safe(iommu, next) {
1667 		list_del(&iommu->list);
1668 		free_iommu_one(iommu);
1669 		kfree(iommu);
1670 	}
1671 }
1672 
1673 /*
1674  * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1675  * Workaround:
1676  *     BIOS should disable L2B miscellaneous clock gating by setting
1677  *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1678  */
1679 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1680 {
1681 	u32 value;
1682 
1683 	if ((boot_cpu_data.x86 != 0x15) ||
1684 	    (boot_cpu_data.x86_model < 0x10) ||
1685 	    (boot_cpu_data.x86_model > 0x1f))
1686 		return;
1687 
1688 	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1689 	pci_read_config_dword(iommu->dev, 0xf4, &value);
1690 
1691 	if (value & BIT(2))
1692 		return;
1693 
1694 	/* Select NB indirect register 0x90 and enable writing */
1695 	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1696 
1697 	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1698 	pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1699 
1700 	/* Clear the enable writing bit */
1701 	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1702 }
1703 
1704 /*
1705  * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1706  * Workaround:
1707  *     BIOS should enable ATS write permission check by setting
1708  *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1709  */
1710 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1711 {
1712 	u32 value;
1713 
1714 	if ((boot_cpu_data.x86 != 0x15) ||
1715 	    (boot_cpu_data.x86_model < 0x30) ||
1716 	    (boot_cpu_data.x86_model > 0x3f))
1717 		return;
1718 
1719 	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1720 	value = iommu_read_l2(iommu, 0x47);
1721 
1722 	if (value & BIT(0))
1723 		return;
1724 
1725 	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1726 	iommu_write_l2(iommu, 0x47, value | BIT(0));
1727 
1728 	pci_info(iommu->dev, "Applying ATS write check workaround\n");
1729 }
1730 
1731 /*
1732  * This function glues the initialization function for one IOMMU
1733  * together and also allocates the command buffer and programs the
1734  * hardware. It does NOT enable the IOMMU. This is done afterwards.
1735  */
1736 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
1737 				 struct acpi_table_header *ivrs_base)
1738 {
1739 	struct amd_iommu_pci_seg *pci_seg;
1740 
1741 	pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
1742 	if (pci_seg == NULL)
1743 		return -ENOMEM;
1744 	iommu->pci_seg = pci_seg;
1745 
1746 	raw_spin_lock_init(&iommu->lock);
1747 	atomic64_set(&iommu->cmd_sem_val, 0);
1748 
1749 	/* Add IOMMU to internal data structures */
1750 	list_add_tail(&iommu->list, &amd_iommu_list);
1751 	iommu->index = amd_iommus_present++;
1752 
1753 	if (unlikely(iommu->index >= MAX_IOMMUS)) {
1754 		WARN(1, "System has more IOMMUs than supported by this driver\n");
1755 		return -ENOSYS;
1756 	}
1757 
1758 	/*
1759 	 * Copy data from ACPI table entry to the iommu struct
1760 	 */
1761 	iommu->devid   = h->devid;
1762 	iommu->cap_ptr = h->cap_ptr;
1763 	iommu->mmio_phys = h->mmio_phys;
1764 
1765 	switch (h->type) {
1766 	case 0x10:
1767 		/* Check if IVHD EFR contains proper max banks/counters */
1768 		if ((h->efr_attr != 0) &&
1769 		    ((h->efr_attr & (0xF << 13)) != 0) &&
1770 		    ((h->efr_attr & (0x3F << 17)) != 0))
1771 			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1772 		else
1773 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1774 
1775 		/* GAM requires GA mode. */
1776 		if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)
1777 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1778 		break;
1779 	case 0x11:
1780 	case 0x40:
1781 		if (h->efr_reg & (1 << 9))
1782 			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1783 		else
1784 			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1785 
1786 		/* XT and GAM require GA mode. */
1787 		if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) {
1788 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1789 			break;
1790 		}
1791 
1792 		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
1793 			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1794 
1795 		early_iommu_features_init(iommu, h);
1796 
1797 		break;
1798 	default:
1799 		return -EINVAL;
1800 	}
1801 
1802 	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1803 						iommu->mmio_phys_end);
1804 	if (!iommu->mmio_base)
1805 		return -ENOMEM;
1806 
1807 	return init_iommu_from_acpi(iommu, h);
1808 }
1809 
1810 static int __init init_iommu_one_late(struct amd_iommu *iommu)
1811 {
1812 	int ret;
1813 
1814 	if (alloc_cwwb_sem(iommu))
1815 		return -ENOMEM;
1816 
1817 	if (alloc_command_buffer(iommu))
1818 		return -ENOMEM;
1819 
1820 	if (alloc_event_buffer(iommu))
1821 		return -ENOMEM;
1822 
1823 	iommu->int_enabled = false;
1824 
1825 	init_translation_status(iommu);
1826 	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1827 		iommu_disable(iommu);
1828 		clear_translation_pre_enabled(iommu);
1829 		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1830 			iommu->index);
1831 	}
1832 	if (amd_iommu_pre_enabled)
1833 		amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1834 
1835 	if (amd_iommu_irq_remap) {
1836 		ret = amd_iommu_create_irq_domain(iommu);
1837 		if (ret)
1838 			return ret;
1839 	}
1840 
1841 	/*
1842 	 * Make sure IOMMU is not considered to translate itself. The IVRS
1843 	 * table tells us so, but this is a lie!
1844 	 */
1845 	iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
1846 
1847 	return 0;
1848 }
1849 
1850 /**
1851  * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1852  * @ivrs: Pointer to the IVRS header
1853  *
1854  * This function searches through all IVHD blocks describing the same IOMMU
1855  * and returns the highest supported IVHD type found.
1855  */
1856 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1857 {
1858 	u8 *base = (u8 *)ivrs;
1859 	struct ivhd_header *ivhd = (struct ivhd_header *)
1860 					(base + IVRS_HEADER_LENGTH);
1861 	u8 last_type = ivhd->type;
1862 	u16 devid = ivhd->devid;
1863 
1864 	while (((u8 *)ivhd - base < ivrs->length) &&
1865 	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1866 		u8 *p = (u8 *) ivhd;
1867 
1868 		if (ivhd->devid == devid)
1869 			last_type = ivhd->type;
1870 		ivhd = (struct ivhd_header *)(p + ivhd->length);
1871 	}
1872 
1873 	return last_type;
1874 }
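
/*
 * Illustrative example (made-up table contents, not from a real IVRS): if the
 * table carries an IVHD type 0x10 block followed by a type 0x11 block for the
 * same devid, the loop above keeps updating last_type and returns 0x11. The
 * walk stops at the first block whose type exceeds
 * ACPI_IVHD_TYPE_MAX_SUPPORTED.
 */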
1875 
1876 /*
1877  * Iterates over all IOMMU entries in the ACPI table, allocates the
1878  * IOMMU structure and initializes it with init_iommu_one()
1879  */
1880 static int __init init_iommu_all(struct acpi_table_header *table)
1881 {
1882 	u8 *p = (u8 *)table, *end = (u8 *)table;
1883 	struct ivhd_header *h;
1884 	struct amd_iommu *iommu;
1885 	int ret;
1886 
1887 	end += table->length;
1888 	p += IVRS_HEADER_LENGTH;
1889 
1890 	/* Phase 1: Process all IVHD blocks */
1891 	while (p < end) {
1892 		h = (struct ivhd_header *)p;
1893 		if (*p == amd_iommu_target_ivhd_type) {
1894 
1895 			DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
1896 				    "flags: %01x info %04x\n",
1897 				    h->pci_seg, PCI_BUS_NUM(h->devid),
1898 				    PCI_SLOT(h->devid), PCI_FUNC(h->devid),
1899 				    h->cap_ptr, h->flags, h->info);
1900 			DUMP_printk("       mmio-addr: %016llx\n",
1901 				    h->mmio_phys);
1902 
1903 			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1904 			if (iommu == NULL)
1905 				return -ENOMEM;
1906 
1907 			ret = init_iommu_one(iommu, h, table);
1908 			if (ret)
1909 				return ret;
1910 		}
1911 		p += h->length;
1912 
1913 	}
1914 	WARN_ON(p != end);
1915 
1916 	/* Phase 2 : Early feature support check */
1917 	get_global_efr();
1918 
1919 	/* Phase 3 : Enabling IOMMU features */
1920 	for_each_iommu(iommu) {
1921 		ret = init_iommu_one_late(iommu);
1922 		if (ret)
1923 			return ret;
1924 	}
1925 
1926 	return 0;
1927 }
1928 
1929 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1930 {
1931 	u64 val;
1932 	struct pci_dev *pdev = iommu->dev;
1933 
1934 	if (!check_feature(FEATURE_PC))
1935 		return;
1936 
1937 	amd_iommu_pc_present = true;
1938 
1939 	pci_info(pdev, "IOMMU performance counters supported\n");
1940 
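	/*
	 * Judging from the shifts below (a hedged reading of the code, not a
	 * quote from the spec), the counter configuration register encodes
	 * the number of counter banks in bits [17:12] and the number of
	 * counters per bank in bits [10:7].
	 */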
1941 	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1942 	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1943 	iommu->max_counters = (u8) ((val >> 7) & 0xf);
1944 
1945 	return;
1946 }
1947 
1948 static ssize_t amd_iommu_show_cap(struct device *dev,
1949 				  struct device_attribute *attr,
1950 				  char *buf)
1951 {
1952 	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1953 	return sysfs_emit(buf, "%x\n", iommu->cap);
1954 }
1955 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1956 
1957 static ssize_t amd_iommu_show_features(struct device *dev,
1958 				       struct device_attribute *attr,
1959 				       char *buf)
1960 {
1961 	return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
1962 }
1963 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1964 
1965 static struct attribute *amd_iommu_attrs[] = {
1966 	&dev_attr_cap.attr,
1967 	&dev_attr_features.attr,
1968 	NULL,
1969 };
1970 
1971 static struct attribute_group amd_iommu_group = {
1972 	.name = "amd-iommu",
1973 	.attrs = amd_iommu_attrs,
1974 };
1975 
1976 static const struct attribute_group *amd_iommu_groups[] = {
1977 	&amd_iommu_group,
1978 	NULL,
1979 };
1980 
1981 /*
1982  * Note: IVHD types 0x11 and 0x40 also contain an exact copy
1983  * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
1984  * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
1985  */
1986 static void __init late_iommu_features_init(struct amd_iommu *iommu)
1987 {
1988 	u64 features, features2;
1989 
1990 	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
1991 		return;
1992 
1993 	/* read extended feature bits */
1994 	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
1995 	features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
1996 
1997 	if (!amd_iommu_efr) {
1998 		amd_iommu_efr = features;
1999 		amd_iommu_efr2 = features2;
2000 		return;
2001 	}
2002 
2003 	/*
2004 	 * Sanity check and warn if EFR values from
2005 	 * IVHD and MMIO conflict.
2006 	 */
2007 	if (features != amd_iommu_efr ||
2008 	    features2 != amd_iommu_efr2) {
2009 		pr_warn(FW_WARN
2010 			"EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
2011 			features, amd_iommu_efr,
2012 			features2, amd_iommu_efr2);
2013 	}
2014 }
2015 
2016 static int __init iommu_init_pci(struct amd_iommu *iommu)
2017 {
2018 	int cap_ptr = iommu->cap_ptr;
2019 	int ret;
2020 
2021 	iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2022 						 PCI_BUS_NUM(iommu->devid),
2023 						 iommu->devid & 0xff);
2024 	if (!iommu->dev)
2025 		return -ENODEV;
2026 
2027 	/* ACPI _PRT won't have an IRQ for IOMMU */
2028 	iommu->dev->irq_managed = 1;
2029 
2030 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
2031 			      &iommu->cap);
2032 
2033 	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
2034 		amd_iommu_iotlb_sup = false;
2035 
2036 	late_iommu_features_init(iommu);
2037 
2038 	if (check_feature(FEATURE_GT)) {
2039 		int glxval;
2040 		u64 pasmax;
2041 
2042 		pasmax = FIELD_GET(FEATURE_PASMAX, amd_iommu_efr);
2043 		iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1;
2044 
2045 		BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK);
2046 
2047 		glxval = FIELD_GET(FEATURE_GLX, amd_iommu_efr);
2048 
2049 		if (amd_iommu_max_glx_val == -1)
2050 			amd_iommu_max_glx_val = glxval;
2051 		else
2052 			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
2053 
2054 		iommu_enable_gt(iommu);
2055 	}
2056 
2057 	if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu))
2058 		return -ENOMEM;
2059 
2060 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
2061 		pr_info("Using strict mode due to virtualization\n");
2062 		iommu_set_dma_strict();
2063 		amd_iommu_np_cache = true;
2064 	}
2065 
2066 	init_iommu_perf_ctr(iommu);
2067 
2068 	if (is_rd890_iommu(iommu->dev)) {
2069 		int i, j;
2070 
2071 		iommu->root_pdev =
2072 			pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2073 						    iommu->dev->bus->number,
2074 						    PCI_DEVFN(0, 0));
2075 
2076 		/*
2077 		 * Some rd890 systems may not be fully reconfigured by the
2078 		 * BIOS, so we need to store this information in order to
2079 		 * reprogram the hardware on resume.
2080 		 */
2081 		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
2082 				&iommu->stored_addr_lo);
2083 		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
2084 				&iommu->stored_addr_hi);
2085 
2086 		/* Low bit locks writes to configuration space */
2087 		iommu->stored_addr_lo &= ~1;
2088 
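		/*
		 * Snapshot the L1 (6 banks x 0x12 registers) and L2 (0x83
		 * registers) indirect register state so that
		 * iommu_apply_resume_quirks() can restore it after resume.
		 */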
2089 		for (i = 0; i < 6; i++)
2090 			for (j = 0; j < 0x12; j++)
2091 				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
2092 
2093 		for (i = 0; i < 0x83; i++)
2094 			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
2095 	}
2096 
2097 	amd_iommu_erratum_746_workaround(iommu);
2098 	amd_iommu_ats_write_check_workaround(iommu);
2099 
2100 	ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
2101 			       amd_iommu_groups, "ivhd%d", iommu->index);
2102 	if (ret)
2103 		return ret;
2104 
2105 	/*
2106 	 * Allocate the per-IOMMU IOPF queue here so that, in the attach-device
2107 	 * path, a PRI-capable device can be added to the IOPF queue.
2108 	 */
2109 	if (amd_iommu_gt_ppr_supported()) {
2110 		ret = amd_iommu_iopf_init(iommu);
2111 		if (ret)
2112 			return ret;
2113 	}
2114 
2115 	iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
2116 
2117 	return pci_enable_device(iommu->dev);
2118 }
2119 
2120 static void print_iommu_info(void)
2121 {
2122 	int i;
2123 	static const char * const feat_str[] = {
2124 		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
2125 		"IA", "GA", "HE", "PC"
2126 	};
2127 
2128 	if (amd_iommu_efr) {
2129 		pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);
2130 
2131 		for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
2132 			if (check_feature(1ULL << i))
2133 				pr_cont(" %s", feat_str[i]);
2134 		}
2135 
2136 		if (check_feature(FEATURE_GAM_VAPIC))
2137 			pr_cont(" GA_vAPIC");
2138 
2139 		if (check_feature(FEATURE_SNP))
2140 			pr_cont(" SNP");
2141 
2142 		pr_cont("\n");
2143 	}
2144 
2145 	if (irq_remapping_enabled) {
2146 		pr_info("Interrupt remapping enabled\n");
2147 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2148 			pr_info("X2APIC enabled\n");
2149 	}
2150 	if (amd_iommu_pgtable == PD_MODE_V2) {
2151 		pr_info("V2 page table enabled (Paging mode : %d level)\n",
2152 			amd_iommu_gpt_level);
2153 	}
2154 }
2155 
2156 static int __init amd_iommu_init_pci(void)
2157 {
2158 	struct amd_iommu *iommu;
2159 	struct amd_iommu_pci_seg *pci_seg;
2160 	int ret;
2161 
2162 	/* Init global identity domain before registering IOMMU */
2163 	amd_iommu_init_identity_domain();
2164 
2165 	for_each_iommu(iommu) {
2166 		ret = iommu_init_pci(iommu);
2167 		if (ret) {
2168 			pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
2169 			       iommu->index, ret);
2170 			goto out;
2171 		}
2172 		/* Need to setup range after PCI init */
2173 		iommu_set_cwwb_range(iommu);
2174 	}
2175 
2176 	/*
2177 	 * Order is important here to make sure any unity map requirements are
2178 	 * fulfilled. The unity mappings are created and written to the device
2179 	 * table during the iommu_init_pci() call.
2180 	 *
2181 	 * After that we call init_device_table_dma() to make sure any
2182 	 * uninitialized DTE will block DMA, and in the end we flush the caches
2183 	 * of all IOMMUs to make sure the changes to the device table are
2184 	 * active.
2185 	 */
2186 	for_each_pci_segment(pci_seg)
2187 		init_device_table_dma(pci_seg);
2188 
2189 	for_each_iommu(iommu)
2190 		amd_iommu_flush_all_caches(iommu);
2191 
2192 	print_iommu_info();
2193 
2194 out:
2195 	return ret;
2196 }
2197 
2198 /****************************************************************************
2199  *
2200  * The following functions initialize the MSI interrupts for all IOMMUs
2201  * in the system. It's a bit challenging because there could be multiple
2202  * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
2203  * pci_dev.
2204  *
2205  ****************************************************************************/
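
/*
 * Which path is taken is decided in iommu_init_irq() below: in x2APIC mode
 * the XT interrupt capability registers are programmed via the intcapxt
 * irqdomain, otherwise a regular MSI is requested with iommu_setup_msi().
 */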
2206 
2207 static int iommu_setup_msi(struct amd_iommu *iommu)
2208 {
2209 	int r;
2210 
2211 	r = pci_enable_msi(iommu->dev);
2212 	if (r)
2213 		return r;
2214 
2215 	r = request_threaded_irq(iommu->dev->irq,
2216 				 amd_iommu_int_handler,
2217 				 amd_iommu_int_thread,
2218 				 0, "AMD-Vi",
2219 				 iommu);
2220 
2221 	if (r) {
2222 		pci_disable_msi(iommu->dev);
2223 		return r;
2224 	}
2225 
2226 	return 0;
2227 }
2228 
2229 union intcapxt {
2230 	u64	capxt;
2231 	struct {
2232 		u64	reserved_0		:  2,
2233 			dest_mode_logical	:  1,
2234 			reserved_1		:  5,
2235 			destid_0_23		: 24,
2236 			vector			:  8,
2237 			reserved_2		: 16,
2238 			destid_24_31		:  8;
2239 	};
2240 } __attribute__ ((packed));
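
/*
 * Hedged note, inferred from intcapxt_unmask_irq() below rather than from the
 * IOMMU spec text: this union mirrors the layout of the MMIO XT interrupt
 * capability registers (MMIO_INTCAPXT_EVT/PPR/GALOG_OFFSET). Unmasking an
 * interrupt composes destination APIC ID, vector and destination mode into a
 * single 64-bit value and writes it to the register selected by irqd->hwirq.
 */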
2241 
2242 
2243 static struct irq_chip intcapxt_controller;
2244 
2245 static int intcapxt_irqdomain_activate(struct irq_domain *domain,
2246 				       struct irq_data *irqd, bool reserve)
2247 {
2248 	return 0;
2249 }
2250 
2251 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
2252 					  struct irq_data *irqd)
2253 {
2254 }
2255 
2256 
2257 static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2258 				    unsigned int nr_irqs, void *arg)
2259 {
2260 	struct irq_alloc_info *info = arg;
2261 	int i, ret;
2262 
2263 	if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
2264 		return -EINVAL;
2265 
2266 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
2267 	if (ret < 0)
2268 		return ret;
2269 
2270 	for (i = virq; i < virq + nr_irqs; i++) {
2271 		struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
2272 
2273 		irqd->chip = &intcapxt_controller;
2274 		irqd->hwirq = info->hwirq;
2275 		irqd->chip_data = info->data;
2276 		__irq_set_handler(i, handle_edge_irq, 0, "edge");
2277 	}
2278 
2279 	return ret;
2280 }
2281 
2282 static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
2283 				    unsigned int nr_irqs)
2284 {
2285 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
2286 }
2287 
2288 
2289 static void intcapxt_unmask_irq(struct irq_data *irqd)
2290 {
2291 	struct amd_iommu *iommu = irqd->chip_data;
2292 	struct irq_cfg *cfg = irqd_cfg(irqd);
2293 	union intcapxt xt;
2294 
2295 	xt.capxt = 0ULL;
2296 	xt.dest_mode_logical = apic->dest_mode_logical;
2297 	xt.vector = cfg->vector;
2298 	xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
2299 	xt.destid_24_31 = cfg->dest_apicid >> 24;
2300 
2301 	writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
2302 }
2303 
2304 static void intcapxt_mask_irq(struct irq_data *irqd)
2305 {
2306 	struct amd_iommu *iommu = irqd->chip_data;
2307 
2308 	writeq(0, iommu->mmio_base + irqd->hwirq);
2309 }
2310 
2311 
2312 static int intcapxt_set_affinity(struct irq_data *irqd,
2313 				 const struct cpumask *mask, bool force)
2314 {
2315 	struct irq_data *parent = irqd->parent_data;
2316 	int ret;
2317 
2318 	ret = parent->chip->irq_set_affinity(parent, mask, force);
2319 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
2320 		return ret;
2321 	return 0;
2322 }
2323 
2324 static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
2325 {
2326 	return on ? -EOPNOTSUPP : 0;
2327 }
2328 
2329 static struct irq_chip intcapxt_controller = {
2330 	.name			= "IOMMU-MSI",
2331 	.irq_unmask		= intcapxt_unmask_irq,
2332 	.irq_mask		= intcapxt_mask_irq,
2333 	.irq_ack		= irq_chip_ack_parent,
2334 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
2335 	.irq_set_affinity       = intcapxt_set_affinity,
2336 	.irq_set_wake		= intcapxt_set_wake,
2337 	.flags			= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED,
2338 };
2339 
2340 static const struct irq_domain_ops intcapxt_domain_ops = {
2341 	.alloc			= intcapxt_irqdomain_alloc,
2342 	.free			= intcapxt_irqdomain_free,
2343 	.activate		= intcapxt_irqdomain_activate,
2344 	.deactivate		= intcapxt_irqdomain_deactivate,
2345 };
2346 
2347 
2348 static struct irq_domain *iommu_irqdomain;
2349 
2350 static struct irq_domain *iommu_get_irqdomain(void)
2351 {
2352 	struct fwnode_handle *fn;
2353 
2354 	/* No need for locking here (yet) as the init is single-threaded */
2355 	if (iommu_irqdomain)
2356 		return iommu_irqdomain;
2357 
2358 	fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
2359 	if (!fn)
2360 		return NULL;
2361 
2362 	iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
2363 						      fn, &intcapxt_domain_ops,
2364 						      NULL);
2365 	if (!iommu_irqdomain)
2366 		irq_domain_free_fwnode(fn);
2367 
2368 	return iommu_irqdomain;
2369 }
2370 
2371 static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
2372 				  int hwirq, irq_handler_t thread_fn)
2373 {
2374 	struct irq_domain *domain;
2375 	struct irq_alloc_info info;
2376 	int irq, ret;
2377 	int node = dev_to_node(&iommu->dev->dev);
2378 
2379 	domain = iommu_get_irqdomain();
2380 	if (!domain)
2381 		return -ENXIO;
2382 
2383 	init_irq_alloc_info(&info, NULL);
2384 	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
2385 	info.data = iommu;
2386 	info.hwirq = hwirq;
2387 
2388 	irq = irq_domain_alloc_irqs(domain, 1, node, &info);
2389 	if (irq < 0) {
2390 		irq_domain_remove(domain);
2391 		return irq;
2392 	}
2393 
2394 	ret = request_threaded_irq(irq, amd_iommu_int_handler,
2395 				   thread_fn, 0, devname, iommu);
2396 	if (ret) {
2397 		irq_domain_free_irqs(irq, 1);
2398 		irq_domain_remove(domain);
2399 		return ret;
2400 	}
2401 
2402 	return 0;
2403 }
2404 
2405 static int iommu_setup_intcapxt(struct amd_iommu *iommu)
2406 {
2407 	int ret;
2408 
2409 	snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
2410 		 "AMD-Vi%d-Evt", iommu->index);
2411 	ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
2412 				     MMIO_INTCAPXT_EVT_OFFSET,
2413 				     amd_iommu_int_thread_evtlog);
2414 	if (ret)
2415 		return ret;
2416 
2417 	snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
2418 		 "AMD-Vi%d-PPR", iommu->index);
2419 	ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
2420 				     MMIO_INTCAPXT_PPR_OFFSET,
2421 				     amd_iommu_int_thread_pprlog);
2422 	if (ret)
2423 		return ret;
2424 
2425 #ifdef CONFIG_IRQ_REMAP
2426 	snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
2427 		 "AMD-Vi%d-GA", iommu->index);
2428 	ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
2429 				     MMIO_INTCAPXT_GALOG_OFFSET,
2430 				     amd_iommu_int_thread_galog);
2431 #endif
2432 
2433 	return ret;
2434 }
2435 
2436 static int iommu_init_irq(struct amd_iommu *iommu)
2437 {
2438 	int ret;
2439 
2440 	if (iommu->int_enabled)
2441 		goto enable_faults;
2442 
2443 	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2444 		ret = iommu_setup_intcapxt(iommu);
2445 	else if (iommu->dev->msi_cap)
2446 		ret = iommu_setup_msi(iommu);
2447 	else
2448 		ret = -ENODEV;
2449 
2450 	if (ret)
2451 		return ret;
2452 
2453 	iommu->int_enabled = true;
2454 enable_faults:
2455 
2456 	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2457 		iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2458 
2459 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2460 
2461 	return 0;
2462 }
2463 
2464 /****************************************************************************
2465  *
2466  * The next functions belong to the third pass of parsing the ACPI
2467  * table. In this last pass the memory mapping requirements are
2468  * gathered (like exclusion and unity mapping ranges).
2469  *
2470  ****************************************************************************/
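
/*
 * As an illustration (assumed example values, not from a real table): an IVMD
 * entry of type ACPI_IVMD_TYPE_RANGE with devid 0x0010, aux 0x0018,
 * range_start 0xfe000000 and range_length 0x100000 becomes a unity_map_entry
 * covering devids 0x0010-0x0018 and the page-aligned address range, with the
 * protection bits taken from the IVMD flags (see init_unity_map_range()).
 */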
2471 
2472 static void __init free_unity_maps(void)
2473 {
2474 	struct unity_map_entry *entry, *next;
2475 	struct amd_iommu_pci_seg *p, *pci_seg;
2476 
2477 	for_each_pci_segment_safe(pci_seg, p) {
2478 		list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
2479 			list_del(&entry->list);
2480 			kfree(entry);
2481 		}
2482 	}
2483 }
2484 
2485 /* called for unity map ACPI definition */
2486 static int __init init_unity_map_range(struct ivmd_header *m,
2487 				       struct acpi_table_header *ivrs_base)
2488 {
2489 	struct unity_map_entry *e = NULL;
2490 	struct amd_iommu_pci_seg *pci_seg;
2491 	char *s;
2492 
2493 	pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
2494 	if (pci_seg == NULL)
2495 		return -ENOMEM;
2496 
2497 	e = kzalloc(sizeof(*e), GFP_KERNEL);
2498 	if (e == NULL)
2499 		return -ENOMEM;
2500 
2501 	switch (m->type) {
2502 	default:
2503 		kfree(e);
2504 		return 0;
2505 	case ACPI_IVMD_TYPE:
2506 		s = "IVMD_TYPEi\t\t\t";
2507 		e->devid_start = e->devid_end = m->devid;
2508 		break;
2509 	case ACPI_IVMD_TYPE_ALL:
2510 		s = "IVMD_TYPE_ALL\t\t";
2511 		e->devid_start = 0;
2512 		e->devid_end = pci_seg->last_bdf;
2513 		break;
2514 	case ACPI_IVMD_TYPE_RANGE:
2515 		s = "IVMD_TYPE_RANGE\t\t";
2516 		e->devid_start = m->devid;
2517 		e->devid_end = m->aux;
2518 		break;
2519 	}
2520 	e->address_start = PAGE_ALIGN(m->range_start);
2521 	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2522 	e->prot = m->flags >> 1;
2523 
2524 	/*
2525 	 * Treat per-device exclusion ranges as r/w unity-mapped regions
2526 	 * since some buggy BIOSes might otherwise overwrite the exclusion
2527 	 * range (the exclusion_start and exclusion_length members). This
2528 	 * happens when multiple exclusion ranges (IVMD entries) are
2529 	 * defined in the ACPI table.
2530 	 */
2531 	if (m->flags & IVMD_FLAG_EXCL_RANGE)
2532 		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;
2533 
2534 	DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: "
2535 		    "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx"
2536 		    " flags: %x\n", s, m->pci_seg,
2537 		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2538 		    PCI_FUNC(e->devid_start), m->pci_seg,
2539 		    PCI_BUS_NUM(e->devid_end),
2540 		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2541 		    e->address_start, e->address_end, m->flags);
2542 
2543 	list_add_tail(&e->list, &pci_seg->unity_map);
2544 
2545 	return 0;
2546 }
2547 
2548 /* iterates over all memory definitions we find in the ACPI table */
2549 static int __init init_memory_definitions(struct acpi_table_header *table)
2550 {
2551 	u8 *p = (u8 *)table, *end = (u8 *)table;
2552 	struct ivmd_header *m;
2553 
2554 	end += table->length;
2555 	p += IVRS_HEADER_LENGTH;
2556 
2557 	while (p < end) {
2558 		m = (struct ivmd_header *)p;
2559 		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2560 			init_unity_map_range(m, table);
2561 
2562 		p += m->length;
2563 	}
2564 
2565 	return 0;
2566 }
2567 
2568 /*
2569  * Init the device table to not allow DMA access for devices
2570  */
2571 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2572 {
2573 	u32 devid;
2574 	struct dev_table_entry *dev_table = pci_seg->dev_table;
2575 
2576 	if (dev_table == NULL)
2577 		return;
2578 
2579 	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2580 		set_dte_bit(&dev_table[devid], DEV_ENTRY_VALID);
2581 		if (!amd_iommu_snp_en)
2582 			set_dte_bit(&dev_table[devid], DEV_ENTRY_TRANSLATION);
2583 	}
2584 }
2585 
2586 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
2587 {
2588 	u32 devid;
2589 	struct dev_table_entry *dev_table = pci_seg->dev_table;
2590 
2591 	if (dev_table == NULL)
2592 		return;
2593 
2594 	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
2595 		dev_table[devid].data[0] = 0ULL;
2596 		dev_table[devid].data[1] = 0ULL;
2597 	}
2598 }
2599 
2600 static void init_device_table(void)
2601 {
2602 	struct amd_iommu_pci_seg *pci_seg;
2603 	u32 devid;
2604 
2605 	if (!amd_iommu_irq_remap)
2606 		return;
2607 
2608 	for_each_pci_segment(pci_seg) {
2609 		for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
2610 			set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN);
2611 	}
2612 }
2613 
2614 static void iommu_init_flags(struct amd_iommu *iommu)
2615 {
2616 	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2617 		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2618 		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2619 
2620 	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2621 		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2622 		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2623 
2624 	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2625 		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2626 		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2627 
2628 	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2629 		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2630 		iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2631 
2632 	/*
2633 	 * make IOMMU memory accesses cache coherent
2634 	 */
2635 	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2636 
2637 	/* Set IOTLB invalidation timeout to 1s */
2638 	iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT);
2639 
2640 	/* Enable Enhanced Peripheral Page Request Handling */
2641 	if (check_feature(FEATURE_EPHSUP))
2642 		iommu_feature_enable(iommu, CONTROL_EPH_EN);
2643 }
2644 
2645 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2646 {
2647 	int i, j;
2648 	u32 ioc_feature_control;
2649 	struct pci_dev *pdev = iommu->root_pdev;
2650 
2651 	/* RD890 BIOSes may not have completely reconfigured the iommu */
2652 	if (!is_rd890_iommu(iommu->dev) || !pdev)
2653 		return;
2654 
2655 	/*
2656 	 * First, we need to ensure that the iommu is enabled. This is
2657 	 * controlled by a register in the northbridge.
2658 	 */
2659 
2660 	/* Select Northbridge indirect register 0x75 and enable writing */
2661 	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2662 	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2663 
2664 	/* Enable the iommu */
2665 	if (!(ioc_feature_control & 0x1))
2666 		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2667 
2668 	/* Restore the iommu BAR */
2669 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2670 			       iommu->stored_addr_lo);
2671 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2672 			       iommu->stored_addr_hi);
2673 
2674 	/* Restore the l1 indirect regs for each of the 6 l1s */
2675 	for (i = 0; i < 6; i++)
2676 		for (j = 0; j < 0x12; j++)
2677 			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2678 
2679 	/* Restore the l2 indirect regs */
2680 	for (i = 0; i < 0x83; i++)
2681 		iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2682 
2683 	/* Lock PCI setup registers */
2684 	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2685 			       iommu->stored_addr_lo | 1);
2686 }
2687 
2688 static void iommu_enable_ga(struct amd_iommu *iommu)
2689 {
2690 #ifdef CONFIG_IRQ_REMAP
2691 	switch (amd_iommu_guest_ir) {
2692 	case AMD_IOMMU_GUEST_IR_VAPIC:
2693 	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2694 		iommu_feature_enable(iommu, CONTROL_GA_EN);
2695 		iommu->irte_ops = &irte_128_ops;
2696 		break;
2697 	default:
2698 		iommu->irte_ops = &irte_32_ops;
2699 		break;
2700 	}
2701 #endif
2702 }
2703 
2704 static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
2705 {
2706 	iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
2707 }
2708 
2709 static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
2710 {
2711 	u64 ctrl;
2712 
2713 	if (!amd_iommu_irtcachedis)
2714 		return;
2715 
2716 	/*
2717 	 * Note:
2718 	 * Support for the IRTCacheDis feature is determined by
2719 	 * checking whether the bit is writable.
2720 	 */
2721 	iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
2722 	ctrl = readq(iommu->mmio_base +  MMIO_CONTROL_OFFSET);
2723 	ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
2724 	if (ctrl)
2725 		iommu->irtcachedis_enabled = true;
2726 	pr_info("iommu%d (%#06x) : IRT cache is %s\n",
2727 		iommu->index, iommu->devid,
2728 		iommu->irtcachedis_enabled ? "disabled" : "enabled");
2729 }
2730 
2731 static void iommu_enable_2k_int(struct amd_iommu *iommu)
2732 {
2733 	if (!FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
2734 		return;
2735 
2736 	iommu_feature_set(iommu,
2737 			  CONTROL_NUM_INT_REMAP_MODE_2K,
2738 			  CONTROL_NUM_INT_REMAP_MODE_MASK,
2739 			  CONTROL_NUM_INT_REMAP_MODE);
2740 }
2741 
2742 static void early_enable_iommu(struct amd_iommu *iommu)
2743 {
2744 	iommu_disable(iommu);
2745 	iommu_init_flags(iommu);
2746 	iommu_set_device_table(iommu);
2747 	iommu_enable_command_buffer(iommu);
2748 	iommu_enable_event_buffer(iommu);
2749 	iommu_set_exclusion_range(iommu);
2750 	iommu_enable_gt(iommu);
2751 	iommu_enable_ga(iommu);
2752 	iommu_enable_xt(iommu);
2753 	iommu_enable_irtcachedis(iommu);
2754 	iommu_enable_2k_int(iommu);
2755 	iommu_enable(iommu);
2756 	amd_iommu_flush_all_caches(iommu);
2757 }
2758 
2759 /*
2760  * This function finally enables all IOMMUs found in the system after
2761  * they have been initialized.
2762  *
2763  * Or, if this is a kdump kernel and all IOMMUs are pre-enabled, try to copy
2764  * the old contents of the device table entries. If that is not the case, or
2765  * the copy fails, just continue as a normal kernel would.
2766  */
2767 static void early_enable_iommus(void)
2768 {
2769 	struct amd_iommu *iommu;
2770 	struct amd_iommu_pci_seg *pci_seg;
2771 
2772 	if (!copy_device_table()) {
2773 		/*
2774 		 * If we get here because copying the device table from the old
2775 		 * kernel failed while all IOMMUs were enabled, print an error
2776 		 * message and free the allocated old_dev_tbl_cpy.
2777 		 */
2778 		if (amd_iommu_pre_enabled)
2779 			pr_err("Failed to copy DEV table from previous kernel.\n");
2780 
2781 		for_each_pci_segment(pci_seg) {
2782 			if (pci_seg->old_dev_tbl_cpy != NULL) {
2783 				iommu_free_pages(pci_seg->old_dev_tbl_cpy);
2784 				pci_seg->old_dev_tbl_cpy = NULL;
2785 			}
2786 		}
2787 
2788 		for_each_iommu(iommu) {
2789 			clear_translation_pre_enabled(iommu);
2790 			early_enable_iommu(iommu);
2791 		}
2792 	} else {
2793 		pr_info("Copied DEV table from previous kernel.\n");
2794 
2795 		for_each_pci_segment(pci_seg) {
2796 			iommu_free_pages(pci_seg->dev_table);
2797 			pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
2798 		}
2799 
2800 		for_each_iommu(iommu) {
2801 			iommu_disable_command_buffer(iommu);
2802 			iommu_disable_event_buffer(iommu);
2803 			iommu_disable_irtcachedis(iommu);
2804 			iommu_enable_command_buffer(iommu);
2805 			iommu_enable_event_buffer(iommu);
2806 			iommu_enable_ga(iommu);
2807 			iommu_enable_xt(iommu);
2808 			iommu_enable_irtcachedis(iommu);
2809 			iommu_enable_2k_int(iommu);
2810 			iommu_set_device_table(iommu);
2811 			amd_iommu_flush_all_caches(iommu);
2812 		}
2813 	}
2814 }
2815 
2816 static void enable_iommus_ppr(void)
2817 {
2818 	struct amd_iommu *iommu;
2819 
2820 	if (!amd_iommu_gt_ppr_supported())
2821 		return;
2822 
2823 	for_each_iommu(iommu)
2824 		amd_iommu_enable_ppr_log(iommu);
2825 }
2826 
2827 static void enable_iommus_vapic(void)
2828 {
2829 #ifdef CONFIG_IRQ_REMAP
2830 	u32 status, i;
2831 	struct amd_iommu *iommu;
2832 
2833 	for_each_iommu(iommu) {
2834 		/*
2835 		 * Disable GALog if already running. It could have been enabled
2836 		 * in the previous boot before kdump.
2837 		 */
2838 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2839 		if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2840 			continue;
2841 
2842 		iommu_feature_disable(iommu, CONTROL_GALOG_EN);
2843 		iommu_feature_disable(iommu, CONTROL_GAINT_EN);
2844 
2845 		/*
2846 		 * Poll until the GALogRun bit reads zero before we can
2847 		 * safely set or modify the GA Log registers.
2848 		 */
2849 		for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
2850 			status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
2851 			if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
2852 				break;
2853 			udelay(10);
2854 		}
2855 
2856 		if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
2857 			return;
2858 	}
2859 
2860 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2861 	    !check_feature(FEATURE_GAM_VAPIC)) {
2862 		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2863 		return;
2864 	}
2865 
2866 	if (amd_iommu_snp_en &&
2867 	    !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
2868 		pr_warn("Force to disable Virtual APIC due to SNP\n");
2869 		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2870 		return;
2871 	}
2872 
2873 	/* Enabling GAM and SNPAVIC support */
2874 	for_each_iommu(iommu) {
2875 		if (iommu_init_ga_log(iommu) ||
2876 		    iommu_ga_log_enable(iommu))
2877 			return;
2878 
2879 		iommu_feature_enable(iommu, CONTROL_GAM_EN);
2880 		if (amd_iommu_snp_en)
2881 			iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
2882 	}
2883 
2884 	amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2885 	pr_info("Virtual APIC enabled\n");
2886 #endif
2887 }
2888 
2889 static void disable_iommus(void)
2890 {
2891 	struct amd_iommu *iommu;
2892 
2893 	for_each_iommu(iommu)
2894 		iommu_disable(iommu);
2895 
2896 #ifdef CONFIG_IRQ_REMAP
2897 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2898 		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2899 #endif
2900 }
2901 
2902 /*
2903  * Suspend/Resume support
2904  * disable suspend until real resume implemented
2905  */
2906 
2907 static void amd_iommu_resume(void)
2908 {
2909 	struct amd_iommu *iommu;
2910 
2911 	for_each_iommu(iommu)
2912 		iommu_apply_resume_quirks(iommu);
2913 
2914 	/* re-load the hardware */
2915 	for_each_iommu(iommu)
2916 		early_enable_iommu(iommu);
2917 
2918 	amd_iommu_enable_interrupts();
2919 }
2920 
2921 static int amd_iommu_suspend(void)
2922 {
2923 	/* disable IOMMUs to go out of the way for BIOS */
2924 	disable_iommus();
2925 
2926 	return 0;
2927 }
2928 
2929 static struct syscore_ops amd_iommu_syscore_ops = {
2930 	.suspend = amd_iommu_suspend,
2931 	.resume = amd_iommu_resume,
2932 };
2933 
2934 static void __init free_iommu_resources(void)
2935 {
2936 	free_iommu_all();
2937 	free_pci_segments();
2938 }
2939 
2940 /* SB IOAPIC is always on this device in AMD systems */
2941 #define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
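/*
 * Worked out: PCI_DEVFN(0x14, 0) is (0x14 << 3) | 0 = 0xa0, so with bus 0x00
 * the expected southbridge IOAPIC device id is 0x00a0, i.e. 00:14.0.
 */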
2942 
2943 static bool __init check_ioapic_information(void)
2944 {
2945 	const char *fw_bug = FW_BUG;
2946 	bool ret, has_sb_ioapic;
2947 	int idx;
2948 
2949 	has_sb_ioapic = false;
2950 	ret           = false;
2951 
2952 	/*
2953 	 * If we have map overrides on the kernel command line the
2954 	 * messages in this function might not describe firmware bugs
2955 	 * anymore - so be careful
2956 	 */
2957 	if (cmdline_maps)
2958 		fw_bug = "";
2959 
2960 	for (idx = 0; idx < nr_ioapics; idx++) {
2961 		int devid, id = mpc_ioapic_id(idx);
2962 
2963 		devid = get_ioapic_devid(id);
2964 		if (devid < 0) {
2965 			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2966 				fw_bug, id);
2967 			ret = false;
2968 		} else if (devid == IOAPIC_SB_DEVID) {
2969 			has_sb_ioapic = true;
2970 			ret           = true;
2971 		}
2972 	}
2973 
2974 	if (!has_sb_ioapic) {
2975 		/*
2976 		 * We expect the SB IOAPIC to be listed in the IVRS
2977 		 * table. The system timer is connected to the SB IOAPIC
2978 		 * and if we don't have it in the list the system will
2979 		 * panic at boot time.  This situation usually happens
2980 		 * when the BIOS is buggy and provides us the wrong
2981 		 * device id for the IOAPIC in the system.
2982 		 */
2983 		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
2984 	}
2985 
2986 	if (!ret)
2987 		pr_err("Disabling interrupt remapping\n");
2988 
2989 	return ret;
2990 }
2991 
2992 static void __init free_dma_resources(void)
2993 {
2994 	ida_destroy(&pdom_ids);
2995 
2996 	free_unity_maps();
2997 }
2998 
2999 static void __init ivinfo_init(void *ivrs)
3000 {
3001 	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
3002 }
3003 
3004 /*
3005  * This is the hardware init function for AMD IOMMU in the system.
3006  * This function is called either from amd_iommu_init or from the interrupt
3007  * remapping setup code.
3008  *
3009  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
3010  * four times:
3011  *
3012  *	1 pass) Discover the most comprehensive IVHD type to use.
3013  *
3014  *	2 pass) Find the highest PCI device id the driver has to handle.
3015  *		Upon this information the size of the data structures is
3016  *		determined that needs to be allocated.
3017  *
3018  *	3 pass) Initialize the data structures just allocated with the
3019  *		information in the ACPI table about available AMD IOMMUs
3020  *		in the system. It also maps the PCI devices in the
3021  *		system to specific IOMMUs
3022  *
3023  *	4 pass) After the basic data structures are allocated and
3024  *		initialized we update them with information about memory
3025  *		remapping requirements parsed out of the ACPI table in
3026  *		this last pass.
3027  *
3028  * After everything is set up the IOMMUs are enabled and the necessary
3029  * hotplug and suspend notifiers are registered.
3030  */
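
/*
 * In the code below (a hedged mapping, the pass numbering above is largely
 * historical): pass 1 corresponds to get_highest_supported_ivhd_type(),
 * pass 3 to init_iommu_all(), and pass 4 to init_memory_definitions();
 * the per-device-id sizing described as pass 2 is handled per PCI segment
 * (see the pci_seg->last_bdf uses elsewhere in this file).
 */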
3031 static int __init early_amd_iommu_init(void)
3032 {
3033 	struct acpi_table_header *ivrs_base;
3034 	int ret;
3035 	acpi_status status;
3036 
3037 	if (!amd_iommu_detected)
3038 		return -ENODEV;
3039 
3040 	status = acpi_get_table("IVRS", 0, &ivrs_base);
3041 	if (status == AE_NOT_FOUND)
3042 		return -ENODEV;
3043 	else if (ACPI_FAILURE(status)) {
3044 		const char *err = acpi_format_exception(status);
3045 		pr_err("IVRS table error: %s\n", err);
3046 		return -EINVAL;
3047 	}
3048 
3049 	if (!boot_cpu_has(X86_FEATURE_CX16)) {
3050 		pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
3051 		return -EINVAL;
3052 	}
3053 
3054 	/*
3055 	 * Validate checksum here so we don't need to do it when
3056 	 * we actually parse the table
3057 	 */
3058 	ret = check_ivrs_checksum(ivrs_base);
3059 	if (ret)
3060 		goto out;
3061 
3062 	ivinfo_init(ivrs_base);
3063 
3064 	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
3065 	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
3066 
3067 	/*
3068 	 * Now that the data structures are allocated and basically initialized,
3069 	 * start the real ACPI table scan.
3070 	 */
3071 	ret = init_iommu_all(ivrs_base);
3072 	if (ret)
3073 		goto out;
3074 
3075 	/* 5 level guest page table */
3076 	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
3077 	    FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
3078 		amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
3079 
3080 	if (amd_iommu_pgtable == PD_MODE_V2) {
3081 		if (!amd_iommu_v2_pgtbl_supported()) {
3082 			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
3083 			amd_iommu_pgtable = PD_MODE_V1;
3084 		}
3085 	}
3086 
3087 	/* Disable any previously enabled IOMMUs */
3088 	if (!is_kdump_kernel() || amd_iommu_disabled)
3089 		disable_iommus();
3090 
3091 	if (amd_iommu_irq_remap)
3092 		amd_iommu_irq_remap = check_ioapic_information();
3093 
3094 	if (amd_iommu_irq_remap) {
3095 		struct amd_iommu_pci_seg *pci_seg;
3096 		ret = -ENOMEM;
3097 		for_each_pci_segment(pci_seg) {
3098 			if (alloc_irq_lookup_table(pci_seg))
3099 				goto out;
3100 		}
3101 	}
3102 
3103 	ret = init_memory_definitions(ivrs_base);
3104 	if (ret)
3105 		goto out;
3106 
3107 	/* init the device table */
3108 	init_device_table();
3109 
3110 out:
3111 	/* Don't leak any ACPI memory */
3112 	acpi_put_table(ivrs_base);
3113 
3114 	return ret;
3115 }
3116 
3117 static int amd_iommu_enable_interrupts(void)
3118 {
3119 	struct amd_iommu *iommu;
3120 	int ret = 0;
3121 
3122 	for_each_iommu(iommu) {
3123 		ret = iommu_init_irq(iommu);
3124 		if (ret)
3125 			goto out;
3126 	}
3127 
3128 	/*
3129 	 * Interrupt handler is ready to process interrupts. Enable
3130 	 * PPR and GA log interrupt for all IOMMUs.
3131 	 */
3132 	enable_iommus_vapic();
3133 	enable_iommus_ppr();
3134 
3135 out:
3136 	return ret;
3137 }
3138 
3139 static bool __init detect_ivrs(void)
3140 {
3141 	struct acpi_table_header *ivrs_base;
3142 	acpi_status status;
3143 	int i;
3144 
3145 	status = acpi_get_table("IVRS", 0, &ivrs_base);
3146 	if (status == AE_NOT_FOUND)
3147 		return false;
3148 	else if (ACPI_FAILURE(status)) {
3149 		const char *err = acpi_format_exception(status);
3150 		pr_err("IVRS table error: %s\n", err);
3151 		return false;
3152 	}
3153 
3154 	acpi_put_table(ivrs_base);
3155 
3156 	if (amd_iommu_force_enable)
3157 		goto out;
3158 
3159 	/* Don't use IOMMU if there is Stoney Ridge graphics */
3160 	for (i = 0; i < 32; i++) {
3161 		u32 pci_id;
3162 
3163 		pci_id = read_pci_config(0, i, 0, 0);
3164 		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
3165 			pr_info("Disable IOMMU on Stoney Ridge\n");
3166 			return false;
3167 		}
3168 	}
3169 
3170 out:
3171 	/* Make sure ACS will be enabled during PCI probe */
3172 	pci_request_acs();
3173 
3174 	return true;
3175 }
3176 
3177 static __init void iommu_snp_enable(void)
3178 {
3179 #ifdef CONFIG_KVM_AMD_SEV
3180 	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
3181 		return;
3182 	/*
3183 	 * SNP support requires that the IOMMU be enabled and configured with
3184 	 * a V1 page table (DTE[Mode] = 0 is not supported).
3185 	 */
3186 	if (no_iommu || iommu_default_passthrough()) {
3187 		pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
3188 		goto disable_snp;
3189 	}
3190 
3191 	if (amd_iommu_pgtable != PD_MODE_V1) {
3192 		pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
3193 		goto disable_snp;
3194 	}
3195 
3196 	amd_iommu_snp_en = check_feature(FEATURE_SNP);
3197 	if (!amd_iommu_snp_en) {
3198 		pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
3199 		goto disable_snp;
3200 	}
3201 
3202 	/*
3203 	 * Enable host SNP support once SNP support is checked on IOMMU.
3204 	 */
3205 	if (snp_rmptable_init()) {
3206 		pr_warn("SNP: RMP initialization failed, SNP cannot be supported.\n");
3207 		goto disable_snp;
3208 	}
3209 
3210 	pr_info("IOMMU SNP support enabled.\n");
3211 	return;
3212 
3213 disable_snp:
3214 	cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3215 #endif
3216 }
3217 
3218 /****************************************************************************
3219  *
3220  * AMD IOMMU Initialization State Machine
3221  *
3222  ****************************************************************************/
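
/*
 * Rough state progression (as implemented by state_next() below; the error
 * states are terminal): IOMMU_START_STATE -> IOMMU_IVRS_DETECTED ->
 * IOMMU_ACPI_FINISHED -> IOMMU_ENABLED -> IOMMU_PCI_INIT ->
 * IOMMU_INTERRUPTS_EN -> IOMMU_INITIALIZED.
 */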
3223 
3224 static int __init state_next(void)
3225 {
3226 	int ret = 0;
3227 
3228 	switch (init_state) {
3229 	case IOMMU_START_STATE:
3230 		if (!detect_ivrs()) {
3231 			init_state	= IOMMU_NOT_FOUND;
3232 			ret		= -ENODEV;
3233 		} else {
3234 			init_state	= IOMMU_IVRS_DETECTED;
3235 		}
3236 		break;
3237 	case IOMMU_IVRS_DETECTED:
3238 		if (amd_iommu_disabled) {
3239 			init_state = IOMMU_CMDLINE_DISABLED;
3240 			ret = -EINVAL;
3241 		} else {
3242 			ret = early_amd_iommu_init();
3243 			init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
3244 		}
3245 		break;
3246 	case IOMMU_ACPI_FINISHED:
3247 		early_enable_iommus();
3248 		x86_platform.iommu_shutdown = disable_iommus;
3249 		init_state = IOMMU_ENABLED;
3250 		break;
3251 	case IOMMU_ENABLED:
3252 		register_syscore_ops(&amd_iommu_syscore_ops);
3253 		iommu_snp_enable();
3254 		ret = amd_iommu_init_pci();
3255 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
3256 		break;
3257 	case IOMMU_PCI_INIT:
3258 		ret = amd_iommu_enable_interrupts();
3259 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
3260 		break;
3261 	case IOMMU_INTERRUPTS_EN:
3262 		init_state = IOMMU_INITIALIZED;
3263 		break;
3264 	case IOMMU_INITIALIZED:
3265 		/* Nothing to do */
3266 		break;
3267 	case IOMMU_NOT_FOUND:
3268 	case IOMMU_INIT_ERROR:
3269 	case IOMMU_CMDLINE_DISABLED:
3270 		/* Error states => do nothing */
3271 		ret = -EINVAL;
3272 		break;
3273 	default:
3274 		/* Unknown state */
3275 		BUG();
3276 	}
3277 
3278 	if (ret) {
3279 		free_dma_resources();
3280 		if (!irq_remapping_enabled) {
3281 			disable_iommus();
3282 			free_iommu_resources();
3283 		} else {
3284 			struct amd_iommu *iommu;
3285 			struct amd_iommu_pci_seg *pci_seg;
3286 
3287 			for_each_pci_segment(pci_seg)
3288 				uninit_device_table_dma(pci_seg);
3289 
3290 			for_each_iommu(iommu)
3291 				amd_iommu_flush_all_caches(iommu);
3292 		}
3293 	}
3294 	return ret;
3295 }
3296 
3297 static int __init iommu_go_to_state(enum iommu_init_state state)
3298 {
3299 	int ret = -EINVAL;
3300 
3301 	while (init_state != state) {
3302 		if (init_state == IOMMU_NOT_FOUND         ||
3303 		    init_state == IOMMU_INIT_ERROR        ||
3304 		    init_state == IOMMU_CMDLINE_DISABLED)
3305 			break;
3306 		ret = state_next();
3307 	}
3308 
3309 	/*
3310 	 * SNP platform initialization requires IOMMUs to be fully configured.
3311 	 * If the SNP support on IOMMUs has NOT been checked, simply mark SNP
3312 	 * as unsupported. If the SNP support on IOMMUs has been checked and
3313 	 * host SNP support enabled but RMP enforcement has not been enabled
3314 	 * in IOMMUs, then the system is in a half-baked state, but can limp
3315 	 * along as all memory should be Hypervisor-Owned in the RMP. WARN,
3316 	 * but leave SNP as "supported" to avoid confusing the kernel.
3317 	 */
3318 	if (ret && cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
3319 	    !WARN_ON_ONCE(amd_iommu_snp_en))
3320 		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3321 
3322 	return ret;
3323 }
3324 
3325 #ifdef CONFIG_IRQ_REMAP
3326 int __init amd_iommu_prepare(void)
3327 {
3328 	int ret;
3329 
3330 	amd_iommu_irq_remap = true;
3331 
3332 	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
3333 	if (ret) {
3334 		amd_iommu_irq_remap = false;
3335 		return ret;
3336 	}
3337 
3338 	return amd_iommu_irq_remap ? 0 : -ENODEV;
3339 }
3340 
3341 int __init amd_iommu_enable(void)
3342 {
3343 	int ret;
3344 
3345 	ret = iommu_go_to_state(IOMMU_ENABLED);
3346 	if (ret)
3347 		return ret;
3348 
3349 	irq_remapping_enabled = 1;
3350 	return amd_iommu_xt_mode;
3351 }
3352 
3353 void amd_iommu_disable(void)
3354 {
3355 	amd_iommu_suspend();
3356 }
3357 
3358 int amd_iommu_reenable(int mode)
3359 {
3360 	amd_iommu_resume();
3361 
3362 	return 0;
3363 }
3364 
3365 int amd_iommu_enable_faulting(unsigned int cpu)
3366 {
3367 	/* We enable MSI later when PCI is initialized */
3368 	return 0;
3369 }
3370 #endif
3371 
3372 /*
3373  * This is the core init function for AMD IOMMU hardware in the system.
3374  * This function is called from the generic x86 DMA layer initialization
3375  * code.
3376  */
3377 static int __init amd_iommu_init(void)
3378 {
3379 	struct amd_iommu *iommu;
3380 	int ret;
3381 
3382 	ret = iommu_go_to_state(IOMMU_INITIALIZED);
3383 #ifdef CONFIG_GART_IOMMU
3384 	if (ret && list_empty(&amd_iommu_list)) {
3385 		/*
3386 		 * We failed to initialize the AMD IOMMU - try fallback
3387 		 * to GART if possible.
3388 		 */
3389 		gart_iommu_init();
3390 	}
3391 #endif
3392 
3393 	for_each_iommu(iommu)
3394 		amd_iommu_debugfs_setup(iommu);
3395 
3396 	return ret;
3397 }
3398 
3399 static bool amd_iommu_sme_check(void)
3400 {
3401 	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
3402 	    (boot_cpu_data.x86 != 0x17))
3403 		return true;
3404 
3405 	/* For Fam17h, a specific level of support is required */
3406 	if (boot_cpu_data.microcode >= 0x08001205)
3407 		return true;
3408 
3409 	if ((boot_cpu_data.microcode >= 0x08001126) &&
3410 	    (boot_cpu_data.microcode <= 0x080011ff))
3411 		return true;
3412 
3413 	pr_notice("IOMMU not currently supported when SME is active\n");
3414 
3415 	return false;
3416 }
3417 
3418 /****************************************************************************
3419  *
3420  * Early detect code. This code runs at IOMMU detection time in the DMA
3421  * layer. It just checks whether an IVRS ACPI table is present in order to
3422  * detect AMD IOMMUs
3423  *
3424  ****************************************************************************/
3425 void __init amd_iommu_detect(void)
3426 {
3427 	int ret;
3428 
3429 	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
3430 		goto disable_snp;
3431 
3432 	if (!amd_iommu_sme_check())
3433 		goto disable_snp;
3434 
3435 	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
3436 	if (ret)
3437 		goto disable_snp;
3438 
3439 	amd_iommu_detected = true;
3440 	iommu_detected = 1;
3441 	x86_init.iommu.iommu_init = amd_iommu_init;
3442 	return;
3443 
3444 disable_snp:
3445 	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
3446 		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
3447 }
3448 
3449 /****************************************************************************
3450  *
3451  * Parsing functions for the AMD IOMMU specific kernel command line
3452  * options.
3453  *
3454  ****************************************************************************/
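
/*
 * A few hedged examples of the option formats accepted below (illustrative
 * values matching the sscanf patterns, not taken from documentation):
 *
 *	amd_iommu=off,force_isolation
 *	amd_iommu_intr=vapic
 *	ivrs_ioapic=32@0000:00:14.0
 *	ivrs_hpet=0@0000:00:14.0
 *	ivrs_acpihid=AMDI0020:00@0000:00:13.1
 */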
3455 
3456 static int __init parse_amd_iommu_dump(char *str)
3457 {
3458 	amd_iommu_dump = true;
3459 
3460 	return 1;
3461 }
3462 
3463 static int __init parse_amd_iommu_intr(char *str)
3464 {
3465 	for (; *str; ++str) {
3466 		if (strncmp(str, "legacy", 6) == 0) {
3467 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
3468 			break;
3469 		}
3470 		if (strncmp(str, "vapic", 5) == 0) {
3471 			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
3472 			break;
3473 		}
3474 	}
3475 	return 1;
3476 }
3477 
3478 static int __init parse_amd_iommu_options(char *str)
3479 {
3480 	if (!str)
3481 		return -EINVAL;
3482 
3483 	while (*str) {
3484 		if (strncmp(str, "fullflush", 9) == 0) {
3485 			pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
3486 			iommu_set_dma_strict();
3487 		} else if (strncmp(str, "force_enable", 12) == 0) {
3488 			amd_iommu_force_enable = true;
3489 		} else if (strncmp(str, "off", 3) == 0) {
3490 			amd_iommu_disabled = true;
3491 		} else if (strncmp(str, "force_isolation", 15) == 0) {
3492 			amd_iommu_force_isolation = true;
3493 		} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
3494 			amd_iommu_pgtable = PD_MODE_V1;
3495 		} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
3496 			amd_iommu_pgtable = PD_MODE_V2;
3497 		} else if (strncmp(str, "irtcachedis", 11) == 0) {
3498 			amd_iommu_irtcachedis = true;
3499 		} else if (strncmp(str, "nohugepages", 11) == 0) {
3500 			pr_info("Restricting V1 page-sizes to 4KiB");
3501 			amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_4K;
3502 		} else if (strncmp(str, "v2_pgsizes_only", 15) == 0) {
3503 			pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB");
3504 			amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
3505 		} else {
3506 			pr_notice("Unknown option - '%s'\n", str);
3507 		}
3508 
3509 		str += strcspn(str, ",");
3510 		while (*str == ',')
3511 			str++;
3512 	}
3513 
3514 	return 1;
3515 }
3516 
3517 static int __init parse_ivrs_ioapic(char *str)
3518 {
3519 	u32 seg = 0, bus, dev, fn;
3520 	int id, i;
3521 	u32 devid;
3522 
3523 	if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3524 	    sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
3525 		goto found;
3526 
3527 	if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3528 	    sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
3529 		pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
3530 			str, id, seg, bus, dev, fn);
3531 		goto found;
3532 	}
3533 
3534 	pr_err("Invalid command line: ivrs_ioapic%s\n", str);
3535 	return 1;
3536 
3537 found:
3538 	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
3539 		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
3540 			str);
3541 		return 1;
3542 	}
3543 
3544 	devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3545 
3546 	cmdline_maps			= true;
3547 	i				= early_ioapic_map_size++;
3548 	early_ioapic_map[i].id		= id;
3549 	early_ioapic_map[i].devid	= devid;
3550 	early_ioapic_map[i].cmd_line	= true;
3551 
3552 	return 1;
3553 }
3554 
3555 static int __init parse_ivrs_hpet(char *str)
3556 {
3557 	u32 seg = 0, bus, dev, fn;
3558 	int id, i;
3559 	u32 devid;
3560 
3561 	if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3562 	    sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
3563 		goto found;
3564 
3565 	if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
3566 	    sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
3567 		pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
3568 			str, id, seg, bus, dev, fn);
3569 		goto found;
3570 	}
3571 
3572 	pr_err("Invalid command line: ivrs_hpet%s\n", str);
3573 	return 1;
3574 
3575 found:
3576 	if (early_hpet_map_size == EARLY_MAP_SIZE) {
3577 		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3578 			str);
3579 		return 1;
3580 	}
3581 
3582 	devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3583 
3584 	cmdline_maps			= true;
3585 	i				= early_hpet_map_size++;
3586 	early_hpet_map[i].id		= id;
3587 	early_hpet_map[i].devid		= devid;
3588 	early_hpet_map[i].cmd_line	= true;
3589 
3590 	return 1;
3591 }
3592 
3593 #define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
3594 
3595 static int __init parse_ivrs_acpihid(char *str)
3596 {
3597 	u32 seg = 0, bus, dev, fn;
3598 	char *hid, *uid, *p, *addr;
3599 	char acpiid[ACPIID_LEN] = {0};
3600 	int i;
3601 
3602 	addr = strchr(str, '@');
3603 	if (!addr) {
3604 		addr = strchr(str, '=');
3605 		if (!addr)
3606 			goto not_found;
3607 
3608 		++addr;
3609 
3610 		if (strlen(addr) > ACPIID_LEN)
3611 			goto not_found;
3612 
3613 		if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
3614 		    sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
3615 			pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
3616 				str, acpiid, seg, bus, dev, fn);
3617 			goto found;
3618 		}
3619 		goto not_found;
3620 	}
3621 
3622 	/* We have the '@', make it the terminator to get just the acpiid */
3623 	*addr++ = 0;
3624 
3625 	if (strlen(str) > ACPIID_LEN + 1)
3626 		goto not_found;
3627 
3628 	if (sscanf(str, "=%s", acpiid) != 1)
3629 		goto not_found;
3630 
3631 	if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
3632 	    sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
3633 		goto found;
3634 
3635 not_found:
3636 	pr_err("Invalid command line: ivrs_acpihid%s\n", str);
3637 	return 1;
3638 
3639 found:
3640 	p = acpiid;
3641 	hid = strsep(&p, ":");
3642 	uid = p;
3643 
3644 	if (!hid || !(*hid) || !uid) {
3645 		pr_err("Invalid command line: hid or uid\n");
3646 		return 1;
3647 	}
3648 
3649 	/*
3650 	 * Ignore leading zeroes after ':', so e.g., AMDI0095:00
3651 	 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
3652 	 */
3653 	while (*uid == '0' && *(uid + 1))
3654 		uid++;
3655 
3656 	if (strlen(hid) >= ACPIHID_HID_LEN) {
3657 		pr_err("Invalid command line: hid is too long\n");
3658 		return 1;
3659 	} else if (strlen(uid) >= ACPIHID_UID_LEN) {
3660 		pr_err("Invalid command line: uid is too long\n");
3661 		return 1;
3662 	}
3663 
3664 	i = early_acpihid_map_size++;
3665 	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3666 	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3667 	early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
3668 	early_acpihid_map[i].cmd_line	= true;
3669 
3670 	return 1;
3671 }
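/*
 * Illustrative usage matching the formats parsed above, reusing the HID from
 * the comment further up (the device address is an example value only):
 *   ivrs_acpihid=AMDI0095:00@0000:00:13.1   - current "<hid>:<uid>@<sbdf>" form
 *   ivrs_acpihid[00:13.1]=AMDI0095:00       - deprecated form, still accepted
 */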
3672 
3673 __setup("amd_iommu_dump",	parse_amd_iommu_dump);
3674 __setup("amd_iommu=",		parse_amd_iommu_options);
3675 __setup("amd_iommu_intr=",	parse_amd_iommu_intr);
3676 __setup("ivrs_ioapic",		parse_ivrs_ioapic);
3677 __setup("ivrs_hpet",		parse_ivrs_hpet);
3678 __setup("ivrs_acpihid",		parse_ivrs_acpihid);
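/*
 * Each of the parsers above returns 1 even on malformed input, so the option
 * is treated as handled rather than being passed on to init as an unknown
 * boot parameter.
 */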
3679 
3680 bool amd_iommu_pasid_supported(void)
3681 {
3682 	/* CPU page table size should match IOMMU guest page table size */
3683 	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
3684 	    amd_iommu_gpt_level != PAGE_MODE_5_LEVEL)
3685 		return false;
3686 
3687 	/*
3688 	 * Since DTE[Mode]=0 is prohibited on SNP-enabled systems
3689 	 * (i.e. EFR[SNPSup]=1), the IOMMUv2 page table cannot be used without
3690 	 * first setting up an IOMMUv1 page table.
3691 	 */
3692 	return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
3693 }
3694 
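/*
 * Return the idx-th registered IOMMU, or NULL when fewer than idx + 1 IOMMUs
 * are present.
 */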
3695 struct amd_iommu *get_amd_iommu(unsigned int idx)
3696 {
3697 	unsigned int i = 0;
3698 	struct amd_iommu *iommu;
3699 
3700 	for_each_iommu(iommu)
3701 		if (i++ == idx)
3702 			return iommu;
3703 	return NULL;
3704 }
3705 
3706 /****************************************************************************
3707  *
3708  * IOMMU EFR Performance Counter support. This code provides access to the
3709  * IOMMU PC registers.
3710  *
3711  ****************************************************************************/
3712 
3713 u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3714 {
3715 	struct amd_iommu *iommu = get_amd_iommu(idx);
3716 
3717 	if (iommu)
3718 		return iommu->max_banks;
3719 
3720 	return 0;
3721 }
3722 
3723 bool amd_iommu_pc_supported(void)
3724 {
3725 	return amd_iommu_pc_present;
3726 }
3727 
3728 u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3729 {
3730 	struct amd_iommu *iommu = get_amd_iommu(idx);
3731 
3732 	if (iommu)
3733 		return iommu->max_counters;
3734 
3735 	return 0;
3736 }
3737 
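/*
 * The performance-counter MMIO offset below is built as
 * ((0x40 | bank) << 12) | (cntr << 8) | fxn, so e.g. bank 0, counter 0,
 * function 0 selects offset 0x40000; counter values are masked to 48 bits
 * (GENMASK_ULL(47, 0)) on both read and write.
 */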
3738 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3739 				u8 fxn, u64 *value, bool is_write)
3740 {
3741 	u32 offset;
3742 	u32 max_offset_lim;
3743 
3744 	/* Make sure the IOMMU PC resource is available */
3745 	if (!amd_iommu_pc_present)
3746 		return -ENODEV;
3747 
3748 	/* Check for valid iommu and pc register indexing */
3749 	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3750 		return -ENODEV;
3751 
3752 	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3753 
3754 	/* Limit the offset to the hw defined mmio region aperture */
3755 	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3756 				(iommu->max_counters << 8) | 0x28);
3757 	if ((offset < MMIO_CNTR_REG_OFFSET) ||
3758 	    (offset > max_offset_lim))
3759 		return -EINVAL;
3760 
3761 	if (is_write) {
3762 		u64 val = *value & GENMASK_ULL(47, 0);
3763 
3764 		writel((u32)val, iommu->mmio_base + offset);
3765 		writel((val >> 32), iommu->mmio_base + offset + 4);
3766 	} else {
3767 		*value = readl(iommu->mmio_base + offset + 4);
3768 		*value <<= 32;
3769 		*value |= readl(iommu->mmio_base + offset);
3770 		*value &= GENMASK_ULL(47, 0);
3771 	}
3772 
3773 	return 0;
3774 }
3775 
3776 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3777 {
3778 	if (!iommu)
3779 		return -EINVAL;
3780 
3781 	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3782 }
3783 
3784 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3785 {
3786 	if (!iommu)
3787 		return -EINVAL;
3788 
3789 	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3790 }
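/*
 * A minimal sketch of reading one counter register through the helpers above
 * (the bank/counter/function indices are illustrative only):
 *
 *	u64 val;
 *	struct amd_iommu *iommu = get_amd_iommu(0);
 *
 *	if (iommu && !amd_iommu_pc_get_reg(iommu, 0, 0, 0, &val))
 *		pr_info("PC bank 0, cntr 0, fxn 0: 0x%llx\n", val);
 */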
3791 
3792 #ifdef CONFIG_KVM_AMD_SEV
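/*
 * Convert one 4K IOMMU page back to shared in the RMP table. For the first
 * page of a 2M-aligned range the RMP entry is looked up and, if it is a huge
 * entry, split with psmash() before the page is made shared.
 */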
3793 static int iommu_page_make_shared(void *page)
3794 {
3795 	unsigned long paddr, pfn;
3796 
3797 	paddr = iommu_virt_to_phys(page);
3798 	/* The C-bit may be set in the paddr */
3799 	pfn = __sme_clr(paddr) >> PAGE_SHIFT;
3800 
3801 	if (!(pfn % PTRS_PER_PMD)) {
3802 		int ret, level;
3803 		bool assigned;
3804 
3805 		ret = snp_lookup_rmpentry(pfn, &assigned, &level);
3806 		if (ret) {
3807 			pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
3808 			return ret;
3809 		}
3810 
3811 		if (!assigned) {
3812 			pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
3813 			return -EINVAL;
3814 		}
3815 
3816 		if (level > PG_LEVEL_4K) {
3817 			ret = psmash(pfn);
3818 			if (!ret)
3819 				goto done;
3820 
3821 			pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n",
3822 				pfn, ret, level);
3823 			return ret;
3824 		}
3825 	}
3826 
3827 done:
3828 	return rmp_make_shared(pfn, PG_LEVEL_4K);
3829 }
3830 
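/*
 * Walk a buffer page by page and convert every 4K page back to shared;
 * a NULL buffer is a no-op.
 */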
3831 static int iommu_make_shared(void *va, size_t size)
3832 {
3833 	void *page;
3834 	int ret;
3835 
3836 	if (!va)
3837 		return 0;
3838 
3839 	for (page = va; page < (va + size); page += PAGE_SIZE) {
3840 		ret = iommu_page_make_shared(page);
3841 		if (ret)
3842 			return ret;
3843 	}
3844 
3845 	return 0;
3846 }
3847 
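/*
 * Convert each IOMMU's event buffer, PPR log and completion-wait semaphore
 * page back to shared in the RMP table; this is a no-op when SNP is not
 * enabled.
 */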
3848 int amd_iommu_snp_disable(void)
3849 {
3850 	struct amd_iommu *iommu;
3851 	int ret;
3852 
3853 	if (!amd_iommu_snp_en)
3854 		return 0;
3855 
3856 	for_each_iommu(iommu) {
3857 		ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
3858 		if (ret)
3859 			return ret;
3860 
3861 		ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
3862 		if (ret)
3863 			return ret;
3864 
3865 		ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
3866 		if (ret)
3867 			return ret;
3868 	}
3869 
3870 	return 0;
3871 }
3872 EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
3873 #endif
3874