xref: /linux/drivers/iommu/amd/iommu.c (revision 63eb28bb1402891b1ad2be02a530f29a9dd7f1cd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <jroedel@suse.de>
5  *         Leo Duran <leo.duran@amd.com>
6  */
7 
8 #define pr_fmt(fmt)     "AMD-Vi: " fmt
9 #define dev_fmt(fmt)    pr_fmt(fmt)
10 
11 #include <linux/ratelimit.h>
12 #include <linux/pci.h>
13 #include <linux/acpi.h>
14 #include <linux/pci-ats.h>
15 #include <linux/bitmap.h>
16 #include <linux/slab.h>
17 #include <linux/debugfs.h>
18 #include <linux/scatterlist.h>
19 #include <linux/dma-map-ops.h>
20 #include <linux/dma-direct.h>
21 #include <linux/idr.h>
22 #include <linux/iommu-helper.h>
23 #include <linux/delay.h>
24 #include <linux/amd-iommu.h>
25 #include <linux/notifier.h>
26 #include <linux/export.h>
27 #include <linux/irq.h>
28 #include <linux/irqchip/irq-msi-lib.h>
29 #include <linux/msi.h>
30 #include <linux/irqdomain.h>
31 #include <linux/percpu.h>
32 #include <linux/io-pgtable.h>
33 #include <linux/cc_platform.h>
34 #include <asm/irq_remapping.h>
35 #include <asm/io_apic.h>
36 #include <asm/apic.h>
37 #include <asm/hw_irq.h>
38 #include <asm/proto.h>
39 #include <asm/iommu.h>
40 #include <asm/gart.h>
41 #include <asm/dma.h>
42 #include <uapi/linux/iommufd.h>
43 
44 #include "amd_iommu.h"
45 #include "../dma-iommu.h"
46 #include "../irq_remapping.h"
47 #include "../iommu-pages.h"
48 
49 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
50 
51 /* Reserved IOVA ranges */
52 #define MSI_RANGE_START		(0xfee00000)
53 #define MSI_RANGE_END		(0xfeefffff)
54 #define HT_RANGE_START		(0xfd00000000ULL)
55 #define HT_RANGE_END		(0xffffffffffULL)
56 
57 LIST_HEAD(ioapic_map);
58 LIST_HEAD(hpet_map);
59 LIST_HEAD(acpihid_map);
60 
61 const struct iommu_ops amd_iommu_ops;
62 static const struct iommu_dirty_ops amd_dirty_ops;
63 
64 int amd_iommu_max_glx_val = -1;
65 
66 /*
67  * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
68  * to know which ones are already in use.
69  */
70 DEFINE_IDA(pdom_ids);
71 
72 static int amd_iommu_attach_device(struct iommu_domain *dom,
73 				   struct device *dev);
74 
75 static void set_dte_entry(struct amd_iommu *iommu,
76 			  struct iommu_dev_data *dev_data);
77 
78 static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
79 
80 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
81 
82 /****************************************************************************
83  *
84  * Helper functions
85  *
86  ****************************************************************************/
87 
88 static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
89 {
90 	/*
91 	 * Note:
92 	 * We use arch_cmpxchg128_local() because:
93 	 * - Need cmpxchg16b instruction mainly for 128-bit store to DTE
94 	 *   (not necessary for cmpxchg since this function is already
95 	 *   protected by a spin_lock for this DTE).
96 	 * - Neither need LOCK_PREFIX nor try loop because of the spin_lock.
97 	 */
98 	arch_cmpxchg128_local(ptr, *ptr, val);
99 }
100 
101 static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
102 {
103 	struct dev_table_entry old;
104 
105 	old.data128[1] = ptr->data128[1];
106 	/*
107 	 * Preserve DTE_DATA2_INTR_MASK. This needs to be
108 	 * done here since it requires holding
109 	 * spin_lock(&dev_data->dte_lock).
110 	 */
111 	new->data[2] &= ~DTE_DATA2_INTR_MASK;
112 	new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;
113 
114 	amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
115 }
116 
117 static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
118 {
119 	amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
120 }
121 
122 /*
123  * Note:
124  * IOMMU reads the entire Device Table entry in a single 256-bit transaction
125  * but the driver programs the DTE using two 128-bit cmpxchg operations. So, the
126  * driver needs to ensure the following:
127  *   - The DTE[V|GV] bit is written last when setting the entry.
128  *   - The DTE[V|GV] bit is written first when clearing the entry.
129  *
130  * This function is used only by code that updates the DMA translation part of the
131  * DTE, so only control bits related to DMA are considered when updating the entry.
132  */
133 static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
134 			  struct dev_table_entry *new)
135 {
136 	unsigned long flags;
137 	struct dev_table_entry *dev_table = get_dev_table(iommu);
138 	struct dev_table_entry *ptr = &dev_table[dev_data->devid];
139 
140 	spin_lock_irqsave(&dev_data->dte_lock, flags);
141 
142 	if (!(ptr->data[0] & DTE_FLAG_V)) {
143 		/* Existing DTE is not valid. */
144 		write_dte_upper128(ptr, new);
145 		write_dte_lower128(ptr, new);
146 		iommu_flush_dte_sync(iommu, dev_data->devid);
147 	} else if (!(new->data[0] & DTE_FLAG_V)) {
148 		/* Existing DTE is valid. New DTE is not valid.  */
149 		write_dte_lower128(ptr, new);
150 		write_dte_upper128(ptr, new);
151 		iommu_flush_dte_sync(iommu, dev_data->devid);
152 	} else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
153 		/*
154 		 * Both DTEs are valid.
155 		 * Existing DTE has no guest page table.
156 		 */
157 		write_dte_upper128(ptr, new);
158 		write_dte_lower128(ptr, new);
159 		iommu_flush_dte_sync(iommu, dev_data->devid);
160 	} else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
161 		/*
162 		 * Both DTEs are valid.
163 		 * Existing DTE has guest page table,
164 		 * new DTE has no guest page table,
165 		 */
166 		write_dte_lower128(ptr, new);
167 		write_dte_upper128(ptr, new);
168 		iommu_flush_dte_sync(iommu, dev_data->devid);
169 	} else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
170 		   FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
171 		/*
172 		 * Both DTEs are valid and have guest page table,
173 		 * but have different number of levels. So, we need
174 		 * to update both the upper and lower 128-bit values, which
175 		 * requires disabling and flushing.
176 		 */
177 		struct dev_table_entry clear = {};
178 
179 		/* First disable DTE */
180 		write_dte_lower128(ptr, &clear);
181 		iommu_flush_dte_sync(iommu, dev_data->devid);
182 
183 		/* Then update DTE */
184 		write_dte_upper128(ptr, new);
185 		write_dte_lower128(ptr, new);
186 		iommu_flush_dte_sync(iommu, dev_data->devid);
187 	} else {
188 		/*
189 		 * Both DTEs are valid and have guest page table,
190 		 * and same number of levels. We just need to only
191 		 * update the lower 128-bit. So no need to disable DTE.
192 		 */
193 		write_dte_lower128(ptr, new);
194 	}
195 
196 	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
197 }
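
/*
 * Summary of the write ordering implemented above (derived directly from
 * the branches in update_dte256(); V = DTE_FLAG_V, GV = DTE_FLAG_GV):
 *
 *   old V  new V  old GV  new GV  GPT levels  128-bit writes issued
 *   -----  -----  ------  ------  ----------  -------------------------------
 *     0      -      -       -        -        upper, lower, flush
 *     1      0      -       -        -        lower, upper, flush
 *     1      1      0       -        -        upper, lower, flush
 *     1      1      1       0        -        lower, upper, flush
 *     1      1      1       1      differ     clear lower, flush, upper, lower, flush
 *     1      1      1       1      equal      lower only (no flush needed)
 */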
198 
199 static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
200 		      struct dev_table_entry *dte)
201 {
202 	unsigned long flags;
203 	struct dev_table_entry *ptr;
204 	struct dev_table_entry *dev_table = get_dev_table(iommu);
205 
206 	ptr = &dev_table[dev_data->devid];
207 
208 	spin_lock_irqsave(&dev_data->dte_lock, flags);
209 	dte->data128[0] = ptr->data128[0];
210 	dte->data128[1] = ptr->data128[1];
211 	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
212 }
213 
214 static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
215 {
216 	return (pdom && (pdom->pd_mode == PD_MODE_V2));
217 }
218 
219 static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
220 {
221 	return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
222 }
223 
224 /*
225  * We cannot support PASID w/ existing v1 page table in the same domain
226  * since it will be nested. However, existing domain w/ v2 page table
227  * or passthrough mode can be used for PASID.
228  */
229 static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
230 {
231 	return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
232 }
233 
234 static inline int get_acpihid_device_id(struct device *dev,
235 					struct acpihid_map_entry **entry)
236 {
237 	struct acpi_device *adev = ACPI_COMPANION(dev);
238 	struct acpihid_map_entry *p, *p1 = NULL;
239 	int hid_count = 0;
240 	bool fw_bug;
241 
242 	if (!adev)
243 		return -ENODEV;
244 
245 	list_for_each_entry(p, &acpihid_map, list) {
246 		if (acpi_dev_hid_uid_match(adev, p->hid,
247 					   p->uid[0] ? p->uid : NULL)) {
248 			p1 = p;
249 			fw_bug = false;
250 			hid_count = 1;
251 			break;
252 		}
253 
254 		/*
255 		 * Count HID matches w/o UID, raise FW_BUG but allow exactly one match
256 		 */
257 		if (acpi_dev_hid_match(adev, p->hid)) {
258 			p1 = p;
259 			hid_count++;
260 			fw_bug = true;
261 		}
262 	}
263 
264 	if (!p1)
265 		return -EINVAL;
266 	if (fw_bug)
267 		dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
268 			     hid_count, hid_count > 1 ? "s" : "");
269 	if (hid_count > 1)
270 		return -EINVAL;
271 	if (entry)
272 		*entry = p1;
273 
274 	return p1->devid;
275 }
276 
277 static inline int get_device_sbdf_id(struct device *dev)
278 {
279 	int sbdf;
280 
281 	if (dev_is_pci(dev))
282 		sbdf = get_pci_sbdf_id(to_pci_dev(dev));
283 	else
284 		sbdf = get_acpihid_device_id(dev, NULL);
285 
286 	return sbdf;
287 }
288 
289 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
290 {
291 	struct dev_table_entry *dev_table;
292 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
293 
294 	BUG_ON(pci_seg == NULL);
295 	dev_table = pci_seg->dev_table;
296 	BUG_ON(dev_table == NULL);
297 
298 	return dev_table;
299 }
300 
301 static inline u16 get_device_segment(struct device *dev)
302 {
303 	u16 seg;
304 
305 	if (dev_is_pci(dev)) {
306 		struct pci_dev *pdev = to_pci_dev(dev);
307 
308 		seg = pci_domain_nr(pdev->bus);
309 	} else {
310 		u32 devid = get_acpihid_device_id(dev, NULL);
311 
312 		seg = PCI_SBDF_TO_SEGID(devid);
313 	}
314 
315 	return seg;
316 }
317 
318 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
319 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
320 {
321 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
322 
323 	pci_seg->rlookup_table[devid] = iommu;
324 }
325 
326 static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
327 {
328 	struct amd_iommu_pci_seg *pci_seg;
329 
330 	for_each_pci_segment(pci_seg) {
331 		if (pci_seg->id == seg)
332 			return pci_seg->rlookup_table[devid];
333 	}
334 	return NULL;
335 }
336 
337 static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
338 {
339 	u16 seg = get_device_segment(dev);
340 	int devid = get_device_sbdf_id(dev);
341 
342 	if (devid < 0)
343 		return NULL;
344 	return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
345 }
346 
347 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
348 {
349 	struct iommu_dev_data *dev_data;
350 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
351 
352 	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
353 	if (!dev_data)
354 		return NULL;
355 
356 	mutex_init(&dev_data->mutex);
357 	spin_lock_init(&dev_data->dte_lock);
358 	dev_data->devid = devid;
359 	ratelimit_default_init(&dev_data->rs);
360 
361 	llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
362 	return dev_data;
363 }
364 
365 struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
366 {
367 	struct iommu_dev_data *dev_data;
368 	struct llist_node *node;
369 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
370 
371 	if (llist_empty(&pci_seg->dev_data_list))
372 		return NULL;
373 
374 	node = pci_seg->dev_data_list.first;
375 	llist_for_each_entry(dev_data, node, dev_data_list) {
376 		if (dev_data->devid == devid)
377 			return dev_data;
378 	}
379 
380 	return NULL;
381 }
382 
383 static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
384 {
385 	struct dev_table_entry new;
386 	struct amd_iommu *iommu;
387 	struct iommu_dev_data *dev_data, *alias_data;
388 	u16 devid = pci_dev_id(pdev);
389 	int ret = 0;
390 
391 	if (devid == alias)
392 		return 0;
393 
394 	iommu = rlookup_amd_iommu(&pdev->dev);
395 	if (!iommu)
396 		return 0;
397 
398 	/* Copy the data from pdev */
399 	dev_data = dev_iommu_priv_get(&pdev->dev);
400 	if (!dev_data) {
401 		pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
402 		ret = -EINVAL;
403 		goto out;
404 	}
405 	get_dte256(iommu, dev_data, &new);
406 
407 	/* Setup alias */
408 	alias_data = find_dev_data(iommu, alias);
409 	if (!alias_data) {
410 		pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
411 		ret = -EINVAL;
412 		goto out;
413 	}
414 	update_dte256(iommu, alias_data, &new);
415 
416 	amd_iommu_set_rlookup_table(iommu, alias);
417 out:
418 	return ret;
419 }
420 
421 static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
422 {
423 	struct pci_dev *pdev;
424 
425 	if (!dev_is_pci(dev))
426 		return;
427 	pdev = to_pci_dev(dev);
428 
429 	/*
430 	 * The IVRS alias stored in the alias table may not be
431 	 * part of the PCI DMA aliases if its bus differs
432 	 * from the original device's bus.
433 	 */
434 	clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);
435 
436 	pci_for_each_dma_alias(pdev, clone_alias, NULL);
437 }
438 
439 static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
440 {
441 	struct pci_dev *pdev = to_pci_dev(dev);
442 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
443 	u16 ivrs_alias;
444 
445 	/* For ACPI HID devices, there are no aliases */
446 	if (!dev_is_pci(dev))
447 		return;
448 
449 	/*
450 	 * Add the IVRS alias to the pci aliases if it is on the same
451 	 * bus. The IVRS table may know about a quirk that we don't.
452 	 */
453 	ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
454 	if (ivrs_alias != pci_dev_id(pdev) &&
455 	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
456 		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
457 
458 	clone_aliases(iommu, dev);
459 }
460 
461 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
462 {
463 	struct iommu_dev_data *dev_data;
464 
465 	dev_data = search_dev_data(iommu, devid);
466 
467 	if (dev_data == NULL) {
468 		dev_data = alloc_dev_data(iommu, devid);
469 		if (!dev_data)
470 			return NULL;
471 
472 		if (translation_pre_enabled(iommu))
473 			dev_data->defer_attach = true;
474 	}
475 
476 	return dev_data;
477 }
478 
479 /*
480 * Find or create an IOMMU group for an acpihid device.
481 */
482 static struct iommu_group *acpihid_device_group(struct device *dev)
483 {
484 	struct acpihid_map_entry *p, *entry = NULL;
485 	int devid;
486 
487 	devid = get_acpihid_device_id(dev, &entry);
488 	if (devid < 0)
489 		return ERR_PTR(devid);
490 
491 	list_for_each_entry(p, &acpihid_map, list) {
492 		if ((devid == p->devid) && p->group)
493 			entry->group = p->group;
494 	}
495 
496 	if (!entry->group)
497 		entry->group = generic_device_group(dev);
498 	else
499 		iommu_group_ref_get(entry->group);
500 
501 	return entry->group;
502 }
503 
504 static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
505 {
506 	return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
507 }
508 
509 static u32 pdev_get_caps(struct pci_dev *pdev)
510 {
511 	int features;
512 	u32 flags = 0;
513 
514 	if (pci_ats_supported(pdev))
515 		flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
516 
517 	if (pci_pri_supported(pdev))
518 		flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
519 
520 	features = pci_pasid_features(pdev);
521 	if (features >= 0) {
522 		flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
523 
524 		if (features & PCI_PASID_CAP_EXEC)
525 			flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
526 
527 		if (features & PCI_PASID_CAP_PRIV)
528 			flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
529 	}
530 
531 	return flags;
532 }
533 
534 static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
535 {
536 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
537 	int ret = -EINVAL;
538 
539 	if (dev_data->ats_enabled)
540 		return 0;
541 
542 	if (amd_iommu_iotlb_sup &&
543 	    (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
544 		ret = pci_enable_ats(pdev, PAGE_SHIFT);
545 		if (!ret) {
546 			dev_data->ats_enabled = 1;
547 			dev_data->ats_qdep    = pci_ats_queue_depth(pdev);
548 		}
549 	}
550 
551 	return ret;
552 }
553 
554 static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
555 {
556 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
557 
558 	if (dev_data->ats_enabled) {
559 		pci_disable_ats(pdev);
560 		dev_data->ats_enabled = 0;
561 	}
562 }
563 
564 static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
565 {
566 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
567 	int ret = -EINVAL;
568 
569 	if (dev_data->pri_enabled)
570 		return 0;
571 
572 	if (!dev_data->ats_enabled)
573 		return 0;
574 
575 	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
576 		/*
577 		 * First reset the PRI state of the device.
578 		 * FIXME: Hardcode number of outstanding requests for now
579 		 */
580 		if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
581 			dev_data->pri_enabled = 1;
582 			dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
583 
584 			ret = 0;
585 		}
586 	}
587 
588 	return ret;
589 }
590 
591 static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
592 {
593 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
594 
595 	if (dev_data->pri_enabled) {
596 		pci_disable_pri(pdev);
597 		dev_data->pri_enabled = 0;
598 	}
599 }
600 
601 static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
602 {
603 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
604 	int ret = -EINVAL;
605 
606 	if (dev_data->pasid_enabled)
607 		return 0;
608 
609 	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
610 		/* Only allow access to user-accessible pages */
611 		ret = pci_enable_pasid(pdev, 0);
612 		if (!ret)
613 			dev_data->pasid_enabled = 1;
614 	}
615 
616 	return ret;
617 }
618 
619 static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
620 {
621 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
622 
623 	if (dev_data->pasid_enabled) {
624 		pci_disable_pasid(pdev);
625 		dev_data->pasid_enabled = 0;
626 	}
627 }
628 
629 static void pdev_enable_caps(struct pci_dev *pdev)
630 {
631 	pdev_enable_cap_pasid(pdev);
632 	pdev_enable_cap_ats(pdev);
633 	pdev_enable_cap_pri(pdev);
634 }
635 
636 static void pdev_disable_caps(struct pci_dev *pdev)
637 {
638 	pdev_disable_cap_ats(pdev);
639 	pdev_disable_cap_pasid(pdev);
640 	pdev_disable_cap_pri(pdev);
641 }
642 
643 /*
644  * This function checks if the driver got a valid device from the caller to
645  * avoid dereferencing invalid pointers.
646  */
647 static bool check_device(struct device *dev)
648 {
649 	struct amd_iommu_pci_seg *pci_seg;
650 	struct amd_iommu *iommu;
651 	int devid, sbdf;
652 
653 	if (!dev)
654 		return false;
655 
656 	sbdf = get_device_sbdf_id(dev);
657 	if (sbdf < 0)
658 		return false;
659 	devid = PCI_SBDF_TO_DEVID(sbdf);
660 
661 	iommu = rlookup_amd_iommu(dev);
662 	if (!iommu)
663 		return false;
664 
665 	/* Out of our scope? */
666 	pci_seg = iommu->pci_seg;
667 	if (devid > pci_seg->last_bdf)
668 		return false;
669 
670 	return true;
671 }
672 
673 static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
674 {
675 	struct iommu_dev_data *dev_data;
676 	int devid, sbdf;
677 
678 	if (dev_iommu_priv_get(dev))
679 		return 0;
680 
681 	sbdf = get_device_sbdf_id(dev);
682 	if (sbdf < 0)
683 		return sbdf;
684 
685 	devid = PCI_SBDF_TO_DEVID(sbdf);
686 	dev_data = find_dev_data(iommu, devid);
687 	if (!dev_data)
688 		return -ENOMEM;
689 
690 	dev_data->dev = dev;
691 
692 	/*
693 	 * The dev_iommu_priv_set() needs to be called before setup_aliases.
694 	 * Otherwise, subsequent call to dev_iommu_priv_get() will fail.
695 	 */
696 	dev_iommu_priv_set(dev, dev_data);
697 	setup_aliases(iommu, dev);
698 
699 	/*
700 	 * By default we use passthrough mode for IOMMUv2 capable devices.
701 	 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
702 	 * invalid address), we ignore the capability for the device so
703 	 * it'll be forced to go into translation mode.
704 	 */
705 	if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
706 	    dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
707 		dev_data->flags = pdev_get_caps(to_pci_dev(dev));
708 	}
709 
710 	return 0;
711 }
712 
713 static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
714 {
715 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
716 	struct dev_table_entry *dev_table = get_dev_table(iommu);
717 	int devid, sbdf;
718 
719 	sbdf = get_device_sbdf_id(dev);
720 	if (sbdf < 0)
721 		return;
722 
723 	devid = PCI_SBDF_TO_DEVID(sbdf);
724 	pci_seg->rlookup_table[devid] = NULL;
725 	memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
726 
727 	setup_aliases(iommu, dev);
728 }
729 
730 
731 /****************************************************************************
732  *
733  * Interrupt handling functions
734  *
735  ****************************************************************************/
736 
737 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
738 {
739 	int i;
740 	struct dev_table_entry dte;
741 	struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);
742 
743 	get_dte256(iommu, dev_data, &dte);
744 
745 	for (i = 0; i < 4; ++i)
746 		pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
747 }
748 
749 static void dump_command(unsigned long phys_addr)
750 {
751 	struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
752 	int i;
753 
754 	for (i = 0; i < 4; ++i)
755 		pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
756 }
757 
758 static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
759 {
760 	struct iommu_dev_data *dev_data = NULL;
761 	int devid, vmg_tag, flags;
762 	struct pci_dev *pdev;
763 	u64 spa;
764 
765 	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
766 	vmg_tag = (event[1]) & 0xFFFF;
767 	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
768 	spa     = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
769 
770 	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
771 					   devid & 0xff);
772 	if (pdev)
773 		dev_data = dev_iommu_priv_get(&pdev->dev);
774 
775 	if (dev_data) {
776 		if (__ratelimit(&dev_data->rs)) {
777 			pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
778 				vmg_tag, spa, flags);
779 		}
780 	} else {
781 		pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
782 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
783 			vmg_tag, spa, flags);
784 	}
785 
786 	if (pdev)
787 		pci_dev_put(pdev);
788 }
789 
790 static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
791 {
792 	struct iommu_dev_data *dev_data = NULL;
793 	int devid, flags_rmp, vmg_tag, flags;
794 	struct pci_dev *pdev;
795 	u64 gpa;
796 
797 	devid     = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
798 	flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
799 	vmg_tag   = (event[1]) & 0xFFFF;
800 	flags     = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
801 	gpa       = ((u64)event[3] << 32) | event[2];
802 
803 	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
804 					   devid & 0xff);
805 	if (pdev)
806 		dev_data = dev_iommu_priv_get(&pdev->dev);
807 
808 	if (dev_data) {
809 		if (__ratelimit(&dev_data->rs)) {
810 			pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
811 				vmg_tag, gpa, flags_rmp, flags);
812 		}
813 	} else {
814 		pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
815 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
816 			vmg_tag, gpa, flags_rmp, flags);
817 	}
818 
819 	if (pdev)
820 		pci_dev_put(pdev);
821 }
822 
823 #define IS_IOMMU_MEM_TRANSACTION(flags)		\
824 	(((flags) & EVENT_FLAG_I) == 0)
825 
826 #define IS_WRITE_REQUEST(flags)			\
827 	((flags) & EVENT_FLAG_RW)
828 
829 static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
830 					u16 devid, u16 domain_id,
831 					u64 address, int flags)
832 {
833 	struct iommu_dev_data *dev_data = NULL;
834 	struct pci_dev *pdev;
835 
836 	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
837 					   devid & 0xff);
838 	if (pdev)
839 		dev_data = dev_iommu_priv_get(&pdev->dev);
840 
841 	if (dev_data) {
842 		/*
843 		 * If this is a DMA fault (for which the I(nterrupt)
844 		 * bit will be unset), allow report_iommu_fault() to
845 		 * prevent logging it.
846 		 */
847 		if (IS_IOMMU_MEM_TRANSACTION(flags)) {
848 			/* Device not attached to domain properly */
849 			if (dev_data->domain == NULL) {
850 				pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
851 				pr_err_ratelimited("  device=%04x:%02x:%02x.%x domain=0x%04x\n",
852 						   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
853 						   PCI_FUNC(devid), domain_id);
854 				goto out;
855 			}
856 
857 			if (!report_iommu_fault(&dev_data->domain->domain,
858 						&pdev->dev, address,
859 						IS_WRITE_REQUEST(flags) ?
860 							IOMMU_FAULT_WRITE :
861 							IOMMU_FAULT_READ))
862 				goto out;
863 		}
864 
865 		if (__ratelimit(&dev_data->rs)) {
866 			pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
867 				domain_id, address, flags);
868 		}
869 	} else {
870 		pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
871 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
872 			domain_id, address, flags);
873 	}
874 
875 out:
876 	if (pdev)
877 		pci_dev_put(pdev);
878 }
879 
880 static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
881 {
882 	struct device *dev = iommu->iommu.dev;
883 	int type, devid, flags, tag;
884 	volatile u32 *event = __evt;
885 	int count = 0;
886 	u64 address, ctrl;
887 	u32 pasid;
888 
889 retry:
890 	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
891 	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
892 	pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
893 		  (event[1] & EVENT_DOMID_MASK_LO);
894 	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
895 	address = (u64)(((u64)event[3]) << 32) | event[2];
896 	ctrl    = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
897 
898 	if (type == 0) {
899 		/* Did we hit the erratum? */
900 		if (++count == LOOP_TIMEOUT) {
901 			pr_err("No event written to event log\n");
902 			return;
903 		}
904 		udelay(1);
905 		goto retry;
906 	}
907 
908 	if (type == EVENT_TYPE_IO_FAULT) {
909 		amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
910 		return;
911 	}
912 
913 	switch (type) {
914 	case EVENT_TYPE_ILL_DEV:
915 		dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
916 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
917 			pasid, address, flags);
918 		dev_err(dev, "Control Reg : 0x%llx\n", ctrl);
919 		dump_dte_entry(iommu, devid);
920 		break;
921 	case EVENT_TYPE_DEV_TAB_ERR:
922 		dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
923 			"address=0x%llx flags=0x%04x]\n",
924 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
925 			address, flags);
926 		break;
927 	case EVENT_TYPE_PAGE_TAB_ERR:
928 		dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
929 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
930 			pasid, address, flags);
931 		break;
932 	case EVENT_TYPE_ILL_CMD:
933 		dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
934 		dump_command(address);
935 		break;
936 	case EVENT_TYPE_CMD_HARD_ERR:
937 		dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
938 			address, flags);
939 		break;
940 	case EVENT_TYPE_IOTLB_INV_TO:
941 		dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
942 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
943 			address);
944 		break;
945 	case EVENT_TYPE_INV_DEV_REQ:
946 		dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
947 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
948 			pasid, address, flags);
949 		break;
950 	case EVENT_TYPE_RMP_FAULT:
951 		amd_iommu_report_rmp_fault(iommu, event);
952 		break;
953 	case EVENT_TYPE_RMP_HW_ERR:
954 		amd_iommu_report_rmp_hw_error(iommu, event);
955 		break;
956 	case EVENT_TYPE_INV_PPR_REQ:
957 		pasid = PPR_PASID(*((u64 *)__evt));
958 		tag = event[1] & 0x03FF;
959 		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
960 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
961 			pasid, address, flags, tag);
962 		break;
963 	default:
964 		dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
965 			event[0], event[1], event[2], event[3]);
966 	}
967 
968 	/*
969 	 * To detect hardware erratum 732 we need to clear the
970 	 * entry back to zero. This issue does not exist on SNP
971 	 * enabled systems. Also, this buffer is not writable on
972 	 * SNP enabled systems.
973 	 */
974 	if (!amd_iommu_snp_en)
975 		memset(__evt, 0, 4 * sizeof(u32));
976 }
977 
978 static void iommu_poll_events(struct amd_iommu *iommu)
979 {
980 	u32 head, tail;
981 
982 	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
983 	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
984 
985 	while (head != tail) {
986 		iommu_print_event(iommu, iommu->evt_buf + head);
987 
988 		/* Update head pointer of hardware ring-buffer */
989 		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
990 		writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
991 	}
992 
993 }
994 
995 #ifdef CONFIG_IRQ_REMAP
996 static int (*iommu_ga_log_notifier)(u32);
997 
998 int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
999 {
1000 	iommu_ga_log_notifier = notifier;
1001 
1002 	/*
1003 	 * Ensure all in-flight IRQ handlers run to completion before returning
1004 	 * to the caller, e.g. to ensure module code isn't unloaded while it's
1005 	 * being executed in the IRQ handler.
1006 	 */
1007 	if (!notifier)
1008 		synchronize_rcu();
1009 
1010 	return 0;
1011 }
1012 EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
1013 
1014 static void iommu_poll_ga_log(struct amd_iommu *iommu)
1015 {
1016 	u32 head, tail;
1017 
1018 	if (iommu->ga_log == NULL)
1019 		return;
1020 
1021 	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
1022 	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
1023 
1024 	while (head != tail) {
1025 		volatile u64 *raw;
1026 		u64 log_entry;
1027 
1028 		raw = (u64 *)(iommu->ga_log + head);
1029 
1030 		/* Avoid memcpy function-call overhead */
1031 		log_entry = *raw;
1032 
1033 		/* Update head pointer of hardware ring-buffer */
1034 		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
1035 		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
1036 
1037 		/* Handle GA entry */
1038 		switch (GA_REQ_TYPE(log_entry)) {
1039 		case GA_GUEST_NR:
1040 			if (!iommu_ga_log_notifier)
1041 				break;
1042 
1043 			pr_debug("%s: devid=%#x, ga_tag=%#x\n",
1044 				 __func__, GA_DEVID(log_entry),
1045 				 GA_TAG(log_entry));
1046 
1047 			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
1048 				pr_err("GA log notifier failed.\n");
1049 			break;
1050 		default:
1051 			break;
1052 		}
1053 	}
1054 }
1055 
1056 static void
1057 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
1058 {
1059 	if (!irq_remapping_enabled || !dev_is_pci(dev) ||
1060 	    !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev)))
1061 		return;
1062 
1063 	dev_set_msi_domain(dev, iommu->ir_domain);
1064 }
1065 
1066 #else /* CONFIG_IRQ_REMAP */
1067 static inline void
1068 amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
1069 #endif /* !CONFIG_IRQ_REMAP */
1070 
1071 static void amd_iommu_handle_irq(void *data, const char *evt_type,
1072 				 u32 int_mask, u32 overflow_mask,
1073 				 void (*int_handler)(struct amd_iommu *),
1074 				 void (*overflow_handler)(struct amd_iommu *))
1075 {
1076 	struct amd_iommu *iommu = (struct amd_iommu *) data;
1077 	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
1078 	u32 mask = int_mask | overflow_mask;
1079 
1080 	while (status & mask) {
1081 		/* Enable interrupt sources again */
1082 		writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
1083 
1084 		if (int_handler) {
1085 			pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
1086 				 iommu->index, evt_type);
1087 			int_handler(iommu);
1088 		}
1089 
1090 		if ((status & overflow_mask) && overflow_handler)
1091 			overflow_handler(iommu);
1092 
1093 		/*
1094 		 * Hardware bug: ERBT1312
1095 		 * When re-enabling interrupt (by writing 1
1096 		 * to clear the bit), the hardware might also try to set
1097 		 * the interrupt bit in the event status register.
1098 		 * In this scenario, the bit will remain set and disable
1099 		 * subsequent interrupts.
1100 		 *
1101 		 * Workaround: The IOMMU driver should read back the
1102 		 * status register and check if the interrupt bits are cleared.
1103 		 * If not, the driver needs to go through the interrupt handler
1104 		 * again and re-clear the bits.
1105 		 */
1106 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
1107 	}
1108 }
1109 
1110 irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
1111 {
1112 	amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
1113 			     MMIO_STATUS_EVT_OVERFLOW_MASK,
1114 			     iommu_poll_events, amd_iommu_restart_event_logging);
1115 
1116 	return IRQ_HANDLED;
1117 }
1118 
1119 irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
1120 {
1121 	amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
1122 			     MMIO_STATUS_PPR_OVERFLOW_MASK,
1123 			     amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
1124 
1125 	return IRQ_HANDLED;
1126 }
1127 
1128 irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
1129 {
1130 #ifdef CONFIG_IRQ_REMAP
1131 	amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
1132 			     MMIO_STATUS_GALOG_OVERFLOW_MASK,
1133 			     iommu_poll_ga_log, amd_iommu_restart_ga_log);
1134 #endif
1135 
1136 	return IRQ_HANDLED;
1137 }
1138 
1139 irqreturn_t amd_iommu_int_thread(int irq, void *data)
1140 {
1141 	amd_iommu_int_thread_evtlog(irq, data);
1142 	amd_iommu_int_thread_pprlog(irq, data);
1143 	amd_iommu_int_thread_galog(irq, data);
1144 
1145 	return IRQ_HANDLED;
1146 }
1147 
1148 irqreturn_t amd_iommu_int_handler(int irq, void *data)
1149 {
1150 	return IRQ_WAKE_THREAD;
1151 }
1152 
1153 /****************************************************************************
1154  *
1155  * IOMMU command queuing functions
1156  *
1157  ****************************************************************************/
1158 
1159 static int wait_on_sem(struct amd_iommu *iommu, u64 data)
1160 {
1161 	int i = 0;
1162 
1163 	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
1164 		udelay(1);
1165 		i += 1;
1166 	}
1167 
1168 	if (i == LOOP_TIMEOUT) {
1169 		pr_alert("Completion-Wait loop timed out\n");
1170 		return -EIO;
1171 	}
1172 
1173 	return 0;
1174 }
1175 
1176 static void copy_cmd_to_buffer(struct amd_iommu *iommu,
1177 			       struct iommu_cmd *cmd)
1178 {
1179 	u8 *target;
1180 	u32 tail;
1181 
1182 	/* Copy command to buffer */
1183 	tail = iommu->cmd_buf_tail;
1184 	target = iommu->cmd_buf + tail;
1185 	memcpy(target, cmd, sizeof(*cmd));
1186 
1187 	tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1188 	iommu->cmd_buf_tail = tail;
1189 
1190 	/* Tell the IOMMU about it */
1191 	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1192 }
1193 
1194 static void build_completion_wait(struct iommu_cmd *cmd,
1195 				  struct amd_iommu *iommu,
1196 				  u64 data)
1197 {
1198 	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
1199 
1200 	memset(cmd, 0, sizeof(*cmd));
1201 	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
1202 	cmd->data[1] = upper_32_bits(paddr);
1203 	cmd->data[2] = lower_32_bits(data);
1204 	cmd->data[3] = upper_32_bits(data);
1205 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
1206 }
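
/*
 * Note on the completion-wait handshake built above: build_completion_wait()
 * encodes the physical address of iommu->cmd_sem in data[0]/data[1] together
 * with CMD_COMPL_WAIT_STORE_MASK, and a caller-chosen 64-bit value in
 * data[2]/data[3]. Once the IOMMU has processed all prior commands it stores
 * that value to *cmd_sem, and wait_on_sem() polls for it.
 * iommu_completion_wait() derives the value from the monotonically
 * increasing iommu->cmd_sem_val, so each wait matches exactly one
 * COMPLETION_WAIT command.
 */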
1207 
1208 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
1209 {
1210 	memset(cmd, 0, sizeof(*cmd));
1211 	cmd->data[0] = devid;
1212 	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
1213 }
1214 
1215 /*
1216  * Builds an invalidation address which is suitable for one page or multiple
1217  * pages. Sets the size bit (S) as needed if more than one page is flushed.
1218  */
1219 static inline u64 build_inv_address(u64 address, size_t size)
1220 {
1221 	u64 pages, end, msb_diff;
1222 
1223 	pages = iommu_num_pages(address, size, PAGE_SIZE);
1224 
1225 	if (pages == 1)
1226 		return address & PAGE_MASK;
1227 
1228 	end = address + size - 1;
1229 
1230 	/*
1231 	 * msb_diff holds the index of the most significant bit that
1232 	 * differs between the start and end addresses.
1233 	 */
1234 	msb_diff = fls64(end ^ address) - 1;
1235 
1236 	/*
1237 	 * Bits 63:52 are sign extended. If for some reason bit 51 is different
1238 	 * between the start and the end, invalidate everything.
1239 	 */
1240 	if (unlikely(msb_diff > 51)) {
1241 		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
1242 	} else {
1243 		/*
1244 		 * The msb-bit must be clear on the address. Just set all the
1245 		 * lower bits.
1246 		 */
1247 		address |= (1ull << msb_diff) - 1;
1248 	}
1249 
1250 	/* Clear bits 11:0 */
1251 	address &= PAGE_MASK;
1252 
1253 	/* Set the size bit - we flush more than one 4kb page */
1254 	return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
1255 }
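
/*
 * Worked example for build_inv_address() (illustrative, 4K pages):
 * address = 0x10000, size = 0x3000. Then pages = 3, end = 0x12fff,
 * end ^ address = 0x2fff and msb_diff = 13. Setting bits [12:0] yields
 * 0x11fff, masking with PAGE_MASK yields 0x11000, and the S bit is ORed in.
 * The lowest clear bit at or above bit 12 (here bit 13) tells the IOMMU to
 * invalidate a naturally aligned 16K region starting at 0x10000, which
 * covers the requested 0x10000-0x12fff range.
 */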
1256 
1257 static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
1258 				  size_t size, u16 domid,
1259 				  ioasid_t pasid, bool gn)
1260 {
1261 	u64 inv_address = build_inv_address(address, size);
1262 
1263 	memset(cmd, 0, sizeof(*cmd));
1264 
1265 	cmd->data[1] |= domid;
1266 	cmd->data[2]  = lower_32_bits(inv_address);
1267 	cmd->data[3]  = upper_32_bits(inv_address);
1268 	/* PDE bit - we want to flush everything, not only the PTEs */
1269 	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
1270 	if (gn) {
1271 		cmd->data[0] |= pasid;
1272 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1273 	}
1274 	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
1275 }
1276 
1277 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
1278 				  u64 address, size_t size,
1279 				  ioasid_t pasid, bool gn)
1280 {
1281 	u64 inv_address = build_inv_address(address, size);
1282 
1283 	memset(cmd, 0, sizeof(*cmd));
1284 
1285 	cmd->data[0]  = devid;
1286 	cmd->data[0] |= (qdep & 0xff) << 24;
1287 	cmd->data[1]  = devid;
1288 	cmd->data[2]  = lower_32_bits(inv_address);
1289 	cmd->data[3]  = upper_32_bits(inv_address);
1290 	if (gn) {
1291 		cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
1292 		cmd->data[1] |= (pasid & 0xff) << 16;
1293 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1294 	}
1295 
1296 	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
1297 }
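
/*
 * Note on the PASID encoding above: for INVALIDATE_IOTLB_PAGES the PASID is
 * split across the first two dwords (bits [15:8] of the PASID go into
 * data[0] bits [23:16], bits [7:0] into data[1] bits [23:16]), whereas
 * INVALIDATE_IOMMU_PAGES (build_inv_iommu_pages) carries the whole PASID in
 * data[0]. Both set CMD_INV_IOMMU_PAGES_GN_MASK when a guest/PASID flush is
 * requested.
 */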
1298 
1299 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
1300 			       int status, int tag, u8 gn)
1301 {
1302 	memset(cmd, 0, sizeof(*cmd));
1303 
1304 	cmd->data[0]  = devid;
1305 	if (gn) {
1306 		cmd->data[1]  = pasid;
1307 		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
1308 	}
1309 	cmd->data[3]  = tag & 0x1ff;
1310 	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
1311 
1312 	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
1313 }
1314 
1315 static void build_inv_all(struct iommu_cmd *cmd)
1316 {
1317 	memset(cmd, 0, sizeof(*cmd));
1318 	CMD_SET_TYPE(cmd, CMD_INV_ALL);
1319 }
1320 
1321 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1322 {
1323 	memset(cmd, 0, sizeof(*cmd));
1324 	cmd->data[0] = devid;
1325 	CMD_SET_TYPE(cmd, CMD_INV_IRT);
1326 }
1327 
1328 /*
1329  * Writes the command to the IOMMUs command buffer and informs the
1330  * hardware about the new command.
1331  */
1332 static int __iommu_queue_command_sync(struct amd_iommu *iommu,
1333 				      struct iommu_cmd *cmd,
1334 				      bool sync)
1335 {
1336 	unsigned int count = 0;
1337 	u32 left, next_tail;
1338 
1339 	next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1340 again:
1341 	left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1342 
1343 	if (left <= 0x20) {
1344 		/* Skip udelay() the first time around */
1345 		if (count++) {
1346 			if (count == LOOP_TIMEOUT) {
1347 				pr_err("Command buffer timeout\n");
1348 				return -EIO;
1349 			}
1350 
1351 			udelay(1);
1352 		}
1353 
1354 		/* Update head and recheck remaining space */
1355 		iommu->cmd_buf_head = readl(iommu->mmio_base +
1356 					    MMIO_CMD_HEAD_OFFSET);
1357 
1358 		goto again;
1359 	}
1360 
1361 	copy_cmd_to_buffer(iommu, cmd);
1362 
1363 	/* Do we need to make sure all commands are processed? */
1364 	iommu->need_sync = sync;
1365 
1366 	return 0;
1367 }
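
/*
 * Note on the space check above: cmd_buf_head and cmd_buf_tail are byte
 * offsets into the circular command buffer and each command occupies
 * sizeof(struct iommu_cmd) == 16 bytes. 'left' is the free space modulo
 * CMD_BUFFER_SIZE; a small reserve of 0x20 bytes (two command slots) is
 * kept free, presumably so the tail never wraps onto the head. When space
 * runs low the cached head is refreshed from MMIO_CMD_HEAD_OFFSET until the
 * hardware has consumed enough entries or LOOP_TIMEOUT expires.
 */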
1368 
1369 static int iommu_queue_command_sync(struct amd_iommu *iommu,
1370 				    struct iommu_cmd *cmd,
1371 				    bool sync)
1372 {
1373 	unsigned long flags;
1374 	int ret;
1375 
1376 	raw_spin_lock_irqsave(&iommu->lock, flags);
1377 	ret = __iommu_queue_command_sync(iommu, cmd, sync);
1378 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
1379 
1380 	return ret;
1381 }
1382 
1383 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1384 {
1385 	return iommu_queue_command_sync(iommu, cmd, true);
1386 }
1387 
1388 /*
1389  * This function queues a completion wait command into the command
1390  * buffer of an IOMMU
1391  */
1392 static int iommu_completion_wait(struct amd_iommu *iommu)
1393 {
1394 	struct iommu_cmd cmd;
1395 	unsigned long flags;
1396 	int ret;
1397 	u64 data;
1398 
1399 	if (!iommu->need_sync)
1400 		return 0;
1401 
1402 	data = atomic64_inc_return(&iommu->cmd_sem_val);
1403 	build_completion_wait(&cmd, iommu, data);
1404 
1405 	raw_spin_lock_irqsave(&iommu->lock, flags);
1406 
1407 	ret = __iommu_queue_command_sync(iommu, &cmd, false);
1408 	if (ret)
1409 		goto out_unlock;
1410 
1411 	ret = wait_on_sem(iommu, data);
1412 
1413 out_unlock:
1414 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
1415 
1416 	return ret;
1417 }
1418 
1419 static void domain_flush_complete(struct protection_domain *domain)
1420 {
1421 	struct pdom_iommu_info *pdom_iommu_info;
1422 	unsigned long i;
1423 
1424 	lockdep_assert_held(&domain->lock);
1425 
1426 	/*
1427 	 * Devices of this domain are behind this IOMMU
1428 	 * We need to wait for completion of all commands.
1429 	 */
1430 	xa_for_each(&domain->iommu_array, i, pdom_iommu_info)
1431 		iommu_completion_wait(pdom_iommu_info->iommu);
1432 }
1433 
1434 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1435 {
1436 	struct iommu_cmd cmd;
1437 
1438 	build_inv_dte(&cmd, devid);
1439 
1440 	return iommu_queue_command(iommu, &cmd);
1441 }
1442 
1443 static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
1444 {
1445 	int ret;
1446 
1447 	ret = iommu_flush_dte(iommu, devid);
1448 	if (!ret)
1449 		iommu_completion_wait(iommu);
1450 }
1451 
1452 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1453 {
1454 	u32 devid;
1455 	u16 last_bdf = iommu->pci_seg->last_bdf;
1456 
1457 	for (devid = 0; devid <= last_bdf; ++devid)
1458 		iommu_flush_dte(iommu, devid);
1459 
1460 	iommu_completion_wait(iommu);
1461 }
1462 
1463 /*
1464  * This function uses heavy locking and may disable irqs for some time. But
1465  * this is no issue because it is only called during resume.
1466  */
1467 static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1468 {
1469 	u32 dom_id;
1470 	u16 last_bdf = iommu->pci_seg->last_bdf;
1471 
1472 	for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
1473 		struct iommu_cmd cmd;
1474 		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1475 				      dom_id, IOMMU_NO_PASID, false);
1476 		iommu_queue_command(iommu, &cmd);
1477 	}
1478 
1479 	iommu_completion_wait(iommu);
1480 }
1481 
1482 static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1483 {
1484 	struct iommu_cmd cmd;
1485 
1486 	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1487 			      dom_id, IOMMU_NO_PASID, false);
1488 	iommu_queue_command(iommu, &cmd);
1489 
1490 	iommu_completion_wait(iommu);
1491 }
1492 
1493 static void amd_iommu_flush_all(struct amd_iommu *iommu)
1494 {
1495 	struct iommu_cmd cmd;
1496 
1497 	build_inv_all(&cmd);
1498 
1499 	iommu_queue_command(iommu, &cmd);
1500 	iommu_completion_wait(iommu);
1501 }
1502 
1503 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1504 {
1505 	struct iommu_cmd cmd;
1506 
1507 	build_inv_irt(&cmd, devid);
1508 
1509 	iommu_queue_command(iommu, &cmd);
1510 }
1511 
1512 static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1513 {
1514 	u32 devid;
1515 	u16 last_bdf = iommu->pci_seg->last_bdf;
1516 
1517 	if (iommu->irtcachedis_enabled)
1518 		return;
1519 
1520 	for (devid = 0; devid <= last_bdf; devid++)
1521 		iommu_flush_irt(iommu, devid);
1522 
1523 	iommu_completion_wait(iommu);
1524 }
1525 
1526 void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
1527 {
1528 	if (check_feature(FEATURE_IA)) {
1529 		amd_iommu_flush_all(iommu);
1530 	} else {
1531 		amd_iommu_flush_dte_all(iommu);
1532 		amd_iommu_flush_irt_all(iommu);
1533 		amd_iommu_flush_tlb_all(iommu);
1534 	}
1535 }
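
/*
 * amd_iommu_flush_all_caches() is the heavyweight path used when the whole
 * IOMMU state must be resynchronized: if the IOMMU advertises FEATURE_IA
 * (invalidate-all support), a single INVALIDATE_ALL command suffices;
 * otherwise the driver falls back to flushing every DTE and IRT entry and
 * every domain TLB individually via the helpers above.
 */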
1536 
1537 /*
1538  * Command send function for flushing on-device TLB
1539  */
1540 static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
1541 			      size_t size, ioasid_t pasid, bool gn)
1542 {
1543 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
1544 	struct iommu_cmd cmd;
1545 	int qdep = dev_data->ats_qdep;
1546 
1547 	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
1548 			      size, pasid, gn);
1549 
1550 	return iommu_queue_command(iommu, &cmd);
1551 }
1552 
1553 static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
1554 {
1555 	struct amd_iommu *iommu = data;
1556 
1557 	return iommu_flush_dte(iommu, alias);
1558 }
1559 
1560 /*
1561  * Command send function for invalidating a device table entry
1562  */
1563 static int device_flush_dte(struct iommu_dev_data *dev_data)
1564 {
1565 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
1566 	struct pci_dev *pdev = NULL;
1567 	struct amd_iommu_pci_seg *pci_seg;
1568 	u16 alias;
1569 	int ret;
1570 
1571 	if (dev_is_pci(dev_data->dev))
1572 		pdev = to_pci_dev(dev_data->dev);
1573 
1574 	if (pdev)
1575 		ret = pci_for_each_dma_alias(pdev,
1576 					     device_flush_dte_alias, iommu);
1577 	else
1578 		ret = iommu_flush_dte(iommu, dev_data->devid);
1579 	if (ret)
1580 		return ret;
1581 
1582 	pci_seg = iommu->pci_seg;
1583 	alias = pci_seg->alias_table[dev_data->devid];
1584 	if (alias != dev_data->devid) {
1585 		ret = iommu_flush_dte(iommu, alias);
1586 		if (ret)
1587 			return ret;
1588 	}
1589 
1590 	if (dev_data->ats_enabled) {
1591 		/* Invalidate the entire contents of an IOTLB */
1592 		ret = device_flush_iotlb(dev_data, 0, ~0UL,
1593 					 IOMMU_NO_PASID, false);
1594 	}
1595 
1596 	return ret;
1597 }
1598 
1599 static int domain_flush_pages_v2(struct protection_domain *pdom,
1600 				 u64 address, size_t size)
1601 {
1602 	struct iommu_dev_data *dev_data;
1603 	struct iommu_cmd cmd;
1604 	int ret = 0;
1605 
1606 	lockdep_assert_held(&pdom->lock);
1607 	list_for_each_entry(dev_data, &pdom->dev_list, list) {
1608 		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1609 		u16 domid = dev_data->gcr3_info.domid;
1610 
1611 		build_inv_iommu_pages(&cmd, address, size,
1612 				      domid, IOMMU_NO_PASID, true);
1613 
1614 		ret |= iommu_queue_command(iommu, &cmd);
1615 	}
1616 
1617 	return ret;
1618 }
1619 
1620 static int domain_flush_pages_v1(struct protection_domain *pdom,
1621 				 u64 address, size_t size)
1622 {
1623 	struct pdom_iommu_info *pdom_iommu_info;
1624 	struct iommu_cmd cmd;
1625 	int ret = 0;
1626 	unsigned long i;
1627 
1628 	lockdep_assert_held(&pdom->lock);
1629 
1630 	build_inv_iommu_pages(&cmd, address, size,
1631 			      pdom->id, IOMMU_NO_PASID, false);
1632 
1633 	xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) {
1634 		/*
1635 		 * Devices of this domain are behind this IOMMU
1636 		 * We need a TLB flush
1637 		 */
1638 		ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd);
1639 	}
1640 
1641 	return ret;
1642 }
1643 
1644 /*
1645  * TLB invalidation function which is called from the mapping functions.
1646  * It flushes range of PTEs of the domain.
1647  */
1648 static void __domain_flush_pages(struct protection_domain *domain,
1649 				 u64 address, size_t size)
1650 {
1651 	struct iommu_dev_data *dev_data;
1652 	int ret = 0;
1653 	ioasid_t pasid = IOMMU_NO_PASID;
1654 	bool gn = false;
1655 
1656 	lockdep_assert_held(&domain->lock);
1657 
1658 	if (pdom_is_v2_pgtbl_mode(domain)) {
1659 		gn = true;
1660 		ret = domain_flush_pages_v2(domain, address, size);
1661 	} else {
1662 		ret = domain_flush_pages_v1(domain, address, size);
1663 	}
1664 
1665 	list_for_each_entry(dev_data, &domain->dev_list, list) {
1666 
1667 		if (!dev_data->ats_enabled)
1668 			continue;
1669 
1670 		ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
1671 	}
1672 
1673 	WARN_ON(ret);
1674 }
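
/*
 * Flush strategy summary for __domain_flush_pages(): with a v1 (host) page
 * table, one INVALIDATE_IOMMU_PAGES command is queued per IOMMU the domain
 * is attached to, using the domain's own ID and GN=0. With a v2 page table,
 * one command is queued per attached device, using that device's
 * gcr3_info.domid with GN=1 and IOMMU_NO_PASID. In both cases devices with
 * ATS enabled additionally get an INVALIDATE_IOTLB_PAGES for their
 * on-device TLB, and callers wait for completion separately via
 * domain_flush_complete().
 */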
1675 
1676 void amd_iommu_domain_flush_pages(struct protection_domain *domain,
1677 				  u64 address, size_t size)
1678 {
1679 	lockdep_assert_held(&domain->lock);
1680 
1681 	if (likely(!amd_iommu_np_cache)) {
1682 		__domain_flush_pages(domain, address, size);
1683 
1684 		/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
1685 		domain_flush_complete(domain);
1686 
1687 		return;
1688 	}
1689 
1690 	/*
1691 	 * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
1692 	 * In such setups it is best to avoid flushes of ranges which are not
1693 	 * naturally aligned, since it would lead to flushes of unmodified
1694 	 * PTEs. Such flushes would require the hypervisor to do more work than
1695 	 * necessary. Therefore, perform repeated flushes of aligned ranges
1696 	 * until the whole range is covered. Each iteration flushes the smaller
1697 	 * between the natural alignment of the address that we flush and the
1698 	 * greatest naturally aligned region that fits in the range.
1699 	 */
1700 	while (size != 0) {
1701 		int addr_alignment = __ffs(address);
1702 		int size_alignment = __fls(size);
1703 		int min_alignment;
1704 		size_t flush_size;
1705 
1706 		/*
1707 		 * size is always non-zero, but address might be zero, causing
1708 		 * addr_alignment to be negative. As the casting of the
1709 		 * argument in __ffs(address) to long might trim the high bits
1710 		 * of the address on x86-32, cast to long when doing the check.
1711 		 */
1712 		if (likely((unsigned long)address != 0))
1713 			min_alignment = min(addr_alignment, size_alignment);
1714 		else
1715 			min_alignment = size_alignment;
1716 
1717 		flush_size = 1ul << min_alignment;
1718 
1719 		__domain_flush_pages(domain, address, flush_size);
1720 		address += flush_size;
1721 		size -= flush_size;
1722 	}
1723 
1724 	/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
1725 	domain_flush_complete(domain);
1726 }
1727 
1728 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1729 static void amd_iommu_domain_flush_all(struct protection_domain *domain)
1730 {
1731 	amd_iommu_domain_flush_pages(domain, 0,
1732 				     CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
1733 }
1734 
1735 void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
1736 				     ioasid_t pasid, u64 address, size_t size)
1737 {
1738 	struct iommu_cmd cmd;
1739 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1740 
1741 	build_inv_iommu_pages(&cmd, address, size,
1742 			      dev_data->gcr3_info.domid, pasid, true);
1743 	iommu_queue_command(iommu, &cmd);
1744 
1745 	if (dev_data->ats_enabled)
1746 		device_flush_iotlb(dev_data, address, size, pasid, true);
1747 
1748 	iommu_completion_wait(iommu);
1749 }
1750 
1751 static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
1752 				ioasid_t pasid)
1753 {
1754 	amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
1755 					CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
1756 }
1757 
1758 /* Flush the not present cache if it exists */
1759 static void domain_flush_np_cache(struct protection_domain *domain,
1760 		dma_addr_t iova, size_t size)
1761 {
1762 	if (unlikely(amd_iommu_np_cache)) {
1763 		unsigned long flags;
1764 
1765 		spin_lock_irqsave(&domain->lock, flags);
1766 		amd_iommu_domain_flush_pages(domain, iova, size);
1767 		spin_unlock_irqrestore(&domain->lock, flags);
1768 	}
1769 }
1770 
1771 
1772 /*
1773  * This function flushes the DTEs for all devices in domain
1774  */
1775 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
1776 {
1777 	struct iommu_dev_data *dev_data;
1778 
1779 	lockdep_assert_held(&domain->lock);
1780 
1781 	list_for_each_entry(dev_data, &domain->dev_list, list) {
1782 		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
1783 
1784 		set_dte_entry(iommu, dev_data);
1785 		clone_aliases(iommu, dev_data->dev);
1786 	}
1787 
1788 	list_for_each_entry(dev_data, &domain->dev_list, list)
1789 		device_flush_dte(dev_data);
1790 
1791 	domain_flush_complete(domain);
1792 }
1793 
1794 int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
1795 {
1796 	struct iommu_dev_data *dev_data;
1797 	struct amd_iommu *iommu;
1798 	struct iommu_cmd cmd;
1799 
1800 	dev_data = dev_iommu_priv_get(dev);
1801 	iommu    = get_amd_iommu_from_dev(dev);
1802 
1803 	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
1804 			   tag, dev_data->pri_tlp);
1805 
1806 	return iommu_queue_command(iommu, &cmd);
1807 }
1808 
1809 /****************************************************************************
1810  *
1811  * The next functions belong to the domain allocation. A domain is
1812  * allocated for every IOMMU as the default domain. If device isolation
1813  * is enabled, every device gets its own domain. The most important thing
1814  * about domains is the page table mapping the DMA address space they
1815  * contain.
1816  *
1817  ****************************************************************************/
1818 
1819 static int pdom_id_alloc(void)
1820 {
1821 	return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
1822 }
1823 
1824 static void pdom_id_free(int id)
1825 {
1826 	ida_free(&pdom_ids, id);
1827 }
1828 
1829 static void free_gcr3_tbl_level1(u64 *tbl)
1830 {
1831 	u64 *ptr;
1832 	int i;
1833 
1834 	for (i = 0; i < 512; ++i) {
1835 		if (!(tbl[i] & GCR3_VALID))
1836 			continue;
1837 
1838 		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1839 
1840 		iommu_free_pages(ptr);
1841 	}
1842 }
1843 
1844 static void free_gcr3_tbl_level2(u64 *tbl)
1845 {
1846 	u64 *ptr;
1847 	int i;
1848 
1849 	for (i = 0; i < 512; ++i) {
1850 		if (!(tbl[i] & GCR3_VALID))
1851 			continue;
1852 
1853 		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1854 
1855 		free_gcr3_tbl_level1(ptr);
1856 	}
1857 }
1858 
1859 static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
1860 {
1861 	if (gcr3_info->glx == 2)
1862 		free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
1863 	else if (gcr3_info->glx == 1)
1864 		free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
1865 	else
1866 		WARN_ON_ONCE(gcr3_info->glx != 0);
1867 
1868 	gcr3_info->glx = 0;
1869 
1870 	/* Free per device domain ID */
1871 	pdom_id_free(gcr3_info->domid);
1872 
1873 	iommu_free_pages(gcr3_info->gcr3_tbl);
1874 	gcr3_info->gcr3_tbl = NULL;
1875 }
1876 
1877 /*
1878  * Number of GCR3 table levels required. Each level is a 4-Kbyte
1879  * page and can contain up to 512 entries.
1880  */
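/*
 * For example, 512 PASIDs fit into a single level (glx = 0), while 65536
 * PASIDs need two levels (glx = 1), since each level resolves 9 PASID bits.
 */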
1881 static int get_gcr3_levels(int pasids)
1882 {
1883 	int levels;
1884 
1885 	if (pasids == -1)
1886 		return amd_iommu_max_glx_val;
1887 
1888 	levels = get_count_order(pasids);
1889 
1890 	return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
1891 }
1892 
1893 static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
1894 			    struct amd_iommu *iommu, int pasids)
1895 {
1896 	int levels = get_gcr3_levels(pasids);
1897 	int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
1898 	int domid;
1899 
1900 	if (levels > amd_iommu_max_glx_val)
1901 		return -EINVAL;
1902 
1903 	if (gcr3_info->gcr3_tbl)
1904 		return -EBUSY;
1905 
1906 	/* Allocate per device domain ID */
1907 	domid = pdom_id_alloc();
1908 	if (domid <= 0)
1909 		return -ENOSPC;
1910 	gcr3_info->domid = domid;
1911 
1912 	gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
1913 	if (gcr3_info->gcr3_tbl == NULL) {
1914 		pdom_id_free(domid);
1915 		return -ENOMEM;
1916 	}
1917 
1918 	gcr3_info->glx = levels;
1919 
1920 	return 0;
1921 }
1922 
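/*
 * Walk the GCR3 table down to the level-0 entry for @pasid, optionally
 * allocating missing intermediate levels; each level resolves 9 PASID bits.
 */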
1923 static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
1924 			   ioasid_t pasid, bool alloc)
1925 {
1926 	int index;
1927 	u64 *pte;
1928 	u64 *root = gcr3_info->gcr3_tbl;
1929 	int level = gcr3_info->glx;
1930 
1931 	while (true) {
1932 
1933 		index = (pasid >> (9 * level)) & 0x1ff;
1934 		pte   = &root[index];
1935 
1936 		if (level == 0)
1937 			break;
1938 
1939 		if (!(*pte & GCR3_VALID)) {
1940 			if (!alloc)
1941 				return NULL;
1942 
1943 			root = (void *)get_zeroed_page(GFP_ATOMIC);
1944 			if (root == NULL)
1945 				return NULL;
1946 
1947 			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
1948 		}
1949 
1950 		root = iommu_phys_to_virt(*pte & PAGE_MASK);
1951 
1952 		level -= 1;
1953 	}
1954 
1955 	return pte;
1956 }
1957 
1958 static int update_gcr3(struct iommu_dev_data *dev_data,
1959 		       ioasid_t pasid, unsigned long gcr3, bool set)
1960 {
1961 	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
1962 	u64 *pte;
1963 
1964 	pte = __get_gcr3_pte(gcr3_info, pasid, true);
1965 	if (pte == NULL)
1966 		return -ENOMEM;
1967 
1968 	if (set)
1969 		*pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
1970 	else
1971 		*pte = 0;
1972 
1973 	dev_flush_pasid_all(dev_data, pasid);
1974 	return 0;
1975 }
1976 
1977 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
1978 		       unsigned long gcr3)
1979 {
1980 	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
1981 	int ret;
1982 
1983 	iommu_group_mutex_assert(dev_data->dev);
1984 
1985 	ret = update_gcr3(dev_data, pasid, gcr3, true);
1986 	if (ret)
1987 		return ret;
1988 
1989 	gcr3_info->pasid_cnt++;
1990 	return ret;
1991 }
1992 
1993 int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
1994 {
1995 	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
1996 	int ret;
1997 
1998 	iommu_group_mutex_assert(dev_data->dev);
1999 
2000 	ret = update_gcr3(dev_data, pasid, 0, false);
2001 	if (ret)
2002 		return ret;
2003 
2004 	gcr3_info->pasid_cnt--;
2005 	return ret;
2006 }
2007 
2008 static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr,
2009 			   struct dev_table_entry *new)
2010 {
2011 	/* All existing DTE must have V bit set */
2012 	new->data128[0] = DTE_FLAG_V;
2013 	new->data128[1] = 0;
2014 }
2015 
2016 /*
2017  * Note:
2018  * The old values for the GCR3 table and GPT have been cleared by the caller.
2019  */
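/*
 * The GCR3 table root pointer does not fit into a single DTE field; it is
 * split across data[0] (bits 14:12) and data[1] (bits 30:15 and 51:31).
 */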
2020 static void set_dte_gcr3_table(struct amd_iommu *iommu,
2021 			       struct iommu_dev_data *dev_data,
2022 			       struct dev_table_entry *target)
2023 {
2024 	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2025 	u64 gcr3;
2026 
2027 	if (!gcr3_info->gcr3_tbl)
2028 		return;
2029 
2030 	pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n",
2031 		 __func__, dev_data->devid, gcr3_info->glx,
2032 		 (unsigned long long)gcr3_info->gcr3_tbl);
2033 
2034 	gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
2035 
2036 	target->data[0] |= DTE_FLAG_GV |
2037 			   FIELD_PREP(DTE_GLX, gcr3_info->glx) |
2038 			   FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12);
2039 	if (pdom_is_v2_pgtbl_mode(dev_data->domain))
2040 		target->data[0] |= DTE_FLAG_GIOV;
2041 
2042 	target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
2043 			   FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);
2044 
2045 	/* Guest page table can only support 4 and 5 levels  */
2046 	if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
2047 		target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
2048 	else
2049 		target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
2050 }
2051 
2052 static void set_dte_entry(struct amd_iommu *iommu,
2053 			  struct iommu_dev_data *dev_data)
2054 {
2055 	u16 domid;
2056 	u32 old_domid;
2057 	struct dev_table_entry *initial_dte;
2058 	struct dev_table_entry new = {};
2059 	struct protection_domain *domain = dev_data->domain;
2060 	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2061 	struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
2062 
2063 	if (gcr3_info && gcr3_info->gcr3_tbl)
2064 		domid = dev_data->gcr3_info.domid;
2065 	else
2066 		domid = domain->id;
2067 
2068 	make_clear_dte(dev_data, dte, &new);
2069 
2070 	if (domain->iop.mode != PAGE_MODE_NONE)
2071 		new.data[0] |= iommu_virt_to_phys(domain->iop.root);
2072 
2073 	new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
2074 		    << DEV_ENTRY_MODE_SHIFT;
2075 
2076 	new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;
2077 
2078 	/*
2079 	 * When SNP is enabled, we can only support TV=1 with non-zero domain ID.
2080 	 * This is prevented by the SNP-enable and IOMMU_DOMAIN_IDENTITY check in
2081 	 * do_iommu_domain_alloc().
2082 	 */
2083 	WARN_ON(amd_iommu_snp_en && (domid == 0));
2084 	new.data[0] |= DTE_FLAG_TV;
2085 
2086 	if (dev_data->ppr)
2087 		new.data[0] |= 1ULL << DEV_ENTRY_PPR;
2088 
2089 	if (domain->dirty_tracking)
2090 		new.data[0] |= DTE_FLAG_HAD;
2091 
2092 	if (dev_data->ats_enabled)
2093 		new.data[1] |= DTE_FLAG_IOTLB;
2094 
2095 	old_domid = READ_ONCE(dte->data[1]) & DEV_DOMID_MASK;
2096 	new.data[1] |= domid;
2097 
2098 	/*
2099 	 * Restore cached persistent DTE bits, which can be set by information
2100 	 * in IVRS table. See set_dev_entry_from_acpi().
2101 	 */
2102 	initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid);
2103 	if (initial_dte) {
2104 		new.data128[0] |= initial_dte->data128[0];
2105 		new.data128[1] |= initial_dte->data128[1];
2106 	}
2107 
2108 	set_dte_gcr3_table(iommu, dev_data, &new);
2109 
2110 	update_dte256(iommu, dev_data, &new);
2111 
2112 	/*
2113 	 * A kdump kernel might be replacing a domain ID that was copied from
2114 	 * the previous kernel--if so, it needs to flush the translation cache
2115 	 * entries for the old domain ID that is being overwritten
2116 	 */
2117 	if (old_domid) {
2118 		amd_iommu_flush_tlb_domid(iommu, old_domid);
2119 	}
2120 }
2121 
2122 /*
2123  * Clear DMA-remap related flags to block all DMA (blocked domain).
2124  */
2125 static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data)
2126 {
2127 	struct dev_table_entry new = {};
2128 	struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
2129 
2130 	make_clear_dte(dev_data, dte, &new);
2131 	update_dte256(iommu, dev_data, &new);
2132 }
2133 
2134 /* Update and flush DTE for the given device */
2135 static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
2136 {
2137 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
2138 
2139 	if (set)
2140 		set_dte_entry(iommu, dev_data);
2141 	else
2142 		clear_dte_entry(iommu, dev_data);
2143 
2144 	clone_aliases(iommu, dev_data->dev);
2145 	device_flush_dte(dev_data);
2146 	iommu_completion_wait(iommu);
2147 }
2148 
2149 /*
2150  * If the domain is SVA capable, initialize the GCR3 table. If the domain
2151  * is also in v2 page table mode, update GCR3[0].
2152  */
2153 static int init_gcr3_table(struct iommu_dev_data *dev_data,
2154 			   struct protection_domain *pdom)
2155 {
2156 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2157 	int max_pasids = dev_data->max_pasids;
2158 	int ret = 0;
2159 
2160 	/*
2161 	 * If the domain is in pt mode, set up the GCR3 table only if the
2162 	 * device is PASID capable.
2163 	 */
2164 	if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data))
2165 		return ret;
2166 
2167 	/*
2168 	 * By default, setup GCR3 table to support MAX PASIDs
2169 	 * supported by the device/IOMMU.
2170 	 */
2171 	ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
2172 			       max_pasids > 0 ?  max_pasids : 1);
2173 	if (ret)
2174 		return ret;
2175 
2176 	/* Setup GCR3[0] only if domain is setup with v2 page table mode */
2177 	if (!pdom_is_v2_pgtbl_mode(pdom))
2178 		return ret;
2179 
2180 	ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true);
2181 	if (ret)
2182 		free_gcr3_table(&dev_data->gcr3_info);
2183 
2184 	return ret;
2185 }
2186 
2187 static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
2188 			       struct protection_domain *pdom)
2189 {
2190 	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2191 
2192 	if (pdom_is_v2_pgtbl_mode(pdom))
2193 		update_gcr3(dev_data, 0, 0, false);
2194 
2195 	if (gcr3_info->gcr3_tbl == NULL)
2196 		return;
2197 
2198 	free_gcr3_table(gcr3_info);
2199 }
2200 
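/*
 * Reference-count, per protection domain, how many devices sit behind each
 * IOMMU. The xarray in pdom->iommu_array is keyed by iommu->index and is
 * what domain_flush_pages_v1() walks when flushing the domain.
 */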
2201 static int pdom_attach_iommu(struct amd_iommu *iommu,
2202 			     struct protection_domain *pdom)
2203 {
2204 	struct pdom_iommu_info *pdom_iommu_info, *curr;
2205 	unsigned long flags;
2206 	int ret = 0;
2207 
2208 	spin_lock_irqsave(&pdom->lock, flags);
2209 
2210 	pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
2211 	if (pdom_iommu_info) {
2212 		pdom_iommu_info->refcnt++;
2213 		goto out_unlock;
2214 	}
2215 
2216 	pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC);
2217 	if (!pdom_iommu_info) {
2218 		ret = -ENOMEM;
2219 		goto out_unlock;
2220 	}
2221 
2222 	pdom_iommu_info->iommu = iommu;
2223 	pdom_iommu_info->refcnt = 1;
2224 
2225 	curr = xa_cmpxchg(&pdom->iommu_array, iommu->index,
2226 			  NULL, pdom_iommu_info, GFP_ATOMIC);
2227 	if (curr) {
2228 		kfree(pdom_iommu_info);
2229 		ret = -ENOSPC;
2230 		goto out_unlock;
2231 	}
2232 
2233 out_unlock:
2234 	spin_unlock_irqrestore(&pdom->lock, flags);
2235 	return ret;
2236 }
2237 
2238 static void pdom_detach_iommu(struct amd_iommu *iommu,
2239 			      struct protection_domain *pdom)
2240 {
2241 	struct pdom_iommu_info *pdom_iommu_info;
2242 	unsigned long flags;
2243 
2244 	spin_lock_irqsave(&pdom->lock, flags);
2245 
2246 	pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
2247 	if (!pdom_iommu_info) {
2248 		spin_unlock_irqrestore(&pdom->lock, flags);
2249 		return;
2250 	}
2251 
2252 	pdom_iommu_info->refcnt--;
2253 	if (pdom_iommu_info->refcnt == 0) {
2254 		xa_erase(&pdom->iommu_array, iommu->index);
2255 		kfree(pdom_iommu_info);
2256 	}
2257 
2258 	spin_unlock_irqrestore(&pdom->lock, flags);
2259 }
2260 
2261 /*
2262  * If a device is not yet associated with a domain, this function makes the
2263  * device visible in the domain
2264  */
2265 static int attach_device(struct device *dev,
2266 			 struct protection_domain *domain)
2267 {
2268 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2269 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2270 	struct pci_dev *pdev;
2271 	unsigned long flags;
2272 	int ret = 0;
2273 
2274 	mutex_lock(&dev_data->mutex);
2275 
2276 	if (dev_data->domain != NULL) {
2277 		ret = -EBUSY;
2278 		goto out;
2279 	}
2280 
2281 	/* Do reference counting */
2282 	ret = pdom_attach_iommu(iommu, domain);
2283 	if (ret)
2284 		goto out;
2285 
2286 	/* Setup GCR3 table */
2287 	if (pdom_is_sva_capable(domain)) {
2288 		ret = init_gcr3_table(dev_data, domain);
2289 		if (ret) {
2290 			pdom_detach_iommu(iommu, domain);
2291 			goto out;
2292 		}
2293 	}
2294 
2295 	pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
2296 	if (pdev && pdom_is_sva_capable(domain)) {
2297 		pdev_enable_caps(pdev);
2298 
2299 		/*
2300 		 * Device can continue to function even if IOPF
2301 		 * enablement failed. Hence in error path just
2302 		 * disable device PRI support.
2303 		 */
2304 		if (amd_iommu_iopf_add_device(iommu, dev_data))
2305 			pdev_disable_cap_pri(pdev);
2306 	} else if (pdev) {
2307 		pdev_enable_cap_ats(pdev);
2308 	}
2309 
2310 	/* Update data structures */
2311 	dev_data->domain = domain;
2312 	spin_lock_irqsave(&domain->lock, flags);
2313 	list_add(&dev_data->list, &domain->dev_list);
2314 	spin_unlock_irqrestore(&domain->lock, flags);
2315 
2316 	/* Update device table */
2317 	dev_update_dte(dev_data, true);
2318 
2319 out:
2320 	mutex_unlock(&dev_data->mutex);
2321 
2322 	return ret;
2323 }
2324 
2325 /*
2326  * Removes a device from a protection domain (with devtable_lock held)
2327  */
2328 static void detach_device(struct device *dev)
2329 {
2330 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2331 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2332 	struct protection_domain *domain = dev_data->domain;
2333 	unsigned long flags;
2334 
2335 	mutex_lock(&dev_data->mutex);
2336 
2337 	/*
2338 	 * First check if the device is still attached. It might already
2339 	 * be detached from its domain because the generic
2340 	 * iommu_detach_group code detached it and we try again here in
2341 	 * our alias handling.
2342 	 */
2343 	if (WARN_ON(!dev_data->domain))
2344 		goto out;
2345 
2346 	/* Remove IOPF handler */
2347 	if (dev_data->ppr) {
2348 		iopf_queue_flush_dev(dev);
2349 		amd_iommu_iopf_remove_device(iommu, dev_data);
2350 	}
2351 
2352 	if (dev_is_pci(dev))
2353 		pdev_disable_caps(to_pci_dev(dev));
2354 
2355 	/* Clear DTE and flush the entry */
2356 	dev_update_dte(dev_data, false);
2357 
2358 	/* Flush IOTLB and wait for the flushes to finish */
2359 	spin_lock_irqsave(&domain->lock, flags);
2360 	amd_iommu_domain_flush_all(domain);
2361 	list_del(&dev_data->list);
2362 	spin_unlock_irqrestore(&domain->lock, flags);
2363 
2364 	/* Clear GCR3 table */
2365 	if (pdom_is_sva_capable(domain))
2366 		destroy_gcr3_table(dev_data, domain);
2367 
2368 	/* Update data structures */
2369 	dev_data->domain = NULL;
2370 
2371 	/* decrease reference counters - needs to happen after the flushes */
2372 	pdom_detach_iommu(iommu, domain);
2373 
2374 out:
2375 	mutex_unlock(&dev_data->mutex);
2376 }
2377 
2378 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
2379 {
2380 	struct iommu_device *iommu_dev;
2381 	struct amd_iommu *iommu;
2382 	struct iommu_dev_data *dev_data;
2383 	int ret;
2384 
2385 	if (!check_device(dev))
2386 		return ERR_PTR(-ENODEV);
2387 
2388 	iommu = rlookup_amd_iommu(dev);
2389 	if (!iommu)
2390 		return ERR_PTR(-ENODEV);
2391 
2392 	/* Not registered yet? */
2393 	if (!iommu->iommu.ops)
2394 		return ERR_PTR(-ENODEV);
2395 
2396 	if (dev_iommu_priv_get(dev))
2397 		return &iommu->iommu;
2398 
2399 	ret = iommu_init_device(iommu, dev);
2400 	if (ret) {
2401 		dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
2402 		iommu_dev = ERR_PTR(ret);
2403 		iommu_ignore_device(iommu, dev);
2404 		goto out_err;
2405 	}
2406 
2407 	amd_iommu_set_pci_msi_domain(dev, iommu);
2408 	iommu_dev = &iommu->iommu;
2409 
2410 	/*
2411 	 * If the IOMMU and the device support PASID, max_pasids will contain
2412 	 * the maximum number of supported PASIDs; otherwise it will be zero.
2413 	 */
2414 	dev_data = dev_iommu_priv_get(dev);
2415 	if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
2416 	    pdev_pasid_supported(dev_data)) {
2417 		dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
2418 					     pci_max_pasids(to_pci_dev(dev)));
2419 	}
2420 
2421 	if (amd_iommu_pgtable == PD_MODE_NONE) {
2422 		pr_warn_once("%s: DMA translation not supported by iommu.\n",
2423 			     __func__);
2424 		iommu_dev = ERR_PTR(-ENODEV);
2425 		goto out_err;
2426 	}
2427 
2428 out_err:
2429 
2430 	iommu_completion_wait(iommu);
2431 
2432 	if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
2433 		dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K;
2434 	else
2435 		dev_data->max_irqs = MAX_IRQS_PER_TABLE_512;
2436 
2437 	if (dev_is_pci(dev))
2438 		pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);
2439 
2440 	return iommu_dev;
2441 }
2442 
2443 static void amd_iommu_release_device(struct device *dev)
2444 {
2445 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2446 
2447 	WARN_ON(dev_data->domain);
2448 
2449 	/*
2450 	 * We keep dev_data around for unplugged devices and reuse it when the
2451 	 * device is re-plugged - not doing so would introduce a ton of races.
2452 	 */
2453 }
2454 
2455 static struct iommu_group *amd_iommu_device_group(struct device *dev)
2456 {
2457 	if (dev_is_pci(dev))
2458 		return pci_device_group(dev);
2459 
2460 	return acpihid_device_group(dev);
2461 }
2462 
2463 /*****************************************************************************
2464  *
2465  * The following functions belong to the exported interface of AMD IOMMU
2466  *
2467  * This interface allows access to lower level functions of the IOMMU
2468  * like protection domain handling and assignment of devices to domains
2469  * which is not possible with the dma_ops interface.
2470  *
2471  *****************************************************************************/
2472 
2473 static void protection_domain_init(struct protection_domain *domain)
2474 {
2475 	spin_lock_init(&domain->lock);
2476 	INIT_LIST_HEAD(&domain->dev_list);
2477 	INIT_LIST_HEAD(&domain->dev_data_list);
2478 	xa_init(&domain->iommu_array);
2479 }
2480 
2481 struct protection_domain *protection_domain_alloc(void)
2482 {
2483 	struct protection_domain *domain;
2484 	int domid;
2485 
2486 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2487 	if (!domain)
2488 		return NULL;
2489 
2490 	domid = pdom_id_alloc();
2491 	if (domid <= 0) {
2492 		kfree(domain);
2493 		return NULL;
2494 	}
2495 	domain->id = domid;
2496 
2497 	protection_domain_init(domain);
2498 
2499 	return domain;
2500 }
2501 
2502 static int pdom_setup_pgtable(struct protection_domain *domain,
2503 			      struct device *dev)
2504 {
2505 	struct io_pgtable_ops *pgtbl_ops;
2506 	enum io_pgtable_fmt fmt;
2507 
2508 	switch (domain->pd_mode) {
2509 	case PD_MODE_V1:
2510 		fmt = AMD_IOMMU_V1;
2511 		break;
2512 	case PD_MODE_V2:
2513 		fmt = AMD_IOMMU_V2;
2514 		break;
2515 	case PD_MODE_NONE:
2516 		WARN_ON_ONCE(1);
2517 		return -EPERM;
2518 	}
2519 
2520 	domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev);
2521 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain);
2522 	if (!pgtbl_ops)
2523 		return -ENOMEM;
2524 
2525 	return 0;
2526 }
2527 
2528 static inline u64 dma_max_address(enum protection_domain_mode pgtable)
2529 {
2530 	if (pgtable == PD_MODE_V1)
2531 		return PM_LEVEL_SIZE(amd_iommu_hpt_level);
2532 
2533 	/*
2534 	 * V2 with 4/5 level page table. Note that "2.2.6.5 AMD64 4-Kbyte Page
2535 	 * Translation" shows that the V2 table sign extends the top of the
2536 	 * address space creating a reserved region in the middle of the
2537 	 * translation, just like the CPU does. Further Vasant says the docs are
2538 	 * incomplete and this only applies to non-zero PASIDs. If the AMDv2
2539 	 * page table is assigned to the 0 PASID then there is no sign extension
2540 	 * check.
2541 	 *
2542 	 * Since the IOMMU must have a fixed geometry, and the core code does
2543 	 * not understand sign extended addressing, we have to chop off the high
2544 	 * bit to get consistent behavior with attachments of the domain to any
2545 	 * PASID.
2546 	 */
2547 	return ((1ULL << (PM_LEVEL_SHIFT(amd_iommu_gpt_level) - 1)) - 1);
2548 }
2549 
2550 static bool amd_iommu_hd_support(struct amd_iommu *iommu)
2551 {
2552 	if (amd_iommu_hatdis)
2553 		return false;
2554 
2555 	return iommu && (iommu->features & FEATURE_HDSUP);
2556 }
2557 
2558 static struct iommu_domain *
2559 do_iommu_domain_alloc(struct device *dev, u32 flags,
2560 		      enum protection_domain_mode pgtable)
2561 {
2562 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
2563 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2564 	struct protection_domain *domain;
2565 	int ret;
2566 
2567 	domain = protection_domain_alloc();
2568 	if (!domain)
2569 		return ERR_PTR(-ENOMEM);
2570 
2571 	domain->pd_mode = pgtable;
2572 	ret = pdom_setup_pgtable(domain, dev);
2573 	if (ret) {
2574 		pdom_id_free(domain->id);
2575 		kfree(domain);
2576 		return ERR_PTR(ret);
2577 	}
2578 
2579 	domain->domain.geometry.aperture_start = 0;
2580 	domain->domain.geometry.aperture_end   = dma_max_address(pgtable);
2581 	domain->domain.geometry.force_aperture = true;
2582 	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
2583 
2584 	domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
2585 	domain->domain.ops = iommu->iommu.ops->default_domain_ops;
2586 
2587 	if (dirty_tracking)
2588 		domain->domain.dirty_ops = &amd_dirty_ops;
2589 
2590 	return &domain->domain;
2591 }
2592 
2593 static struct iommu_domain *
2594 amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
2595 				    const struct iommu_user_data *user_data)
2596 
2597 {
2598 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2599 	const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
2600 						IOMMU_HWPT_ALLOC_PASID;
2601 
2602 	if ((flags & ~supported_flags) || user_data)
2603 		return ERR_PTR(-EOPNOTSUPP);
2604 
2605 	switch (flags & supported_flags) {
2606 	case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
2607 		/* Allocate domain with v1 page table for dirty tracking */
2608 		if (!amd_iommu_hd_support(iommu))
2609 			break;
2610 		return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
2611 	case IOMMU_HWPT_ALLOC_PASID:
2612 		/* Allocate domain with v2 page table if IOMMU supports PASID. */
2613 		if (!amd_iommu_pasid_supported())
2614 			break;
2615 		return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
2616 	case 0:
2617 		/* If nothing specific is required use the kernel commandline default */
2618 		return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
2619 	default:
2620 		break;
2621 	}
2622 	return ERR_PTR(-EOPNOTSUPP);
2623 }
2624 
2625 void amd_iommu_domain_free(struct iommu_domain *dom)
2626 {
2627 	struct protection_domain *domain = to_pdomain(dom);
2628 
2629 	WARN_ON(!list_empty(&domain->dev_list));
2630 	if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
2631 		free_io_pgtable_ops(&domain->iop.pgtbl.ops);
2632 	pdom_id_free(domain->id);
2633 	kfree(domain);
2634 }
2635 
2636 static int blocked_domain_attach_device(struct iommu_domain *domain,
2637 					struct device *dev)
2638 {
2639 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2640 
2641 	if (dev_data->domain)
2642 		detach_device(dev);
2643 
2644 	/* Clear DTE and flush the entry */
2645 	mutex_lock(&dev_data->mutex);
2646 	dev_update_dte(dev_data, false);
2647 	mutex_unlock(&dev_data->mutex);
2648 
2649 	return 0;
2650 }
2651 
2652 static int blocked_domain_set_dev_pasid(struct iommu_domain *domain,
2653 					struct device *dev, ioasid_t pasid,
2654 					struct iommu_domain *old)
2655 {
2656 	amd_iommu_remove_dev_pasid(dev, pasid, old);
2657 	return 0;
2658 }
2659 
2660 static struct iommu_domain blocked_domain = {
2661 	.type = IOMMU_DOMAIN_BLOCKED,
2662 	.ops = &(const struct iommu_domain_ops) {
2663 		.attach_dev     = blocked_domain_attach_device,
2664 		.set_dev_pasid  = blocked_domain_set_dev_pasid,
2665 	}
2666 };
2667 
2668 static struct protection_domain identity_domain;
2669 
2670 static const struct iommu_domain_ops identity_domain_ops = {
2671 	.attach_dev = amd_iommu_attach_device,
2672 };
2673 
2674 void amd_iommu_init_identity_domain(void)
2675 {
2676 	struct iommu_domain *domain = &identity_domain.domain;
2677 
2678 	domain->type = IOMMU_DOMAIN_IDENTITY;
2679 	domain->ops = &identity_domain_ops;
2680 	domain->owner = &amd_iommu_ops;
2681 
2682 	identity_domain.id = pdom_id_alloc();
2683 
2684 	protection_domain_init(&identity_domain);
2685 }
2686 
2687 /* Same as blocked domain except it supports only ops->attach_dev() */
2688 static struct iommu_domain release_domain = {
2689 	.type = IOMMU_DOMAIN_BLOCKED,
2690 	.ops = &(const struct iommu_domain_ops) {
2691 		.attach_dev     = blocked_domain_attach_device,
2692 	}
2693 };
2694 
2695 static int amd_iommu_attach_device(struct iommu_domain *dom,
2696 				   struct device *dev)
2697 {
2698 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2699 	struct protection_domain *domain = to_pdomain(dom);
2700 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2701 	int ret;
2702 
2703 	/*
2704 	 * Skip attaching the device to the domain if the new domain is the
2705 	 * same as the device's current domain.
2706 	 */
2707 	if (dev_data->domain == domain)
2708 		return 0;
2709 
2710 	dev_data->defer_attach = false;
2711 
2712 	/*
2713 	 * Restrict to devices with compatible IOMMU hardware support
2714 	 * when enforcement of dirty tracking is enabled.
2715 	 */
2716 	if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
2717 		return -EINVAL;
2718 
2719 	if (dev_data->domain)
2720 		detach_device(dev);
2721 
2722 	ret = attach_device(dev, domain);
2723 
2724 #ifdef CONFIG_IRQ_REMAP
2725 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2726 		if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2727 			dev_data->use_vapic = 1;
2728 		else
2729 			dev_data->use_vapic = 0;
2730 	}
2731 #endif
2732 
2733 	return ret;
2734 }
2735 
2736 static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
2737 				    unsigned long iova, size_t size)
2738 {
2739 	struct protection_domain *domain = to_pdomain(dom);
2740 	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
2741 
2742 	if (ops->map_pages)
2743 		domain_flush_np_cache(domain, iova, size);
2744 	return 0;
2745 }
2746 
2747 static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
2748 			       phys_addr_t paddr, size_t pgsize, size_t pgcount,
2749 			       int iommu_prot, gfp_t gfp, size_t *mapped)
2750 {
2751 	struct protection_domain *domain = to_pdomain(dom);
2752 	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
2753 	int prot = 0;
2754 	int ret = -EINVAL;
2755 
2756 	if ((domain->pd_mode == PD_MODE_V1) &&
2757 	    (domain->iop.mode == PAGE_MODE_NONE))
2758 		return -EINVAL;
2759 
2760 	if (iommu_prot & IOMMU_READ)
2761 		prot |= IOMMU_PROT_IR;
2762 	if (iommu_prot & IOMMU_WRITE)
2763 		prot |= IOMMU_PROT_IW;
2764 
2765 	if (ops->map_pages) {
2766 		ret = ops->map_pages(ops, iova, paddr, pgsize,
2767 				     pgcount, prot, gfp, mapped);
2768 	}
2769 
2770 	return ret;
2771 }
2772 
2773 static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
2774 					    struct iommu_iotlb_gather *gather,
2775 					    unsigned long iova, size_t size)
2776 {
2777 	/*
2778 	 * AMD's IOMMU can flush as many pages as necessary in a single flush.
2779 	 * Unless we run in a virtual machine, which can be inferred according
2780 	 * to whether "non-present cache" is on, it is probably best to prefer
2781 	 * (potentially) too extensive TLB flushing (i.e., more misses) over
2782 	 * multiple TLB flushes (i.e., more flushes). For virtual machines the
2783 	 * hypervisor needs to synchronize the host IOMMU PTEs with those of
2784 	 * the guest, and the trade-off is different: unnecessary TLB flushes
2785 	 * should be avoided.
2786 	 */
2787 	if (amd_iommu_np_cache &&
2788 	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
2789 		iommu_iotlb_sync(domain, gather);
2790 
2791 	iommu_iotlb_gather_add_range(gather, iova, size);
2792 }
2793 
2794 static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
2795 				    size_t pgsize, size_t pgcount,
2796 				    struct iommu_iotlb_gather *gather)
2797 {
2798 	struct protection_domain *domain = to_pdomain(dom);
2799 	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
2800 	size_t r;
2801 
2802 	if ((domain->pd_mode == PD_MODE_V1) &&
2803 	    (domain->iop.mode == PAGE_MODE_NONE))
2804 		return 0;
2805 
2806 	r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
2807 
2808 	if (r)
2809 		amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
2810 
2811 	return r;
2812 }
2813 
2814 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
2815 					  dma_addr_t iova)
2816 {
2817 	struct protection_domain *domain = to_pdomain(dom);
2818 	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
2819 
2820 	return ops->iova_to_phys(ops, iova);
2821 }
2822 
2823 static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
2824 {
2825 	switch (cap) {
2826 	case IOMMU_CAP_CACHE_COHERENCY:
2827 		return true;
2828 	case IOMMU_CAP_NOEXEC:
2829 		return false;
2830 	case IOMMU_CAP_PRE_BOOT_PROTECTION:
2831 		return amdr_ivrs_remap_support;
2832 	case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
2833 		return true;
2834 	case IOMMU_CAP_DEFERRED_FLUSH:
2835 		return true;
2836 	case IOMMU_CAP_DIRTY_TRACKING: {
2837 		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2838 
2839 		return amd_iommu_hd_support(iommu);
2840 	}
2841 	default:
2842 		break;
2843 	}
2844 
2845 	return false;
2846 }
2847 
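/*
 * Toggle the Host Access/Dirty (HAD) bit in the DTE of every device in the
 * domain, then flush so that subsequent translations update the dirty state.
 */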
2848 static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
2849 					bool enable)
2850 {
2851 	struct protection_domain *pdomain = to_pdomain(domain);
2852 	struct dev_table_entry *dte;
2853 	struct iommu_dev_data *dev_data;
2854 	bool domain_flush = false;
2855 	struct amd_iommu *iommu;
2856 	unsigned long flags;
2857 	u64 new;
2858 
2859 	spin_lock_irqsave(&pdomain->lock, flags);
2860 	if (!(pdomain->dirty_tracking ^ enable)) {
2861 		spin_unlock_irqrestore(&pdomain->lock, flags);
2862 		return 0;
2863 	}
2864 
2865 	list_for_each_entry(dev_data, &pdomain->dev_list, list) {
2866 		spin_lock(&dev_data->dte_lock);
2867 		iommu = get_amd_iommu_from_dev_data(dev_data);
2868 		dte = &get_dev_table(iommu)[dev_data->devid];
2869 		new = dte->data[0];
2870 		new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
2871 		dte->data[0] = new;
2872 		spin_unlock(&dev_data->dte_lock);
2873 
2874 		/* Flush device DTE */
2875 		device_flush_dte(dev_data);
2876 		domain_flush = true;
2877 	}
2878 
2879 	/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
2880 	if (domain_flush)
2881 		amd_iommu_domain_flush_all(pdomain);
2882 
2883 	pdomain->dirty_tracking = enable;
2884 	spin_unlock_irqrestore(&pdomain->lock, flags);
2885 
2886 	return 0;
2887 }
2888 
2889 static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
2890 					  unsigned long iova, size_t size,
2891 					  unsigned long flags,
2892 					  struct iommu_dirty_bitmap *dirty)
2893 {
2894 	struct protection_domain *pdomain = to_pdomain(domain);
2895 	struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops;
2896 	unsigned long lflags;
2897 
2898 	if (!ops || !ops->read_and_clear_dirty)
2899 		return -EOPNOTSUPP;
2900 
2901 	spin_lock_irqsave(&pdomain->lock, lflags);
2902 	if (!pdomain->dirty_tracking && dirty->bitmap) {
2903 		spin_unlock_irqrestore(&pdomain->lock, lflags);
2904 		return -EINVAL;
2905 	}
2906 	spin_unlock_irqrestore(&pdomain->lock, lflags);
2907 
2908 	return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
2909 }
2910 
2911 static void amd_iommu_get_resv_regions(struct device *dev,
2912 				       struct list_head *head)
2913 {
2914 	struct iommu_resv_region *region;
2915 	struct unity_map_entry *entry;
2916 	struct amd_iommu *iommu;
2917 	struct amd_iommu_pci_seg *pci_seg;
2918 	int devid, sbdf;
2919 
2920 	sbdf = get_device_sbdf_id(dev);
2921 	if (sbdf < 0)
2922 		return;
2923 
2924 	devid = PCI_SBDF_TO_DEVID(sbdf);
2925 	iommu = get_amd_iommu_from_dev(dev);
2926 	pci_seg = iommu->pci_seg;
2927 
2928 	list_for_each_entry(entry, &pci_seg->unity_map, list) {
2929 		int type, prot = 0;
2930 		size_t length;
2931 
2932 		if (devid < entry->devid_start || devid > entry->devid_end)
2933 			continue;
2934 
2935 		type   = IOMMU_RESV_DIRECT;
2936 		length = entry->address_end - entry->address_start;
2937 		if (entry->prot & IOMMU_PROT_IR)
2938 			prot |= IOMMU_READ;
2939 		if (entry->prot & IOMMU_PROT_IW)
2940 			prot |= IOMMU_WRITE;
2941 		if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
2942 			/* Exclusion range */
2943 			type = IOMMU_RESV_RESERVED;
2944 
2945 		region = iommu_alloc_resv_region(entry->address_start,
2946 						 length, prot, type,
2947 						 GFP_KERNEL);
2948 		if (!region) {
2949 			dev_err(dev, "Out of memory allocating dm-regions\n");
2950 			return;
2951 		}
2952 		list_add_tail(&region->list, head);
2953 	}
2954 
2955 	region = iommu_alloc_resv_region(MSI_RANGE_START,
2956 					 MSI_RANGE_END - MSI_RANGE_START + 1,
2957 					 0, IOMMU_RESV_MSI, GFP_KERNEL);
2958 	if (!region)
2959 		return;
2960 	list_add_tail(&region->list, head);
2961 
2962 	if (amd_iommu_ht_range_ignore())
2963 		return;
2964 
2965 	region = iommu_alloc_resv_region(HT_RANGE_START,
2966 					 HT_RANGE_END - HT_RANGE_START + 1,
2967 					 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
2968 	if (!region)
2969 		return;
2970 	list_add_tail(&region->list, head);
2971 }
2972 
2973 static bool amd_iommu_is_attach_deferred(struct device *dev)
2974 {
2975 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2976 
2977 	return dev_data->defer_attach;
2978 }
2979 
2980 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
2981 {
2982 	struct protection_domain *dom = to_pdomain(domain);
2983 	unsigned long flags;
2984 
2985 	spin_lock_irqsave(&dom->lock, flags);
2986 	amd_iommu_domain_flush_all(dom);
2987 	spin_unlock_irqrestore(&dom->lock, flags);
2988 }
2989 
2990 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
2991 				 struct iommu_iotlb_gather *gather)
2992 {
2993 	struct protection_domain *dom = to_pdomain(domain);
2994 	unsigned long flags;
2995 
2996 	spin_lock_irqsave(&dom->lock, flags);
2997 	amd_iommu_domain_flush_pages(dom, gather->start,
2998 				     gather->end - gather->start + 1);
2999 	spin_unlock_irqrestore(&dom->lock, flags);
3000 }
3001 
3002 static int amd_iommu_def_domain_type(struct device *dev)
3003 {
3004 	struct iommu_dev_data *dev_data;
3005 
3006 	dev_data = dev_iommu_priv_get(dev);
3007 	if (!dev_data)
3008 		return 0;
3009 
3010 	/* Always use DMA domain for untrusted device */
3011 	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
3012 		return IOMMU_DOMAIN_DMA;
3013 
3014 	/*
3015 	 * Do not identity map IOMMUv2 capable devices when:
3016 	 *  - memory encryption is active, because some of those devices
3017 	 *    (AMD GPUs) don't have the encryption bit in their DMA-mask
3018 	 *    and require remapping.
3019 	 *  - SNP is enabled, because it prohibits DTE[Mode]=0.
3020 	 */
3021 	if (pdev_pasid_supported(dev_data) &&
3022 	    !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
3023 	    !amd_iommu_snp_en) {
3024 		return IOMMU_DOMAIN_IDENTITY;
3025 	}
3026 
3027 	return 0;
3028 }
3029 
3030 static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
3031 {
3032 	/* IOMMU_PTE_FC is always set */
3033 	return true;
3034 }
3035 
3036 static const struct iommu_dirty_ops amd_dirty_ops = {
3037 	.set_dirty_tracking = amd_iommu_set_dirty_tracking,
3038 	.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
3039 };
3040 
3041 const struct iommu_ops amd_iommu_ops = {
3042 	.capable = amd_iommu_capable,
3043 	.blocked_domain = &blocked_domain,
3044 	.release_domain = &release_domain,
3045 	.identity_domain = &identity_domain.domain,
3046 	.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
3047 	.domain_alloc_sva = amd_iommu_domain_alloc_sva,
3048 	.probe_device = amd_iommu_probe_device,
3049 	.release_device = amd_iommu_release_device,
3050 	.device_group = amd_iommu_device_group,
3051 	.get_resv_regions = amd_iommu_get_resv_regions,
3052 	.is_attach_deferred = amd_iommu_is_attach_deferred,
3053 	.def_domain_type = amd_iommu_def_domain_type,
3054 	.page_response = amd_iommu_page_response,
3055 	.default_domain_ops = &(const struct iommu_domain_ops) {
3056 		.attach_dev	= amd_iommu_attach_device,
3057 		.map_pages	= amd_iommu_map_pages,
3058 		.unmap_pages	= amd_iommu_unmap_pages,
3059 		.iotlb_sync_map	= amd_iommu_iotlb_sync_map,
3060 		.iova_to_phys	= amd_iommu_iova_to_phys,
3061 		.flush_iotlb_all = amd_iommu_flush_iotlb_all,
3062 		.iotlb_sync	= amd_iommu_iotlb_sync,
3063 		.free		= amd_iommu_domain_free,
3064 		.enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
3065 	}
3066 };
3067 
3068 #ifdef CONFIG_IRQ_REMAP
3069 
3070 /*****************************************************************************
3071  *
3072  * Interrupt Remapping Implementation
3073  *
3074  *****************************************************************************/
3075 
3076 static struct irq_chip amd_ir_chip;
3077 static DEFINE_SPINLOCK(iommu_table_lock);
3078 
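/*
 * Invalidate the cached interrupt remapping table for @devid and wait for
 * completion. This is a no-op when IRT caching is disabled (irtcachedis).
 */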
3079 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
3080 {
3081 	int ret;
3082 	u64 data;
3083 	unsigned long flags;
3084 	struct iommu_cmd cmd, cmd2;
3085 
3086 	if (iommu->irtcachedis_enabled)
3087 		return;
3088 
3089 	build_inv_irt(&cmd, devid);
3090 	data = atomic64_inc_return(&iommu->cmd_sem_val);
3091 	build_completion_wait(&cmd2, iommu, data);
3092 
3093 	raw_spin_lock_irqsave(&iommu->lock, flags);
3094 	ret = __iommu_queue_command_sync(iommu, &cmd, true);
3095 	if (ret)
3096 		goto out;
3097 	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
3098 	if (ret)
3099 		goto out;
3100 	wait_on_sem(iommu, data);
3101 out:
3102 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
3103 }
3104 
3105 static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
3106 {
3107 	if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K)
3108 		return DTE_INTTABLEN_2K;
3109 	return DTE_INTTABLEN_512;
3110 }
3111 
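/*
 * Point the interrupt-remapping fields of the DTE at @table and enable
 * remapping; the per-device DTE lock, when available, serializes against
 * concurrent DTE updates.
 */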
3112 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
3113 			      struct irq_remap_table *table)
3114 {
3115 	u64 new;
3116 	struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
3117 	struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
3118 
3119 	if (dev_data)
3120 		spin_lock(&dev_data->dte_lock);
3121 
3122 	new = READ_ONCE(dte->data[2]);
3123 	new &= ~DTE_IRQ_PHYS_ADDR_MASK;
3124 	new |= iommu_virt_to_phys(table->table);
3125 	new |= DTE_IRQ_REMAP_INTCTL;
3126 	new |= iommu_get_int_tablen(dev_data);
3127 	new |= DTE_IRQ_REMAP_ENABLE;
3128 	WRITE_ONCE(dte->data[2], new);
3129 
3130 	if (dev_data)
3131 		spin_unlock(&dev_data->dte_lock);
3132 }
3133 
3134 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
3135 {
3136 	struct irq_remap_table *table;
3137 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
3138 
3139 	if (WARN_ONCE(!pci_seg->rlookup_table[devid],
3140 		      "%s: no iommu for devid %x:%x\n",
3141 		      __func__, pci_seg->id, devid))
3142 		return NULL;
3143 
3144 	table = pci_seg->irq_lookup_table[devid];
3145 	if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
3146 		      __func__, pci_seg->id, devid))
3147 		return NULL;
3148 
3149 	return table;
3150 }
3151 
3152 static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
3153 {
3154 	struct irq_remap_table *table;
3155 
3156 	table = kzalloc(sizeof(*table), GFP_KERNEL);
3157 	if (!table)
3158 		return NULL;
3159 
3160 	table->table = iommu_alloc_pages_node_sz(
3161 		nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
3162 	if (!table->table) {
3163 		kfree(table);
3164 		return NULL;
3165 	}
3166 	raw_spin_lock_init(&table->lock);
3167 
3168 	return table;
3169 }
3170 
3171 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
3172 				  struct irq_remap_table *table)
3173 {
3174 	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
3175 
3176 	pci_seg->irq_lookup_table[devid] = table;
3177 	set_dte_irq_entry(iommu, devid, table);
3178 	iommu_flush_dte(iommu, devid);
3179 }
3180 
3181 static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
3182 				       void *data)
3183 {
3184 	struct irq_remap_table *table = data;
3185 	struct amd_iommu_pci_seg *pci_seg;
3186 	struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
3187 
3188 	if (!iommu)
3189 		return -EINVAL;
3190 
3191 	pci_seg = iommu->pci_seg;
3192 	pci_seg->irq_lookup_table[alias] = table;
3193 	set_dte_irq_entry(iommu, alias, table);
3194 	iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
3195 
3196 	return 0;
3197 }
3198 
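/* Legacy IRTEs are 32 bits wide; GA mode uses 128-bit entries (struct irte_ga). */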
3199 static inline size_t get_irq_table_size(unsigned int max_irqs)
3200 {
3201 	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3202 		return max_irqs * sizeof(u32);
3203 
3204 	return max_irqs * (sizeof(u64) * 2);
3205 }
3206 
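/*
 * Look up or create the IRQ remapping table for @devid. The table lock is
 * dropped while a new table is allocated, so the lookup is repeated and the
 * freshly allocated table is freed again if another CPU installed one in
 * the meantime.
 */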
3207 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
3208 					       u16 devid, struct pci_dev *pdev,
3209 					       unsigned int max_irqs)
3210 {
3211 	struct irq_remap_table *table = NULL;
3212 	struct irq_remap_table *new_table = NULL;
3213 	struct amd_iommu_pci_seg *pci_seg;
3214 	unsigned long flags;
3215 	int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
3216 	u16 alias;
3217 
3218 	spin_lock_irqsave(&iommu_table_lock, flags);
3219 
3220 	pci_seg = iommu->pci_seg;
3221 	table = pci_seg->irq_lookup_table[devid];
3222 	if (table)
3223 		goto out_unlock;
3224 
3225 	alias = pci_seg->alias_table[devid];
3226 	table = pci_seg->irq_lookup_table[alias];
3227 	if (table) {
3228 		set_remap_table_entry(iommu, devid, table);
3229 		goto out_wait;
3230 	}
3231 	spin_unlock_irqrestore(&iommu_table_lock, flags);
3232 
3233 	/* Nothing there yet, allocate new irq remapping table */
3234 	new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
3235 	if (!new_table)
3236 		return NULL;
3237 
3238 	spin_lock_irqsave(&iommu_table_lock, flags);
3239 
3240 	table = pci_seg->irq_lookup_table[devid];
3241 	if (table)
3242 		goto out_unlock;
3243 
3244 	table = pci_seg->irq_lookup_table[alias];
3245 	if (table) {
3246 		set_remap_table_entry(iommu, devid, table);
3247 		goto out_wait;
3248 	}
3249 
3250 	table = new_table;
3251 	new_table = NULL;
3252 
3253 	if (pdev)
3254 		pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
3255 				       table);
3256 	else
3257 		set_remap_table_entry(iommu, devid, table);
3258 
3259 	if (devid != alias)
3260 		set_remap_table_entry(iommu, alias, table);
3261 
3262 out_wait:
3263 	iommu_completion_wait(iommu);
3264 
3265 out_unlock:
3266 	spin_unlock_irqrestore(&iommu_table_lock, flags);
3267 
3268 	if (new_table) {
3269 		iommu_free_pages(new_table->table);
3270 		kfree(new_table);
3271 	}
3272 	return table;
3273 }
3274 
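/*
 * Find @count consecutive free entries in the remapping table, optionally
 * aligned to the next power of two of @count, and mark them as allocated.
 * Returns the first index, -ENOSPC if no suitable run exists, or -ENODEV
 * if no table could be allocated.
 */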
3275 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
3276 			   bool align, struct pci_dev *pdev,
3277 			   unsigned long max_irqs)
3278 {
3279 	struct irq_remap_table *table;
3280 	int index, c, alignment = 1;
3281 	unsigned long flags;
3282 
3283 	table = alloc_irq_table(iommu, devid, pdev, max_irqs);
3284 	if (!table)
3285 		return -ENODEV;
3286 
3287 	if (align)
3288 		alignment = roundup_pow_of_two(count);
3289 
3290 	raw_spin_lock_irqsave(&table->lock, flags);
3291 
3292 	/* Scan table for free entries */
3293 	for (index = ALIGN(table->min_index, alignment), c = 0;
3294 	     index < max_irqs;) {
3295 		if (!iommu->irte_ops->is_allocated(table, index)) {
3296 			c += 1;
3297 		} else {
3298 			c     = 0;
3299 			index = ALIGN(index + 1, alignment);
3300 			continue;
3301 		}
3302 
3303 		if (c == count)	{
3304 			for (; c != 0; --c)
3305 				iommu->irte_ops->set_allocated(table, index - c + 1);
3306 
3307 			index -= count - 1;
3308 			goto out;
3309 		}
3310 
3311 		index++;
3312 	}
3313 
3314 	index = -ENOSPC;
3315 
3316 out:
3317 	raw_spin_unlock_irqrestore(&table->lock, flags);
3318 
3319 	return index;
3320 }
3321 
3322 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3323 			    struct irte_ga *irte)
3324 {
3325 	struct irq_remap_table *table;
3326 	struct irte_ga *entry;
3327 	unsigned long flags;
3328 	u128 old;
3329 
3330 	table = get_irq_table(iommu, devid);
3331 	if (!table)
3332 		return -ENOMEM;
3333 
3334 	raw_spin_lock_irqsave(&table->lock, flags);
3335 
3336 	entry = (struct irte_ga *)table->table;
3337 	entry = &entry[index];
3338 
3339 	/*
3340 	 * We use cmpxchg16 to atomically update the 128-bit IRTE,
3341 	 * and it cannot be updated by the hardware or other processors
3342 	 * behind us, so the return value of cmpxchg16 should be the
3343 	 * same as the old value.
3344 	 */
3345 	old = entry->irte;
3346 	WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));
3347 
3348 	raw_spin_unlock_irqrestore(&table->lock, flags);
3349 
3350 	return 0;
3351 }
3352 
3353 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3354 			  struct irte_ga *irte)
3355 {
3356 	bool ret;
3357 
3358 	ret = __modify_irte_ga(iommu, devid, index, irte);
3359 	if (ret)
3360 		return ret;
3361 
3362 	iommu_flush_irt_and_complete(iommu, devid);
3363 
3364 	return 0;
3365 }
3366 
3367 static int modify_irte(struct amd_iommu *iommu,
3368 		       u16 devid, int index, union irte *irte)
3369 {
3370 	struct irq_remap_table *table;
3371 	unsigned long flags;
3372 
3373 	table = get_irq_table(iommu, devid);
3374 	if (!table)
3375 		return -ENOMEM;
3376 
3377 	raw_spin_lock_irqsave(&table->lock, flags);
3378 	table->table[index] = irte->val;
3379 	raw_spin_unlock_irqrestore(&table->lock, flags);
3380 
3381 	iommu_flush_irt_and_complete(iommu, devid);
3382 
3383 	return 0;
3384 }
3385 
3386 static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
3387 {
3388 	struct irq_remap_table *table;
3389 	unsigned long flags;
3390 
3391 	table = get_irq_table(iommu, devid);
3392 	if (!table)
3393 		return;
3394 
3395 	raw_spin_lock_irqsave(&table->lock, flags);
3396 	iommu->irte_ops->clear_allocated(table, index);
3397 	raw_spin_unlock_irqrestore(&table->lock, flags);
3398 
3399 	iommu_flush_irt_and_complete(iommu, devid);
3400 }
3401 
3402 static void irte_prepare(void *entry,
3403 			 u32 delivery_mode, bool dest_mode,
3404 			 u8 vector, u32 dest_apicid, int devid)
3405 {
3406 	union irte *irte = (union irte *) entry;
3407 
3408 	irte->val                = 0;
3409 	irte->fields.vector      = vector;
3410 	irte->fields.int_type    = delivery_mode;
3411 	irte->fields.destination = dest_apicid;
3412 	irte->fields.dm          = dest_mode;
3413 	irte->fields.valid       = 1;
3414 }
3415 
3416 static void irte_ga_prepare(void *entry,
3417 			    u32 delivery_mode, bool dest_mode,
3418 			    u8 vector, u32 dest_apicid, int devid)
3419 {
3420 	struct irte_ga *irte = (struct irte_ga *) entry;
3421 
3422 	irte->lo.val                      = 0;
3423 	irte->hi.val                      = 0;
3424 	irte->lo.fields_remap.int_type    = delivery_mode;
3425 	irte->lo.fields_remap.dm          = dest_mode;
3426 	irte->hi.fields.vector            = vector;
3427 	irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
3428 	irte->hi.fields.destination       = APICID_TO_IRTE_DEST_HI(dest_apicid);
3429 	irte->lo.fields_remap.valid       = 1;
3430 }
3431 
3432 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3433 {
3434 	union irte *irte = (union irte *) entry;
3435 
3436 	irte->fields.valid = 1;
3437 	modify_irte(iommu, devid, index, irte);
3438 }
3439 
3440 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3441 {
3442 	struct irte_ga *irte = (struct irte_ga *) entry;
3443 
3444 	irte->lo.fields_remap.valid = 1;
3445 	modify_irte_ga(iommu, devid, index, irte);
3446 }
3447 
3448 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3449 {
3450 	union irte *irte = (union irte *) entry;
3451 
3452 	irte->fields.valid = 0;
3453 	modify_irte(iommu, devid, index, irte);
3454 }
3455 
3456 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3457 {
3458 	struct irte_ga *irte = (struct irte_ga *) entry;
3459 
3460 	irte->lo.fields_remap.valid = 0;
3461 	modify_irte_ga(iommu, devid, index, irte);
3462 }
3463 
3464 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3465 			      u8 vector, u32 dest_apicid)
3466 {
3467 	union irte *irte = (union irte *) entry;
3468 
3469 	irte->fields.vector = vector;
3470 	irte->fields.destination = dest_apicid;
3471 	modify_irte(iommu, devid, index, irte);
3472 }
3473 
3474 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3475 				 u8 vector, u32 dest_apicid)
3476 {
3477 	struct irte_ga *irte = (struct irte_ga *) entry;
3478 
3479 	if (!irte->lo.fields_remap.guest_mode) {
3480 		irte->hi.fields.vector = vector;
3481 		irte->lo.fields_remap.destination =
3482 					APICID_TO_IRTE_DEST_LO(dest_apicid);
3483 		irte->hi.fields.destination =
3484 					APICID_TO_IRTE_DEST_HI(dest_apicid);
3485 		modify_irte_ga(iommu, devid, index, irte);
3486 	}
3487 }
3488 
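/*
 * Slot allocation tracking differs between the two formats: a 32-bit IRTE
 * is marked in use by writing the reserved IRTE_ALLOCATED pattern (and is
 * considered allocated while non-zero), whereas a 128-bit IRTE is marked
 * by setting its vector field to 0xff (and is considered allocated while
 * the vector is non-zero).
 */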
3489 #define IRTE_ALLOCATED (~1U)
3490 static void irte_set_allocated(struct irq_remap_table *table, int index)
3491 {
3492 	table->table[index] = IRTE_ALLOCATED;
3493 }
3494 
3495 static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3496 {
3497 	struct irte_ga *ptr = (struct irte_ga *)table->table;
3498 	struct irte_ga *irte = &ptr[index];
3499 
3500 	memset(&irte->lo.val, 0, sizeof(u64));
3501 	memset(&irte->hi.val, 0, sizeof(u64));
3502 	irte->hi.fields.vector = 0xff;
3503 }
3504 
3505 static bool irte_is_allocated(struct irq_remap_table *table, int index)
3506 {
3507 	union irte *ptr = (union irte *)table->table;
3508 	union irte *irte = &ptr[index];
3509 
3510 	return irte->val != 0;
3511 }
3512 
3513 static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3514 {
3515 	struct irte_ga *ptr = (struct irte_ga *)table->table;
3516 	struct irte_ga *irte = &ptr[index];
3517 
3518 	return irte->hi.fields.vector != 0;
3519 }
3520 
3521 static void irte_clear_allocated(struct irq_remap_table *table, int index)
3522 {
3523 	table->table[index] = 0;
3524 }
3525 
3526 static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3527 {
3528 	struct irte_ga *ptr = (struct irte_ga *)table->table;
3529 	struct irte_ga *irte = &ptr[index];
3530 
3531 	memset(&irte->lo.val, 0, sizeof(u64));
3532 	memset(&irte->hi.val, 0, sizeof(u64));
3533 }
3534 
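/*
 * Return the segment/device ID (SBDF) describing the interrupt source in
 * @info, or -1 for unsupported allocation types.
 */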
3535 static int get_devid(struct irq_alloc_info *info)
3536 {
3537 	switch (info->type) {
3538 	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3539 		return get_ioapic_devid(info->devid);
3540 	case X86_IRQ_ALLOC_TYPE_HPET:
3541 		return get_hpet_devid(info->devid);
3542 	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3543 	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3544 		return get_device_sbdf_id(msi_desc_to_dev(info->desc));
3545 	default:
3546 		WARN_ON_ONCE(1);
3547 		return -1;
3548 	}
3549 }
3550 
3551 struct irq_remap_ops amd_iommu_irq_ops = {
3552 	.prepare		= amd_iommu_prepare,
3553 	.enable			= amd_iommu_enable,
3554 	.disable		= amd_iommu_disable,
3555 	.reenable		= amd_iommu_reenable,
3556 	.enable_faulting	= amd_iommu_enable_faulting,
3557 };
3558 
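/*
 * Compose the remappable MSI address/data pair for an IRTE: msg->data
 * carries the IRTE index used by the IOMMU to look up the remapped
 * interrupt, while the address is the standard x86 MSI base address.
 */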
3559 static void fill_msi_msg(struct msi_msg *msg, u32 index)
3560 {
3561 	msg->data = index;
3562 	msg->address_lo = 0;
3563 	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
3564 	/*
3565 	 * The struct msi_msg.dest_mode_logical is used to set the DM bit
3566 	 * in the MSI Message Address Register. For devices with 2K interrupt
3567 	 * remapping support, this bit must be set to 1 regardless of the actual
3568 	 * destination mode, which is signified by IRTE[DM].
3569 	 */
3570 	if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
3571 		msg->arch_addr_lo.dest_mode_logical = true;
3572 	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
3573 }
3574 
3575 static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3576 				       struct irq_cfg *irq_cfg,
3577 				       struct irq_alloc_info *info,
3578 				       int devid, int index, int sub_handle)
3579 {
3580 	struct irq_2_irte *irte_info = &data->irq_2_irte;
3581 	struct amd_iommu *iommu = data->iommu;
3582 
3583 	if (!iommu)
3584 		return;
3585 
3586 	data->irq_2_irte.devid = devid;
3587 	data->irq_2_irte.index = index + sub_handle;
3588 	iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
3589 				 apic->dest_mode_logical, irq_cfg->vector,
3590 				 irq_cfg->dest_apicid, devid);
3591 
3592 	switch (info->type) {
3593 	case X86_IRQ_ALLOC_TYPE_IOAPIC:
3594 	case X86_IRQ_ALLOC_TYPE_HPET:
3595 	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3596 	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3597 		fill_msi_msg(&data->msi_entry, irte_info->index);
3598 		break;
3599 
3600 	default:
3601 		BUG_ON(1);
3602 		break;
3603 	}
3604 }
3605 
3606 struct amd_irte_ops irte_32_ops = {
3607 	.prepare = irte_prepare,
3608 	.activate = irte_activate,
3609 	.deactivate = irte_deactivate,
3610 	.set_affinity = irte_set_affinity,
3611 	.set_allocated = irte_set_allocated,
3612 	.is_allocated = irte_is_allocated,
3613 	.clear_allocated = irte_clear_allocated,
3614 };
3615 
3616 struct amd_irte_ops irte_128_ops = {
3617 	.prepare = irte_ga_prepare,
3618 	.activate = irte_ga_activate,
3619 	.deactivate = irte_ga_deactivate,
3620 	.set_affinity = irte_ga_set_affinity,
3621 	.set_allocated = irte_ga_set_allocated,
3622 	.is_allocated = irte_ga_is_allocated,
3623 	.clear_allocated = irte_ga_clear_allocated,
3624 };
3625 
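/*
 * Allocate remapped interrupts: look up the IOMMU that owns the device,
 * allocate the parent (vector domain) IRQs, reserve IRTE slots (the first
 * 32 indexes of an IOAPIC table are kept for IOAPIC pins), and attach a
 * struct amd_ir_data to each irq_data.
 */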
3626 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3627 			       unsigned int nr_irqs, void *arg)
3628 {
3629 	struct irq_alloc_info *info = arg;
3630 	struct irq_data *irq_data;
3631 	struct amd_ir_data *data = NULL;
3632 	struct amd_iommu *iommu;
3633 	struct irq_cfg *cfg;
3634 	struct iommu_dev_data *dev_data;
3635 	unsigned long max_irqs;
3636 	int i, ret, devid, seg, sbdf;
3637 	int index;
3638 
3639 	if (!info)
3640 		return -EINVAL;
3641 	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
3642 		return -EINVAL;
3643 
3644 	sbdf = get_devid(info);
3645 	if (sbdf < 0)
3646 		return -EINVAL;
3647 
3648 	seg = PCI_SBDF_TO_SEGID(sbdf);
3649 	devid = PCI_SBDF_TO_DEVID(sbdf);
3650 	iommu = __rlookup_amd_iommu(seg, devid);
3651 	if (!iommu)
3652 		return -EINVAL;
3653 
3654 	dev_data = search_dev_data(iommu, devid);
3655 	max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512;
3656 
3657 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3658 	if (ret < 0)
3659 		return ret;
3660 
3661 	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
3662 		struct irq_remap_table *table;
3663 
3664 		table = alloc_irq_table(iommu, devid, NULL, max_irqs);
3665 		if (table) {
3666 			if (!table->min_index) {
3667 				/*
3668 				 * Keep the first 32 indexes free for IOAPIC
3669 				 * interrupts.
3670 				 */
3671 				table->min_index = 32;
3672 				for (i = 0; i < 32; ++i)
3673 					iommu->irte_ops->set_allocated(table, i);
3674 			}
3675 			WARN_ON(table->min_index != 32);
3676 			index = info->ioapic.pin;
3677 		} else {
3678 			index = -ENOMEM;
3679 		}
3680 	} else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
3681 		   info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
3682 		bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
3683 
3684 		index = alloc_irq_index(iommu, devid, nr_irqs, align,
3685 					msi_desc_to_pci_dev(info->desc),
3686 					max_irqs);
3687 	} else {
3688 		index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL,
3689 					max_irqs);
3690 	}
3691 
3692 	if (index < 0) {
3693 		pr_warn("Failed to allocate IRTE\n");
3694 		ret = index;
3695 		goto out_free_parent;
3696 	}
3697 
3698 	for (i = 0; i < nr_irqs; i++) {
3699 		irq_data = irq_domain_get_irq_data(domain, virq + i);
3700 		cfg = irq_data ? irqd_cfg(irq_data) : NULL;
3701 		if (!cfg) {
3702 			ret = -EINVAL;
3703 			goto out_free_data;
3704 		}
3705 
3706 		ret = -ENOMEM;
3707 		data = kzalloc(sizeof(*data), GFP_KERNEL);
3708 		if (!data)
3709 			goto out_free_data;
3710 
3711 		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3712 			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
3713 		else
3714 			data->entry = kzalloc(sizeof(struct irte_ga),
3715 						     GFP_KERNEL);
3716 		if (!data->entry) {
3717 			kfree(data);
3718 			goto out_free_data;
3719 		}
3720 
3721 		data->iommu = iommu;
3722 		irq_data->hwirq = (devid << 16) + i;
3723 		irq_data->chip_data = data;
3724 		irq_data->chip = &amd_ir_chip;
3725 		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3726 	}
3727 
3728 	return 0;
3729 
3730 out_free_data:
3731 	for (i--; i >= 0; i--) {
3732 		irq_data = irq_domain_get_irq_data(domain, virq + i);
3733 		if (irq_data)
3734 			kfree(irq_data->chip_data);
3735 	}
3736 	for (i = 0; i < nr_irqs; i++)
3737 		free_irte(iommu, devid, index + i);
3738 out_free_parent:
3739 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
3740 	return ret;
3741 }
3742 
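/*
 * Undo irq_remapping_alloc(): free each IRTE slot and the per-IRQ
 * amd_ir_data, then release the parent IRQs.
 */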
3743 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
3744 			       unsigned int nr_irqs)
3745 {
3746 	struct irq_2_irte *irte_info;
3747 	struct irq_data *irq_data;
3748 	struct amd_ir_data *data;
3749 	int i;
3750 
3751 	for (i = 0; i < nr_irqs; i++) {
3752 		irq_data = irq_domain_get_irq_data(domain, virq + i);
3753 		if (irq_data && irq_data->chip_data) {
3754 			data = irq_data->chip_data;
3755 			irte_info = &data->irq_2_irte;
3756 			free_irte(data->iommu, irte_info->devid, irte_info->index);
3757 			kfree(data->entry);
3758 			kfree(data);
3759 		}
3760 	}
3761 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
3762 }
3763 
3764 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3765 			       struct amd_ir_data *ir_data,
3766 			       struct irq_2_irte *irte_info,
3767 			       struct irq_cfg *cfg);
3768 
3769 static int irq_remapping_activate(struct irq_domain *domain,
3770 				  struct irq_data *irq_data, bool reserve)
3771 {
3772 	struct amd_ir_data *data = irq_data->chip_data;
3773 	struct irq_2_irte *irte_info = &data->irq_2_irte;
3774 	struct amd_iommu *iommu = data->iommu;
3775 	struct irq_cfg *cfg = irqd_cfg(irq_data);
3776 
3777 	if (!iommu)
3778 		return 0;
3779 
3780 	iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
3781 				  irte_info->index);
3782 	amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
3783 	return 0;
3784 }
3785 
3786 static void irq_remapping_deactivate(struct irq_domain *domain,
3787 				     struct irq_data *irq_data)
3788 {
3789 	struct amd_ir_data *data = irq_data->chip_data;
3790 	struct irq_2_irte *irte_info = &data->irq_2_irte;
3791 	struct amd_iommu *iommu = data->iommu;
3792 
3793 	if (iommu)
3794 		iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
3795 					    irte_info->index);
3796 }
3797 
3798 static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
3799 				enum irq_domain_bus_token bus_token)
3800 {
3801 	struct amd_iommu *iommu;
3802 	int devid = -1;
3803 
3804 	if (!amd_iommu_irq_remap)
3805 		return 0;
3806 
3807 	if (x86_fwspec_is_ioapic(fwspec))
3808 		devid = get_ioapic_devid(fwspec->param[0]);
3809 	else if (x86_fwspec_is_hpet(fwspec))
3810 		devid = get_hpet_devid(fwspec->param[0]);
3811 
3812 	if (devid < 0)
3813 		return 0;
3814 	iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
3815 
3816 	return iommu && iommu->ir_domain == d;
3817 }
3818 
3819 static const struct irq_domain_ops amd_ir_domain_ops = {
3820 	.select = irq_remapping_select,
3821 	.alloc = irq_remapping_alloc,
3822 	.free = irq_remapping_free,
3823 	.activate = irq_remapping_activate,
3824 	.deactivate = irq_remapping_deactivate,
3825 };
3826 
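/*
 * Update the Destination, IsRun and GALogIntr fields of a guest-mode
 * (vAPIC) IRTE; callers are responsible for checking guest_mode and for
 * any required IRT flush.
 */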
3827 static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu,
3828 				  bool ga_log_intr)
3829 {
3830 	if (cpu >= 0) {
3831 		entry->lo.fields_vapic.destination =
3832 					APICID_TO_IRTE_DEST_LO(cpu);
3833 		entry->hi.fields.destination =
3834 					APICID_TO_IRTE_DEST_HI(cpu);
3835 		entry->lo.fields_vapic.is_run = true;
3836 		entry->lo.fields_vapic.ga_log_intr = false;
3837 	} else {
3838 		entry->lo.fields_vapic.is_run = false;
3839 		entry->lo.fields_vapic.ga_log_intr = ga_log_intr;
3840 	}
3841 }
3842 
3843 /*
3844  * Update the pCPU information for an IRTE that is configured to post IRQs to
3845  * a vCPU, without issuing an IOMMU invalidation for the IRTE.
3846  *
3847  * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination
3848  * with the pCPU's APIC ID, set IsRun, and clear GALogIntr.  If the vCPU isn't
3849  * associated with a pCPU (@cpu < 0), clear IsRun and set/clear GALogIntr based
3850  * on input from the caller (e.g. KVM only requests GALogIntr when the vCPU is
3851  * blocking and requires a notification wake event).  I.e. treat vCPUs that are
3852  * associated with a pCPU as running.  This API is intended to be used when a
3853  * vCPU is scheduled in/out (or stops running for any reason), to do a fast
3854  * update of IsRun, GALogIntr, and (conditionally) Destination.
3855  *
3856  * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached
3857  * and thus don't require an invalidation to ensure the IOMMU consumes fresh
3858  * information.
3859  */
3860 int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
3861 {
3862 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3863 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3864 
3865 	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
3866 		return -EINVAL;
3867 
3868 	if (!entry || !entry->lo.fields_vapic.guest_mode)
3869 		return 0;
3870 
3871 	if (!ir_data->iommu)
3872 		return -ENODEV;
3873 
3874 	__amd_iommu_update_ga(entry, cpu, ga_log_intr);
3875 
3876 	return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3877 				ir_data->irq_2_irte.index, entry);
3878 }
3879 EXPORT_SYMBOL(amd_iommu_update_ga);
3880 
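/*
 * Switch an IRTE to guest (vAPIC) mode: rebuild it in the fields_vapic
 * layout with the GA root pointer, guest vector and GA tag from @data,
 * preserving only the valid bit, then update the live entry and flush
 * the IRT.
 */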
3881 int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
3882 {
3883 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3884 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3885 	u64 valid;
3886 
3887 	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
3888 		return -EINVAL;
3889 
3890 	if (!entry)
3891 		return 0;
3892 
3893 	valid = entry->lo.fields_vapic.valid;
3894 
3895 	entry->lo.val = 0;
3896 	entry->hi.val = 0;
3897 
3898 	entry->lo.fields_vapic.valid       = valid;
3899 	entry->lo.fields_vapic.guest_mode  = 1;
3900 	entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
3901 	entry->hi.fields.vector            = ir_data->ga_vector;
3902 	entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
3903 
3904 	__amd_iommu_update_ga(entry, cpu, ga_log_intr);
3905 
3906 	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3907 			      ir_data->irq_2_irte.index, entry);
3908 }
3909 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
3910 
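/*
 * Switch an IRTE back to ordinary remapped mode, restoring the host
 * vector and destination from the cached irq_cfg, then update the live
 * entry and flush the IRT.
 */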
3911 int amd_iommu_deactivate_guest_mode(void *data)
3912 {
3913 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3914 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3915 	struct irq_cfg *cfg = ir_data->cfg;
3916 	u64 valid;
3917 
3918 	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
3919 		return -EINVAL;
3920 
3921 	if (!entry || !entry->lo.fields_vapic.guest_mode)
3922 		return 0;
3923 
3924 	valid = entry->lo.fields_remap.valid;
3925 
3926 	entry->lo.val = 0;
3927 	entry->hi.val = 0;
3928 
3929 	entry->lo.fields_remap.valid       = valid;
3930 	entry->lo.fields_remap.dm          = apic->dest_mode_logical;
3931 	entry->lo.fields_remap.int_type    = APIC_DELIVERY_MODE_FIXED;
3932 	entry->hi.fields.vector            = cfg->vector;
3933 	entry->lo.fields_remap.destination =
3934 				APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
3935 	entry->hi.fields.destination =
3936 				APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
3937 
3938 	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
3939 			      ir_data->irq_2_irte.index, entry);
3940 }
3941 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
3942 
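/*
 * The .irq_set_vcpu_affinity callback of amd_ir_chip: switches an
 * interrupt between guest posted mode and host remapped mode based on
 * @info (a struct amd_iommu_pi_data, or NULL to deactivate guest mode).
 */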
3943 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info)
3944 {
3945 	int ret;
3946 	struct amd_iommu_pi_data *pi_data = info;
3947 	struct amd_ir_data *ir_data = data->chip_data;
3948 	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
3949 	struct iommu_dev_data *dev_data;
3950 
3951 	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
3952 		return -EINVAL;
3953 
3954 	if (ir_data->iommu == NULL)
3955 		return -EINVAL;
3956 
3957 	dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
3958 
3959 	/*
3960 	 * Note: This device has never been set up for guest mode;
3961 	 * we should not modify the IRTE.
3962 	 */
3963 	if (!dev_data || !dev_data->use_vapic)
3964 		return -EINVAL;
3965 
3966 	ir_data->cfg = irqd_cfg(data);
3967 
3968 	if (pi_data) {
3969 		pi_data->ir_data = ir_data;
3970 
3971 		ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12);
3972 		ir_data->ga_vector = pi_data->vector;
3973 		ir_data->ga_tag = pi_data->ga_tag;
3974 		if (pi_data->is_guest_mode)
3975 			ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu,
3976 							    pi_data->ga_log_intr);
3977 		else
3978 			ret = amd_iommu_deactivate_guest_mode(ir_data);
3979 	} else {
3980 		ret = amd_iommu_deactivate_guest_mode(ir_data);
3981 	}
3982 
3983 	return ret;
3984 }
3985 
3986 
3987 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3988 			       struct amd_ir_data *ir_data,
3989 			       struct irq_2_irte *irte_info,
3990 			       struct irq_cfg *cfg)
3991 {
3992 
3993 	/*
3994 	 * Atomically update the IRTE with the new destination and vector,
3995 	 * then flush the interrupt entry cache.
3996 	 */
3997 	iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
3998 				      irte_info->index, cfg->vector,
3999 				      cfg->dest_apicid);
4000 }
4001 
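/*
 * The .irq_set_affinity callback of amd_ir_chip: let the parent vector
 * domain pick the new vector/destination, then update the IRTE and
 * schedule cleanup of the old vector.
 */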
4002 static int amd_ir_set_affinity(struct irq_data *data,
4003 			       const struct cpumask *mask, bool force)
4004 {
4005 	struct amd_ir_data *ir_data = data->chip_data;
4006 	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
4007 	struct irq_cfg *cfg = irqd_cfg(data);
4008 	struct irq_data *parent = data->parent_data;
4009 	struct amd_iommu *iommu = ir_data->iommu;
4010 	int ret;
4011 
4012 	if (!iommu)
4013 		return -ENODEV;
4014 
4015 	ret = parent->chip->irq_set_affinity(parent, mask, force);
4016 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
4017 		return ret;
4018 
4019 	amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
4020 	/*
4021 	 * After this point, all the interrupts will start arriving
4022 	 * at the new destination. So, time to clean up the previous
4023 	 * vector allocation.
4024 	 */
4025 	vector_schedule_cleanup(cfg);
4026 
4027 	return IRQ_SET_MASK_OK_DONE;
4028 }
4029 
4030 static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
4031 {
4032 	struct amd_ir_data *ir_data = irq_data->chip_data;
4033 
4034 	*msg = ir_data->msi_entry;
4035 }
4036 
4037 static struct irq_chip amd_ir_chip = {
4038 	.name			= "AMD-IR",
4039 	.irq_ack		= apic_ack_irq,
4040 	.irq_set_affinity	= amd_ir_set_affinity,
4041 	.irq_set_vcpu_affinity	= amd_ir_set_vcpu_affinity,
4042 	.irq_compose_msi_msg	= ir_compose_msi_msg,
4043 };
4044 
4045 static const struct msi_parent_ops amdvi_msi_parent_ops = {
4046 	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
4047 	.bus_select_token	= DOMAIN_BUS_AMDVI,
4048 	.bus_select_mask	= MATCH_PCI_MSI,
4049 	.prefix			= "IR-",
4050 	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
4051 };
4052 
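/*
 * Create the per-IOMMU interrupt remapping domain, registered as an MSI
 * parent domain with isolated-MSI support.
 */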
4053 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
4054 {
4055 	struct irq_domain_info info = {
4056 		.fwnode		= irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index),
4057 		.ops		= &amd_ir_domain_ops,
4058 		.domain_flags	= IRQ_DOMAIN_FLAG_ISOLATED_MSI,
4059 		.host_data	= iommu,
4060 		.parent		= arch_get_ir_parent_domain(),
4061 	};
4062 
4063 	if (!info.fwnode)
4064 		return -ENOMEM;
4065 
4066 	iommu->ir_domain = msi_create_parent_irq_domain(&info, &amdvi_msi_parent_ops);
4067 	if (!iommu->ir_domain) {
4068 		irq_domain_free_fwnode(info.fwnode);
4069 		return -ENOMEM;
4070 	}
4071 	return 0;
4072 }
4073 #endif
4074