1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 * Leo Duran <leo.duran@amd.com>
6 */
7
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
9 #define dev_fmt(fmt) pr_fmt(fmt)
10
11 #include <linux/ratelimit.h>
12 #include <linux/pci.h>
13 #include <linux/acpi.h>
14 #include <linux/pci-ats.h>
15 #include <linux/bitmap.h>
16 #include <linux/slab.h>
17 #include <linux/string_choices.h>
18 #include <linux/debugfs.h>
19 #include <linux/scatterlist.h>
20 #include <linux/dma-map-ops.h>
21 #include <linux/dma-direct.h>
22 #include <linux/idr.h>
23 #include <linux/iommu-helper.h>
24 #include <linux/delay.h>
25 #include <linux/amd-iommu.h>
26 #include <linux/notifier.h>
27 #include <linux/export.h>
28 #include <linux/irq.h>
29 #include <linux/irqchip/irq-msi-lib.h>
30 #include <linux/msi.h>
31 #include <linux/irqdomain.h>
32 #include <linux/percpu.h>
33 #include <linux/cc_platform.h>
34 #include <asm/irq_remapping.h>
35 #include <asm/io_apic.h>
36 #include <asm/apic.h>
37 #include <asm/hw_irq.h>
38 #include <asm/proto.h>
39 #include <asm/iommu.h>
40 #include <asm/gart.h>
41 #include <asm/dma.h>
42 #include <uapi/linux/iommufd.h>
43 #include <linux/generic_pt/iommu.h>
44
45 #include "amd_iommu.h"
46 #include "iommufd.h"
47 #include "../irq_remapping.h"
48 #include "../iommu-pages.h"
49
50 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
51
52 /* Reserved IOVA ranges */
53 #define MSI_RANGE_START (0xfee00000)
54 #define MSI_RANGE_END (0xfeefffff)
55 #define HT_RANGE_START (0xfd00000000ULL)
56 #define HT_RANGE_END (0xffffffffffULL)
57
58 LIST_HEAD(ioapic_map);
59 LIST_HEAD(hpet_map);
60 LIST_HEAD(acpihid_map);
61
62 const struct iommu_ops amd_iommu_ops;
63
64 int amd_iommu_max_glx_val = -1;
65
66 /*
 * AMD IOMMU allows up to 2^16 different protection domains. This IDA
 * tracks which domain IDs are already in use.
69 */
70 DEFINE_IDA(pdom_ids);
71
72 static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
73 struct iommu_domain *old);
74
75 static void set_dte_entry(struct amd_iommu *iommu,
76 struct iommu_dev_data *dev_data,
77 phys_addr_t top_paddr, unsigned int top_level);
78
79 static int device_flush_dte(struct iommu_dev_data *dev_data);
80
81 static void amd_iommu_change_top(struct pt_iommu *iommu_table,
82 phys_addr_t top_paddr, unsigned int top_level);
83
84 static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
85
86 static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
87 static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain);
88 static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
89 bool enable);
90
91 static void clone_aliases(struct amd_iommu *iommu, struct device *dev);
92
93 static int iommu_completion_wait(struct amd_iommu *iommu);
94
95 /****************************************************************************
96 *
97 * Helper functions
98 *
99 ****************************************************************************/
100
static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
102 {
103 /*
104 * Note:
105 * We use arch_cmpxchg128_local() because:
 * - We need the cmpxchg16b instruction mainly for the 128-bit store to the DTE
 *   (the compare-exchange itself is not necessary since this function is
 *   already protected by a spin_lock for this DTE).
 * - Neither LOCK_PREFIX nor a retry loop is needed because of the spin_lock.
110 */
111 arch_cmpxchg128_local(ptr, *ptr, val);
112 }
113
static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
115 {
116 struct dev_table_entry old;
117
118 old.data128[1] = ptr->data128[1];
119 /*
 * Preserve DTE_DATA2_INTR_MASK. This needs to be
 * done here since it must happen inside the
 * spin_lock(&dev_data->dte_lock) context.
123 */
124 new->data[2] &= ~DTE_DATA2_INTR_MASK;
125 new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;
126
127 amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
128 }
129
static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
131 {
132 amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
133 }
134
135 /*
136 * Note:
137 * IOMMU reads the entire Device Table entry in a single 256-bit transaction
 * but the driver programs the DTE using two 128-bit cmpxchg operations. So,
 * the driver needs to ensure the following:
140 * - DTE[V|GV] bit is being written last when setting.
141 * - DTE[V|GV] bit is being written first when clearing.
142 *
 * This function is used only by code that updates the DMA translation part of the DTE.
144 * So, only consider control bits related to DMA when updating the entry.
145 */
static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
			  struct dev_table_entry *new)
148 {
149 unsigned long flags;
150 struct dev_table_entry *dev_table = get_dev_table(iommu);
151 struct dev_table_entry *ptr = &dev_table[dev_data->devid];
152
153 spin_lock_irqsave(&dev_data->dte_lock, flags);
154
155 if (!(ptr->data[0] & DTE_FLAG_V)) {
156 /* Existing DTE is not valid. */
157 write_dte_upper128(ptr, new);
158 write_dte_lower128(ptr, new);
159 iommu_flush_dte_sync(iommu, dev_data->devid);
160 } else if (!(new->data[0] & DTE_FLAG_V)) {
161 /* Existing DTE is valid. New DTE is not valid. */
162 write_dte_lower128(ptr, new);
163 write_dte_upper128(ptr, new);
164 iommu_flush_dte_sync(iommu, dev_data->devid);
165 } else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
166 /*
167 * Both DTEs are valid.
168 * Existing DTE has no guest page table.
169 */
170 write_dte_upper128(ptr, new);
171 write_dte_lower128(ptr, new);
172 iommu_flush_dte_sync(iommu, dev_data->devid);
173 } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
174 /*
175 * Both DTEs are valid.
176 * Existing DTE has guest page table,
 * new DTE has no guest page table.
178 */
179 write_dte_lower128(ptr, new);
180 write_dte_upper128(ptr, new);
181 iommu_flush_dte_sync(iommu, dev_data->devid);
182 } else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
183 FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
184 /*
185 * Both DTEs are valid and have guest page table,
 * but have a different number of levels. So, we need
 * to update both the upper and lower 128-bit values, which
 * requires disabling and flushing.
189 */
190 struct dev_table_entry clear = {};
191
192 /* First disable DTE */
193 write_dte_lower128(ptr, &clear);
194 iommu_flush_dte_sync(iommu, dev_data->devid);
195
196 /* Then update DTE */
197 write_dte_upper128(ptr, new);
198 write_dte_lower128(ptr, new);
199 iommu_flush_dte_sync(iommu, dev_data->devid);
200 } else {
201 /*
 * Both DTEs are valid and have guest page tables
 * with the same number of levels. We only need to
 * update the lower 128 bits, so there is no need to disable the DTE.
205 */
206 write_dte_lower128(ptr, new);
207 }
208
209 spin_unlock_irqrestore(&dev_data->dte_lock, flags);
210 }
211
void amd_iommu_update_dte(struct amd_iommu *iommu,
			  struct iommu_dev_data *dev_data,
			  struct dev_table_entry *new)
215 {
216 update_dte256(iommu, dev_data, new);
217 clone_aliases(iommu, dev_data->dev);
218 device_flush_dte(dev_data);
219 iommu_completion_wait(iommu);
220 }
221
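/*
 * Read back a full 256-bit DTE. Both 128-bit halves are copied under the
 * per-device dte_lock so the caller gets a consistent snapshot.
 */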
static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
		       struct dev_table_entry *dte)
224 {
225 unsigned long flags;
226 struct dev_table_entry *ptr;
227 struct dev_table_entry *dev_table = get_dev_table(iommu);
228
229 ptr = &dev_table[dev_data->devid];
230
231 spin_lock_irqsave(&dev_data->dte_lock, flags);
232 dte->data128[0] = ptr->data128[0];
233 dte->data128[1] = ptr->data128[1];
234 spin_unlock_irqrestore(&dev_data->dte_lock, flags);
235 }
236
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
238 {
239 return (pdom && (pdom->pd_mode == PD_MODE_V2));
240 }
241
static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
243 {
244 return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
245 }
246
247 /*
248 * We cannot support PASID w/ existing v1 page table in the same domain
249 * since it will be nested. However, existing domain w/ v2 page table
250 * or passthrough mode can be used for PASID.
251 */
static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
253 {
254 return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
255 }
256
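/*
 * Resolve the IVRS-assigned device ID for an ACPI HID device by matching its
 * HID/UID against the entries in acpihid_map. Returns the device ID on
 * success or a negative errno.
 */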
static inline int get_acpihid_device_id(struct device *dev,
					struct acpihid_map_entry **entry)
259 {
260 struct acpi_device *adev = ACPI_COMPANION(dev);
261 struct acpihid_map_entry *p, *p1 = NULL;
262 int hid_count = 0;
263 bool fw_bug;
264
265 if (!adev)
266 return -ENODEV;
267
268 list_for_each_entry(p, &acpihid_map, list) {
269 if (acpi_dev_hid_uid_match(adev, p->hid,
270 p->uid[0] ? p->uid : NULL)) {
271 p1 = p;
272 fw_bug = false;
273 hid_count = 1;
274 break;
275 }
276
277 /*
278 * Count HID matches w/o UID, raise FW_BUG but allow exactly one match
279 */
280 if (acpi_dev_hid_match(adev, p->hid)) {
281 p1 = p;
282 hid_count++;
283 fw_bug = true;
284 }
285 }
286
287 if (!p1)
288 return -EINVAL;
289 if (fw_bug)
290 dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
291 hid_count, str_plural(hid_count));
292 if (hid_count > 1)
293 return -EINVAL;
294 if (entry)
295 *entry = p1;
296
297 return p1->devid;
298 }
299
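/*
 * Return the segment/bus/device/function ID for a PCI device, or the
 * IVRS-assigned ID for an ACPI HID device; negative errno on failure.
 */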
static inline int get_device_sbdf_id(struct device *dev)
301 {
302 int sbdf;
303
304 if (dev_is_pci(dev))
305 sbdf = get_pci_sbdf_id(to_pci_dev(dev));
306 else
307 sbdf = get_acpihid_device_id(dev, NULL);
308
309 return sbdf;
310 }
311
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
313 {
314 struct dev_table_entry *dev_table;
315 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
316
317 BUG_ON(pci_seg == NULL);
318 dev_table = pci_seg->dev_table;
319 BUG_ON(dev_table == NULL);
320
321 return dev_table;
322 }
323
static inline u16 get_device_segment(struct device *dev)
325 {
326 u16 seg;
327
328 if (dev_is_pci(dev)) {
329 struct pci_dev *pdev = to_pci_dev(dev);
330
331 seg = pci_domain_nr(pdev->bus);
332 } else {
333 u32 devid = get_acpihid_device_id(dev, NULL);
334
335 seg = PCI_SBDF_TO_SEGID(devid);
336 }
337
338 return seg;
339 }
340
341 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
343 {
344 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
345
346 pci_seg->rlookup_table[devid] = iommu;
347 }
348
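/* Find the IOMMU serving @devid on PCI segment @seg via the per-segment rlookup table. */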
static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
350 {
351 struct amd_iommu_pci_seg *pci_seg;
352
353 for_each_pci_segment(pci_seg) {
354 if (pci_seg->id != seg)
355 continue;
356 /* IVRS may not describe every device on the bus */
357 if (devid > pci_seg->last_bdf)
358 return NULL;
359 return pci_seg->rlookup_table[devid];
360 }
361 return NULL;
362 }
363
static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
365 {
366 u16 seg = get_device_segment(dev);
367 int devid = get_device_sbdf_id(dev);
368
369 if (devid < 0)
370 return NULL;
371 return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
372 }
373
static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
375 {
376 struct iommu_dev_data *dev_data;
377 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
378
379 dev_data = kzalloc_obj(*dev_data);
380 if (!dev_data)
381 return NULL;
382
383 mutex_init(&dev_data->mutex);
384 spin_lock_init(&dev_data->dte_lock);
385 dev_data->devid = devid;
386 ratelimit_default_init(&dev_data->rs);
387
388 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
389 return dev_data;
390 }
391
struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
393 {
394 struct iommu_dev_data *dev_data;
395 struct llist_node *node;
396 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
397
398 if (llist_empty(&pci_seg->dev_data_list))
399 return NULL;
400
401 node = pci_seg->dev_data_list.first;
402 llist_for_each_entry(dev_data, node, dev_data_list) {
403 if (dev_data->devid == devid)
404 return dev_data;
405 }
406
407 return NULL;
408 }
409
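/*
 * Used directly and as a pci_for_each_dma_alias() callback: copy the DTE of
 * the original device (@data holds its struct pci_dev) to the DTE of the
 * @alias device ID and register the alias in the rlookup table.
 */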
static int clone_alias(struct pci_dev *pdev_origin, u16 alias, void *data)
411 {
412 struct dev_table_entry new;
413 struct amd_iommu *iommu;
414 struct iommu_dev_data *dev_data, *alias_data;
415 struct pci_dev *pdev = data;
416 u16 devid = pci_dev_id(pdev);
417 int ret = 0;
418
419 if (devid == alias)
420 return 0;
421
422 iommu = rlookup_amd_iommu(&pdev->dev);
423 if (!iommu)
424 return 0;
425
426 /* Copy the data from pdev */
427 dev_data = dev_iommu_priv_get(&pdev->dev);
428 if (!dev_data) {
429 pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
430 ret = -EINVAL;
431 goto out;
432 }
433 get_dte256(iommu, dev_data, &new);
434
435 /* Setup alias */
436 alias_data = find_dev_data(iommu, alias);
437 if (!alias_data) {
438 pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
439 ret = -EINVAL;
440 goto out;
441 }
442 update_dte256(iommu, alias_data, &new);
443
444 amd_iommu_set_rlookup_table(iommu, alias);
445 out:
446 return ret;
447 }
448
static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
450 {
451 struct pci_dev *pdev;
452
453 if (!dev_is_pci(dev))
454 return;
455 pdev = to_pci_dev(dev);
456
457 /*
458 * The IVRS alias stored in the alias table may not be
 * part of the PCI DMA aliases if its bus differs
 * from that of the original device.
461 */
462 clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], pdev);
463
464 pci_for_each_dma_alias(pdev, clone_alias, pdev);
465 }
466
static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
468 {
469 struct pci_dev *pdev = to_pci_dev(dev);
470 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
471 u16 ivrs_alias;
472
473 /* For ACPI HID devices, there are no aliases */
474 if (!dev_is_pci(dev))
475 return;
476
477 /*
478 * Add the IVRS alias to the pci aliases if it is on the same
479 * bus. The IVRS table may know about a quirk that we don't.
480 */
481 ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
482 if (ivrs_alias != pci_dev_id(pdev) &&
483 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
484 pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);
485
486 clone_aliases(iommu, dev);
487 }
488
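/*
 * Find the iommu_dev_data for @devid, allocating it on first use. If
 * translation was already enabled by the firmware, mark the device for
 * deferred attach.
 */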
static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
490 {
491 struct iommu_dev_data *dev_data;
492
493 dev_data = search_dev_data(iommu, devid);
494
495 if (dev_data == NULL) {
496 dev_data = alloc_dev_data(iommu, devid);
497 if (!dev_data)
498 return NULL;
499
500 if (translation_pre_enabled(iommu))
501 dev_data->defer_attach = true;
502 }
503
504 return dev_data;
505 }
506
507 /*
 * Find or create an IOMMU group for an acpihid device.
509 */
static struct iommu_group *acpihid_device_group(struct device *dev)
511 {
512 struct acpihid_map_entry *p, *entry = NULL;
513 int devid;
514
515 devid = get_acpihid_device_id(dev, &entry);
516 if (devid < 0)
517 return ERR_PTR(devid);
518
519 list_for_each_entry(p, &acpihid_map, list) {
520 if ((devid == p->devid) && p->group)
521 entry->group = p->group;
522 }
523
524 if (!entry->group)
525 entry->group = generic_device_group(dev);
526 else
527 iommu_group_ref_get(entry->group);
528
529 return entry->group;
530 }
531
static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
533 {
534 return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
535 }
536
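/* Collect the ATS/PRI/PASID capability flags advertised by a PCI device. */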
static u32 pdev_get_caps(struct pci_dev *pdev)
538 {
539 int features;
540 u32 flags = 0;
541
542 if (pci_ats_supported(pdev))
543 flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
544
545 if (pci_pri_supported(pdev))
546 flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
547
548 features = pci_pasid_features(pdev);
549 if (features >= 0) {
550 flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
551
552 if (features & PCI_PASID_CAP_EXEC)
553 flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
554
555 if (features & PCI_PASID_CAP_PRIV)
556 flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
557 }
558
559 return flags;
560 }
561
static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
563 {
564 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
565 int ret = -EINVAL;
566
567 if (dev_data->ats_enabled)
568 return 0;
569
570 if (amd_iommu_iotlb_sup &&
571 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
572 ret = pci_enable_ats(pdev, PAGE_SHIFT);
573 if (!ret) {
574 dev_data->ats_enabled = 1;
575 dev_data->ats_qdep = pci_ats_queue_depth(pdev);
576 }
577 }
578
579 return ret;
580 }
581
static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
583 {
584 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
585
586 if (dev_data->ats_enabled) {
587 pci_disable_ats(pdev);
588 dev_data->ats_enabled = 0;
589 }
590 }
591
static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
593 {
594 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
595 int ret = -EINVAL;
596
597 if (dev_data->pri_enabled)
598 return 0;
599
600 if (!dev_data->ats_enabled)
601 return 0;
602
603 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
604 /*
605 * First reset the PRI state of the device.
606 * FIXME: Hardcode number of outstanding requests for now
607 */
608 if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
609 dev_data->pri_enabled = 1;
610 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
611
612 ret = 0;
613 }
614 }
615
616 return ret;
617 }
618
static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
620 {
621 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
622
623 if (dev_data->pri_enabled) {
624 pci_disable_pri(pdev);
625 dev_data->pri_enabled = 0;
626 }
627 }
628
static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
630 {
631 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
632 int ret = -EINVAL;
633
634 if (dev_data->pasid_enabled)
635 return 0;
636
637 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
638 /* Only allow access to user-accessible pages */
639 ret = pci_enable_pasid(pdev, 0);
640 if (!ret)
641 dev_data->pasid_enabled = 1;
642 }
643
644 return ret;
645 }
646
static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
648 {
649 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
650
651 if (dev_data->pasid_enabled) {
652 pci_disable_pasid(pdev);
653 dev_data->pasid_enabled = 0;
654 }
655 }
656
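/*
 * Enable the PCI capabilities used for PASID/PRI support. PASID and ATS are
 * enabled before PRI, since pdev_enable_cap_pri() is a no-op unless ATS is
 * already enabled.
 */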
static void pdev_enable_caps(struct pci_dev *pdev)
658 {
659 pdev_enable_cap_pasid(pdev);
660 pdev_enable_cap_ats(pdev);
661 pdev_enable_cap_pri(pdev);
662 }
663
static void pdev_disable_caps(struct pci_dev *pdev)
665 {
666 pdev_disable_cap_ats(pdev);
667 pdev_disable_cap_pasid(pdev);
668 pdev_disable_cap_pri(pdev);
669 }
670
671 /*
672 * This function checks if the driver got a valid device from the caller to
673 * avoid dereferencing invalid pointers.
674 */
static bool check_device(struct device *dev)
676 {
677 struct amd_iommu_pci_seg *pci_seg;
678 struct amd_iommu *iommu;
679 int devid, sbdf;
680
681 if (!dev)
682 return false;
683
684 sbdf = get_device_sbdf_id(dev);
685 if (sbdf < 0)
686 return false;
687 devid = PCI_SBDF_TO_DEVID(sbdf);
688
689 iommu = rlookup_amd_iommu(dev);
690 if (!iommu)
691 return false;
692
693 /* Out of our scope? */
694 pci_seg = iommu->pci_seg;
695 if (devid > pci_seg->last_bdf)
696 return false;
697
698 return true;
699 }
700
static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
702 {
703 struct iommu_dev_data *dev_data;
704 int devid, sbdf;
705
706 if (dev_iommu_priv_get(dev))
707 return 0;
708
709 sbdf = get_device_sbdf_id(dev);
710 if (sbdf < 0)
711 return sbdf;
712
713 devid = PCI_SBDF_TO_DEVID(sbdf);
714 dev_data = find_dev_data(iommu, devid);
715 if (!dev_data)
716 return -ENOMEM;
717
718 dev_data->dev = dev;
719
720 /*
 * dev_iommu_priv_set() needs to be called before setup_aliases().
 * Otherwise, subsequent calls to dev_iommu_priv_get() will fail.
723 */
724 dev_iommu_priv_set(dev, dev_data);
725 setup_aliases(iommu, dev);
726
727 /*
 * By default we use passthrough mode for IOMMUv2 capable devices.
 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to an
 * invalid address), we ignore the capability for the device so
 * it'll be forced to go into translation mode.
732 */
733 if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
734 dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
735 dev_data->flags = pdev_get_caps(to_pci_dev(dev));
736 }
737
738 return 0;
739 }
740
static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
742 {
743 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
744 struct dev_table_entry *dev_table = get_dev_table(iommu);
745 int devid, sbdf;
746
747 sbdf = get_device_sbdf_id(dev);
748 if (sbdf < 0)
749 return;
750
751 devid = PCI_SBDF_TO_DEVID(sbdf);
752 pci_seg->rlookup_table[devid] = NULL;
753 memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));
754
755 setup_aliases(iommu, dev);
756 }
757
758
759 /****************************************************************************
760 *
761 * Interrupt handling functions
762 *
763 ****************************************************************************/
764
static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
766 {
767 int i;
768 struct dev_table_entry dte;
769 struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);
770
771 get_dte256(iommu, dev_data, &dte);
772
773 for (i = 0; i < 4; ++i)
774 pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
775 }
776
static void dump_command(unsigned long phys_addr)
778 {
779 struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
780 int i;
781
782 for (i = 0; i < 4; ++i)
783 pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
784 }
785
static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
787 {
788 struct iommu_dev_data *dev_data = NULL;
789 int devid, vmg_tag, flags;
790 struct pci_dev *pdev;
791 u64 spa;
792
793 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
794 vmg_tag = (event[1]) & 0xFFFF;
795 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
796 spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
797
798 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
799 devid & 0xff);
800 if (pdev)
801 dev_data = dev_iommu_priv_get(&pdev->dev);
802
803 if (dev_data) {
804 if (__ratelimit(&dev_data->rs)) {
805 pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
806 vmg_tag, spa, flags);
807 }
808 } else {
809 pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
810 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
811 vmg_tag, spa, flags);
812 }
813
814 if (pdev)
815 pci_dev_put(pdev);
816 }
817
static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
819 {
820 struct iommu_dev_data *dev_data = NULL;
821 int devid, flags_rmp, vmg_tag, flags;
822 struct pci_dev *pdev;
823 u64 gpa;
824
825 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
826 flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
827 vmg_tag = (event[1]) & 0xFFFF;
828 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
829 gpa = ((u64)event[3] << 32) | event[2];
830
831 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
832 devid & 0xff);
833 if (pdev)
834 dev_data = dev_iommu_priv_get(&pdev->dev);
835
836 if (dev_data) {
837 if (__ratelimit(&dev_data->rs)) {
838 pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
839 vmg_tag, gpa, flags_rmp, flags);
840 }
841 } else {
842 pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
843 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
844 vmg_tag, gpa, flags_rmp, flags);
845 }
846
847 if (pdev)
848 pci_dev_put(pdev);
849 }
850
851 #define IS_IOMMU_MEM_TRANSACTION(flags) \
852 (((flags) & EVENT_FLAG_I) == 0)
853
854 #define IS_WRITE_REQUEST(flags) \
855 ((flags) & EVENT_FLAG_RW)
856
static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
					u16 devid, u16 domain_id,
					u64 address, int flags)
860 {
861 struct iommu_dev_data *dev_data = NULL;
862 struct pci_dev *pdev;
863
864 pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
865 devid & 0xff);
866 if (pdev)
867 dev_data = dev_iommu_priv_get(&pdev->dev);
868
869 if (dev_data) {
870 /*
871 * If this is a DMA fault (for which the I(nterrupt)
872 * bit will be unset), allow report_iommu_fault() to
873 * prevent logging it.
874 */
875 if (IS_IOMMU_MEM_TRANSACTION(flags)) {
876 /* Device not attached to domain properly */
877 if (dev_data->domain == NULL) {
878 pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
879 pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
880 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
881 PCI_FUNC(devid), domain_id);
882 goto out;
883 }
884
885 if (!report_iommu_fault(&dev_data->domain->domain,
886 &pdev->dev, address,
887 IS_WRITE_REQUEST(flags) ?
888 IOMMU_FAULT_WRITE :
889 IOMMU_FAULT_READ))
890 goto out;
891 }
892
893 if (__ratelimit(&dev_data->rs)) {
894 pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
895 domain_id, address, flags);
896 }
897 } else {
898 pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
899 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
900 domain_id, address, flags);
901 }
902
903 out:
904 if (pdev)
905 pci_dev_put(pdev);
906 }
907
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
909 {
910 struct device *dev = iommu->iommu.dev;
911 int type, devid, flags, tag;
912 volatile u32 *event = __evt;
913 int count = 0;
914 u64 address, ctrl;
915 u32 pasid;
916
917 retry:
918 type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
919 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
920 pasid = (event[0] & EVENT_DOMID_MASK_HI) |
921 (event[1] & EVENT_DOMID_MASK_LO);
922 flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
923 address = (u64)(((u64)event[3]) << 32) | event[2];
924 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
925
926 if (type == 0) {
927 /* Did we hit the erratum? */
928 if (++count == LOOP_TIMEOUT) {
929 pr_err("No event written to event log\n");
930 return;
931 }
932 udelay(1);
933 goto retry;
934 }
935
936 if (type == EVENT_TYPE_IO_FAULT) {
937 amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
938 return;
939 }
940
941 switch (type) {
942 case EVENT_TYPE_ILL_DEV:
943 dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
944 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
945 pasid, address, flags);
946 dev_err(dev, "Control Reg : 0x%llx\n", ctrl);
947 dump_dte_entry(iommu, devid);
948 break;
949 case EVENT_TYPE_DEV_TAB_ERR:
950 dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
951 "address=0x%llx flags=0x%04x]\n",
952 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
953 address, flags);
954 break;
955 case EVENT_TYPE_PAGE_TAB_ERR:
956 dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
957 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
958 pasid, address, flags);
959 break;
960 case EVENT_TYPE_ILL_CMD:
961 dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
962 dump_command(address);
963 break;
964 case EVENT_TYPE_CMD_HARD_ERR:
965 dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
966 address, flags);
967 break;
968 case EVENT_TYPE_IOTLB_INV_TO:
969 dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
970 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
971 address);
972 break;
973 case EVENT_TYPE_INV_DEV_REQ:
974 dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
975 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
976 pasid, address, flags);
977 break;
978 case EVENT_TYPE_RMP_FAULT:
979 amd_iommu_report_rmp_fault(iommu, event);
980 break;
981 case EVENT_TYPE_RMP_HW_ERR:
982 amd_iommu_report_rmp_hw_error(iommu, event);
983 break;
984 case EVENT_TYPE_INV_PPR_REQ:
985 pasid = PPR_PASID(*((u64 *)__evt));
986 tag = event[1] & 0x03FF;
987 dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
988 iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
989 pasid, address, flags, tag);
990 break;
991 default:
992 dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
993 event[0], event[1], event[2], event[3]);
994 }
995
996 /*
 * To detect hardware erratum 732 we need to clear the
 * entry back to zero. This issue does not exist on
 * SNP-enabled systems. Also, this buffer is not writable
 * on SNP-enabled systems.
1001 */
1002 if (!amd_iommu_snp_en)
1003 memset(__evt, 0, 4 * sizeof(u32));
1004 }
1005
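/* Drain the event log ring buffer and advance the hardware head pointer. */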
static void iommu_poll_events(struct amd_iommu *iommu)
1007 {
1008 u32 head, tail;
1009
1010 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
1011 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
1012
1013 while (head != tail) {
1014 iommu_print_event(iommu, iommu->evt_buf + head);
1015
1016 /* Update head pointer of hardware ring-buffer */
1017 head = (head + EVTLOG_ENTRY_SIZE) % amd_iommu_evtlog_size;
1018 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
1019 }
1020
1021 }
1022
1023 #ifdef CONFIG_IRQ_REMAP
1024 static int (*iommu_ga_log_notifier)(u32);
1025
int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
1027 {
1028 iommu_ga_log_notifier = notifier;
1029
1030 /*
1031 * Ensure all in-flight IRQ handlers run to completion before returning
1032 * to the caller, e.g. to ensure module code isn't unloaded while it's
1033 * being executed in the IRQ handler.
1034 */
1035 if (!notifier)
1036 synchronize_rcu();
1037
1038 return 0;
1039 }
1040 EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
1041
static void iommu_poll_ga_log(struct amd_iommu *iommu)
1043 {
1044 u32 head, tail;
1045
1046 if (iommu->ga_log == NULL)
1047 return;
1048
1049 head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
1050 tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
1051
1052 while (head != tail) {
1053 volatile u64 *raw;
1054 u64 log_entry;
1055
1056 raw = (u64 *)(iommu->ga_log + head);
1057
1058 /* Avoid memcpy function-call overhead */
1059 log_entry = *raw;
1060
1061 /* Update head pointer of hardware ring-buffer */
1062 head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
1063 writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
1064
1065 /* Handle GA entry */
1066 switch (GA_REQ_TYPE(log_entry)) {
1067 case GA_GUEST_NR:
1068 if (!iommu_ga_log_notifier)
1069 break;
1070
1071 pr_debug("%s: devid=%#x, ga_tag=%#x\n",
1072 __func__, GA_DEVID(log_entry),
1073 GA_TAG(log_entry));
1074
1075 if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
1076 pr_err("GA log notifier failed.\n");
1077 break;
1078 default:
1079 break;
1080 }
1081 }
1082 }
1083
1084 static void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
1086 {
1087 if (!irq_remapping_enabled || !dev_is_pci(dev) ||
1088 !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev)))
1089 return;
1090
1091 dev_set_msi_domain(dev, iommu->ir_domain);
1092 }
1093
1094 #else /* CONFIG_IRQ_REMAP */
1095 static inline void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
1097 #endif /* !CONFIG_IRQ_REMAP */
1098
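/*
 * Common handler for the threaded log interrupts: acknowledge the status
 * bits, drain the corresponding log, and let the overflow handler restart
 * the log if it overflowed.
 */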
static void amd_iommu_handle_irq(void *data, const char *evt_type,
				 u32 int_mask, u32 overflow_mask,
				 void (*int_handler)(struct amd_iommu *),
				 void (*overflow_handler)(struct amd_iommu *))
1103 {
1104 struct amd_iommu *iommu = (struct amd_iommu *) data;
1105 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
1106 u32 mask = int_mask | overflow_mask;
1107
1108 while (status & mask) {
1109 /* Enable interrupt sources again */
1110 writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
1111
1112 if (int_handler) {
1113 pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
1114 iommu->index, evt_type);
1115 int_handler(iommu);
1116 }
1117
1118 if ((status & overflow_mask) && overflow_handler)
1119 overflow_handler(iommu);
1120
1121 /*
1122 * Hardware bug: ERBT1312
 * When re-enabling an interrupt (by writing 1
 * to clear the bit), the hardware might also try to set
 * the interrupt bit in the event status register.
 * In this scenario, the bit will be set and will disable
 * subsequent interrupts.
1128 *
1129 * Workaround: The IOMMU driver should read back the
1130 * status register and check if the interrupt bits are cleared.
 * If not, the driver needs to go through the interrupt handler
 * again and re-clear the bits.
1133 */
1134 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
1135 }
1136 }
1137
irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
1139 {
1140 amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
1141 MMIO_STATUS_EVT_OVERFLOW_MASK,
1142 iommu_poll_events, amd_iommu_restart_event_logging);
1143
1144 return IRQ_HANDLED;
1145 }
1146
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
1148 {
1149 amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
1150 MMIO_STATUS_PPR_OVERFLOW_MASK,
1151 amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
1152
1153 return IRQ_HANDLED;
1154 }
1155
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
1157 {
1158 #ifdef CONFIG_IRQ_REMAP
1159 amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
1160 MMIO_STATUS_GALOG_OVERFLOW_MASK,
1161 iommu_poll_ga_log, amd_iommu_restart_ga_log);
1162 #endif
1163
1164 return IRQ_HANDLED;
1165 }
1166
irqreturn_t amd_iommu_int_thread(int irq, void *data)
1168 {
1169 amd_iommu_int_thread_evtlog(irq, data);
1170 amd_iommu_int_thread_pprlog(irq, data);
1171 amd_iommu_int_thread_galog(irq, data);
1172
1173 return IRQ_HANDLED;
1174 }
1175
1176 /****************************************************************************
1177 *
1178 * IOMMU command queuing functions
1179 *
1180 ****************************************************************************/
1181
static void dump_command_buffer(struct amd_iommu *iommu)
1183 {
1184 struct iommu_cmd *cmd;
1185 u32 head, tail;
1186 int i;
1187
1188 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
1189 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1190
1191 pr_err("CMD Buffer head=%llu tail=%llu\n", MMIO_CMD_BUFFER_HEAD(head),
1192 MMIO_CMD_BUFFER_TAIL(tail));
1193
1194 for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
1195 cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
1196 pr_err("%3d: %08x %08x %08x %08x\n", i, cmd->data[0], cmd->data[1], cmd->data[2],
1197 cmd->data[3]);
1198 }
1199 }
1200
static int wait_on_sem(struct amd_iommu *iommu, u64 data)
1202 {
1203 int i = 0;
1204
1205 /*
1206 * cmd_sem holds a monotonically non-decreasing completion sequence
1207 * number.
1208 */
1209 while ((__s64)(READ_ONCE(*iommu->cmd_sem) - data) < 0 &&
1210 i < LOOP_TIMEOUT) {
1211 udelay(1);
1212 i += 1;
1213 }
1214
1215 if (i == LOOP_TIMEOUT) {
1216
1217 pr_alert("IOMMU %04x:%02x:%02x.%01x: Completion-Wait loop timed out\n",
1218 iommu->pci_seg->id, PCI_BUS_NUM(iommu->devid),
1219 PCI_SLOT(iommu->devid), PCI_FUNC(iommu->devid));
1220
1221 if (amd_iommu_dump)
1222 DO_ONCE_LITE(dump_command_buffer, iommu);
1223
1224 return -EIO;
1225 }
1226
1227 return 0;
1228 }
1229
static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd)
1232 {
1233 u8 *target;
1234 u32 tail;
1235
1236 /* Copy command to buffer */
1237 tail = iommu->cmd_buf_tail;
1238 target = iommu->cmd_buf + tail;
1239 memcpy(target, cmd, sizeof(*cmd));
1240
1241 tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1242 iommu->cmd_buf_tail = tail;
1243
1244 /* Tell the IOMMU about it */
1245 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
1246 }
1247
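/*
 * Build a COMPLETION_WAIT command that makes the IOMMU write the 64-bit
 * sequence number @data to the semaphore at iommu->cmd_sem_paddr once all
 * preceding commands have completed; wait_on_sem() polls for that value.
 */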
static void build_completion_wait(struct iommu_cmd *cmd,
				  struct amd_iommu *iommu,
				  u64 data)
1251 {
1252 u64 paddr = iommu->cmd_sem_paddr;
1253
1254 memset(cmd, 0, sizeof(*cmd));
1255 cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
1256 cmd->data[1] = upper_32_bits(paddr);
1257 cmd->data[2] = lower_32_bits(data);
1258 cmd->data[3] = upper_32_bits(data);
1259 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
1260 }
1261
static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
1263 {
1264 memset(cmd, 0, sizeof(*cmd));
1265 cmd->data[0] = devid;
1266 CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
1267 }
1268
1269 /*
1270 * Builds an invalidation address which is suitable for one page or multiple
 * pages. Sets the size bit (S) as needed if more than one page is flushed.
1272 */
static inline u64 build_inv_address(u64 address, size_t size)
1274 {
1275 u64 pages, end, msb_diff;
1276
1277 pages = iommu_num_pages(address, size, PAGE_SIZE);
1278
1279 if (pages == 1)
1280 return address & PAGE_MASK;
1281
1282 end = address + size - 1;
1283
1284 /*
1285 * msb_diff would hold the index of the most significant bit that
1286 * flipped between the start and end.
1287 */
1288 msb_diff = fls64(end ^ address) - 1;
1289
1290 /*
1291 * Bits 63:52 are sign extended. If for some reason bit 51 is different
1292 * between the start and the end, invalidate everything.
1293 */
1294 if (unlikely(msb_diff > 51)) {
1295 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
1296 } else {
1297 /*
1298 * The msb-bit must be clear on the address. Just set all the
1299 * lower bits.
1300 */
1301 address |= (1ull << msb_diff) - 1;
1302 }
1303
1304 /* Clear bits 11:0 */
1305 address &= PAGE_MASK;
1306
1307 /* Set the size bit - we flush more than one 4kb page */
1308 return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
1309 }
1310
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid,
				  ioasid_t pasid, bool gn)
1314 {
1315 u64 inv_address = build_inv_address(address, size);
1316
1317 memset(cmd, 0, sizeof(*cmd));
1318
1319 cmd->data[1] |= domid;
1320 cmd->data[2] = lower_32_bits(inv_address);
1321 cmd->data[3] = upper_32_bits(inv_address);
1322 /* PDE bit - we want to flush everything, not only the PTEs */
1323 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
1324 if (gn) {
1325 cmd->data[0] |= pasid;
1326 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1327 }
1328 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
1329 }
1330
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size,
				  ioasid_t pasid, bool gn)
1334 {
1335 u64 inv_address = build_inv_address(address, size);
1336
1337 memset(cmd, 0, sizeof(*cmd));
1338
1339 cmd->data[0] = devid;
1340 cmd->data[0] |= (qdep & 0xff) << 24;
1341 cmd->data[1] = devid;
1342 cmd->data[2] = lower_32_bits(inv_address);
1343 cmd->data[3] = upper_32_bits(inv_address);
1344 if (gn) {
1345 cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
1346 cmd->data[1] |= (pasid & 0xff) << 16;
1347 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
1348 }
1349
1350 CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
1351 }
1352
static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
			       int status, int tag, u8 gn)
1355 {
1356 memset(cmd, 0, sizeof(*cmd));
1357
1358 cmd->data[0] = devid;
1359 if (gn) {
1360 cmd->data[1] = pasid;
1361 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
1362 }
1363 cmd->data[3] = tag & 0x1ff;
1364 cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
1365
1366 CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
1367 }
1368
static void build_inv_all(struct iommu_cmd *cmd)
1370 {
1371 memset(cmd, 0, sizeof(*cmd));
1372 CMD_SET_TYPE(cmd, CMD_INV_ALL);
1373 }
1374
static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
1376 {
1377 memset(cmd, 0, sizeof(*cmd));
1378 cmd->data[0] = devid;
1379 CMD_SET_TYPE(cmd, CMD_INV_IRT);
1380 }
1381
1382 /*
1383 * Writes the command to the IOMMUs command buffer and informs the
1384 * hardware about the new command.
1385 */
static int __iommu_queue_command_sync(struct amd_iommu *iommu,
				      struct iommu_cmd *cmd,
				      bool sync)
1389 {
1390 unsigned int count = 0;
1391 u32 left, next_tail;
1392
1393 next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1394 again:
1395 left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;
1396
1397 if (left <= 0x20) {
1398 /* Skip udelay() the first time around */
1399 if (count++) {
1400 if (count == LOOP_TIMEOUT) {
1401 pr_err("Command buffer timeout\n");
1402 return -EIO;
1403 }
1404
1405 udelay(1);
1406 }
1407
1408 /* Update head and recheck remaining space */
1409 iommu->cmd_buf_head = readl(iommu->mmio_base +
1410 MMIO_CMD_HEAD_OFFSET);
1411
1412 goto again;
1413 }
1414
1415 copy_cmd_to_buffer(iommu, cmd);
1416
1417 /* Do we need to make sure all commands are processed? */
1418 iommu->need_sync = sync;
1419
1420 return 0;
1421 }
1422
static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
1426 {
1427 unsigned long flags;
1428 int ret;
1429
1430 raw_spin_lock_irqsave(&iommu->lock, flags);
1431 ret = __iommu_queue_command_sync(iommu, cmd, sync);
1432 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1433
1434 return ret;
1435 }
1436
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1438 {
1439 return iommu_queue_command_sync(iommu, cmd, true);
1440 }
1441
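/* Return the next completion-wait sequence number; iommu->lock must be held. */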
static u64 get_cmdsem_val(struct amd_iommu *iommu)
1443 {
1444 lockdep_assert_held(&iommu->lock);
1445 return ++iommu->cmd_sem_val;
1446 }
1447
1448 /*
1449 * This function queues a completion wait command into the command
1450 * buffer of an IOMMU
1451 */
static int iommu_completion_wait(struct amd_iommu *iommu)
1453 {
1454 struct iommu_cmd cmd;
1455 unsigned long flags;
1456 int ret;
1457 u64 data;
1458
1459 if (!iommu->need_sync)
1460 return 0;
1461
1462 raw_spin_lock_irqsave(&iommu->lock, flags);
1463
1464 data = get_cmdsem_val(iommu);
1465 build_completion_wait(&cmd, iommu, data);
1466
1467 ret = __iommu_queue_command_sync(iommu, &cmd, false);
1468 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1469
1470 if (ret)
1471 return ret;
1472
1473 ret = wait_on_sem(iommu, data);
1474
1475 return ret;
1476 }
1477
static void domain_flush_complete(struct protection_domain *domain)
1479 {
1480 struct pdom_iommu_info *pdom_iommu_info;
1481 unsigned long i;
1482
1483 lockdep_assert_held(&domain->lock);
1484
1485 /*
1486 * Devices of this domain are behind this IOMMU
1487 * We need to wait for completion of all commands.
1488 */
1489 xa_for_each(&domain->iommu_array, i, pdom_iommu_info)
1490 iommu_completion_wait(pdom_iommu_info->iommu);
1491 }
1492
static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
1494 {
1495 struct iommu_cmd cmd;
1496
1497 build_inv_dte(&cmd, devid);
1498
1499 return iommu_queue_command(iommu, &cmd);
1500 }
1501
static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
1503 {
1504 int ret;
1505
1506 ret = iommu_flush_dte(iommu, devid);
1507 if (!ret)
1508 iommu_completion_wait(iommu);
1509 }
1510
static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
1512 {
1513 u32 devid;
1514 u16 last_bdf = iommu->pci_seg->last_bdf;
1515
1516 for (devid = 0; devid <= last_bdf; ++devid)
1517 iommu_flush_dte(iommu, devid);
1518
1519 iommu_completion_wait(iommu);
1520 }
1521
1522 /*
1523 * This function uses heavy locking and may disable irqs for some time. But
1524 * this is no issue because it is only called during resume.
1525 */
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1527 {
1528 u32 dom_id;
1529 u16 last_bdf = iommu->pci_seg->last_bdf;
1530
1531 for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
1532 struct iommu_cmd cmd;
1533 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1534 dom_id, IOMMU_NO_PASID, false);
1535 iommu_queue_command(iommu, &cmd);
1536 }
1537
1538 iommu_completion_wait(iommu);
1539 }
1540
static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1542 {
1543 struct iommu_cmd cmd;
1544
1545 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1546 dom_id, IOMMU_NO_PASID, false);
1547 iommu_queue_command(iommu, &cmd);
1548
1549 iommu_completion_wait(iommu);
1550 }
1551
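/*
 * Flush an IOVA range for every host domain ID (hdom_id) that backs a guest
 * domain ID of a vIOMMU attached to this nest parent domain.
 */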
static int iommu_flush_pages_v1_hdom_ids(struct protection_domain *pdom, u64 address, size_t size)
1553 {
1554 int ret = 0;
1555 struct amd_iommu_viommu *aviommu;
1556
1557 list_for_each_entry(aviommu, &pdom->viommu_list, pdom_list) {
1558 unsigned long i;
1559 struct guest_domain_mapping_info *gdom_info;
1560 struct amd_iommu *iommu = container_of(aviommu->core.iommu_dev,
1561 struct amd_iommu, iommu);
1562
1563 xa_lock(&aviommu->gdomid_array);
1564 xa_for_each(&aviommu->gdomid_array, i, gdom_info) {
1565 struct iommu_cmd cmd;
1566
1567 pr_debug("%s: iommu=%#x, hdom_id=%#x\n", __func__,
1568 iommu->devid, gdom_info->hdom_id);
1569 build_inv_iommu_pages(&cmd, address, size, gdom_info->hdom_id,
1570 IOMMU_NO_PASID, false);
1571 ret |= iommu_queue_command(iommu, &cmd);
1572 }
1573 xa_unlock(&aviommu->gdomid_array);
1574 }
1575 return ret;
1576 }
1577
static void amd_iommu_flush_all(struct amd_iommu *iommu)
1579 {
1580 struct iommu_cmd cmd;
1581
1582 build_inv_all(&cmd);
1583
1584 iommu_queue_command(iommu, &cmd);
1585 iommu_completion_wait(iommu);
1586 }
1587
static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1589 {
1590 struct iommu_cmd cmd;
1591
1592 build_inv_irt(&cmd, devid);
1593
1594 iommu_queue_command(iommu, &cmd);
1595 }
1596
static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
1598 {
1599 u32 devid;
1600 u16 last_bdf = iommu->pci_seg->last_bdf;
1601
1602 if (iommu->irtcachedis_enabled)
1603 return;
1604
1605 for (devid = 0; devid <= last_bdf; devid++)
1606 iommu_flush_irt(iommu, devid);
1607
1608 iommu_completion_wait(iommu);
1609 }
1610
void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
1612 {
1613 if (check_feature(FEATURE_IA)) {
1614 amd_iommu_flush_all(iommu);
1615 } else {
1616 amd_iommu_flush_dte_all(iommu);
1617 amd_iommu_flush_irt_all(iommu);
1618 amd_iommu_flush_tlb_all(iommu);
1619 }
1620 }
1621
1622 /*
1623 * Command send function for flushing on-device TLB
1624 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
			      size_t size, ioasid_t pasid, bool gn)
1627 {
1628 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
1629 struct iommu_cmd cmd;
1630 int qdep = dev_data->ats_qdep;
1631
1632 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
1633 size, pasid, gn);
1634
1635 return iommu_queue_command(iommu, &cmd);
1636 }
1637
static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
1639 {
1640 struct amd_iommu *iommu = data;
1641
1642 return iommu_flush_dte(iommu, alias);
1643 }
1644
1645 /*
1646 * Command send function for invalidating a device table entry
1647 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
1649 {
1650 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
1651 struct pci_dev *pdev = NULL;
1652 struct amd_iommu_pci_seg *pci_seg;
1653 u16 alias;
1654 int ret;
1655
1656 if (dev_is_pci(dev_data->dev))
1657 pdev = to_pci_dev(dev_data->dev);
1658
1659 if (pdev)
1660 ret = pci_for_each_dma_alias(pdev,
1661 device_flush_dte_alias, iommu);
1662 else
1663 ret = iommu_flush_dte(iommu, dev_data->devid);
1664 if (ret)
1665 return ret;
1666
1667 pci_seg = iommu->pci_seg;
1668 alias = pci_seg->alias_table[dev_data->devid];
1669 if (alias != dev_data->devid) {
1670 ret = iommu_flush_dte(iommu, alias);
1671 if (ret)
1672 return ret;
1673 }
1674
1675 if (dev_data->ats_enabled) {
1676 /* Invalidate the entire contents of an IOTLB */
1677 ret = device_flush_iotlb(dev_data, 0, ~0UL,
1678 IOMMU_NO_PASID, false);
1679 }
1680
1681 return ret;
1682 }
1683
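/*
 * For a domain with a v2 page table, flush the IOVA range on each attached
 * device's IOMMU using the per-device GCR3 domain ID, with the GN bit set.
 */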
static int domain_flush_pages_v2(struct protection_domain *pdom,
				 u64 address, size_t size)
1686 {
1687 struct iommu_dev_data *dev_data;
1688 struct iommu_cmd cmd;
1689 int ret = 0;
1690
1691 lockdep_assert_held(&pdom->lock);
1692 list_for_each_entry(dev_data, &pdom->dev_list, list) {
1693 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1694 u16 domid = dev_data->gcr3_info.domid;
1695
1696 build_inv_iommu_pages(&cmd, address, size,
1697 domid, IOMMU_NO_PASID, true);
1698
1699 ret |= iommu_queue_command(iommu, &cmd);
1700 }
1701
1702 return ret;
1703 }
1704
static int domain_flush_pages_v1(struct protection_domain *pdom,
				 u64 address, size_t size)
1707 {
1708 struct pdom_iommu_info *pdom_iommu_info;
1709 struct iommu_cmd cmd;
1710 int ret = 0;
1711 unsigned long i;
1712
1713 lockdep_assert_held(&pdom->lock);
1714
1715 build_inv_iommu_pages(&cmd, address, size,
1716 pdom->id, IOMMU_NO_PASID, false);
1717
1718 xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) {
1719 /*
1720 * Devices of this domain are behind this IOMMU
1721 * We need a TLB flush
1722 */
1723 ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd);
1724 }
1725
1726 /*
1727 * A domain w/ v1 table can be a nest parent, which can have
1728 * multiple nested domains. Each nested domain has 1:1 mapping
1729 * between gDomID and hDomID. Therefore, flush every hDomID
1730 * associated to this nest parent domain.
1731 *
1732 * See drivers/iommu/amd/nested.c: amd_iommu_alloc_domain_nested()
1733 */
1734 if (!list_empty(&pdom->viommu_list))
1735 ret |= iommu_flush_pages_v1_hdom_ids(pdom, address, size);
1736
1737 return ret;
1738 }
1739
1740 /*
1741 * TLB invalidation function which is called from the mapping functions.
1742 * It flushes range of PTEs of the domain.
1743 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size)
1746 {
1747 struct iommu_dev_data *dev_data;
1748 int ret = 0;
1749 ioasid_t pasid = IOMMU_NO_PASID;
1750 bool gn = false;
1751
1752 lockdep_assert_held(&domain->lock);
1753
1754 if (pdom_is_v2_pgtbl_mode(domain)) {
1755 gn = true;
1756 ret = domain_flush_pages_v2(domain, address, size);
1757 } else {
1758 ret = domain_flush_pages_v1(domain, address, size);
1759 }
1760
1761 list_for_each_entry(dev_data, &domain->dev_list, list) {
1762
1763 if (!dev_data->ats_enabled)
1764 continue;
1765
1766 ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
1767 }
1768
1769 WARN_ON(ret);
1770 }
1771
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size)
1774 {
1775 lockdep_assert_held(&domain->lock);
1776
1777 if (likely(!amd_iommu_np_cache)) {
1778 __domain_flush_pages(domain, address, size);
1779
1780 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
1781 domain_flush_complete(domain);
1782
1783 return;
1784 }
1785
1786 /*
1787 * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
1788 * In such setups it is best to avoid flushes of ranges which are not
1789 * naturally aligned, since it would lead to flushes of unmodified
1790 * PTEs. Such flushes would require the hypervisor to do more work than
1791 * necessary. Therefore, perform repeated flushes of aligned ranges
1792 * until the whole range is covered. Each iteration flushes the
1793 * smaller of the natural alignment of the address being flushed and
1794 * the largest naturally aligned region that fits in the remaining range.
1795 */
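/*
 * Illustrative walk-through of the loop below (example values only, not
 * part of the driver): for address = 0x3000 and size = 0x5000,
 *   - pass 1: __ffs(0x3000) = 12, __fls(0x5000) = 14 -> flush 0x1000
 *     bytes at 0x3000,
 *   - pass 2: __ffs(0x4000) = 14, __fls(0x4000) = 14 -> flush 0x4000
 *     bytes at 0x4000,
 * after which size reaches zero. Every chunk is naturally aligned, so a
 * vIOMMU never has to shadow-invalidate PTEs outside the modified range.
 */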
1796 while (size != 0) {
1797 int addr_alignment = __ffs(address);
1798 int size_alignment = __fls(size);
1799 int min_alignment;
1800 size_t flush_size;
1801
1802 /*
1803 * size is always non-zero, but address might be zero, causing
1804 * addr_alignment to be negative. As the casting of the
1805 * argument in __ffs(address) to long might trim the high bits
1806 * of the address on x86-32, cast to long when doing the check.
1807 */
1808 if (likely((unsigned long)address != 0))
1809 min_alignment = min(addr_alignment, size_alignment);
1810 else
1811 min_alignment = size_alignment;
1812
1813 flush_size = 1ul << min_alignment;
1814
1815 __domain_flush_pages(domain, address, flush_size);
1816 address += flush_size;
1817 size -= flush_size;
1818 }
1819
1820 /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
1821 domain_flush_complete(domain);
1822 }
1823
1824 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1825 static void amd_iommu_domain_flush_all(struct protection_domain *domain)
1826 {
1827 amd_iommu_domain_flush_pages(domain, 0,
1828 CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
1829 }
1830
1831 void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
1832 ioasid_t pasid, u64 address, size_t size)
1833 {
1834 struct iommu_cmd cmd;
1835 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
1836
1837 build_inv_iommu_pages(&cmd, address, size,
1838 dev_data->gcr3_info.domid, pasid, true);
1839 iommu_queue_command(iommu, &cmd);
1840
1841 if (dev_data->ats_enabled)
1842 device_flush_iotlb(dev_data, address, size, pasid, true);
1843
1844 iommu_completion_wait(iommu);
1845 }
1846
1847 static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
1848 ioasid_t pasid)
1849 {
1850 amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
1851 CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
1852 }
1853
1854 int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
1855 {
1856 struct iommu_dev_data *dev_data;
1857 struct amd_iommu *iommu;
1858 struct iommu_cmd cmd;
1859
1860 dev_data = dev_iommu_priv_get(dev);
1861 iommu = get_amd_iommu_from_dev(dev);
1862
1863 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
1864 tag, dev_data->pri_tlp);
1865
1866 return iommu_queue_command(iommu, &cmd);
1867 }
1868
1869 /****************************************************************************
1870 *
1871 * The next functions belong to the domain allocation. A domain is
1872 * allocated for every IOMMU as the default domain. If device isolation
1873 * is enabled, every device gets its own domain. The most important thing
1874 * about domains is the page table mapping the DMA address space they
1875 * contain.
1876 *
1877 ****************************************************************************/
1878 int amd_iommu_pdom_id_alloc(void)
1879 {
1880 return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
1881 }
1882
1883 int amd_iommu_pdom_id_reserve(u16 id, gfp_t gfp)
1884 {
1885 return ida_alloc_range(&pdom_ids, id, id, gfp);
1886 }
1887
1888 void amd_iommu_pdom_id_free(int id)
1889 {
1890 ida_free(&pdom_ids, id);
1891 }
1892
1893 void amd_iommu_pdom_id_destroy(void)
1894 {
1895 ida_destroy(&pdom_ids);
1896 }
1897
1898 static void free_gcr3_tbl_level1(u64 *tbl)
1899 {
1900 u64 *ptr;
1901 int i;
1902
1903 for (i = 0; i < 512; ++i) {
1904 if (!(tbl[i] & GCR3_VALID))
1905 continue;
1906
1907 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1908
1909 iommu_free_pages(ptr);
1910 }
1911 }
1912
1913 static void free_gcr3_tbl_level2(u64 *tbl)
1914 {
1915 u64 *ptr;
1916 int i;
1917
1918 for (i = 0; i < 512; ++i) {
1919 if (!(tbl[i] & GCR3_VALID))
1920 continue;
1921
1922 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1923
1924 free_gcr3_tbl_level1(ptr);
1925 }
1926 }
1927
1928 static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
1929 {
1930 if (gcr3_info->glx == 2)
1931 free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
1932 else if (gcr3_info->glx == 1)
1933 free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
1934 else
1935 WARN_ON_ONCE(gcr3_info->glx != 0);
1936
1937 gcr3_info->glx = 0;
1938
1939 /* Free per device domain ID */
1940 amd_iommu_pdom_id_free(gcr3_info->domid);
1941
1942 iommu_free_pages(gcr3_info->gcr3_tbl);
1943 gcr3_info->gcr3_tbl = NULL;
1944 }
1945
1946 /*
1947 * Number of GCR3 table levels required. Each level is a 4-Kbyte
1948 * page and can contain up to 512 entries.
1949 */
1950 static int get_gcr3_levels(int pasids)
1951 {
1952 int levels;
1953
1954 if (pasids == -1)
1955 return amd_iommu_max_glx_val;
1956
1957 levels = get_count_order(pasids);
1958
1959 return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
1960 }
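/*
 * Illustrative example (not part of the driver): pasids = 0x10000 (64K
 * PASIDs) gives get_count_order() = 16 and DIV_ROUND_UP(16, 9) - 1 = 1,
 * i.e. a two-level GCR3 table (levels are counted from 0). A device
 * limited to pasids = 256 gives get_count_order() = 8 and level 0,
 * i.e. a single 4-Kbyte table.
 */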
1961
1962 static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
1963 struct amd_iommu *iommu, int pasids)
1964 {
1965 int levels = get_gcr3_levels(pasids);
1966 int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
1967 int domid;
1968
1969 if (levels > amd_iommu_max_glx_val)
1970 return -EINVAL;
1971
1972 if (gcr3_info->gcr3_tbl)
1973 return -EBUSY;
1974
1975 /* Allocate per device domain ID */
1976 domid = amd_iommu_pdom_id_alloc();
1977 if (domid <= 0)
1978 return -ENOSPC;
1979 gcr3_info->domid = domid;
1980
1981 gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
1982 if (gcr3_info->gcr3_tbl == NULL) {
1983 amd_iommu_pdom_id_free(domid);
1984 return -ENOMEM;
1985 }
1986
1987 gcr3_info->glx = levels;
1988
1989 return 0;
1990 }
1991
1992 static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
1993 ioasid_t pasid, bool alloc)
1994 {
1995 int index;
1996 u64 *pte;
1997 u64 *root = gcr3_info->gcr3_tbl;
1998 int level = gcr3_info->glx;
1999
2000 while (true) {
2001
2002 index = (pasid >> (9 * level)) & 0x1ff;
2003 pte = &root[index];
2004
2005 if (level == 0)
2006 break;
2007
2008 if (!(*pte & GCR3_VALID)) {
2009 if (!alloc)
2010 return NULL;
2011
2012 root = (void *)get_zeroed_page(GFP_ATOMIC);
2013 if (root == NULL)
2014 return NULL;
2015
2016 *pte = iommu_virt_to_phys(root) | GCR3_VALID;
2017 }
2018
2019 root = iommu_phys_to_virt(*pte & PAGE_MASK);
2020
2021 level -= 1;
2022 }
2023
2024 return pte;
2025 }
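/*
 * Illustrative example of the walk above (hypothetical values): with
 * glx = 1 (two levels) and pasid = 0x12345, the level-1 index is
 * (0x12345 >> 9) & 0x1ff = 0x91 and the level-0 index is
 * 0x12345 & 0x1ff = 0x145, so the returned pointer is
 * &level0_table[0x145], where level0_table is the page referenced by
 * level1_table[0x91] (allocated on demand when 'alloc' is true).
 */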
2026
2027 static int update_gcr3(struct iommu_dev_data *dev_data,
2028 ioasid_t pasid, unsigned long gcr3, bool set)
2029 {
2030 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2031 u64 *pte;
2032
2033 pte = __get_gcr3_pte(gcr3_info, pasid, true);
2034 if (pte == NULL)
2035 return -ENOMEM;
2036
2037 if (set)
2038 *pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
2039 else
2040 *pte = 0;
2041
2042 dev_flush_pasid_all(dev_data, pasid);
2043 return 0;
2044 }
2045
2046 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
2047 unsigned long gcr3)
2048 {
2049 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2050 int ret;
2051
2052 iommu_group_mutex_assert(dev_data->dev);
2053
2054 ret = update_gcr3(dev_data, pasid, gcr3, true);
2055 if (ret)
2056 return ret;
2057
2058 gcr3_info->pasid_cnt++;
2059 return ret;
2060 }
2061
2062 int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
2063 {
2064 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2065 int ret;
2066
2067 iommu_group_mutex_assert(dev_data->dev);
2068
2069 ret = update_gcr3(dev_data, pasid, 0, false);
2070 if (ret)
2071 return ret;
2072
2073 gcr3_info->pasid_cnt--;
2074 return ret;
2075 }
2076
2077 /*
2078 * Note:
2079 * The old values for the GCR3 table and GPT have been cleared by the caller.
2080 */
2081 static void set_dte_gcr3_table(struct iommu_dev_data *dev_data,
2082 struct dev_table_entry *new)
2083 {
2084 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2085 u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
2086
2087 new->data[0] |= DTE_FLAG_TV |
2088 (dev_data->ppr ? DTE_FLAG_PPR : 0) |
2089 (pdom_is_v2_pgtbl_mode(dev_data->domain) ? DTE_FLAG_GIOV : 0) |
2090 DTE_FLAG_GV |
2091 FIELD_PREP(DTE_GLX, gcr3_info->glx) |
2092 FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12) |
2093 DTE_FLAG_IR | DTE_FLAG_IW;
2094
2095 new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, dev_data->gcr3_info.domid) |
2096 FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
2097 (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0) |
2098 FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);
2099
2100 /* Guest page table can only support 4 and 5 levels */
2101 if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
2102 new->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
2103 else
2104 new->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
2105 }
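/*
 * Illustrative example of the GCR3 address split above (hypothetical
 * address, added for clarity): for a GCR3 root table at physical address
 * 0x123456789000, bits 14:12 (0x1) are stored in data[0] via
 * DTE_GCR3_14_12, bits 30:15 (0xacf1) in data[1] via DTE_GCR3_30_15 and
 * bits 51:31 (0x2468) in data[1] via DTE_GCR3_51_31; the hardware
 * reassembles the full address from these three fields.
 */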
2106
2107 void amd_iommu_set_dte_v1(struct iommu_dev_data *dev_data,
2108 struct protection_domain *domain, u16 domid,
2109 struct pt_iommu_amdv1_hw_info *pt_info,
2110 struct dev_table_entry *new)
2111 {
2112 u64 host_pt_root = __sme_set(pt_info->host_pt_root);
2113
2114 /* Note: Dirty tracking is supported for the v1 table only for now */
2115 new->data[0] |= DTE_FLAG_TV |
2116 FIELD_PREP(DTE_MODE_MASK, pt_info->mode) |
2117 (domain->dirty_tracking ? DTE_FLAG_HAD : 0) |
2118 FIELD_PREP(DTE_HOST_TRP, host_pt_root >> 12) |
2119 DTE_FLAG_IR | DTE_FLAG_IW;
2120
2121 new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, domid) |
2122 (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0);
2123 }
2124
2125 static void set_dte_v1(struct iommu_dev_data *dev_data,
2126 struct protection_domain *domain, u16 domid,
2127 phys_addr_t top_paddr, unsigned int top_level,
2128 struct dev_table_entry *new)
2129 {
2130 struct pt_iommu_amdv1_hw_info pt_info;
2131
2132 /*
2133 * When updating the IO pagetable, the new top and level
2134 * are provided as parameters. For other operations, e.g.
2135 * device attach, retrieve the current pagetable info
2136 * via the IOMMU PT API.
2137 */
2138 if (top_paddr) {
2139 pt_info.host_pt_root = top_paddr;
2140 pt_info.mode = top_level + 1;
2141 } else {
2142 WARN_ON(top_paddr || top_level);
2143 pt_iommu_amdv1_hw_info(&domain->amdv1, &pt_info);
2144 }
2145
2146 amd_iommu_set_dte_v1(dev_data, domain, domid, &pt_info, new);
2147 }
2148
2149 static void set_dte_passthrough(struct iommu_dev_data *dev_data,
2150 struct protection_domain *domain,
2151 struct dev_table_entry *new)
2152 {
2153 new->data[0] |= DTE_FLAG_TV | DTE_FLAG_IR | DTE_FLAG_IW;
2154
2155 new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, domain->id) |
2156 (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0);
2157
2158 }
2159
2160 static void set_dte_entry(struct amd_iommu *iommu,
2161 struct iommu_dev_data *dev_data,
2162 phys_addr_t top_paddr, unsigned int top_level)
2163 {
2164 u32 old_domid;
2165 struct dev_table_entry new = {};
2166 struct protection_domain *domain = dev_data->domain;
2167 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2168 struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
2169
2170 amd_iommu_make_clear_dte(dev_data, &new);
2171
2172 old_domid = READ_ONCE(dte->data[1]) & DTE_DOMID_MASK;
2173 if (gcr3_info->gcr3_tbl)
2174 set_dte_gcr3_table(dev_data, &new);
2175 else if (domain->domain.type == IOMMU_DOMAIN_IDENTITY)
2176 set_dte_passthrough(dev_data, domain, &new);
2177 else if ((domain->domain.type & __IOMMU_DOMAIN_PAGING) &&
2178 domain->pd_mode == PD_MODE_V1)
2179 set_dte_v1(dev_data, domain, domain->id, top_paddr, top_level, &new);
2180 else
2181 WARN_ON(true);
2182
2183 amd_iommu_update_dte(iommu, dev_data, &new);
2184
2185 /*
2186 * A kdump kernel might be replacing a domain ID that was copied from
2187 * the previous kernel--if so, it needs to flush the translation cache
2188 * entries for the old domain ID that is being overwritten
2189 */
2190 if (old_domid) {
2191 amd_iommu_flush_tlb_domid(iommu, old_domid);
2192 }
2193 }
2194
2195 /*
2196 * Clear DMA-remap related flags to block all DMA (blocked domain)
2197 */
2198 static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data)
2199 {
2200 struct dev_table_entry new = {};
2201
2202 amd_iommu_make_clear_dte(dev_data, &new);
2203 amd_iommu_update_dte(iommu, dev_data, &new);
2204 }
2205
2206 /* Update and flush DTE for the given device */
2207 static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
2208 {
2209 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
2210
2211 if (set)
2212 set_dte_entry(iommu, dev_data, 0, 0);
2213 else
2214 clear_dte_entry(iommu, dev_data);
2215 }
2216
2217 /*
2218 * If domain is SVA capable then initialize GCR3 table. Also if domain is
2219 * in v2 page table mode then update GCR3[0].
2220 */
2221 static int init_gcr3_table(struct iommu_dev_data *dev_data,
2222 struct protection_domain *pdom)
2223 {
2224 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2225 int max_pasids = dev_data->max_pasids;
2226 struct pt_iommu_x86_64_hw_info pt_info;
2227 int ret = 0;
2228
2229 /*
2230 * If the domain is in pt mode, then set up the GCR3 table only if
2231 * the device is PASID capable.
2232 */
2233 if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data))
2234 return ret;
2235
2236 /*
2237 * By default, setup GCR3 table to support MAX PASIDs
2238 * supported by the device/IOMMU.
2239 */
2240 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
2241 max_pasids > 0 ? max_pasids : 1);
2242 if (ret)
2243 return ret;
2244
2245 /* Setup GCR3[0] only if domain is setup with v2 page table mode */
2246 if (!pdom_is_v2_pgtbl_mode(pdom))
2247 return ret;
2248
2249 pt_iommu_x86_64_hw_info(&pdom->amdv2, &pt_info);
2250 ret = update_gcr3(dev_data, 0, __sme_set(pt_info.gcr3_pt), true);
2251 if (ret)
2252 free_gcr3_table(&dev_data->gcr3_info);
2253
2254 return ret;
2255 }
2256
2257 static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
2258 struct protection_domain *pdom)
2259 {
2260 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
2261
2262 if (pdom_is_v2_pgtbl_mode(pdom))
2263 update_gcr3(dev_data, 0, 0, false);
2264
2265 if (gcr3_info->gcr3_tbl == NULL)
2266 return;
2267
2268 free_gcr3_table(gcr3_info);
2269 }
2270
2271 static int pdom_attach_iommu(struct amd_iommu *iommu,
2272 struct protection_domain *pdom)
2273 {
2274 struct pdom_iommu_info *pdom_iommu_info, *curr;
2275 unsigned long flags;
2276 int ret = 0;
2277
2278 spin_lock_irqsave(&pdom->lock, flags);
2279
2280 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
2281 if (pdom_iommu_info) {
2282 pdom_iommu_info->refcnt++;
2283 goto out_unlock;
2284 }
2285
2286 pdom_iommu_info = kzalloc_obj(*pdom_iommu_info, GFP_ATOMIC);
2287 if (!pdom_iommu_info) {
2288 ret = -ENOMEM;
2289 goto out_unlock;
2290 }
2291
2292 pdom_iommu_info->iommu = iommu;
2293 pdom_iommu_info->refcnt = 1;
2294
2295 curr = xa_cmpxchg(&pdom->iommu_array, iommu->index,
2296 NULL, pdom_iommu_info, GFP_ATOMIC);
2297 if (curr) {
2298 kfree(pdom_iommu_info);
2299 ret = -ENOSPC;
2300 goto out_unlock;
2301 }
2302
2303 out_unlock:
2304 spin_unlock_irqrestore(&pdom->lock, flags);
2305 return ret;
2306 }
2307
2308 static void pdom_detach_iommu(struct amd_iommu *iommu,
2309 struct protection_domain *pdom)
2310 {
2311 struct pdom_iommu_info *pdom_iommu_info;
2312 unsigned long flags;
2313
2314 spin_lock_irqsave(&pdom->lock, flags);
2315
2316 pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
2317 if (!pdom_iommu_info) {
2318 spin_unlock_irqrestore(&pdom->lock, flags);
2319 return;
2320 }
2321
2322 pdom_iommu_info->refcnt--;
2323 if (pdom_iommu_info->refcnt == 0) {
2324 xa_erase(&pdom->iommu_array, iommu->index);
2325 kfree(pdom_iommu_info);
2326 }
2327
2328 spin_unlock_irqrestore(&pdom->lock, flags);
2329 }
2330
2331 /*
2332 * If a device is not yet associated with a domain, this function makes the
2333 * device visible in the domain
2334 */
2335 static int attach_device(struct device *dev,
2336 struct protection_domain *domain)
2337 {
2338 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2339 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2340 struct pci_dev *pdev;
2341 unsigned long flags;
2342 int ret = 0;
2343
2344 mutex_lock(&dev_data->mutex);
2345
2346 if (dev_data->domain != NULL) {
2347 ret = -EBUSY;
2348 goto out;
2349 }
2350
2351 /* Do reference counting */
2352 ret = pdom_attach_iommu(iommu, domain);
2353 if (ret)
2354 goto out;
2355
2356 /* Setup GCR3 table */
2357 if (pdom_is_sva_capable(domain)) {
2358 ret = init_gcr3_table(dev_data, domain);
2359 if (ret) {
2360 pdom_detach_iommu(iommu, domain);
2361 goto out;
2362 }
2363 }
2364
2365 pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
2366 if (pdev && pdom_is_sva_capable(domain)) {
2367 pdev_enable_caps(pdev);
2368
2369 /*
2370 * Device can continue to function even if IOPF
2371 * enablement failed. Hence in error path just
2372 * disable device PRI support.
2373 */
2374 if (amd_iommu_iopf_add_device(iommu, dev_data))
2375 pdev_disable_cap_pri(pdev);
2376 } else if (pdev) {
2377 pdev_enable_cap_ats(pdev);
2378 }
2379
2380 /* Update data structures */
2381 dev_data->domain = domain;
2382 spin_lock_irqsave(&domain->lock, flags);
2383 list_add(&dev_data->list, &domain->dev_list);
2384 spin_unlock_irqrestore(&domain->lock, flags);
2385
2386 /* Update device table */
2387 dev_update_dte(dev_data, true);
2388
2389 out:
2390 mutex_unlock(&dev_data->mutex);
2391
2392 return ret;
2393 }
2394
2395 /*
2396 * Removes a device from a protection domain (with devtable_lock held)
2397 */
2398 static void detach_device(struct device *dev)
2399 {
2400 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2401 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
2402 struct protection_domain *domain = dev_data->domain;
2403 unsigned long flags;
2404
2405 mutex_lock(&dev_data->mutex);
2406
2407 /*
2408 * First check if the device is still attached. It might already
2409 * be detached from its domain because the generic
2410 * iommu_detach_group code detached it and we try again here in
2411 * our alias handling.
2412 */
2413 if (WARN_ON(!dev_data->domain))
2414 goto out;
2415
2416 /* Remove IOPF handler */
2417 if (dev_data->ppr) {
2418 iopf_queue_flush_dev(dev);
2419 amd_iommu_iopf_remove_device(iommu, dev_data);
2420 }
2421
2422 if (dev_is_pci(dev))
2423 pdev_disable_caps(to_pci_dev(dev));
2424
2425 /* Clear DTE and flush the entry */
2426 dev_update_dte(dev_data, false);
2427
2428 /* Flush IOTLB and wait for the flushes to finish */
2429 spin_lock_irqsave(&domain->lock, flags);
2430 amd_iommu_domain_flush_all(domain);
2431 list_del(&dev_data->list);
2432 spin_unlock_irqrestore(&domain->lock, flags);
2433
2434 /* Clear GCR3 table */
2435 if (pdom_is_sva_capable(domain))
2436 destroy_gcr3_table(dev_data, domain);
2437
2438 /* Update data structures */
2439 dev_data->domain = NULL;
2440
2441 /* decrease reference counters - needs to happen after the flushes */
2442 pdom_detach_iommu(iommu, domain);
2443
2444 out:
2445 mutex_unlock(&dev_data->mutex);
2446 }
2447
2448 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
2449 {
2450 struct iommu_device *iommu_dev;
2451 struct amd_iommu *iommu;
2452 struct iommu_dev_data *dev_data;
2453 int ret;
2454
2455 if (!check_device(dev))
2456 return ERR_PTR(-ENODEV);
2457
2458 iommu = rlookup_amd_iommu(dev);
2459 if (!iommu)
2460 return ERR_PTR(-ENODEV);
2461
2462 /* Not registered yet? */
2463 if (!iommu->iommu.ops)
2464 return ERR_PTR(-ENODEV);
2465
2466 if (dev_iommu_priv_get(dev))
2467 return &iommu->iommu;
2468
2469 ret = iommu_init_device(iommu, dev);
2470 if (ret) {
2471 dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
2472 iommu_dev = ERR_PTR(ret);
2473 iommu_ignore_device(iommu, dev);
2474 goto out_err;
2475 }
2476
2477 amd_iommu_set_pci_msi_domain(dev, iommu);
2478 iommu_dev = &iommu->iommu;
2479
2480 /*
2481 * If the IOMMU and the device support PASID, max_pasids will contain
2482 * the maximum number of supported PASIDs; otherwise it will be zero.
2483 */
2484 dev_data = dev_iommu_priv_get(dev);
2485 if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
2486 pdev_pasid_supported(dev_data)) {
2487 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
2488 pci_max_pasids(to_pci_dev(dev)));
2489 }
2490
2491 if (amd_iommu_pgtable == PD_MODE_NONE) {
2492 pr_warn_once("%s: DMA translation not supported by iommu.\n",
2493 __func__);
2494 iommu_dev = ERR_PTR(-ENODEV);
2495 goto out_err;
2496 }
2497
2498 iommu_completion_wait(iommu);
2499
2500 if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
2501 dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K;
2502 else
2503 dev_data->max_irqs = MAX_IRQS_PER_TABLE_512;
2504
2505 if (dev_is_pci(dev))
2506 pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);
2507
2508 out_err:
2509 return iommu_dev;
2510 }
2511
2512 static void amd_iommu_release_device(struct device *dev)
2513 {
2514 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2515
2516 WARN_ON(dev_data->domain);
2517
2518 /*
2519 * We keep dev_data around for unplugged devices and reuse it when the
2520 * device is re-plugged - not doing so would introduce a ton of races.
2521 */
2522 }
2523
2524 static struct iommu_group *amd_iommu_device_group(struct device *dev)
2525 {
2526 if (dev_is_pci(dev))
2527 return pci_device_group(dev);
2528
2529 return acpihid_device_group(dev);
2530 }
2531
2532 /*****************************************************************************
2533 *
2534 * The following functions belong to the exported interface of AMD IOMMU
2535 *
2536 * This interface allows access to lower level functions of the IOMMU
2537 * like protection domain handling and assignment of devices to domains
2538 * which is not possible with the dma_ops interface.
2539 *
2540 *****************************************************************************/
2541
2542 static void protection_domain_init(struct protection_domain *domain)
2543 {
2544 spin_lock_init(&domain->lock);
2545 INIT_LIST_HEAD(&domain->dev_list);
2546 INIT_LIST_HEAD(&domain->dev_data_list);
2547 INIT_LIST_HEAD(&domain->viommu_list);
2548 xa_init(&domain->iommu_array);
2549 }
2550
2551 struct protection_domain *protection_domain_alloc(void)
2552 {
2553 struct protection_domain *domain;
2554 int domid;
2555
2556 domain = kzalloc_obj(*domain);
2557 if (!domain)
2558 return NULL;
2559
2560 domid = amd_iommu_pdom_id_alloc();
2561 if (domid <= 0) {
2562 kfree(domain);
2563 return NULL;
2564 }
2565 domain->id = domid;
2566
2567 protection_domain_init(domain);
2568
2569 return domain;
2570 }
2571
2572 static bool amd_iommu_hd_support(struct amd_iommu *iommu)
2573 {
2574 if (amd_iommu_hatdis)
2575 return false;
2576
2577 return iommu && (iommu->features & FEATURE_HDSUP);
2578 }
2579
2580 static spinlock_t *amd_iommu_get_top_lock(struct pt_iommu *iommupt)
2581 {
2582 struct protection_domain *pdom =
2583 container_of(iommupt, struct protection_domain, iommu);
2584
2585 return &pdom->lock;
2586 }
2587
2588 /*
2589 * Update all HW references to the domain with a new pgtable configuration.
2590 */
2591 static void amd_iommu_change_top(struct pt_iommu *iommu_table,
2592 phys_addr_t top_paddr, unsigned int top_level)
2593 {
2594 struct protection_domain *pdom =
2595 container_of(iommu_table, struct protection_domain, iommu);
2596 struct iommu_dev_data *dev_data;
2597
2598 lockdep_assert_held(&pdom->lock);
2599
2600 /* Update the DTE for all devices attached to this domain */
2601 list_for_each_entry(dev_data, &pdom->dev_list, list) {
2602 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
2603
2604 /* Update the HW references with the new level and top ptr */
2605 set_dte_entry(iommu, dev_data, top_paddr, top_level);
2606 clone_aliases(iommu, dev_data->dev);
2607 }
2608
2609 list_for_each_entry(dev_data, &pdom->dev_list, list)
2610 device_flush_dte(dev_data);
2611
2612 domain_flush_complete(pdom);
2613 }
2614
2615 /*
2616 * amd_iommu_iotlb_sync_map() is used to generate flushes for non-present to
2617 * present (i.e. mapping) operations. It is a NOP if the IOMMU doesn't have
2618 * non-present caching (like hypervisor shadowing).
2619 */
2620 static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
2621 unsigned long iova, size_t size)
2622 {
2623 struct protection_domain *domain = to_pdomain(dom);
2624 unsigned long flags;
2625
2626 if (likely(!amd_iommu_np_cache))
2627 return 0;
2628
2629 spin_lock_irqsave(&domain->lock, flags);
2630 amd_iommu_domain_flush_pages(domain, iova, size);
2631 spin_unlock_irqrestore(&domain->lock, flags);
2632 return 0;
2633 }
2634
2635 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
2636 {
2637 struct protection_domain *dom = to_pdomain(domain);
2638 unsigned long flags;
2639
2640 spin_lock_irqsave(&dom->lock, flags);
2641 amd_iommu_domain_flush_all(dom);
2642 spin_unlock_irqrestore(&dom->lock, flags);
2643 }
2644
2645 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
2646 struct iommu_iotlb_gather *gather)
2647 {
2648 struct protection_domain *dom = to_pdomain(domain);
2649 unsigned long flags;
2650
2651 spin_lock_irqsave(&dom->lock, flags);
2652 amd_iommu_domain_flush_pages(dom, gather->start,
2653 gather->end - gather->start + 1);
2654 spin_unlock_irqrestore(&dom->lock, flags);
2655 iommu_put_pages_list(&gather->freelist);
2656 }
2657
2658 static const struct pt_iommu_driver_ops amd_hw_driver_ops_v1 = {
2659 .get_top_lock = amd_iommu_get_top_lock,
2660 .change_top = amd_iommu_change_top,
2661 };
2662
2663 static const struct iommu_domain_ops amdv1_ops = {
2664 IOMMU_PT_DOMAIN_OPS(amdv1),
2665 .iotlb_sync_map = amd_iommu_iotlb_sync_map,
2666 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
2667 .iotlb_sync = amd_iommu_iotlb_sync,
2668 .attach_dev = amd_iommu_attach_device,
2669 .free = amd_iommu_domain_free,
2670 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
2671 };
2672
2673 static const struct iommu_dirty_ops amdv1_dirty_ops = {
2674 IOMMU_PT_DIRTY_OPS(amdv1),
2675 .set_dirty_tracking = amd_iommu_set_dirty_tracking,
2676 };
2677
2678 static struct iommu_domain *amd_iommu_domain_alloc_paging_v1(struct device *dev,
2679 u32 flags)
2680 {
2681 struct pt_iommu_amdv1_cfg cfg = {};
2682 struct protection_domain *domain;
2683 int ret;
2684
2685 if (amd_iommu_hatdis)
2686 return ERR_PTR(-EOPNOTSUPP);
2687
2688 domain = protection_domain_alloc();
2689 if (!domain)
2690 return ERR_PTR(-ENOMEM);
2691
2692 domain->pd_mode = PD_MODE_V1;
2693 domain->iommu.driver_ops = &amd_hw_driver_ops_v1;
2694 domain->iommu.nid = dev_to_node(dev);
2695 if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
2696 domain->domain.dirty_ops = &amdv1_dirty_ops;
2697
2698 /*
2699 * Someday FORCE_COHERENCE should be set by
2700 * amd_iommu_enforce_cache_coherency() like VT-d does.
2701 */
2702 cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
2703 BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
2704 BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
2705
2706 /*
2707 * AMD's IOMMU can flush as many pages as necessary in a single flush.
2708 * Unless we run in a virtual machine, which can be inferred according
2709 * to whether "non-present cache" is on, it is probably best to prefer
2710 * (potentially) too extensive TLB flushing (i.e., more misses) over
2711 * multiple TLB flushes (i.e., more flushes). For virtual machines the
2712 * hypervisor needs to synchronize the host IOMMU PTEs with those of
2713 * the guest, and the trade-off is different: unnecessary TLB flushes
2714 * should be avoided.
2715 */
2716 if (amd_iommu_np_cache)
2717 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
2718 else
2719 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
2720
2721 cfg.common.hw_max_vasz_lg2 =
2722 min(64, (amd_iommu_hpt_level - 1) * 9 + 21);
2723 cfg.common.hw_max_oasz_lg2 = 52;
2724 cfg.starting_level = 2;
2725 domain->domain.ops = &amdv1_ops;
2726
2727 ret = pt_iommu_amdv1_init(&domain->amdv1, &cfg, GFP_KERNEL);
2728 if (ret) {
2729 amd_iommu_domain_free(&domain->domain);
2730 return ERR_PTR(ret);
2731 }
2732
2733 /*
2734 * Narrow the supported page sizes to those selected by the kernel
2735 * command line.
2736 */
2737 domain->domain.pgsize_bitmap &= amd_iommu_pgsize_bitmap;
2738 return &domain->domain;
2739 }
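/*
 * Worked example for the hw_max_vasz_lg2 computation in the v1 allocation
 * above (illustrative values, assuming amd_iommu_hpt_level counts the v1
 * host page table levels): a 3-level table gives (3 - 1) * 9 + 21 = 39
 * bits of IOVA, a 4-level table gives 48, and a 6-level table would give
 * 66, which min() caps at the 64-bit limit.
 */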
2740
2741 static const struct iommu_domain_ops amdv2_ops = {
2742 IOMMU_PT_DOMAIN_OPS(x86_64),
2743 .iotlb_sync_map = amd_iommu_iotlb_sync_map,
2744 .flush_iotlb_all = amd_iommu_flush_iotlb_all,
2745 .iotlb_sync = amd_iommu_iotlb_sync,
2746 .attach_dev = amd_iommu_attach_device,
2747 .free = amd_iommu_domain_free,
2748 /*
2749 * Note the AMDv2 page table format does not support a Force Coherency
2750 * bit, so enforce_cache_coherency should not be set. However VFIO is
2751 * not prepared to handle a case where some domains will support
2752 * enforcement and others do not. VFIO and iommufd will have to be fixed
2753 * before it can fully use the V2 page table. See the comment in
2754 * iommufd_hwpt_paging_alloc(). For now leave things as they have
2755 * historically been and lie about enforce_cache_coherency.
2756 */
2757 .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
2758 };
2759
2760 static struct iommu_domain *amd_iommu_domain_alloc_paging_v2(struct device *dev,
2761 u32 flags)
2762 {
2763 struct pt_iommu_x86_64_cfg cfg = {};
2764 struct protection_domain *domain;
2765 int ret;
2766
2767 if (!amd_iommu_v2_pgtbl_supported())
2768 return ERR_PTR(-EOPNOTSUPP);
2769
2770 domain = protection_domain_alloc();
2771 if (!domain)
2772 return ERR_PTR(-ENOMEM);
2773
2774 domain->pd_mode = PD_MODE_V2;
2775 domain->iommu.nid = dev_to_node(dev);
2776
2777 cfg.common.features = BIT(PT_FEAT_X86_64_AMD_ENCRYPT_TABLES);
2778 if (amd_iommu_np_cache)
2779 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
2780 else
2781 cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
2782
2783 /*
2784 * The v2 table behaves differently if it is attached to PASID 0 vs a
2785 * non-zero PASID. On PASID 0 it has no sign extension and the full
2786 * 57/48 bits decode the lower addresses. Otherwise it behaves like a
2787 * normal sign extended x86 page table. Since we want the domain to work
2788 * in both modes the top bit is removed and PT_FEAT_SIGN_EXTEND is not
2789 * set which creates a table that is compatible in both modes.
2790 */
2791 if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
2792 cfg.common.hw_max_vasz_lg2 = 56;
2793 cfg.top_level = 4;
2794 } else {
2795 cfg.common.hw_max_vasz_lg2 = 47;
2796 cfg.top_level = 3;
2797 }
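/*
 * For reference (derived from the comment above): a 5-level guest page
 * table decodes a 57-bit VA; removing the top/sign bit leaves
 * hw_max_vasz_lg2 = 56. Likewise a 4-level table decodes 48 bits, and
 * removing the top bit leaves 47.
 */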
2798 cfg.common.hw_max_oasz_lg2 = 52;
2799 domain->domain.ops = &amdv2_ops;
2800
2801 ret = pt_iommu_x86_64_init(&domain->amdv2, &cfg, GFP_KERNEL);
2802 if (ret) {
2803 amd_iommu_domain_free(&domain->domain);
2804 return ERR_PTR(ret);
2805 }
2806 return &domain->domain;
2807 }
2808
2809 static inline bool is_nest_parent_supported(u32 flags)
2810 {
2811 /* Only allow nest parent when these features are supported */
2812 return check_feature(FEATURE_GT) &&
2813 check_feature(FEATURE_GIOSUP) &&
2814 check_feature2(FEATURE_GCR3TRPMODE);
2815 }
2816
2817 static struct iommu_domain *
2818 amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
2819 const struct iommu_user_data *user_data)
2820
2821 {
2822 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2823 const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
2824 IOMMU_HWPT_ALLOC_PASID |
2825 IOMMU_HWPT_ALLOC_NEST_PARENT;
2826
2827 if ((flags & ~supported_flags) || user_data)
2828 return ERR_PTR(-EOPNOTSUPP);
2829
2830 switch (flags & supported_flags) {
2831 case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
2832 case IOMMU_HWPT_ALLOC_NEST_PARENT:
2833 case IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_NEST_PARENT:
2834 /*
2835 * Allocate domain with v1 page table for dirty tracking
2836 * and/or Nest parent.
2837 */
2838 if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
2839 !amd_iommu_hd_support(iommu))
2840 break;
2841
2842 if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) &&
2843 !is_nest_parent_supported(flags))
2844 break;
2845
2846 return amd_iommu_domain_alloc_paging_v1(dev, flags);
2847 case IOMMU_HWPT_ALLOC_PASID:
2848 /* Allocate domain with v2 page table if IOMMU supports PASID. */
2849 if (!amd_iommu_pasid_supported())
2850 break;
2851 return amd_iommu_domain_alloc_paging_v2(dev, flags);
2852 case 0: {
2853 struct iommu_domain *ret;
2854
2855 /* If nothing specific is required use the kernel commandline default */
2856 if (amd_iommu_pgtable == PD_MODE_V1) {
2857 ret = amd_iommu_domain_alloc_paging_v1(dev, flags);
2858 if (ret != ERR_PTR(-EOPNOTSUPP))
2859 return ret;
2860 return amd_iommu_domain_alloc_paging_v2(dev, flags);
2861 }
2862 ret = amd_iommu_domain_alloc_paging_v2(dev, flags);
2863 if (ret != ERR_PTR(-EOPNOTSUPP))
2864 return ret;
2865 return amd_iommu_domain_alloc_paging_v1(dev, flags);
2866 }
2867 default:
2868 break;
2869 }
2870 return ERR_PTR(-EOPNOTSUPP);
2871 }
2872
2873 void amd_iommu_domain_free(struct iommu_domain *dom)
2874 {
2875 struct protection_domain *domain = to_pdomain(dom);
2876
2877 WARN_ON(!list_empty(&domain->dev_list));
2878 pt_iommu_deinit(&domain->iommu);
2879 amd_iommu_pdom_id_free(domain->id);
2880 kfree(domain);
2881 }
2882
2883 static int blocked_domain_attach_device(struct iommu_domain *domain,
2884 struct device *dev,
2885 struct iommu_domain *old)
2886 {
2887 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2888
2889 if (dev_data->domain)
2890 detach_device(dev);
2891
2892 /* Clear DTE and flush the entry */
2893 mutex_lock(&dev_data->mutex);
2894 dev_update_dte(dev_data, false);
2895 mutex_unlock(&dev_data->mutex);
2896
2897 return 0;
2898 }
2899
2900 static int blocked_domain_set_dev_pasid(struct iommu_domain *domain,
2901 struct device *dev, ioasid_t pasid,
2902 struct iommu_domain *old)
2903 {
2904 amd_iommu_remove_dev_pasid(dev, pasid, old);
2905 return 0;
2906 }
2907
2908 static struct iommu_domain blocked_domain = {
2909 .type = IOMMU_DOMAIN_BLOCKED,
2910 .ops = &(const struct iommu_domain_ops) {
2911 .attach_dev = blocked_domain_attach_device,
2912 .set_dev_pasid = blocked_domain_set_dev_pasid,
2913 }
2914 };
2915
2916 static struct protection_domain identity_domain;
2917
2918 static int amd_iommu_identity_attach(struct iommu_domain *dom, struct device *dev,
2919 struct iommu_domain *old)
2920 {
2921 /*
2922 * Don't allow attaching a device to the identity domain if SNP is
2923 * enabled.
2924 */
2925 if (amd_iommu_snp_en)
2926 return -EINVAL;
2927
2928 return amd_iommu_attach_device(dom, dev, old);
2929 }
2930
2931 static const struct iommu_domain_ops identity_domain_ops = {
2932 .attach_dev = amd_iommu_identity_attach,
2933 };
2934
2935 void amd_iommu_init_identity_domain(void)
2936 {
2937 struct iommu_domain *domain = &identity_domain.domain;
2938
2939 domain->type = IOMMU_DOMAIN_IDENTITY;
2940 domain->ops = &identity_domain_ops;
2941 domain->owner = &amd_iommu_ops;
2942
2943 identity_domain.id = amd_iommu_pdom_id_alloc();
2944
2945 protection_domain_init(&identity_domain);
2946 }
2947
2948 static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
2949 struct iommu_domain *old)
2950 {
2951 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
2952 struct protection_domain *domain = to_pdomain(dom);
2953 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
2954 int ret;
2955
2956 /*
2957 * Skip attaching the device to the domain if the new domain is the
2958 * same as the device's current domain.
2959 */
2960 if (dev_data->domain == domain)
2961 return 0;
2962
2963 dev_data->defer_attach = false;
2964
2965 /*
2966 * Restrict to devices with compatible IOMMU hardware support
2967 * when enforcement of dirty tracking is enabled.
2968 */
2969 if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
2970 return -EINVAL;
2971
2972 if (dev_data->domain)
2973 detach_device(dev);
2974
2975 ret = attach_device(dev, domain);
2976
2977 #ifdef CONFIG_IRQ_REMAP
2978 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2979 if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2980 dev_data->use_vapic = 1;
2981 else
2982 dev_data->use_vapic = 0;
2983 }
2984 #endif
2985
2986 return ret;
2987 }
2988
2989 static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
2990 {
2991 switch (cap) {
2992 case IOMMU_CAP_CACHE_COHERENCY:
2993 return true;
2994 case IOMMU_CAP_NOEXEC:
2995 return false;
2996 case IOMMU_CAP_PRE_BOOT_PROTECTION:
2997 return amdr_ivrs_remap_support;
2998 case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
2999 return true;
3000 case IOMMU_CAP_DIRTY_TRACKING: {
3001 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
3002
3003 return amd_iommu_hd_support(iommu);
3004 }
3005 case IOMMU_CAP_PCI_ATS_SUPPORTED: {
3006 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
3007
3008 return amd_iommu_iotlb_sup &&
3009 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP);
3010 }
3011 default:
3012 break;
3013 }
3014
3015 return false;
3016 }
3017
3018 static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
3019 bool enable)
3020 {
3021 struct protection_domain *pdomain = to_pdomain(domain);
3022 struct dev_table_entry *dte;
3023 struct iommu_dev_data *dev_data;
3024 bool domain_flush = false;
3025 struct amd_iommu *iommu;
3026 unsigned long flags;
3027 u64 new;
3028
3029 spin_lock_irqsave(&pdomain->lock, flags);
3030 if (!(pdomain->dirty_tracking ^ enable)) {
3031 spin_unlock_irqrestore(&pdomain->lock, flags);
3032 return 0;
3033 }
3034
3035 list_for_each_entry(dev_data, &pdomain->dev_list, list) {
3036 spin_lock(&dev_data->dte_lock);
3037 iommu = get_amd_iommu_from_dev_data(dev_data);
3038 dte = &get_dev_table(iommu)[dev_data->devid];
3039 new = dte->data[0];
3040 new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
3041 dte->data[0] = new;
3042 spin_unlock(&dev_data->dte_lock);
3043
3044 /* Flush device DTE */
3045 device_flush_dte(dev_data);
3046 domain_flush = true;
3047 }
3048
3049 /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
3050 if (domain_flush)
3051 amd_iommu_domain_flush_all(pdomain);
3052
3053 pdomain->dirty_tracking = enable;
3054 spin_unlock_irqrestore(&pdomain->lock, flags);
3055
3056 return 0;
3057 }
3058
3059 static void amd_iommu_get_resv_regions(struct device *dev,
3060 struct list_head *head)
3061 {
3062 struct iommu_resv_region *region;
3063 struct unity_map_entry *entry;
3064 struct amd_iommu *iommu;
3065 struct amd_iommu_pci_seg *pci_seg;
3066 int devid, sbdf;
3067
3068 sbdf = get_device_sbdf_id(dev);
3069 if (sbdf < 0)
3070 return;
3071
3072 devid = PCI_SBDF_TO_DEVID(sbdf);
3073 iommu = get_amd_iommu_from_dev(dev);
3074 pci_seg = iommu->pci_seg;
3075
3076 list_for_each_entry(entry, &pci_seg->unity_map, list) {
3077 int type, prot = 0;
3078 size_t length;
3079
3080 if (devid < entry->devid_start || devid > entry->devid_end)
3081 continue;
3082
3083 type = IOMMU_RESV_DIRECT;
3084 length = entry->address_end - entry->address_start;
3085 if (entry->prot & IOMMU_PROT_IR)
3086 prot |= IOMMU_READ;
3087 if (entry->prot & IOMMU_PROT_IW)
3088 prot |= IOMMU_WRITE;
3089 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
3090 /* Exclusion range */
3091 type = IOMMU_RESV_RESERVED;
3092
3093 region = iommu_alloc_resv_region(entry->address_start,
3094 length, prot, type,
3095 GFP_KERNEL);
3096 if (!region) {
3097 dev_err(dev, "Out of memory allocating dm-regions\n");
3098 return;
3099 }
3100 list_add_tail(&region->list, head);
3101 }
3102
3103 region = iommu_alloc_resv_region(MSI_RANGE_START,
3104 MSI_RANGE_END - MSI_RANGE_START + 1,
3105 0, IOMMU_RESV_MSI, GFP_KERNEL);
3106 if (!region)
3107 return;
3108 list_add_tail(&region->list, head);
3109
3110 if (amd_iommu_ht_range_ignore())
3111 return;
3112
3113 region = iommu_alloc_resv_region(HT_RANGE_START,
3114 HT_RANGE_END - HT_RANGE_START + 1,
3115 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
3116 if (!region)
3117 return;
3118 list_add_tail(&region->list, head);
3119 }
3120
3121 static bool amd_iommu_is_attach_deferred(struct device *dev)
3122 {
3123 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
3124
3125 return dev_data->defer_attach;
3126 }
3127
3128 static int amd_iommu_def_domain_type(struct device *dev)
3129 {
3130 struct iommu_dev_data *dev_data;
3131
3132 dev_data = dev_iommu_priv_get(dev);
3133 if (!dev_data)
3134 return 0;
3135
3136 /* Always use DMA domain for untrusted device */
3137 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
3138 return IOMMU_DOMAIN_DMA;
3139
3140 /*
3141 * Do not identity map IOMMUv2 capable devices when:
3142 * - memory encryption is active, because some of those devices
3143 * (AMD GPUs) don't have the encryption bit in their DMA-mask
3144 * and require remapping.
3145 * - SNP is enabled, because it prohibits DTE[Mode]=0.
3146 */
3147 if (pdev_pasid_supported(dev_data) &&
3148 !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
3149 !amd_iommu_snp_en) {
3150 return IOMMU_DOMAIN_IDENTITY;
3151 }
3152
3153 return 0;
3154 }
3155
3156 static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
3157 {
3158 /* IOMMU_PTE_FC is always set */
3159 return true;
3160 }
3161
3162 const struct iommu_ops amd_iommu_ops = {
3163 .capable = amd_iommu_capable,
3164 .hw_info = amd_iommufd_hw_info,
3165 .blocked_domain = &blocked_domain,
3166 .release_domain = &blocked_domain,
3167 .identity_domain = &identity_domain.domain,
3168 .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
3169 .domain_alloc_sva = amd_iommu_domain_alloc_sva,
3170 .probe_device = amd_iommu_probe_device,
3171 .release_device = amd_iommu_release_device,
3172 .device_group = amd_iommu_device_group,
3173 .get_resv_regions = amd_iommu_get_resv_regions,
3174 .is_attach_deferred = amd_iommu_is_attach_deferred,
3175 .def_domain_type = amd_iommu_def_domain_type,
3176 .page_response = amd_iommu_page_response,
3177 .get_viommu_size = amd_iommufd_get_viommu_size,
3178 .viommu_init = amd_iommufd_viommu_init,
3179 };
3180
3181 #ifdef CONFIG_IRQ_REMAP
3182
3183 /*****************************************************************************
3184 *
3185 * Interrupt Remapping Implementation
3186 *
3187 *****************************************************************************/
3188
3189 static struct irq_chip amd_ir_chip;
3190 static DEFINE_SPINLOCK(iommu_table_lock);
3191
3192 static int iommu_flush_dev_irt(struct pci_dev *unused, u16 devid, void *data)
3193 {
3194 int ret;
3195 struct iommu_cmd cmd;
3196 struct amd_iommu *iommu = data;
3197
3198 build_inv_irt(&cmd, devid);
3199 ret = __iommu_queue_command_sync(iommu, &cmd, true);
3200 return ret;
3201 }
3202
3203 static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
3204 {
3205 int ret;
3206 u64 data;
3207 unsigned long flags;
3208 struct iommu_cmd cmd;
3209 struct pci_dev *pdev = NULL;
3210 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
3211
3212 if (iommu->irtcachedis_enabled)
3213 return;
3214
3215 if (dev_data && dev_data->dev && dev_is_pci(dev_data->dev))
3216 pdev = to_pci_dev(dev_data->dev);
3217
3218 raw_spin_lock_irqsave(&iommu->lock, flags);
3219 data = get_cmdsem_val(iommu);
3220 build_completion_wait(&cmd, iommu, data);
3221
3222 if (pdev)
3223 ret = pci_for_each_dma_alias(pdev, iommu_flush_dev_irt, iommu);
3224 else
3225 ret = iommu_flush_dev_irt(NULL, devid, iommu);
3226 if (ret)
3227 goto out_err;
3228
3229 ret = __iommu_queue_command_sync(iommu, &cmd, false);
3230 if (ret)
3231 goto out_err;
3232 raw_spin_unlock_irqrestore(&iommu->lock, flags);
3233
3234 wait_on_sem(iommu, data);
3235 return;
3236
3237 out_err:
3238 raw_spin_unlock_irqrestore(&iommu->lock, flags);
3239 }
3240
3241 static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
3242 {
3243 if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K)
3244 return DTE_INTTABLEN_2K;
3245 return DTE_INTTABLEN_512;
3246 }
3247
3248 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
3249 struct irq_remap_table *table)
3250 {
3251 u64 new;
3252 struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
3253 struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
3254
3255 if (dev_data)
3256 spin_lock(&dev_data->dte_lock);
3257
3258 new = READ_ONCE(dte->data[2]);
3259 new &= ~DTE_IRQ_PHYS_ADDR_MASK;
3260 new |= iommu_virt_to_phys(table->table);
3261 new |= DTE_IRQ_REMAP_INTCTL;
3262 new |= iommu_get_int_tablen(dev_data);
3263 new |= DTE_IRQ_REMAP_ENABLE;
3264 WRITE_ONCE(dte->data[2], new);
3265
3266 if (dev_data)
3267 spin_unlock(&dev_data->dte_lock);
3268 }
3269
3270 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
3271 {
3272 struct irq_remap_table *table;
3273 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
3274
3275 if (WARN_ONCE(!pci_seg->rlookup_table[devid],
3276 "%s: no iommu for devid %x:%x\n",
3277 __func__, pci_seg->id, devid))
3278 return NULL;
3279
3280 table = pci_seg->irq_lookup_table[devid];
3281 if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
3282 __func__, pci_seg->id, devid))
3283 return NULL;
3284
3285 return table;
3286 }
3287
3288 static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
3289 {
3290 struct irq_remap_table *table;
3291
3292 table = kzalloc_obj(*table);
3293 if (!table)
3294 return NULL;
3295
3296 table->table = iommu_alloc_pages_node_sz(
3297 nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
3298 if (!table->table) {
3299 kfree(table);
3300 return NULL;
3301 }
3302 raw_spin_lock_init(&table->lock);
3303
3304 return table;
3305 }
3306
3307 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
3308 struct irq_remap_table *table)
3309 {
3310 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
3311
3312 pci_seg->irq_lookup_table[devid] = table;
3313 set_dte_irq_entry(iommu, devid, table);
3314 iommu_flush_dte(iommu, devid);
3315 }
3316
3317 static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
3318 void *data)
3319 {
3320 struct irq_remap_table *table = data;
3321 struct amd_iommu_pci_seg *pci_seg;
3322 struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
3323
3324 if (!iommu)
3325 return -EINVAL;
3326
3327 pci_seg = iommu->pci_seg;
3328 pci_seg->irq_lookup_table[alias] = table;
3329 set_dte_irq_entry(iommu, alias, table);
3330 iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
3331
3332 return 0;
3333 }
3334
3335 static inline size_t get_irq_table_size(unsigned int max_irqs)
3336 {
3337 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3338 return max_irqs * sizeof(u32);
3339
3340 return max_irqs * (sizeof(u64) * 2);
3341 }
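/*
 * Illustrative sizing example (not part of the driver): with the default
 * 512-entry table, legacy IRTEs are 4 bytes each (2 Kbytes total), while
 * 128-bit GA-mode IRTEs are 16 bytes each (8 Kbytes total). The 2K-entry
 * tables supported on newer IOMMUs scale these to 8 Kbytes and 32 Kbytes
 * respectively.
 */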
3342
3343 static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
3344 u16 devid, struct pci_dev *pdev,
3345 unsigned int max_irqs)
3346 {
3347 struct irq_remap_table *table = NULL;
3348 struct irq_remap_table *new_table = NULL;
3349 struct amd_iommu_pci_seg *pci_seg;
3350 unsigned long flags;
3351 int nid = iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
3352 u16 alias;
3353
3354 spin_lock_irqsave(&iommu_table_lock, flags);
3355
3356 pci_seg = iommu->pci_seg;
3357 table = pci_seg->irq_lookup_table[devid];
3358 if (table)
3359 goto out_unlock;
3360
3361 alias = pci_seg->alias_table[devid];
3362 table = pci_seg->irq_lookup_table[alias];
3363 if (table) {
3364 set_remap_table_entry(iommu, devid, table);
3365 goto out_wait;
3366 }
3367 spin_unlock_irqrestore(&iommu_table_lock, flags);
3368
3369 /* Nothing there yet, allocate new irq remapping table */
3370 new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
3371 if (!new_table)
3372 return NULL;
3373
3374 spin_lock_irqsave(&iommu_table_lock, flags);
3375
3376 table = pci_seg->irq_lookup_table[devid];
3377 if (table)
3378 goto out_unlock;
3379
3380 table = pci_seg->irq_lookup_table[alias];
3381 if (table) {
3382 set_remap_table_entry(iommu, devid, table);
3383 goto out_wait;
3384 }
3385
3386 table = new_table;
3387 new_table = NULL;
3388
3389 if (pdev)
3390 pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
3391 table);
3392 else
3393 set_remap_table_entry(iommu, devid, table);
3394
3395 if (devid != alias)
3396 set_remap_table_entry(iommu, alias, table);
3397
3398 out_wait:
3399 iommu_completion_wait(iommu);
3400
3401 out_unlock:
3402 spin_unlock_irqrestore(&iommu_table_lock, flags);
3403
3404 if (new_table) {
3405 iommu_free_pages(new_table->table);
3406 kfree(new_table);
3407 }
3408 return table;
3409 }
3410
3411 static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
3412 bool align, struct pci_dev *pdev,
3413 unsigned long max_irqs)
3414 {
3415 struct irq_remap_table *table;
3416 int index, c, alignment = 1;
3417 unsigned long flags;
3418
3419 table = alloc_irq_table(iommu, devid, pdev, max_irqs);
3420 if (!table)
3421 return -ENODEV;
3422
3423 if (align)
3424 alignment = roundup_pow_of_two(count);
3425
3426 raw_spin_lock_irqsave(&table->lock, flags);
3427
3428 /* Scan table for free entries */
3429 for (index = ALIGN(table->min_index, alignment), c = 0;
3430 index < max_irqs;) {
3431 if (!iommu->irte_ops->is_allocated(table, index)) {
3432 c += 1;
3433 } else {
3434 c = 0;
3435 index = ALIGN(index + 1, alignment);
3436 continue;
3437 }
3438
3439 if (c == count) {
3440 for (; c != 0; --c)
3441 iommu->irte_ops->set_allocated(table, index - c + 1);
3442
3443 index -= count - 1;
3444 goto out;
3445 }
3446
3447 index++;
3448 }
3449
3450 index = -ENOSPC;
3451
3452 out:
3453 raw_spin_unlock_irqrestore(&table->lock, flags);
3454
3455 return index;
3456 }
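/*
 * Illustrative example of the scan above (hypothetical table state,
 * assuming table->min_index is 0): with count = 4, align = true
 * (alignment = 4) and entries 0-5 already allocated, the scan starts at
 * index 0, restarts at ALIGN(1, 4) = 4 and then ALIGN(5, 4) = 8, finds
 * indexes 8-11 free, marks them allocated and returns 8.
 */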
3457
3458 static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3459 struct irte_ga *irte)
3460 {
3461 struct irq_remap_table *table;
3462 struct irte_ga *entry;
3463 unsigned long flags;
3464 u128 old;
3465
3466 table = get_irq_table(iommu, devid);
3467 if (!table)
3468 return -ENOMEM;
3469
3470 raw_spin_lock_irqsave(&table->lock, flags);
3471
3472 entry = (struct irte_ga *)table->table;
3473 entry = &entry[index];
3474
3475 /*
3476 * We use cmpxchg16 to atomically update the 128-bit IRTE,
3477 * and it cannot be updated by the hardware or other processors
3478 * behind us, so the return value of cmpxchg16 should be the
3479 * same as the old value.
3480 */
3481 old = entry->irte;
3482 WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));
3483
3484 raw_spin_unlock_irqrestore(&table->lock, flags);
3485
3486 return 0;
3487 }
3488
3489 static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
3490 struct irte_ga *irte)
3491 {
3492 int ret;
3493
3494 ret = __modify_irte_ga(iommu, devid, index, irte);
3495 if (ret)
3496 return ret;
3497
3498 iommu_flush_irt_and_complete(iommu, devid);
3499
3500 return 0;
3501 }
3502
3503 static int modify_irte(struct amd_iommu *iommu,
3504 u16 devid, int index, union irte *irte)
3505 {
3506 struct irq_remap_table *table;
3507 unsigned long flags;
3508
3509 table = get_irq_table(iommu, devid);
3510 if (!table)
3511 return -ENOMEM;
3512
3513 raw_spin_lock_irqsave(&table->lock, flags);
3514 table->table[index] = irte->val;
3515 raw_spin_unlock_irqrestore(&table->lock, flags);
3516
3517 iommu_flush_irt_and_complete(iommu, devid);
3518
3519 return 0;
3520 }
3521
3522 static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
3523 {
3524 struct irq_remap_table *table;
3525 unsigned long flags;
3526
3527 table = get_irq_table(iommu, devid);
3528 if (!table)
3529 return;
3530
3531 raw_spin_lock_irqsave(&table->lock, flags);
3532 iommu->irte_ops->clear_allocated(table, index);
3533 raw_spin_unlock_irqrestore(&table->lock, flags);
3534
3535 iommu_flush_irt_and_complete(iommu, devid);
3536 }
3537
3538 static void irte_prepare(void *entry,
3539 u32 delivery_mode, bool dest_mode,
3540 u8 vector, u32 dest_apicid, int devid)
3541 {
3542 union irte *irte = (union irte *) entry;
3543
3544 irte->val = 0;
3545 irte->fields.vector = vector;
3546 irte->fields.int_type = delivery_mode;
3547 irte->fields.destination = dest_apicid;
3548 irte->fields.dm = dest_mode;
3549 irte->fields.valid = 1;
3550 }
3551
3552 static void irte_ga_prepare(void *entry,
3553 u32 delivery_mode, bool dest_mode,
3554 u8 vector, u32 dest_apicid, int devid)
3555 {
3556 struct irte_ga *irte = (struct irte_ga *) entry;
3557
3558 irte->lo.val = 0;
3559 irte->hi.val = 0;
3560 irte->lo.fields_remap.int_type = delivery_mode;
3561 irte->lo.fields_remap.dm = dest_mode;
3562 irte->hi.fields.vector = vector;
3563 irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
3564 irte->hi.fields.destination = APICID_TO_IRTE_DEST_HI(dest_apicid);
3565 irte->lo.fields_remap.valid = 1;
3566 }
3567
3568 static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3569 {
3570 union irte *irte = (union irte *) entry;
3571
3572 irte->fields.valid = 1;
3573 modify_irte(iommu, devid, index, irte);
3574 }
3575
3576 static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3577 {
3578 struct irte_ga *irte = (struct irte_ga *) entry;
3579
3580 irte->lo.fields_remap.valid = 1;
3581 modify_irte_ga(iommu, devid, index, irte);
3582 }
3583
3584 static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3585 {
3586 union irte *irte = (union irte *) entry;
3587
3588 irte->fields.valid = 0;
3589 modify_irte(iommu, devid, index, irte);
3590 }
3591
3592 static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
3593 {
3594 struct irte_ga *irte = (struct irte_ga *) entry;
3595
3596 irte->lo.fields_remap.valid = 0;
3597 modify_irte_ga(iommu, devid, index, irte);
3598 }
3599
3600 static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3601 u8 vector, u32 dest_apicid)
3602 {
3603 union irte *irte = (union irte *) entry;
3604
3605 irte->fields.vector = vector;
3606 irte->fields.destination = dest_apicid;
3607 modify_irte(iommu, devid, index, irte);
3608 }
3609
3610 static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
3611 u8 vector, u32 dest_apicid)
3612 {
3613 struct irte_ga *irte = (struct irte_ga *) entry;
3614
3615 if (!irte->lo.fields_remap.guest_mode) {
3616 irte->hi.fields.vector = vector;
3617 irte->lo.fields_remap.destination =
3618 APICID_TO_IRTE_DEST_LO(dest_apicid);
3619 irte->hi.fields.destination =
3620 APICID_TO_IRTE_DEST_HI(dest_apicid);
3621 modify_irte_ga(iommu, devid, index, irte);
3622 }
3623 }
3624
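/*
 * Allocation state is tracked in the IRTE itself: a 32-bit entry is marked
 * allocated by writing the reserved IRTE_ALLOCATED pattern, a 128-bit entry
 * by programming a non-zero vector (0xff).
 */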
3625 #define IRTE_ALLOCATED (~1U)
3626 static void irte_set_allocated(struct irq_remap_table *table, int index)
3627 {
3628 table->table[index] = IRTE_ALLOCATED;
3629 }
3630
3631 static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3632 {
3633 struct irte_ga *ptr = (struct irte_ga *)table->table;
3634 struct irte_ga *irte = &ptr[index];
3635
3636 memset(&irte->lo.val, 0, sizeof(u64));
3637 memset(&irte->hi.val, 0, sizeof(u64));
3638 irte->hi.fields.vector = 0xff;
3639 }
3640
3641 static bool irte_is_allocated(struct irq_remap_table *table, int index)
3642 {
3643 union irte *ptr = (union irte *)table->table;
3644 union irte *irte = &ptr[index];
3645
3646 return irte->val != 0;
3647 }
3648
3649 static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3650 {
3651 struct irte_ga *ptr = (struct irte_ga *)table->table;
3652 struct irte_ga *irte = &ptr[index];
3653
3654 return irte->hi.fields.vector != 0;
3655 }
3656
3657 static void irte_clear_allocated(struct irq_remap_table *table, int index)
3658 {
3659 table->table[index] = 0;
3660 }
3661
3662 static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3663 {
3664 struct irte_ga *ptr = (struct irte_ga *)table->table;
3665 struct irte_ga *irte = &ptr[index];
3666
3667 memset(&irte->lo.val, 0, sizeof(u64));
3668 memset(&irte->hi.val, 0, sizeof(u64));
3669 }
3670
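/*
 * Return the device ID (seg/bus/dev/fn) that indexes the interrupt remapping
 * tables for the interrupt source described by @info, or -1 for unsupported
 * allocation types.
 */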
3671 static int get_devid(struct irq_alloc_info *info)
3672 {
3673 switch (info->type) {
3674 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3675 return get_ioapic_devid(info->devid);
3676 case X86_IRQ_ALLOC_TYPE_HPET:
3677 return get_hpet_devid(info->devid);
3678 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3679 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3680 return get_device_sbdf_id(msi_desc_to_dev(info->desc));
3681 default:
3682 WARN_ON_ONCE(1);
3683 return -1;
3684 }
3685 }
3686
3687 struct irq_remap_ops amd_iommu_irq_ops = {
3688 .prepare = amd_iommu_prepare,
3689 .enable = amd_iommu_enable,
3690 .disable = amd_iommu_disable,
3691 .reenable = amd_iommu_reenable,
3692 .enable_faulting = amd_iommu_enable_faulting,
3693 };
3694
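/*
 * Compose the remapped MSI message for an IRTE: the IRTE index is carried in
 * the message data, while the address targets the standard x86 MSI window.
 */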
3695 static void fill_msi_msg(struct msi_msg *msg, u32 index)
3696 {
3697 msg->data = index;
3698 msg->address_lo = 0;
3699 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
3700 /*
3701 * The struct msi_msg.dest_mode_logical is used to set the DM bit
3702 * in the MSI Message Address Register. For devices with 2K interrupt
3703 * remapping support, this bit must be set to 1 regardless of the actual
3704 * destination mode, which is signified by IRTE[DM].
3705 */
3706 if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
3707 msg->arch_addr_lo.dest_mode_logical = true;
3708 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
3709 }
3710
3711 static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3712 struct irq_cfg *irq_cfg,
3713 struct irq_alloc_info *info,
3714 int devid, int index, int sub_handle)
3715 {
3716 struct irq_2_irte *irte_info = &data->irq_2_irte;
3717 struct amd_iommu *iommu = data->iommu;
3718
3719 if (!iommu)
3720 return;
3721
3722 data->irq_2_irte.devid = devid;
3723 data->irq_2_irte.index = index + sub_handle;
3724 iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
3725 apic->dest_mode_logical, irq_cfg->vector,
3726 irq_cfg->dest_apicid, devid);
3727
3728 switch (info->type) {
3729 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3730 case X86_IRQ_ALLOC_TYPE_HPET:
3731 case X86_IRQ_ALLOC_TYPE_PCI_MSI:
3732 case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
3733 fill_msi_msg(&data->msi_entry, irte_info->index);
3734 break;
3735
3736 default:
3737 BUG_ON(1);
3738 break;
3739 }
3740 }
3741
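/* Callbacks for the legacy 32-bit IRTE format. */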
3742 struct amd_irte_ops irte_32_ops = {
3743 .prepare = irte_prepare,
3744 .activate = irte_activate,
3745 .deactivate = irte_deactivate,
3746 .set_affinity = irte_set_affinity,
3747 .set_allocated = irte_set_allocated,
3748 .is_allocated = irte_is_allocated,
3749 .clear_allocated = irte_clear_allocated,
3750 };
3751
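/* Callbacks for the 128-bit IRTE format used when guest virtual APIC (GA) mode is enabled. */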
3752 struct amd_irte_ops irte_128_ops = {
3753 .prepare = irte_ga_prepare,
3754 .activate = irte_ga_activate,
3755 .deactivate = irte_ga_deactivate,
3756 .set_affinity = irte_ga_set_affinity,
3757 .set_allocated = irte_ga_set_allocated,
3758 .is_allocated = irte_ga_is_allocated,
3759 .clear_allocated = irte_ga_clear_allocated,
3760 };
3761
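/*
 * Allocate IRTEs and per-IRQ data for @nr_irqs interrupts on behalf of the
 * parent x86 vector domain. IOAPIC pins map 1:1 onto the first 32 table
 * entries; MSI/MSI-X allocations come from the rest of the table.
 */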
3762 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
3763 unsigned int nr_irqs, void *arg)
3764 {
3765 struct irq_alloc_info *info = arg;
3766 struct irq_data *irq_data;
3767 struct amd_ir_data *data = NULL;
3768 struct amd_iommu *iommu;
3769 struct irq_cfg *cfg;
3770 struct iommu_dev_data *dev_data;
3771 unsigned long max_irqs;
3772 int i, ret, devid, seg, sbdf;
3773 int index;
3774
3775 if (!info)
3776 return -EINVAL;
3777 if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
3778 return -EINVAL;
3779
3780 sbdf = get_devid(info);
3781 if (sbdf < 0)
3782 return -EINVAL;
3783
3784 seg = PCI_SBDF_TO_SEGID(sbdf);
3785 devid = PCI_SBDF_TO_DEVID(sbdf);
3786 iommu = __rlookup_amd_iommu(seg, devid);
3787 if (!iommu)
3788 return -EINVAL;
3789
3790 dev_data = search_dev_data(iommu, devid);
3791 max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512;
3792
3793 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
3794 if (ret < 0)
3795 return ret;
3796
3797 if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
3798 struct irq_remap_table *table;
3799
3800 table = alloc_irq_table(iommu, devid, NULL, max_irqs);
3801 if (table) {
3802 if (!table->min_index) {
3803 /*
3804 * Keep the first 32 indexes free for IOAPIC
3805 * interrupts.
3806 */
3807 table->min_index = 32;
3808 for (i = 0; i < 32; ++i)
3809 iommu->irte_ops->set_allocated(table, i);
3810 }
3811 WARN_ON(table->min_index != 32);
3812 index = info->ioapic.pin;
3813 } else {
3814 index = -ENOMEM;
3815 }
3816 } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
3817 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
3818 bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
3819
3820 index = alloc_irq_index(iommu, devid, nr_irqs, align,
3821 msi_desc_to_pci_dev(info->desc),
3822 max_irqs);
3823 } else {
3824 index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL,
3825 max_irqs);
3826 }
3827
3828 if (index < 0) {
3829 pr_warn("Failed to allocate IRTE\n");
3830 ret = index;
3831 goto out_free_parent;
3832 }
3833
3834 for (i = 0; i < nr_irqs; i++) {
3835 irq_data = irq_domain_get_irq_data(domain, virq + i);
3836 cfg = irq_data ? irqd_cfg(irq_data) : NULL;
3837 if (!cfg) {
3838 ret = -EINVAL;
3839 goto out_free_data;
3840 }
3841
3842 ret = -ENOMEM;
3843 data = kzalloc_obj(*data);
3844 if (!data)
3845 goto out_free_data;
3846
3847 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3848 data->entry = kzalloc_obj(union irte);
3849 else
3850 data->entry = kzalloc_obj(struct irte_ga);
3851 if (!data->entry) {
3852 kfree(data);
3853 goto out_free_data;
3854 }
3855
3856 data->iommu = iommu;
3857 irq_data->hwirq = (devid << 16) + i;
3858 irq_data->chip_data = data;
3859 irq_data->chip = &amd_ir_chip;
3860 irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
3861 }
3862
3863 return 0;
3864
3865 out_free_data:
3866 for (i--; i >= 0; i--) {
3867 irq_data = irq_domain_get_irq_data(domain, virq + i);
3868 if (irq_data)
3869 kfree(irq_data->chip_data);
3870 }
3871 for (i = 0; i < nr_irqs; i++)
3872 free_irte(iommu, devid, index + i);
3873 out_free_parent:
3874 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3875 return ret;
3876 }
3877
3878 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
3879 unsigned int nr_irqs)
3880 {
3881 struct irq_2_irte *irte_info;
3882 struct irq_data *irq_data;
3883 struct amd_ir_data *data;
3884 int i;
3885
3886 for (i = 0; i < nr_irqs; i++) {
3887 irq_data = irq_domain_get_irq_data(domain, virq + i);
3888 if (irq_data && irq_data->chip_data) {
3889 data = irq_data->chip_data;
3890 irte_info = &data->irq_2_irte;
3891 free_irte(data->iommu, irte_info->devid, irte_info->index);
3892 kfree(data->entry);
3893 kfree(data);
3894 }
3895 }
3896 irq_domain_free_irqs_common(domain, virq, nr_irqs);
3897 }
3898
3899 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
3900 struct amd_ir_data *ir_data,
3901 struct irq_2_irte *irte_info,
3902 struct irq_cfg *cfg);
3903
3904 static int irq_remapping_activate(struct irq_domain *domain,
3905 struct irq_data *irq_data, bool reserve)
3906 {
3907 struct amd_ir_data *data = irq_data->chip_data;
3908 struct irq_2_irte *irte_info = &data->irq_2_irte;
3909 struct amd_iommu *iommu = data->iommu;
3910 struct irq_cfg *cfg = irqd_cfg(irq_data);
3911
3912 if (!iommu)
3913 return 0;
3914
3915 iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
3916 irte_info->index);
3917 amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
3918 return 0;
3919 }
3920
3921 static void irq_remapping_deactivate(struct irq_domain *domain,
3922 struct irq_data *irq_data)
3923 {
3924 struct amd_ir_data *data = irq_data->chip_data;
3925 struct irq_2_irte *irte_info = &data->irq_2_irte;
3926 struct amd_iommu *iommu = data->iommu;
3927
3928 if (iommu)
3929 iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
3930 irte_info->index);
3931 }
3932
3933 static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
3934 enum irq_domain_bus_token bus_token)
3935 {
3936 struct amd_iommu *iommu;
3937 int devid = -1;
3938
3939 if (!amd_iommu_irq_remap)
3940 return 0;
3941
3942 if (x86_fwspec_is_ioapic(fwspec))
3943 devid = get_ioapic_devid(fwspec->param[0]);
3944 else if (x86_fwspec_is_hpet(fwspec))
3945 devid = get_hpet_devid(fwspec->param[0]);
3946
3947 if (devid < 0)
3948 return 0;
3949 iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
3950
3951 return iommu && iommu->ir_domain == d;
3952 }
3953
3954 static const struct irq_domain_ops amd_ir_domain_ops = {
3955 .select = irq_remapping_select,
3956 .alloc = irq_remapping_alloc,
3957 .free = irq_remapping_free,
3958 .activate = irq_remapping_activate,
3959 .deactivate = irq_remapping_deactivate,
3960 };
3961
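/*
 * Update only the in-memory Destination/IsRun/GALogIntr fields of a guest
 * virtual APIC IRTE; the caller is responsible for writing the entry back.
 */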
3962 static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu,
3963 bool ga_log_intr)
3964 {
3965 if (cpu >= 0) {
3966 entry->lo.fields_vapic.destination =
3967 APICID_TO_IRTE_DEST_LO(cpu);
3968 entry->hi.fields.destination =
3969 APICID_TO_IRTE_DEST_HI(cpu);
3970 entry->lo.fields_vapic.is_run = true;
3971 entry->lo.fields_vapic.ga_log_intr = false;
3972 } else {
3973 entry->lo.fields_vapic.is_run = false;
3974 entry->lo.fields_vapic.ga_log_intr = ga_log_intr;
3975 }
3976 }
3977
3978 /*
3979 * Update the pCPU information for an IRTE that is configured to post IRQs to
3980 * a vCPU, without issuing an IOMMU invalidation for the IRTE.
3981 *
3982 * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination
3983 * with the pCPU's APIC ID, set IsRun, and clear GALogIntr. If the vCPU isn't
3984 * associated with a pCPU (@cpu < 0), clear IsRun and set/clear GALogIntr based
3985 * on input from the caller (e.g. KVM only requests GALogIntr when the vCPU is
3986 * blocking and requires a notification wake event). I.e. treat vCPUs that are
3987 * associated with a pCPU as running. This API is intended to be used when a
3988 * vCPU is scheduled in/out (or stops running for any reason), to do a fast
3989 * update of IsRun, GALogIntr, and (conditionally) Destination.
3990 *
3991 * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached
3992 * and thus don't require an invalidation to ensure the IOMMU consumes fresh
3993 * information.
3994 */
3995 int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
3996 {
3997 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
3998 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
3999
4000 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
4001 return -EINVAL;
4002
4003 if (!entry || !entry->lo.fields_vapic.guest_mode)
4004 return 0;
4005
4006 if (!ir_data->iommu)
4007 return -ENODEV;
4008
4009 __amd_iommu_update_ga(entry, cpu, ga_log_intr);
4010
4011 return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
4012 ir_data->irq_2_irte.index, entry);
4013 }
4014 EXPORT_SYMBOL(amd_iommu_update_ga);
4015
4016 int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
4017 {
4018 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
4019 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
4020 u64 valid;
4021
4022 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
4023 return -EINVAL;
4024
4025 if (!entry)
4026 return 0;
4027
4028 valid = entry->lo.fields_vapic.valid;
4029
4030 entry->lo.val = 0;
4031 entry->hi.val = 0;
4032
4033 entry->lo.fields_vapic.valid = valid;
4034 entry->lo.fields_vapic.guest_mode = 1;
4035 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
4036 entry->hi.fields.vector = ir_data->ga_vector;
4037 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
4038
4039 __amd_iommu_update_ga(entry, cpu, ga_log_intr);
4040
4041 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
4042 ir_data->irq_2_irte.index, entry);
4043 }
4044 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
4045
4046 int amd_iommu_deactivate_guest_mode(void *data)
4047 {
4048 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
4049 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
4050 struct irq_cfg *cfg = ir_data->cfg;
4051 u64 valid;
4052
4053 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
4054 return -EINVAL;
4055
4056 if (!entry || !entry->lo.fields_vapic.guest_mode)
4057 return 0;
4058
4059 valid = entry->lo.fields_remap.valid;
4060
4061 entry->lo.val = 0;
4062 entry->hi.val = 0;
4063
4064 entry->lo.fields_remap.valid = valid;
4065 entry->lo.fields_remap.dm = apic->dest_mode_logical;
4066 entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED;
4067 entry->hi.fields.vector = cfg->vector;
4068 entry->lo.fields_remap.destination =
4069 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
4070 entry->hi.fields.destination =
4071 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
4072
4073 return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
4074 ir_data->irq_2_irte.index, entry);
4075 }
4076 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
4077
4078 static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info)
4079 {
4080 int ret;
4081 struct amd_iommu_pi_data *pi_data = info;
4082 struct amd_ir_data *ir_data = data->chip_data;
4083 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
4084 struct iommu_dev_data *dev_data;
4085
4086 if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
4087 return -EINVAL;
4088
4089 if (ir_data->iommu == NULL)
4090 return -EINVAL;
4091
4092 dev_data = search_dev_data(ir_data->iommu, irte_info->devid);
4093
4094 /*
4095 * Note: this device has never been set up for guest mode,
4096 * so we should not modify the IRTE.
4097 */
4098 if (!dev_data || !dev_data->use_vapic)
4099 return -EINVAL;
4100
4101 ir_data->cfg = irqd_cfg(data);
4102
4103 if (pi_data) {
4104 pi_data->ir_data = ir_data;
4105
4106 ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12);
4107 ir_data->ga_vector = pi_data->vector;
4108 ir_data->ga_tag = pi_data->ga_tag;
4109 if (pi_data->is_guest_mode)
4110 ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu,
4111 pi_data->ga_log_intr);
4112 else
4113 ret = amd_iommu_deactivate_guest_mode(ir_data);
4114 } else {
4115 ret = amd_iommu_deactivate_guest_mode(ir_data);
4116 }
4117
4118 return ret;
4119 }
4120
4121
4122 static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
4123 struct amd_ir_data *ir_data,
4124 struct irq_2_irte *irte_info,
4125 struct irq_cfg *cfg)
4126 {
4127
4128 /*
4129 * Atomically update the IRTE with the new destination and vector,
4130 * then flush the interrupt entry cache.
4131 */
4132 iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
4133 irte_info->index, cfg->vector,
4134 cfg->dest_apicid);
4135 }
4136
4137 static int amd_ir_set_affinity(struct irq_data *data,
4138 const struct cpumask *mask, bool force)
4139 {
4140 struct amd_ir_data *ir_data = data->chip_data;
4141 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
4142 struct irq_cfg *cfg = irqd_cfg(data);
4143 struct irq_data *parent = data->parent_data;
4144 struct amd_iommu *iommu = ir_data->iommu;
4145 int ret;
4146
4147 if (!iommu)
4148 return -ENODEV;
4149
4150 ret = parent->chip->irq_set_affinity(parent, mask, force);
4151 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
4152 return ret;
4153
4154 amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
4155 /*
4156 * After this point, all the interrupts will start arriving
4157 * at the new destination. So, time to cleanup the previous
4158 * vector allocation.
4159 */
4160 vector_schedule_cleanup(cfg);
4161
4162 return IRQ_SET_MASK_OK_DONE;
4163 }
4164
4165 static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
4166 {
4167 struct amd_ir_data *ir_data = irq_data->chip_data;
4168
4169 *msg = ir_data->msi_entry;
4170 }
4171
4172 static struct irq_chip amd_ir_chip = {
4173 .name = "AMD-IR",
4174 .irq_ack = apic_ack_irq,
4175 .irq_set_affinity = amd_ir_set_affinity,
4176 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
4177 .irq_compose_msi_msg = ir_compose_msi_msg,
4178 };
4179
4180 static const struct msi_parent_ops amdvi_msi_parent_ops = {
4181 .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
4182 .bus_select_token = DOMAIN_BUS_AMDVI,
4183 .bus_select_mask = MATCH_PCI_MSI,
4184 .prefix = "IR-",
4185 .init_dev_msi_info = msi_parent_init_dev_msi_info,
4186 };
4187
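/*
 * Create the per-IOMMU interrupt remapping domain that acts as the MSI
 * parent for devices translated by this IOMMU.
 */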
4188 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
4189 {
4190 struct irq_domain_info info = {
4191 .fwnode = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index),
4192 .ops = &amd_ir_domain_ops,
4193 .domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI,
4194 .host_data = iommu,
4195 .parent = arch_get_ir_parent_domain(),
4196 };
4197
4198 if (!info.fwnode)
4199 return -ENOMEM;
4200
4201 iommu->ir_domain = msi_create_parent_irq_domain(&info, &amdvi_msi_parent_ops);
4202 if (!iommu->ir_domain) {
4203 irq_domain_free_fwnode(info.fwnode);
4204 return -ENOMEM;
4205 }
4206 return 0;
4207 }
4208 #endif
4209
4210 MODULE_IMPORT_NS("GENERIC_PT_IOMMU");
4211