/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef AMD_IOMMU_H
#define AMD_IOMMU_H

#include <linux/iommu.h>

#include "amd_iommu_types.h"

irqreturn_t amd_iommu_int_thread(int irq, void *data);
irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
irqreturn_t amd_iommu_int_handler(int irq, void *data);
void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
#else
static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
#endif

/* Needed for interrupt remapping */
int amd_iommu_prepare(void);
int amd_iommu_enable(void);
void amd_iommu_disable(void);
int amd_iommu_reenable(int mode);
int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;
extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;

bool amd_iommu_v2_supported(void);

/* Device capabilities */
int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);

/* GCR3 setup */
int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
		       ioasid_t pasid, unsigned long gcr3);
int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);

/*
 * Flush all internal caches of the given IOMMU.
 */
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size);
void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
				     ioasid_t pasid, u64 address, size_t size);
void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
				   ioasid_t pasid);

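/*
 * Usage sketch (illustrative only, not a call site from the driver): after a
 * domain's page table or device table entries change, the usual pattern is
 *
 *	amd_iommu_domain_update(domain);
 *
 * which rewrites the device table and flushes the domain's translations,
 * while amd_iommu_flush_all_caches(iommu) drops every cached entry of a
 * single IOMMU, e.g. when it is (re)enabled.
 */
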
#ifdef CONFIG_IRQ_REMAP
int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
#else
static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	return 0;
}
#endif

int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
			   int status, int tag);

static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}

/* Test feature bits in the IOMMU Extended Feature Register (EFR) */
static inline bool check_feature(u64 mask)
{
	return (amd_iommu_efr & mask);
}

/* Test feature bits in the second Extended Feature Register (EFR2) */
static inline bool check_feature2(u64 mask)
{
	return (amd_iommu_efr2 & mask);
}

/* Read the guest page table level support (GATS field) from the EFR */
static inline int check_feature_gpt_level(void)
{
	return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
}

static inline bool amd_iommu_gt_ppr_supported(void)
{
	return (check_feature(FEATURE_GT) &&
		check_feature(FEATURE_PPR));
}
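
/*
 * Usage sketch (illustrative only): callers gate optional hardware paths on
 * these helpers, e.g. using the INVALIDATE_ALL command only when the EFR
 * advertises it:
 *
 *	if (check_feature(FEATURE_IA))
 *		iommu_flush_all(iommu);
 *
 * iommu_flush_all() here merely stands for whatever the caller does with the
 * capability.
 */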

/*
 * Translate addresses of IOMMU data structures: physical addresses handed to
 * the hardware carry the SME encryption mask, which is applied here and
 * stripped again on the way back.
 */
static inline u64 iommu_virt_to_phys(void *vaddr)
{
	return (u64)__sme_set(virt_to_phys(vaddr));
}

static inline void *iommu_phys_to_virt(unsigned long paddr)
{
	return phys_to_virt(__sme_clr(paddr));
}

static inline
void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
{
	domain->iop.root = (u64 *)(root & PAGE_MASK);
	domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
}

static inline
void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
{
	amd_iommu_domain_set_pt_root(domain, 0);
}
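
/*
 * Illustrative sketch only: the pt_root value packs the page table root
 * pointer with the paging mode in its low three bits, so a caller holding a
 * freshly allocated root could hand it over as (assuming 3-level paging):
 *
 *	u64 *root = alloc_pgtable_page(NUMA_NO_NODE, GFP_KERNEL);
 *
 *	if (root)
 *		amd_iommu_domain_set_pt_root(domain, (u64)root | PAGE_MODE_3_LEVEL);
 *
 * amd_iommu_domain_set_pgtable(), declared at the end of this header, is the
 * helper for callers that track root and mode separately.
 */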

/* Compose the PCI segment/bus/dev/fn (SBDF) identifier for this device */
static inline int get_pci_sbdf_id(struct pci_dev *pdev)
{
	int seg = pci_domain_nr(pdev->bus);
	u16 devid = pci_dev_id(pdev);

	return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}

/* Allocate one zeroed page on @nid for use as an I/O page table page */
static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
{
	struct page *page;

	page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
	return page ? page_address(page) : NULL;
}

/*
 * This must be called after device probe completes. During probe, use
 * rlookup_amd_iommu() to get the iommu.
 */
static inline struct amd_iommu *get_amd_iommu_from_dev(struct device *dev)
{
	return iommu_get_iommu_dev(dev, struct amd_iommu, iommu);
}
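
/*
 * Usage sketch (illustrative only): once probe has completed, the IOMMU that
 * serves a device can be fetched straight from the core iommu data, e.g.
 *
 *	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
 *
 *	amd_iommu_flush_all_caches(iommu);
 *
 * During probe the dev->iommu linkage is not set up yet, which is why
 * rlookup_amd_iommu() has to be used there instead.
 */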

/* This must be called after device probe completes. */
static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_data *dev_data)
{
	return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
}

bool translation_pre_enabled(struct amd_iommu *iommu);
bool amd_iommu_is_attach_deferred(struct device *dev);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);

#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
#else
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif

void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode);
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);

#endif