/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pasid.h - PASID idr, table and entry header
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#ifndef __INTEL_PASID_H
#define __INTEL_PASID_H

#define PASID_MAX			0x100000
#define PASID_PTE_MASK			0x3F
#define PASID_PTE_PRESENT		1
#define PASID_PTE_FPD			2
#define PDE_PFN_MASK			PAGE_MASK
#define PASID_PDE_SHIFT			6
#define MAX_NR_PASID_BITS		20
#define PASID_TBL_ENTRIES		BIT(PASID_PDE_SHIFT)

#define is_pasid_enabled(entry)		(((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry)	(1 << ((((entry)->lo >> 9) & 0x7) + 7))

#define PASID_FLAG_NESTED		BIT(1)
#define PASID_FLAG_PAGE_SNOOP		BIT(2)

/*
 * The PASID_FLAG_FL5LP flag indicates that 5-level paging is used for
 * first-level translation; otherwise, 4-level paging is used.
 */
#define PASID_FLAG_FL5LP		BIT(1)

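/* A PASID directory entry: a single 64-bit quadword. */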
struct pasid_dir_entry {
	u64 val;
};

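/* A scalable mode PASID table entry: 512 bits, i.e. eight 64-bit quadwords. */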
struct pasid_entry {
	u64 val[8];
};

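/*
 * Values for the PGTT (PASID Granular Translation Type) field of a
 * scalable mode PASID entry.
 */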
#define PASID_ENTRY_PGTT_FL_ONLY	(1)
#define PASID_ENTRY_PGTT_SL_ONLY	(2)
#define PASID_ENTRY_PGTT_NESTED		(3)
#define PASID_ENTRY_PGTT_PT		(4)

/* The representative of a PASID table */
struct pasid_table {
	void			*table;		/* pasid table pointer */
	u32			max_pasid;	/* max pasid */
};

/* Get PRESENT bit of a PASID directory entry. */
static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
{
	return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}

/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
	if (!pasid_pde_is_present(pde))
		return NULL;

	return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}

/* Get PRESENT bit of a PASID table entry. */
static inline bool pasid_pte_is_present(struct pasid_entry *pte)
{
	return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}

/* Get FPD(Fault Processing Disable) bit of a PASID table entry */
static inline bool pasid_pte_is_fault_disabled(struct pasid_entry *pte)
{
	return READ_ONCE(pte->val[0]) & PASID_PTE_FPD;
}

/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
	return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}

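/* Zero all eight quadwords of a PASID table entry. */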
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

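/*
 * Clear a PASID table entry while leaving the FPD (Fault Processing
 * Disable) bit set.
 */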
static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

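/*
 * Clear the bits in @mask and install @bits in the 64-bit word at @ptr,
 * using READ_ONCE()/WRITE_ONCE() for the read-modify-write.
 */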
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

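/* Read back one 64-bit word of a PASID entry. */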
static inline u64 pasid_get_bits(u64 *ptr)
{
	return READ_ONCE(*ptr);
}

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Enable second level A/D bits by setting the SLADE (Second Level
 * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_ssade(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
}

/*
 * Disable second level A/D bits by clearing the SLADE (Second Level
 * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
 * entry.
 */
static inline void pasid_clear_ssade(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 9, 0);
}

/*
 * Check whether the SLADE (Second Level Access Dirty Enable) field
 * (Bit 9) of a scalable mode PASID entry is set.
 */
static inline bool pasid_get_ssade(struct pasid_entry *pe)
{
	return pasid_get_bits(&pe->val[0]) & (1 << 9);
}

/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

/*
 * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}

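/*
 * Per-device PASID table management and scalable mode PASID entry
 * setup/teardown interfaces.
 */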
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags);
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
				     struct device *dev, u32 pasid,
				     bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     u32 pasid, struct dmar_domain *domain);
int intel_pasid_replace_first_level(struct intel_iommu *iommu,
				    struct device *dev, pgd_t *pgd,
				    u32 pasid, u16 did, u16 old_did,
				    int flags);
int intel_pasid_replace_second_level(struct intel_iommu *iommu,
				     struct dmar_domain *domain,
				     struct device *dev, u16 old_did,
				     u32 pasid);
int intel_pasid_replace_pass_through(struct intel_iommu *iommu,
				     struct device *dev, u16 old_did,
				     u32 pasid);
int intel_pasid_replace_nested(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid,
			       u16 old_did, struct dmar_domain *domain);

void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
				 struct device *dev, u32 pasid,
				 bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid);
int intel_pasid_setup_sm_context(struct device *dev);
void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */