/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pasid.h - PASID idr, table and entry header
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#ifndef __INTEL_PASID_H
#define __INTEL_PASID_H

#define PASID_MAX			0x100000
#define PASID_PTE_MASK			0x3F
#define PASID_PTE_PRESENT		1
#define PASID_PTE_FPD			2
#define PDE_PFN_MASK			PAGE_MASK
#define PASID_PDE_SHIFT			6
#define MAX_NR_PASID_BITS		20
#define PASID_TBL_ENTRIES		BIT(PASID_PDE_SHIFT)
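
/*
 * Layout note (informational): a PASID is resolved in two steps.  The
 * upper bits (pasid >> PASID_PDE_SHIFT) index the PASID directory, and
 * the low PASID_PDE_SHIFT bits index a PASID table of PASID_TBL_ENTRIES
 * entries.  A rough sketch of the split for a bounds-checked 'pasid':
 *
 *	dir_index = pasid >> PASID_PDE_SHIFT;
 *	tbl_index = pasid & PASID_PTE_MASK;
 */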

#define is_pasid_enabled(entry)		(((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry)	(1 << ((((entry)->lo >> 9) & 0x7) + 7))

#define PASID_FLAG_NESTED		BIT(1)
#define PASID_FLAG_PAGE_SNOOP		BIT(2)

/*
 * The PASID_FLAG_FL5LP flag indicates that 5-level paging is used for
 * first-level translation; otherwise, 4-level paging is used.
 */
#define PASID_FLAG_FL5LP		BIT(1)

struct pasid_dir_entry {
	u64 val;
};

struct pasid_entry {
	u64 val[8];
};

#define PASID_ENTRY_PGTT_FL_ONLY	(1)
#define PASID_ENTRY_PGTT_SL_ONLY	(2)
#define PASID_ENTRY_PGTT_NESTED		(3)
#define PASID_ENTRY_PGTT_PT		(4)

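/*
 * The PGTT (PASID Granular Translation Type) values above are written
 * into bits 8:6 of val[0] by pasid_set_translation_type() and read back
 * by pasid_pte_get_pgtt() below.
 */
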
/* The representative of a PASID table */
struct pasid_table {
	void			*table;		/* pasid table pointer */
	int			order;		/* page order of pasid table */
	u32			max_pasid;	/* max pasid */
};

/* Get PRESENT bit of a PASID directory entry. */
static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
{
	return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}

/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
	if (!pasid_pde_is_present(pde))
		return NULL;

	return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}
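
/*
 * Illustrative only: resolving a PASID to its table entry combines the
 * helper above with the directory/table index split noted earlier.  A
 * minimal, non-allocating sketch (the real lookup, which also allocates
 * missing PASID tables, is intel_pasid_get_entry() in pasid.c):
 *
 *	struct pasid_dir_entry *dir = pasid_table->table;
 *	struct pasid_entry *entries, *entry = NULL;
 *
 *	entries = get_pasid_table_from_pde(&dir[pasid >> PASID_PDE_SHIFT]);
 *	if (entries)
 *		entry = &entries[pasid & PASID_PTE_MASK];
 */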

/* Get PRESENT bit of a PASID table entry. */
static inline bool pasid_pte_is_present(struct pasid_entry *pte)
{
	return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}

/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
	return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}

static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

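/*
 * As above, but leave the FPD (Fault Processing Disable) bit set so that
 * faults taken on this PASID are not reported while the entry is being
 * torn down.
 */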
static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

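/*
 * Non-atomic read-modify-write of one 64-bit word of a PASID entry:
 * clear the bits in @mask, then set @bits.  Callers below pre-shift the
 * value into field position, e.g. a 3-bit field at bits 8:6 is written as
 *
 *	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
 */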
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

static inline u64 pasid_get_bits(u64 *ptr)
{
	return READ_ONCE(*ptr);
}

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Enable second level A/D bits by setting the SLADE (Second Level
 * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_ssade(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
}

/*
 * Disable second level A/D bits by clearing the SLADE (Second Level
 * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
 * entry.
 */
static inline void pasid_clear_ssade(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 9, 0);
}

/*
 * Check if second level A/D bits are enabled, i.e. whether the SLADE
 * (Second Level Access Dirty Enable) field (Bit 9) of a scalable mode
 * PASID entry is set.
 */
static inline bool pasid_get_ssade(struct pasid_entry *pe)
{
	return pasid_get_bits(&pe->val[0]) & (1 << 9);
}

/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup the Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

/*
 * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}

extern unsigned int intel_pasid_max_id;
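
/*
 * Rough call flow for the functions below (informational, not enforced
 * here): intel_pasid_alloc_table() creates a device's PASID table, the
 * intel_pasid_setup_*()/intel_pasid_replace_*() helpers program an
 * individual PASID entry for first-level, second-level, pass-through or
 * nested translation, intel_pasid_tear_down_entry() clears it again, and
 * intel_pasid_free_table() releases the table.
 */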
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags);
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
				     struct device *dev, u32 pasid,
				     bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     u32 pasid, struct dmar_domain *domain);
int intel_pasid_replace_first_level(struct intel_iommu *iommu,
				    struct device *dev, pgd_t *pgd,
				    u32 pasid, u16 did, u16 old_did,
				    int flags);
int intel_pasid_replace_second_level(struct intel_iommu *iommu,
				     struct dmar_domain *domain,
				     struct device *dev, u16 old_did,
				     u32 pasid);
int intel_pasid_replace_pass_through(struct intel_iommu *iommu,
				     struct device *dev, u16 old_did,
				     u32 pasid);
int intel_pasid_replace_nested(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid,
			       u16 old_did, struct dmar_domain *domain);

void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
				 struct device *dev, u32 pasid,
				 bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid);
int intel_pasid_setup_sm_context(struct device *dev);
void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */