/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pasid.h - PASID idr, table and entry header
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#ifndef __INTEL_PASID_H
#define __INTEL_PASID_H

#define PASID_MAX			0x100000
#define PASID_PTE_MASK			0x3F
#define PASID_PTE_PRESENT		1
#define PASID_PTE_FPD			2
#define PDE_PFN_MASK			PAGE_MASK
#define PASID_PDE_SHIFT			6
#define MAX_NR_PASID_BITS		20
#define PASID_TBL_ENTRIES		BIT(PASID_PDE_SHIFT)

#define is_pasid_enabled(entry)		(((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry)	(1 << ((((entry)->lo >> 9) & 0x7) + 7))
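
/*
 * Editor's note (not part of the original header): the two macros above
 * operate on a context-table entry (anything with a 64-bit "lo" member),
 * not on the pasid_dir_entry defined below.  get_pasid_dir_size() yields
 * 1 << (field + 7) PASID directory entries; with the largest encodable
 * field value (7) that is 2^14 directory entries, and since each directory
 * entry covers PASID_TBL_ENTRIES (64) PASIDs, the full range is
 * 2^14 * 2^6 = 2^20 = PASID_MAX.
 */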

/*
 * Domain ID reserved for pasid entries programmed for first-level
 * only and pass-through transfer modes.
 */
#define FLPT_DEFAULT_DID		1
#define NUM_RESERVED_DID		2

#define PASID_FLAG_NESTED		BIT(1)
#define PASID_FLAG_PAGE_SNOOP		BIT(2)

/*
 * The PASID_FLAG_FL5LP flag indicates that first-level translation uses
 * 5-level paging; otherwise, 4-level paging is used.
 */
#define PASID_FLAG_FL5LP		BIT(1)

struct pasid_dir_entry {
	u64 val;
};

struct pasid_entry {
	u64 val[8];
};
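
/*
 * Editor's note (not part of the original header): a PASID directory
 * entry is a single 64-bit word, while a scalable-mode PASID table entry
 * is 512 bits (8 x u64) wide; the helpers below read and write individual
 * fields within those eight words.
 */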

#define PASID_ENTRY_PGTT_FL_ONLY	(1)
#define PASID_ENTRY_PGTT_SL_ONLY	(2)
#define PASID_ENTRY_PGTT_NESTED		(3)
#define PASID_ENTRY_PGTT_PT		(4)

/* The representative of a PASID table */
struct pasid_table {
	void			*table;		/* pasid table pointer */
	int			order;		/* page order of pasid table */
	u32			max_pasid;	/* max pasid */
};

/* Get PRESENT bit of a PASID directory entry. */
static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
{
	return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}

/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
	if (!pasid_pde_is_present(pde))
		return NULL;

	return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}
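
/*
 * Editor's note: illustrative sketch, not part of the original header.
 * It shows how a PASID entry could be located through the two-level
 * structure above: the upper bits of the PASID index the directory and
 * the low PASID_PDE_SHIFT bits index the 64-entry PASID table.  The
 * function name is hypothetical; the real lookup in pasid.c
 * (intel_pasid_get_entry()) also allocates missing second-level tables
 * on demand.
 */
static inline struct pasid_entry *
example_pasid_entry_lookup(struct pasid_table *pasid_table, u32 pasid)
{
	struct pasid_dir_entry *dir = pasid_table->table;
	struct pasid_entry *entries;

	if (pasid >= pasid_table->max_pasid)
		return NULL;

	/* Upper bits select the PASID directory entry... */
	entries = get_pasid_table_from_pde(&dir[pasid >> PASID_PDE_SHIFT]);
	if (!entries)
		return NULL;

	/* ...and the low six bits select the entry within that table. */
	return &entries[pasid & PASID_PTE_MASK];
}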

/* Get PRESENT bit of a PASID table entry. */
static inline bool pasid_pte_is_present(struct pasid_entry *pte)
{
	return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}

/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
	return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}

static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

static inline u64 pasid_get_bits(u64 *ptr)
{
	return READ_ONCE(*ptr);
}

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Enable second level A/D bits by setting the SLADE (Second Level
 * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_ssade(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
}

/*
 * Disable second level A/D bits by clearing the SLADE (Second Level
 * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
 * entry.
 */
static inline void pasid_clear_ssade(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 9, 0);
}

/*
 * Check whether second level A/D bits are enabled, i.e. whether the
 * SLADE (Second Level Access Dirty Enable) field (Bit 9) of a scalable
 * mode PASID entry is set.
 */
static inline bool pasid_get_ssade(struct pasid_entry *pe)
{
	return pasid_get_bits(&pe->val[0]) & (1 << 9);
}
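
/*
 * Editor's note: illustrative sketch, not part of the original header.
 * It shows how the two helpers above could be combined to toggle
 * second-level A/D (dirty) tracking on an entry; the function name is
 * hypothetical.  The real path behind intel_pasid_setup_dirty_tracking()
 * (declared below) additionally validates the translation type and
 * flushes the PASID cache so the hardware observes the new value.
 */
static inline void example_set_dirty_tracking(struct pasid_entry *pte,
					      bool enabled)
{
	if (enabled)
		pasid_set_ssade(pte);
	else
		pasid_clear_ssade(pte);
}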

/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page Table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

/*
 * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}
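
/*
 * Editor's note: illustrative sketch, not part of the original header.
 * It shows how the field helpers above compose to build a pass-through
 * PASID entry; the function name and parameters are hypothetical.  The
 * real path behind intel_pasid_setup_pass_through() (declared below)
 * also takes the per-device lock, checks for an already-present entry,
 * programs page-snoop as needed and flushes the PASID and IOTLB caches
 * after the update.
 */
static inline void example_build_pt_entry(struct pasid_entry *pte,
					  u16 did, int agaw)
{
	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	/* Set Present last so hardware never sees a half-built entry. */
	pasid_set_present(pte);
}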

extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags);
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
				     struct device *dev, u32 pasid,
				     bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     u32 pasid, struct dmar_domain *domain);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
				 struct device *dev, u32 pasid,
				 bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid);
int intel_pasid_setup_sm_context(struct device *dev);
void intel_pasid_teardown_sm_context(struct device *dev);
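
/*
 * Editor's note (illustrative, not part of the original header): the
 * rough lifetime of the objects above, as driven by the rest of the
 * Intel IOMMU driver, is:
 *
 *	intel_pasid_alloc_table(dev)		- device probe
 *	intel_pasid_setup_*(...)		- a domain/PASID is attached
 *	intel_pasid_tear_down_entry(...)	- the PASID is detached
 *	intel_pasid_free_table(dev)		- device release
 */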
#endif /* __INTEL_PASID_H */