/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pasid.h - PASID idr, table and entry header
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#ifndef __INTEL_PASID_H
#define __INTEL_PASID_H

#define PASID_MAX			0x100000
#define PASID_PTE_MASK			0x3F
#define PASID_PTE_PRESENT		1
#define PASID_PTE_FPD			2
#define PDE_PFN_MASK			PAGE_MASK
#define PASID_PDE_SHIFT			6
#define MAX_NR_PASID_BITS		20
#define PASID_TBL_ENTRIES		BIT(PASID_PDE_SHIFT)

#define is_pasid_enabled(entry)		(((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry)	(1 << ((((entry)->lo >> 9) & 0x7) + 7))

#define PASID_FLAG_NESTED		BIT(1)
#define PASID_FLAG_PAGE_SNOOP		BIT(2)

/*
 * The PASID_FLAG_FL5LP flag indicates that first-level translation uses
 * 5-level paging; otherwise, 4-level paging is used.
 */
#define PASID_FLAG_FL5LP		BIT(1)

struct pasid_dir_entry {
	u64 val;
};

struct pasid_entry {
	u64 val[8];
};

#define PASID_ENTRY_PGTT_FL_ONLY	(1)
#define PASID_ENTRY_PGTT_SL_ONLY	(2)
#define PASID_ENTRY_PGTT_NESTED		(3)
#define PASID_ENTRY_PGTT_PT		(4)

/* The representative of a PASID table */
struct pasid_table {
	void		*table;		/* pasid table pointer */
	int		order;		/* page order of pasid table */
	u32		max_pasid;	/* max pasid */
};

/* Get PRESENT bit of a PASID directory entry. */
static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
{
	return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}

/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
	if (!pasid_pde_is_present(pde))
		return NULL;

	return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}
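/*
 * Illustrative sketch, not part of the original header: a PASID selects an
 * entry in two steps. The helper names below are hypothetical and only spell
 * out the arithmetic implied by PASID_PDE_SHIFT and PASID_TBL_ENTRIES: the
 * upper bits of the PASID index a pasid_dir_entry, and the low 6 bits pick
 * one of the 64 pasid_entry slots in the table that the directory entry
 * points to.
 */
static inline unsigned int pasid_dir_index(u32 pasid)
{
	return pasid >> PASID_PDE_SHIFT;
}

static inline unsigned int pasid_table_index(u32 pasid)
{
	return pasid & (PASID_TBL_ENTRIES - 1);
}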
/* Get PRESENT bit of a PASID table entry. */
static inline bool pasid_pte_is_present(struct pasid_entry *pte)
{
	return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}

/* Get FPD (Fault Processing Disable) bit of a PASID table entry. */
static inline bool pasid_pte_is_fault_disabled(struct pasid_entry *pte)
{
	return READ_ONCE(pte->val[0]) & PASID_PTE_FPD;
}

/* Get PGTT field of a PASID table entry. */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
	return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}

static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

static inline u64 pasid_get_bits(u64 *ptr)
{
	return READ_ONCE(*ptr);
}

/*
 * Setup the DID (Domain Identifier) field (Bit 64~79) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR (Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW (Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT (PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD (Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Enable second level A/D bits by setting the SLADE (Second Level
 * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_ssade(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
}

/*
 * Disable second level A/D bits by clearing the SLADE (Second Level
 * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
 * entry.
 */
static inline void pasid_clear_ssade(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 9, 0);
}

/*
 * Check whether second level A/D bits are enabled, i.e. whether the
 * SLADE (Second Level Access Dirty Enable) field (Bit 9) of a
 * scalable mode PASID entry is set.
 */
static inline bool pasid_get_ssade(struct pasid_entry *pe)
{
	return pasid_get_bits(&pe->val[0]) & (1 << 9);
}

/*
 * Setup the SRE (Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE (Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P (Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup the Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

/*
 * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}
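/*
 * Illustrative sketch, not kernel API: a minimal example of how the field
 * helpers above compose when programming a second-level-only entry. The
 * function name and parameters are hypothetical; real setup code, e.g.
 * intel_pasid_setup_second_level() declared below, also handles cache
 * invalidation and optional fields such as snoop control and A/D enabling.
 * Note that the field comments give bit positions within the full 512-bit
 * entry, while the helpers operate on 64-bit words: bit N of the entry is
 * bit (N % 64) of val[N / 64].
 */
static inline void
pasid_entry_sketch_second_level(struct pasid_entry *pe, u16 did,
				u64 slptptr, u64 aw)
{
	pasid_clear_entry(pe);
	pasid_set_domain_id(pe, did);
	pasid_set_slptr(pe, slptptr);	/* physical address of the page table */
	pasid_set_address_width(pe, aw);
	pasid_set_translation_type(pe, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pe);
	pasid_set_present(pe);		/* mark the entry valid last */
}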
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags);
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
				     struct device *dev, u32 pasid,
				     bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     u32 pasid, struct dmar_domain *domain);
int intel_pasid_replace_first_level(struct intel_iommu *iommu,
				    struct device *dev, pgd_t *pgd,
				    u32 pasid, u16 did, u16 old_did,
				    int flags);
int intel_pasid_replace_second_level(struct intel_iommu *iommu,
				     struct dmar_domain *domain,
				     struct device *dev, u16 old_did,
				     u32 pasid);
int intel_pasid_replace_pass_through(struct intel_iommu *iommu,
				     struct device *dev, u16 old_did,
				     u32 pasid);
int intel_pasid_replace_nested(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid,
			       u16 old_did, struct dmar_domain *domain);

void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
				 struct device *dev, u32 pasid,
				 bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid);
int intel_pasid_setup_sm_context(struct device *dev);
void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */