/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <ashok.raj@intel.com>
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *          David Woodhouse <David.Woodhouse@intel.com>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
#include <linux/bitfield.h>
#include <linux/xarray.h>
#include <linux/perf_event.h>
#include <linux/pci.h>
#include <linux/generic_pt/iommu.h>

#include <asm/iommu.h>
#include <uapi/linux/iommufd.h>

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

#define VTD_STRIDE_SHIFT	(9)
#define VTD_STRIDE_MASK		(((u64)-1) << VTD_STRIDE_SHIFT)
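/*
 * Example (illustrative): VTD_PAGE_ALIGN(0x12345) == 0x13000, and with a
 * 4KiB host page size (PAGE_SHIFT == 12) IOVA_PFN(0x12345) == 0x12.
 */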
#define DMA_PTE_READ		BIT_ULL(0)
#define DMA_PTE_WRITE		BIT_ULL(1)
#define DMA_PTE_LARGE_PAGE	BIT_ULL(7)
#define DMA_PTE_SNP		BIT_ULL(11)

#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
#define DMA_FL_PTE_US		BIT_ULL(2)
#define DMA_FL_PTE_ACCESS	BIT_ULL(5)
#define DMA_FL_PTE_DIRTY	BIT_ULL(6)

#define DMA_SL_PTE_DIRTY_BIT	9
#define DMA_SL_PTE_DIRTY	BIT_ULL(DMA_SL_PTE_DIRTY_BIT)

#define ADDR_WIDTH_5LEVEL	(57)
#define ADDR_WIDTH_4LEVEL	(48)

#define CONTEXT_TT_MULTI_LEVEL	0
#define CONTEXT_TT_DEV_IOTLB	1
#define CONTEXT_TT_PASS_THROUGH	2
#define CONTEXT_PASIDE		BIT_ULL(3)

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define DMAR_VER_REG		0x0	/* Arch version supported by this IOMMU */
#define DMAR_CAP_REG		0x8	/* Hardware supported capabilities */
#define DMAR_ECAP_REG		0x10	/* Extended capabilities supported */
#define DMAR_GCMD_REG		0x18	/* Global command register */
#define DMAR_GSTS_REG		0x1c	/* Global status register */
#define DMAR_RTADDR_REG		0x20	/* Root entry table */
#define DMAR_CCMD_REG		0x28	/* Context command reg */
#define DMAR_FSTS_REG		0x34	/* Fault Status register */
#define DMAR_FECTL_REG		0x38	/* Fault control register */
#define DMAR_FEDATA_REG		0x3c	/* Fault event interrupt data register */
#define DMAR_FEADDR_REG		0x40	/* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG	0x44	/* Upper address register */
#define DMAR_PMEN_REG		0x64	/* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG	0x68	/* PMRR low base addr */
#define DMAR_PLMLIMIT_REG	0x6c	/* PMRR low limit */
#define DMAR_PHMBASE_REG	0x70	/* PMRR high base addr */
#define DMAR_PHMLIMIT_REG	0x78	/* PMRR high limit */
#define DMAR_IQH_REG		0x80	/* Invalidation queue head register */
#define DMAR_IQT_REG		0x88	/* Invalidation queue tail register */
#define DMAR_IQ_SHIFT		4	/* Invalidation queue head/tail shift */
#define DMAR_IQA_REG		0x90	/* Invalidation queue addr register */
#define DMAR_ICS_REG		0x9c	/* Invalidation complete status register */
#define DMAR_IQER_REG		0xb0	/* Invalidation queue error record register */
#define DMAR_IRTA_REG		0xb8	/* Interrupt remapping table addr register */
#define DMAR_PQH_REG		0xc0	/* Page request queue head register */
#define DMAR_PQT_REG		0xc8	/* Page request queue tail register */
#define DMAR_PQA_REG		0xd0	/* Page request queue address register */
#define DMAR_PRS_REG		0xdc	/* Page request status register */
#define DMAR_PECTL_REG		0xe0	/* Page request event control register */
#define DMAR_PEDATA_REG		0xe4	/* Page request event interrupt data register */
#define DMAR_PEADDR_REG		0xe8	/* Page request event interrupt addr register */
#define DMAR_PEUADDR_REG	0xec	/* Page request event Upper address register */
#define DMAR_MTRRCAP_REG	0x100	/* MTRR capability register */
#define DMAR_MTRRDEF_REG	0x108	/* MTRR default type register */
#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
#define DMAR_MTRR_FIX16K_80000_REG 0x128
#define DMAR_MTRR_FIX16K_A0000_REG 0x130
#define DMAR_MTRR_FIX4K_C0000_REG 0x138
#define DMAR_MTRR_FIX4K_C8000_REG 0x140
#define DMAR_MTRR_FIX4K_D0000_REG 0x148
#define DMAR_MTRR_FIX4K_D8000_REG 0x150
#define DMAR_MTRR_FIX4K_E0000_REG 0x158
#define DMAR_MTRR_FIX4K_E8000_REG 0x160
#define DMAR_MTRR_FIX4K_F0000_REG 0x168
#define DMAR_MTRR_FIX4K_F8000_REG 0x170
#define DMAR_MTRR_PHYSBASE0_REG 0x180	/* MTRR Variable range registers */
#define DMAR_MTRR_PHYSMASK0_REG 0x188
#define DMAR_MTRR_PHYSBASE1_REG 0x190
#define DMAR_MTRR_PHYSMASK1_REG 0x198
#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
#define DMAR_MTRR_PHYSBASE8_REG 0x200
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
#define DMAR_PERFCAP_REG	0x300
#define DMAR_PERFCFGOFF_REG	0x310
#define DMAR_PERFOVFOFF_REG	0x318
#define DMAR_PERFCNTROFF_REG	0x31c
#define DMAR_PERFINTRSTS_REG	0x324
#define DMAR_PERFINTRCTL_REG	0x328
#define DMAR_PERFEVNTCAP_REG	0x380
#define DMAR_ECMD_REG		0x400
#define DMAR_ECEO_REG		0x408
#define DMAR_ECRSP_REG		0x410
#define DMAR_ECCAP_REG		0x430

#define DMAR_IQER_REG_IQEI(reg)		FIELD_GET(GENMASK_ULL(3, 0), reg)
#define DMAR_IQER_REG_ITESID(reg)	FIELD_GET(GENMASK_ULL(47, 32), reg)
#define DMAR_IQER_REG_ICESID(reg)	FIELD_GET(GENMASK_ULL(63, 48), reg)

#define OFFSET_STRIDE		(9)

#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
#define dmar_readl(a) readl(a)
#define dmar_writel(a, v) writel(v, a)

#define DMAR_VER_MAJOR(v)		(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)		((v) & 0x0f)
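/*
 * Example (illustrative): decoding the version register after the
 * register set has been mapped, assuming a valid struct intel_iommu:
 *
 *	u32 ver = dmar_readl(iommu->reg + DMAR_VER_REG);
 *
 *	pr_info("VT-d version %d.%d\n",
 *		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
 */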
/*
 * Decoding Capability Register
 */
#define cap_esrtps(c)		(((c) >> 63) & 1)
#define cap_esirtps(c)		(((c) >> 62) & 1)
#define cap_ecmds(c)		(((c) >> 61) & 1)
#define cap_fl5lp_support(c)	(((c) >> 60) & 1)
#define cap_pi_support(c)	(((c) >> 59) & 1)
#define cap_fl1gp_support(c)	(((c) >> 56) & 1)
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)

#define cap_super_page_val(c)	(((c) >> 34) & 0xf)

#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
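/*
 * Example (illustrative): if the number-of-domains field (CAP[2:0])
 * reads 2, cap_ndoms() yields 1UL << (4 + 2 * 2), i.e. 256 domain-ids.
 */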
/*
 * Extended Capability Register
 */

#define ecap_pms(e)		(((e) >> 51) & 0x1)
#define ecap_rps(e)		(((e) >> 49) & 0x1)
#define ecap_smpwc(e)		(((e) >> 48) & 0x1)
#define ecap_flts(e)		(((e) >> 47) & 0x1)
#define ecap_slts(e)		(((e) >> 46) & 0x1)
#define ecap_slads(e)		(((e) >> 45) & 0x1)
#define ecap_smts(e)		(((e) >> 43) & 0x1)
#define ecap_dit(e)		(((e) >> 41) & 0x1)
#define ecap_pds(e)		(((e) >> 42) & 0x1)
#define ecap_pasid(e)		(((e) >> 40) & 0x1)
#define ecap_pss(e)		(((e) >> 35) & 0x1f)
#define ecap_eafs(e)		(((e) >> 34) & 0x1)
#define ecap_nwfs(e)		(((e) >> 33) & 0x1)
#define ecap_srs(e)		(((e) >> 31) & 0x1)
#define ecap_ers(e)		(((e) >> 30) & 0x1)
#define ecap_prs(e)		(((e) >> 29) & 0x1)
#define ecap_broken_pasid(e)	(((e) >> 28) & 0x1)
#define ecap_dis(e)		(((e) >> 27) & 0x1)
#define ecap_nest(e)		(((e) >> 26) & 0x1)
#define ecap_mts(e)		(((e) >> 25) & 0x1)
#define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_pass_through(e)	(((e) >> 6) & 0x1)
#define ecap_eim_support(e)	(((e) >> 4) & 0x1)
#define ecap_ir_support(e)	(((e) >> 3) & 0x1)
#define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e)	(((e) >> 20) & 0xf)
#define ecap_sc_support(e)	(((e) >> 7) & 0x1) /* Snooping Control */

/*
 * Decoding Perf Capability Register
 */
#define pcap_num_cntr(p)	((p) & 0xffff)
#define pcap_cntr_width(p)	(((p) >> 16) & 0x7f)
#define pcap_num_event_group(p)	(((p) >> 24) & 0x1f)
#define pcap_filters_mask(p)	(((p) >> 32) & 0x1f)
#define pcap_interrupt(p)	(((p) >> 50) & 0x1)
/* The counter stride is calculated as 2 ^ (x+10) bytes */
#define pcap_cntr_stride(p)	(1ULL << ((((p) >> 52) & 0x7) + 10))
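/*
 * Example (illustrative): a stride field of 2 gives
 * pcap_cntr_stride() == 1ULL << (2 + 10), i.e. 4KiB between
 * successive counters.
 */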
/*
 * Decoding Perf Event Capability Register
 */
#define pecap_es(p)		((p) & 0xfffffff)

/* Virtual command interface capability */
#define vccap_pasid(v)		(((v) & DMA_VCS_PAS)) /* PASID allocation */

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH	(((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH	(((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH	(((u64)3) << 60)
#define DMA_TLB_IIRG(type)	((type >> 60) & 3)
#define DMA_TLB_IAIG(val)	(((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN	(((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN	(((u64)1) << 48)
#define DMA_TLB_DID(id)		(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT		(((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_TLB_MAX_SIZE	(0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM	(((u32)1) << 31)
#define DMA_PMEN_PRS	(((u32)1) << 0)

/* GCMD_REG */
#define DMA_GCMD_TE	(((u32)1) << 31)
#define DMA_GCMD_SRTP	(((u32)1) << 30)
#define DMA_GCMD_SFL	(((u32)1) << 29)
#define DMA_GCMD_EAFL	(((u32)1) << 28)
#define DMA_GCMD_WBF	(((u32)1) << 27)
#define DMA_GCMD_QIE	(((u32)1) << 26)
#define DMA_GCMD_SIRTP	(((u32)1) << 24)
#define DMA_GCMD_IRE	(((u32)1) << 25)
#define DMA_GCMD_CFI	(((u32)1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES	(((u32)1) << 31)
#define DMA_GSTS_RTPS	(((u32)1) << 30)
#define DMA_GSTS_FLS	(((u32)1) << 29)
#define DMA_GSTS_AFLS	(((u32)1) << 28)
#define DMA_GSTS_WBFS	(((u32)1) << 27)
#define DMA_GSTS_QIES	(((u32)1) << 26)
#define DMA_GSTS_IRTPS	(((u32)1) << 24)
#define DMA_GSTS_IRES	(((u32)1) << 25)
#define DMA_GSTS_CFIS	(((u32)1) << 23)

/* DMA_RTADDR_REG */
#define DMA_RTADDR_SMT	(((u64)1) << 10)

/* CCMD_REG */
#define DMA_CCMD_ICC	(((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL	(((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL	(((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL	(((u64)3) << 61)
#define DMA_CCMD_FM(m)	(((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT	0
#define DMA_CCMD_MASK_1BIT	1
#define DMA_CCMD_MASK_2BIT	2
#define DMA_CCMD_MASK_3BIT	3
#define DMA_CCMD_SID(s)	(((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d)	((u64)((d) & 0xffff))

/* ECMD_REG */
#define DMA_MAX_NUM_ECMD	256
#define DMA_MAX_NUM_ECMDCAP	(DMA_MAX_NUM_ECMD / 64)
#define DMA_ECMD_REG_STEP	8
#define DMA_ECMD_ENABLE		0xf0
#define DMA_ECMD_DISABLE	0xf1
#define DMA_ECMD_FREEZE		0xf4
#define DMA_ECMD_UNFREEZE	0xf5
#define DMA_ECMD_OA_SHIFT	16
#define DMA_ECMD_ECRSP_IP	0x1
#define DMA_ECMD_ECCAP3		3
#define DMA_ECMD_ECCAP3_ECNTS	BIT_ULL(48)
#define DMA_ECMD_ECCAP3_DCNTS	BIT_ULL(49)
#define DMA_ECMD_ECCAP3_FCNTS	BIT_ULL(52)
#define DMA_ECMD_ECCAP3_UFCNTS	BIT_ULL(53)
#define DMA_ECMD_ECCAP3_ESSENTIAL	(DMA_ECMD_ECCAP3_ECNTS |	\
					 DMA_ECMD_ECCAP3_DCNTS |	\
					 DMA_ECMD_ECCAP3_FCNTS |	\
					 DMA_ECMD_ECCAP3_UFCNTS)

/* FECTL_REG */
#define DMA_FECTL_IM	(((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO	(1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF	(1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE	(1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE	(1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE	(1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO	(1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s)	(((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F	(((u32)1) << 31)
#define dma_frcd_type(d)	((d >> 30) & 1)
#define dma_frcd_fault_reason(c)	(c & 0xff)
#define dma_frcd_source_id(c)	(c & 0xffff)
#define dma_frcd_pasid_value(c)	(((c) >> 8) & 0xfffff)
#define dma_frcd_pasid_present(c)	(((c) >> 31) & 1)
/* low 64 bit */
#define dma_frcd_page_addr(d)	(d & (((u64)-1) << PAGE_SHIFT))

/* PRS_REG */
#define DMA_PRS_PPR	((u32)1)
#define DMA_PRS_PRO	((u32)2)

#define DMA_VCS_PAS	((u64)1)

/* PERFINTRSTS_REG */
#define DMA_PERFINTRSTS_PIS	((u32)1)

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op(iommu->reg + offset);				\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time)) \
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)
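/*
 * Example (illustrative, normally called with register_lock held):
 * wait for the translation-enable bit to latch in the status register:
 *
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 *		      (sts & DMA_GSTS_TES), sts);
 */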
#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};

#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5
#define QI_EIOTLB_TYPE		0x6
#define QI_PC_TYPE		0x7
#define QI_DEIOTLB_TYPE		0x8
#define QI_PGRP_RESP_TYPE	0x9
#define QI_PSTRM_RESP_TYPE	0xa

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
#define QI_IWD_FENCE		(((u64)1) << 6)
#define QI_IWD_PRQ_DRAIN	(((u64)1) << 7)

#define QI_IOTLB_DID(did)	(((u64)did) << 16)
#define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
#define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
#define QI_IOTLB_AM(am)		(((u8)am) & 0x3f)

#define QI_CC_FM(fm)		(((u64)fm) << 48)
#define QI_CC_SID(sid)		(((u64)sid) << 32)
#define QI_CC_DID(did)		(((u64)did) << 16)
#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				   ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

#define QI_PC_PASID(pasid)	(((u64)pasid) << 32)
#define QI_PC_DID(did)		(((u64)did) << 16)
#define QI_PC_GRAN(gran)	(((u64)gran) << 4)

/* PASID cache invalidation granu */
#define QI_PC_ALL_PASIDS	0
#define QI_PC_PASID_SEL		1
#define QI_PC_GLOBAL		3

#define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
#define QI_EIOTLB_AM(am)	(((u64)am) & 0x3f)
#define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
#define QI_EIOTLB_DID(did)	(((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran)	(((u64)gran) << 4)

/* QI Dev-IOTLB inv granu */
#define QI_DEV_IOTLB_GRAN_ALL		1
#define QI_DEV_IOTLB_GRAN_PASID_SEL	0

#define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
#define QI_DEV_EIOTLB_PASID(p)	((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				    ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS	32

/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)
#define QI_PGRP_RESP_CODE(res)	(((u64)(res)) << 12)
#define QI_PGRP_DID(rid)	(((u64)(rid)) << 16)
#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)

/* Page group response descriptor QW1 */
#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 3)

#define QI_RESP_SUCCESS		0x0
#define QI_RESP_INVALID		0x1
#define QI_RESP_FAILURE		0xf

#define QI_GRAN_NONG_PASID	2
#define QI_GRAN_PSI_PASID	3

#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))

struct qi_desc {
	u64 qw0;
	u64 qw1;
	u64 qw2;
	u64 qw3;
};

struct q_inval {
	raw_spinlock_t	q_lock;
	void		*desc;		/* invalidation queue */
	int		*desc_status;	/* desc status */
	int		free_head;	/* first free entry */
	int		free_tail;	/* last free entry */
	int		free_cnt;
};

/* Page Request Queue depth */
#define PRQ_ORDER	4
#define PRQ_SIZE	(SZ_4K << PRQ_ORDER)
#define PRQ_RING_MASK	(PRQ_SIZE - 0x20)
#define PRQ_DEPTH	(PRQ_SIZE >> 5)
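/*
 * Example (illustrative): with PRQ_ORDER == 4 the queue occupies
 * SZ_4K << 4 == 64KiB; each page request descriptor is 32 bytes (0x20),
 * so PRQ_DEPTH == 64KiB >> 5 == 2048 entries.
 */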
struct dmar_pci_notify_info;

#ifdef CONFIG_IRQ_REMAP
#define INTR_REMAP_TABLE_REG_SIZE	0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK	0xf

#define INTR_REMAP_TABLE_ENTRIES	65536

struct irq_domain;

struct ir_table {
	struct irte *base;
	unsigned long *bitmap;
};

void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
#else
static inline void
intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
#endif

struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};

enum {
	SR_DMAR_FECTL_REG,
	SR_DMAR_FEDATA_REG,
	SR_DMAR_FEADDR_REG,
	SR_DMAR_FEUADDR_REG,
	MAX_SR_DMAR_REGS
};

#define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)
#define VTD_FLAG_SVM_CAPABLE		(1 << 2)

#define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_pasid((iommu)->ecap))
#define ssads_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_slads((iommu)->ecap) &&		\
				 ecap_smpwc((iommu)->ecap))
#define nested_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_nest((iommu)->ecap))
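/*
 * Example (illustrative): these checks nest, e.g. pasid_supported()
 * already implies sm_supported(), so
 *
 *	if (pasid_supported(iommu))
 *		...
 *
 * needs no separate scalable-mode check.
 */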
struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64 lo;
	u64 hi;
};

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

struct iommu_domain_info {
	struct intel_iommu *iommu;
	unsigned int refcnt;		/* Refcount of devices per iommu */
	u16 did;			/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */
};

/*
 * We start simply by using a fixed size for the batched descriptors. This
 * size is currently sufficient for our needs. Future improvements could
 * involve dynamically allocating the batch buffer based on actual demand,
 * allowing us to adjust the batch size for optimal performance in different
 * scenarios.
 */
#define QI_MAX_BATCHED_DESC_COUNT 16
struct qi_batch {
	struct qi_desc descs[QI_MAX_BATCHED_DESC_COUNT];
	unsigned int index;
};

struct dmar_domain {
	union {
		struct iommu_domain domain;
		struct pt_iommu iommu;
		/* First stage page table */
		struct pt_iommu_x86_64 fspt;
		/* Second stage page table */
		struct pt_iommu_vtdss sspt;
	};

	struct xarray iommu_array;	/* Attached IOMMU array */

	u8 force_snooping:1;		/* Create PASID entry with snoop control */
	u8 dirty_tracking:1;		/* Dirty tracking is enabled */
	u8 nested_parent:1;		/* Has other domains nested on it */
	u8 iotlb_sync_map:1;		/* Need to flush IOTLB cache or write
					 * buffer when creating mappings.
					 */

	spinlock_t lock;		/* Protect device tracking lists */
	struct list_head devices;	/* all devices' list */
	struct list_head dev_pasids;	/* all attached pasids */

	spinlock_t cache_lock;		/* Protect the cache tag list */
	struct list_head cache_tags;	/* Cache tag list */
	struct qi_batch *qi_batch;	/* Batched QI descriptors */

	union {
		/* DMA remapping domain */
		struct {
			/* Protect the s1_domains list */
			spinlock_t s1_lock;
			/* Track s1_domains nested on this domain */
			struct list_head s1_domains;
		};

		/* Nested user domain */
		struct {
			/* parent page table which the user domain is nested on */
			struct dmar_domain *s2_domain;
			/* page table attributes */
			struct iommu_hwpt_vtd_s1 s1_cfg;
			/* link to parent domain siblings */
			struct list_head s2_link;
		};

		/* SVA domain */
		struct {
			struct mmu_notifier notifier;
		};
	};
};
PT_IOMMU_CHECK_DOMAIN(struct dmar_domain, iommu, domain);
PT_IOMMU_CHECK_DOMAIN(struct dmar_domain, sspt.iommu, domain);
PT_IOMMU_CHECK_DOMAIN(struct dmar_domain, fspt.iommu, domain);
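/*
 * The build-time checks above guarantee that every union member of
 * struct dmar_domain embeds its struct iommu_domain at the same offset.
 * Illustrative consequence: to_dmar_domain() (defined below) is valid
 * no matter which page-table flavour the domain was created with.
 */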
/*
 * In theory, the VT-d 4.0 spec can support up to 2 ^ 16 counters.
 * But in practice, there are only 14 counters on existing
 * platforms. Setting the max number of counters to 64 should be good
 * enough for a long time. Also, supporting more than 64 counters
 * requires more extras, e.g., extra freeze and overflow registers,
 * which is not necessary for now.
 */
#define IOMMU_PMU_IDX_MAX		64

struct iommu_pmu {
	struct intel_iommu	*iommu;
	u32			num_cntr;	/* Number of counters */
	u32			num_eg;		/* Number of event groups */
	u32			cntr_width;	/* Counter width */
	u32			cntr_stride;	/* Counter stride */
	u32			filter;		/* Bitmask of filter support */
	void __iomem		*base;		/* the PerfMon base address */
	void __iomem		*cfg_reg;	/* counter configuration base address */
	void __iomem		*cntr_reg;	/* counter 0 address */
	void __iomem		*overflow;	/* overflow status register */

	u64			*evcap;		/* Indicates all supported events */
	u32			**cntr_evcap;	/* Supported events of each counter. */

	struct pmu		pmu;
	DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
	struct perf_event	*event_list[IOMMU_PMU_IDX_MAX];
	unsigned char		irq_name[16];
};

#define IOMMU_IRQ_ID_OFFSET_PRQ		(DMAR_UNITS_SUPPORTED)
#define IOMMU_IRQ_ID_OFFSET_PERF	(2 * DMAR_UNITS_SUPPORTED)

struct intel_iommu {
	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
	u64		reg_phys; /* physical address of hw register set */
	u64		reg_size; /* size of hw register set */
	u64		cap;
	u64		ecap;
	u64		vccap;
	u64		ecmdcap[DMA_MAX_NUM_ECMDCAP];
	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t	register_lock; /* protect register handling */
	int		seq_id;	/* sequence id of the iommu */
	int		agaw; /* agaw of this iommu */
	int		msagaw; /* max sagaw of this iommu */
	unsigned int	irq, pr_irq, perf_irq;
	u16		segment;     /* PCI segment# */
	unsigned char	name[16];    /* Device Name */

#ifdef CONFIG_INTEL_IOMMU
	/* mutex to protect domain_ida */
	struct mutex	did_lock;
	struct ida	domain_ida; /* domain id allocator */
	unsigned long	*copied_tables; /* bitmap of copied tables */
	spinlock_t	lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */

	struct iommu_flush flush;
#endif
	struct page_req_dsc *prq;
	unsigned char prq_name[16];    /* Name for PRQ interrupt */
	unsigned long prq_seq_number;
	struct completion prq_complete;
	struct iopf_queue *iopf_queue;
	unsigned char iopfq_name[16];
	/* Synchronization between fault report and iommu device release. */
	struct mutex iopf_lock;
	struct q_inval *qi;            /* Queued invalidation info */
	u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume. */

	/* rb tree for all probed devices */
	struct rb_root device_rbtree;
	/* protect the device_rbtree */
	spinlock_t device_rbtree_lock;

#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
	struct irq_domain *ir_domain;
#endif
	struct iommu_device iommu;  /* IOMMU core code handle */
	int		node;
	u32		flags;      /* Software defined flags */

	struct dmar_drhd_unit *drhd;
	void *perf_statistic;

	struct iommu_pmu *pmu;
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	u32 segment;		/* PCI segment number */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u16 pfsid;		/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 dtlb_extra_inval:1;	/* Quirk for devices that need an extra flush */
	u8 domain_attached:1;	/* Device has domain attached */
	u8 ats_qdep;
	unsigned int iopf_refcount;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
	/* device tracking node (lookup by PCI RID) */
	struct rb_node node;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
	struct dentry *debugfs_dentry; /* pointer to device directory dentry */
#endif
};

struct dev_pasid_info {
	struct list_head link_domain;	/* link to domain siblings */
	struct device *dev;
	ioasid_t pasid;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
	struct dentry *debugfs_dentry; /* pointer to pasid directory dentry */
#endif
};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}
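/*
 * Example (illustrative): callers use this after CPU-writing descriptor
 * table entries so that a non-coherent IOMMU observes the update, e.g.
 *
 *	__iommu_flush_cache(iommu, context, sizeof(*context));
 */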
/* Convert generic struct iommu_domain to private struct dmar_domain */
static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

/*
 * Domain ID 0 and 1 are reserved:
 *
 * If Caching mode is set, then invalid translations are tagged
 * with domain-id 0, hence we need to pre-allocate it. We also
 * use domain-id 0 as a marker for non-allocated domain-id, so
 * make sure it is not used for a real domain.
 *
 * VT-d spec rev 3.0 (section 6.2.3.1) requires that each pasid
 * entry for first-level or pass-through translation modes should
 * be programmed with a domain id different from those used for
 * second-level or nested translation. We reserve a domain id for
 * this purpose. This domain id is also used for the identity domain
 * in legacy mode.
 */
#define FLPT_DEFAULT_DID		1
#define IDA_START_DID			2

/* Retrieve the domain ID which has been allocated to the domain */
static inline u16
domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
	struct iommu_domain_info *info =
		xa_load(&domain->iommu_array, iommu->seq_id);

	return info->did;
}

static inline u16
iommu_domain_did(struct iommu_domain *domain, struct intel_iommu *iommu)
{
	if (domain->type == IOMMU_DOMAIN_SVA ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return FLPT_DEFAULT_DID;
	return domain_id_iommu(to_dmar_domain(domain), iommu);
}

static inline bool dev_is_real_dma_subdevice(struct device *dev)
{
	return dev && dev_is_pci(dev) &&
	       pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
#define MAX_AGAW_WIDTH		(64)
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(u64 pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
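/*
 * Example (illustrative): a 48-bit address width gives
 * width_to_agaw(48) == 2 and agaw_to_level(2) == 4 (a 4-level table);
 * a 57-bit width gives agaw 3 and a 5-level table.
 */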
static inline void context_set_present(struct context_entry *context)
{
	u64 val;

	dma_wmb();
	val = READ_ONCE(context->lo) | 1;
	WRITE_ONCE(context->lo, val);
}

/*
 * Clear the Present (P) bit (bit 0) of a context table entry. This initiates
 * the transition of the entry's ownership from hardware to software. The
 * caller is responsible for fulfilling the invalidation handshake recommended
 * by the VT-d spec, Section 6.5.3.3 (Guidance to Software for Invalidations).
 */
static inline void context_clear_present(struct context_entry *context)
{
	u64 val;

	val = READ_ONCE(context->lo) & GENMASK_ULL(63, 1);
	WRITE_ONCE(context->lo, val);
	dma_wmb();
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_set_pasid(struct context_entry *context)
{
	context->lo |= CONTEXT_PASIDE;
}

static inline int context_domain_id(struct context_entry *c)
{
	return ((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}

#ifdef CONFIG_INTEL_IOMMU
static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu->copied_tables)
		return false;

	return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}

static inline void
set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}

static inline void
clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
#endif /* CONFIG_INTEL_IOMMU */

/*
 * Set the RID_PASID field of a scalable mode context entry. The
 * IOMMU hardware will use the PASID value set in this field for
 * DMA translations of DMA requests without PASID.
 */
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
	context->hi |= pasid & ((1 << 20) - 1);
}

/*
 * Set the DTE (Device-TLB Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_dte(struct context_entry *context)
{
	context->lo |= BIT_ULL(2);
}

/*
 * Set the PRE (Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_pre(struct context_entry *context)
{
	context->lo |= BIT_ULL(4);
}

/*
 * Clear the PRE (Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_clear_sm_pre(struct context_entry *context)
{
	context->lo &= ~BIT_ULL(4);
}

/* Return the number of VT-d pages, aligned to the host (MM) page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
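/*
 * Example (illustrative, 4KiB host pages): aligned_nrpages(0x1234, 0x2000)
 * rounds 0x234 + 0x2000 up to 0x3000 and returns 3 VT-d pages.
 */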
/* Return a size from a number of VT-d pages */
static inline unsigned long nrpages_to_size(unsigned long npages)
{
	return npages << VTD_PAGE_SHIFT;
}

static inline void qi_desc_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
				 unsigned int size_order, u64 type,
				 struct qi_desc *desc)
{
	u8 dw = 0, dr = 0;
	int ih = addr & 1;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc->qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc->qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);
	desc->qw2 = 0;
	desc->qw3 = 0;
}
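/*
 * Example (illustrative sketch): build a domain-selective IOTLB
 * invalidation and submit it through the invalidation queue
 * (qi_submit_sync() is declared later in this header):
 *
 *	struct qi_desc desc;
 *
 *	qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc);
 *	qi_submit_sync(iommu, &desc, 1, 0);
 */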
static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr,
				     unsigned int mask, struct qi_desc *desc)
{
	if (mask) {
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc->qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else {
		desc->qw1 = QI_DEV_IOTLB_ADDR(addr);
	}

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc->qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
	desc->qw2 = 0;
	desc->qw3 = 0;
}

static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
				  unsigned long npages, bool ih,
				  struct qi_desc *desc)
{
	if (npages == -1) {
		desc->qw0 = QI_EIOTLB_PASID(pasid) |
			    QI_EIOTLB_DID(did) |
			    QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			    QI_EIOTLB_TYPE;
		desc->qw1 = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(npages));
		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));

		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
			addr = ALIGN_DOWN(addr, align);

		desc->qw0 = QI_EIOTLB_PASID(pasid) |
			    QI_EIOTLB_DID(did) |
			    QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
			    QI_EIOTLB_TYPE;
		desc->qw1 = QI_EIOTLB_ADDR(addr) |
			    QI_EIOTLB_IH(ih) |
			    QI_EIOTLB_AM(mask);
	}
}

static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
					   u16 qdep, u64 addr,
					   unsigned int size_order,
					   struct qi_desc *desc)
{
	unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);

	desc->qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
		QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
		QI_DEV_IOTLB_PFSID(pfsid);

	/*
	 * If the S bit is 0, only a single page is flushed. If the S bit is
	 * set, the least significant zero bit in the address indicates the
	 * invalidation range, see VT-d spec 6.5.2.6: e.g. a zero at address
	 * bit 12 indicates 8KB, at bit 13 16KB; size order 0 is PAGE_SIZE
	 * (4KB). Max Invs Pending (MIP) is set to 0 for now until we have
	 * DIT in ECAP.
	 */
	if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
		pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
				    addr, size_order);

	/* Take page address */
	desc->qw1 = QI_DEV_EIOTLB_ADDR(addr);

	if (size_order) {
		/*
		 * Existing 0s in the address below size_order may be the
		 * least significant bit, so they must be set to 1s to avoid
		 * encoding a smaller size than desired.
		 */
		desc->qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
					 VTD_PAGE_SHIFT);
		/* Clear the size_order bit to indicate the size */
		desc->qw1 &= ~mask;
		/* Set the S bit to indicate flushing more than 1 page */
		desc->qw1 |= QI_DEV_EIOTLB_SIZE;
	}
}

/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds)	(((pds) & 0x7) << 9)

struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);

int dmar_enable_qi(struct intel_iommu *iommu);
void dmar_disable_qi(struct intel_iommu *iommu);
int dmar_reenable_qi(struct intel_iommu *iommu);
void qi_global_iec(struct intel_iommu *iommu);

void qi_flush_context(struct intel_iommu *iommu, u16 did,
		      u16 sid, u8 fm, u64 type);
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type);
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask);

void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
		     unsigned long npages, bool ih);

void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			      u32 pasid, u16 qdep, u64 addr,
			      unsigned int size_order);
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
			       unsigned long address, unsigned long pages,
			       u32 pasid, u16 qdep);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
			  u32 pasid);

int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
		   unsigned int count, unsigned long options);

void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			 unsigned int size_order, u64 type);
/*
 * Options used in qi_submit_sync:
 * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
 */
#define QI_OPT_WAIT_DRAIN		BIT(0)
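/*
 * Example (illustrative): passing QI_OPT_WAIT_DRAIN as @options makes
 * qi_submit_sync() also wait until in-flight page requests are drained:
 *
 *	qi_submit_sync(iommu, descs, count, QI_OPT_WAIT_DRAIN);
 */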
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
int paging_domain_compatible(struct iommu_domain *domain, struct device *dev);

struct dev_pasid_info *
domain_add_dev_pasid(struct iommu_domain *domain,
		     struct device *dev, ioasid_t pasid);
void domain_remove_dev_pasid(struct iommu_domain *domain,
			     struct device *dev, ioasid_t pasid);

int __domain_setup_first_level(struct intel_iommu *iommu, struct device *dev,
			       ioasid_t pasid, u16 did, phys_addr_t fsptptr,
			       int flags, struct iommu_domain *old);

int dmar_ir_support(void);

void iommu_flush_write_buffer(struct intel_iommu *iommu);
struct iommu_domain *
intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
				u32 flags,
				const struct iommu_user_data *user_data);
struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);

enum cache_tag_type {
	CACHE_TAG_IOTLB,
	CACHE_TAG_DEVTLB,
	CACHE_TAG_NESTING_IOTLB,
	CACHE_TAG_NESTING_DEVTLB,
};

struct cache_tag {
	struct list_head node;
	enum cache_tag_type type;
	struct intel_iommu *iommu;
	/*
	 * The @dev field represents the location of the cache. For IOTLB,
	 * the cache resides on the IOMMU hardware and @dev stores the device
	 * pointer to that hardware. For DevTLB, the cache is located in the
	 * PCIe endpoint and @dev stores the device pointer to that endpoint.
	 */
	struct device *dev;
	u16 domain_id;
	ioasid_t pasid;
	unsigned int users;
};

int cache_tag_assign(struct dmar_domain *domain, u16 did, struct device *dev,
		     ioasid_t pasid, enum cache_tag_type type);
int cache_tag_assign_domain(struct dmar_domain *domain,
			    struct device *dev, ioasid_t pasid);
void cache_tag_unassign_domain(struct dmar_domain *domain,
			       struct device *dev, ioasid_t pasid);
void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
			   unsigned long end, int ih);
void cache_tag_flush_all(struct dmar_domain *domain);
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
			      unsigned long end);

void intel_context_flush_no_pasid(struct device_domain_info *info,
				  struct context_entry *context, u16 did);

int intel_iommu_enable_prq(struct intel_iommu *iommu);
int intel_iommu_finish_prq(struct intel_iommu *iommu);
void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
			       struct iommu_page_response *msg);
void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);

int intel_iommu_enable_iopf(struct device *dev);
void intel_iommu_disable_iopf(struct device *dev);

static inline int iopf_for_domain_set(struct iommu_domain *domain,
				      struct device *dev)
{
	if (!domain || !domain->iopf_handler)
		return 0;

	return intel_iommu_enable_iopf(dev);
}

static inline void iopf_for_domain_remove(struct iommu_domain *domain,
					  struct device *dev)
{
	if (!domain || !domain->iopf_handler)
		return;

	intel_iommu_disable_iopf(dev);
}

static inline int iopf_for_domain_replace(struct iommu_domain *new,
					  struct iommu_domain *old,
					  struct device *dev)
{
	int ret;

	ret = iopf_for_domain_set(new, dev);
	if (ret)
		return ret;

	iopf_for_domain_remove(old, dev);

	return 0;
}
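/*
 * Note on ordering (restating the code above): iopf_for_domain_replace()
 * enables IOPF for the new domain before disabling it for the old one,
 * so a device never loses page-fault reporting across a domain
 * transition.
 */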
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
static inline struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
							  struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}
#endif

#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
void intel_iommu_debugfs_create_dev(struct device_domain_info *info);
void intel_iommu_debugfs_remove_dev(struct device_domain_info *info);
void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid);
void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid);
#else
static inline void intel_iommu_debugfs_init(void) {}
static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {}
static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {}
static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {}
static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */

extern const struct attribute_group *intel_iommu_groups[];
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc);

extern const struct iommu_ops intel_iommu_ops;
extern const struct iommu_domain_ops intel_fs_paging_domain_ops;
extern const struct iommu_domain_ops intel_ss_paging_domain_ops;

static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
{
	return domain->domain.ops == &intel_fs_paging_domain_ops;
}

static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain)
{
	return domain->domain.ops == &intel_ss_paging_domain_ops;
}

#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_sm;
int iommu_calculate_agaw(struct intel_iommu *iommu);
int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob);

static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu)
{
	return (iommu->ecmdcap[DMA_ECMD_ECCAP3] & DMA_ECMD_ECCAP3_ESSENTIAL) ==
		DMA_ECMD_ECCAP3_ESSENTIAL;
}

extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
#define dmar_disabled	(1)
#define intel_iommu_enabled (0)
#define intel_iommu_sm (0)
#endif

static inline const char *decode_prq_descriptor(char *str, size_t size,
						u64 dw0, u64 dw1, u64 dw2, u64 dw3)
{
	char *buf = str;
	int bytes;

	bytes = snprintf(buf, size,
			 "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
			 FIELD_GET(GENMASK_ULL(31, 16), dw0),
			 FIELD_GET(GENMASK_ULL(63, 12), dw1),
			 dw1 & BIT_ULL(0) ? 'r' : '-',
			 dw1 & BIT_ULL(1) ? 'w' : '-',
			 dw0 & BIT_ULL(52) ? 'x' : '-',
			 dw0 & BIT_ULL(53) ? 'p' : '-',
			 dw1 & BIT_ULL(2) ? 'l' : '-',
			 FIELD_GET(GENMASK_ULL(51, 32), dw0),
			 FIELD_GET(GENMASK_ULL(11, 3), dw1));

	/* Private Data */
	if (dw0 & BIT_ULL(9)) {
		size -= bytes;
		buf += bytes;
		snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
	}

	return str;
}

#endif /* _INTEL_IOMMU_H_ */