// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <kunit/visibility.h>

#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"

static DEFINE_MUTEX(sva_lock);

static void __maybe_unused
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_master_domain *master_domain;
	struct arm_smmu_cd target_cd;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) {
		struct arm_smmu_master *master = master_domain->master;
		struct arm_smmu_cd *cdptr;

		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
		if (WARN_ON(!cdptr))
			continue;

		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
					&target_cd);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}

/* Translate the kernel's PAGE_SIZE into the CD.TG0 granule encoding */
static u64 page_size_to_cd(void)
{
	static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
		      PAGE_SIZE == SZ_64K);
	if (PAGE_SIZE == SZ_64K)
		return ARM_LPAE_TCR_TG0_64K;
	if (PAGE_SIZE == SZ_16K)
		return ARM_LPAE_TCR_TG0_16K;
	return ARM_LPAE_TCR_TG0_4K;
}

VISIBLE_IF_KUNIT
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
			  struct arm_smmu_master *master, struct mm_struct *mm,
			  u16 asid)
{
	u64 par;

	memset(target, 0, sizeof(*target));

	par = cpuid_feature_extract_unsigned_field(
		read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
		ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	target->data[0] = cpu_to_le64(
		CTXDESC_CD_0_TCR_EPD1 |
#ifdef __BIG_ENDIAN
		CTXDESC_CD_0_ENDI |
#endif
		CTXDESC_CD_0_V |
		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
		CTXDESC_CD_0_AA64 |
		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
		CTXDESC_CD_0_R |
		CTXDESC_CD_0_A |
		CTXDESC_CD_0_ASET |
		FIELD_PREP(CTXDESC_CD_0_ASID, asid));

	/*
	 * If no MM is passed then this creates an SVA entry that faults
	 * everything. arm_smmu_write_cd_entry() can hitlessly go between these
	 * two entry types since TTB0 is ignored by HW when EPD0 is set.
	 */
	if (mm) {
		target->data[0] |= cpu_to_le64(
			FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
				   64ULL - vabits_actual) |
			FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
			FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
				   ARM_LPAE_TCR_RGN_WBWA) |
			FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
				   ARM_LPAE_TCR_RGN_WBWA) |
			FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));

		target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
					      CTXDESC_CD_1_TTB0_MASK);
	} else {
		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);

		/*
		 * Disable stall and immediately generate an abort if stall
		 * disable is permitted. This speeds up cleanup for an unclean
		 * exit if the device is still doing a lot of DMA.
		 */
		if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
			target->data[0] &=
				cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
	}

	/*
	 * The MAIR value is pretty much constant and global, so we can just
	 * get it from the current CPU register.
	 */
	target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);

/*
 * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h. On an SMMU
 * without the range invalidation feature, issuing one TLBI command per page
 * for a large range can take long enough to cause a soft lockup, so beyond
 * this threshold the per-page commands are replaced with a single
 * address-space (ASID) invalidation.
 */
#define CMDQ_MAX_TLBI_OPS	(1 << (PAGE_SHIFT - 3))

static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
{
	struct arm_smmu_domain *smmu_domain =
		container_of(mn, struct arm_smmu_domain, mmu_notifier);
	size_t size;

	/*
	 * mm_types.h defines vm_end as the first byte after the end address,
	 * whereas the IOMMU subsystem uses the last address of a range. Do the
	 * simple translation here by working with the size instead.
	 */
	size = end - start;
	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
		if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
			size = 0;
	} else {
		if (size == ULONG_MAX)
			size = 0;
	}

	if (!size)
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
	else
		arm_smmu_tlb_inv_range_asid(start, size, smmu_domain->cd.asid,
					    PAGE_SIZE, false, smmu_domain);

	arm_smmu_atc_inv_domain(smmu_domain, start, size);
}

static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_domain *smmu_domain =
		container_of(mn, struct arm_smmu_domain, mmu_notifier);
	struct arm_smmu_master_domain *master_domain;
	unsigned long flags;

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD
	 * events, but disable translation.
	 */
	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices,
			    devices_elm) {
		struct arm_smmu_master *master = master_domain->master;
		struct arm_smmu_cd target;
		struct arm_smmu_cd *cdptr;

		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
		if (WARN_ON(!cdptr))
			continue;
		arm_smmu_make_sva_cd(&target, master, NULL,
				     smmu_domain->cd.asid);
		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
					&target);
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
	arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct arm_smmu_domain, mmu_notifier));
}

static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.arch_invalidate_secondary_tlbs = arm_smmu_mm_arch_invalidate_secondary_tlbs,
	.release = arm_smmu_mm_release,
	.free_notifier = arm_smmu_mmu_notifier_free,
};

bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	/* We're not keeping track of SIDs in fault events */
	if (master->num_streams != 1)
		return false;

	return master->stall_enabled;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID support is mandatory for the moment */
	return master->ssid_bits;
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	/*
	 * Drivers for devices supporting PRI or stall should enable IOPF first.
	 * Others have device-specific fault handlers and don't need IOPF.
	 */
	if (!arm_smmu_master_iopf_supported(master))
		return 0;

	if (!master->iopf_enabled)
		return -EINVAL;

	return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
}

static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
{
	struct device *dev = master->dev;

	if (!master->iopf_enabled)
		return;

	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	int ret;

	mutex_lock(&sva_lock);
	ret = arm_smmu_master_sva_enable_iopf(master);
	if (!ret)
		master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	arm_smmu_master_sva_disable_iopf(master);
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}

static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t id)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_cd target;
	int ret;

	/* Prevent arm_smmu_mm_release from being called while we are attaching */
	if (!mmget_not_zero(domain->mm))
		return -EINVAL;

	/*
	 * This does not need the arm_smmu_asid_lock because SVA domains never
	 * get reassigned
	 */
	arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid);
	ret = arm_smmu_set_pasid(master, smmu_domain, id, &target);

	mmput(domain->mm);
	return ret;
}

static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Ensure the ASID is empty in the iommu cache before allowing reuse.
	 */
	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);

	/*
	 * Notice that the arm_smmu_mm_arch_invalidate_secondary_tlbs op can
	 * still be called/running at this point. We allow the ASID to be
	 * reused, and if there is a race then it just suffers harmless
	 * unnecessary invalidation.
	 */
	xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);

	/*
	 * Actual free is deferred to the SRCU callback
	 * arm_smmu_mmu_notifier_free()
	 */
	mmu_notifier_put(&smmu_domain->mmu_notifier);
}

static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
	.set_dev_pasid = arm_smmu_sva_set_dev_pasid,
	.free = arm_smmu_sva_domain_free
};

struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
					       struct mm_struct *mm)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_device *smmu = master->smmu;
	struct arm_smmu_domain *smmu_domain;
	u32 asid;
	int ret;

	smmu_domain = arm_smmu_domain_alloc();
	if (IS_ERR(smmu_domain))
		return ERR_CAST(smmu_domain);
	smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
	smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
	smmu_domain->smmu = smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		goto err_free;

	smmu_domain->cd.asid = asid;
	smmu_domain->mmu_notifier.ops = &arm_smmu_mmu_notifier_ops;
	ret = mmu_notifier_register(&smmu_domain->mmu_notifier, mm);
	if (ret)
		goto err_asid;

	return &smmu_domain->domain;

err_asid:
	xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);
err_free:
	kfree(smmu_domain);
	return ERR_PTR(ret);
}
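
/*
 * Usage note (illustrative only, not part of this driver): none of the
 * functions above are called directly by client drivers. The IOMMU core's SVA
 * bind path is what ends up allocating the SVA domain and attaching it to a
 * PASID via the set_dev_pasid op. A client driver that wants to share its
 * process page tables with the device would typically go through the generic
 * SVA API, roughly as in the sketch below (error handling trimmed, and how the
 * PASID gets programmed into the device is device specific):
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *
 *	// program 'pasid' into the device, then issue DMA on process VAs
 *
 *	iommu_sva_unbind_device(handle);
 */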