// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"

struct arm_smmu_mmu_notifier {
	struct mmu_notifier		mn;
	struct arm_smmu_ctx_desc	*cd;
	bool				cleared;
	refcount_t			refs;
	struct list_head		list;
	struct arm_smmu_domain		*domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

struct arm_smmu_bond {
	struct iommu_sva		sva;
	struct mm_struct		*mm;
	struct arm_smmu_mmu_notifier	*smmu_mn;
	struct list_head		list;
	refcount_t			refs;
};

#define sva_to_bond(handle) \
	container_of(handle, struct arm_smmu_bond, sva)

static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new
	 * ASID, which isn't assigned yet. We'll do an invalidate-all on the
	 * old ASID later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, 0, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}
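
/*
 * Build a context descriptor that mirrors the CPU's translation regime for
 * @mm: same page table base (TTBR), translation control (TCR), memory
 * attributes (MAIR) and a pinned ASID, so the SMMU can walk the process page
 * tables directly. (Descriptive summary added for clarity; see the body below
 * for the authoritative behaviour.)
 */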
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

	asid = arm64_mm_context_get(mm);
	if (!asid)
		return ERR_PTR(-ESRCH);

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		kfree(cd);
	}
}
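
/*
 * mmu_notifier .release callback: the address space is about to be torn down
 * (e.g. from exit_mmap()), but bound devices may keep issuing DMA until
 * unbind. (Comment added for clarity.)
 */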
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	mutex_lock(&sva_lock);
	if (smmu_mn->cleared) {
		mutex_unlock(&sva_lock);
		return;
	}

	/*
	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD
	 * events, but disable translation.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);

	smmu_mn->cleared = true;
	mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
	kfree(mn_to_smmu(mn));
}

static struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
	.release	= arm_smmu_mm_release,
	.free_notifier	= arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
			  struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_mmu_notifier *smmu_mn;

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);
			return smmu_mn;
		}
	}

	cd = arm_smmu_alloc_shared_cd(mm);
	if (IS_ERR(cd))
		return ERR_CAST(cd);

	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
	if (!smmu_mn) {
		ret = -ENOMEM;
		goto err_free_cd;
	}

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);
	if (ret) {
		kfree(smmu_mn);
		goto err_free_cd;
	}

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
	if (ret)
		goto err_put_notifier;

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
	return smmu_mn;

err_put_notifier:
	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
	arm_smmu_free_shared_cd(cd);
	return ERR_PTR(ret);
}

static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))
		return;

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	/*
	 * If we went through clear(), we've already invalidated, and no
	 * new TLB entry can have been formed.
	 */
	if (!smmu_mn->cleared)
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);

	/* Frees smmu_mn */
	mmu_notifier_put(&smmu_mn->mn);
	arm_smmu_free_shared_cd(cd);
}
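
/*
 * Bind an mm to the SMMU for this master: allocate a PASID for the mm if
 * necessary, install the shared context descriptor and take a reference on
 * the per-mm MMU notifier. Called by arm_smmu_sva_bind() with sva_lock held.
 * Device drivers typically reach this path through the core SVA API, roughly:
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program the device to issue DMA tagged with pasid ...
 *	iommu_sva_unbind_device(handle);
 *
 * (Sketch only; the exact call sites live in the consuming device drivers.)
 */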
static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
	int ret;
	struct arm_smmu_bond *bond;
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	/* If bind() was already called for this {dev, mm} pair, reuse it. */
	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;
		}
	}

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return ERR_PTR(-ENOMEM);

	/* Allocate a PASID for this mm if necessary */
	ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
	if (ret)
		goto err_free_bond;

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);
		goto err_free_pasid;
	}

	list_add(&bond->list, &master->bonds);
	return &bond->sva;

err_free_pasid:
	iommu_sva_free_pasid(mm);
err_free_bond:
	kfree(bond);
	return ERR_PTR(ret);
}

struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_sva *handle;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return ERR_PTR(-EINVAL);

	mutex_lock(&sva_lock);
	handle = __arm_smmu_sva_bind(dev, mm);
	mutex_unlock(&sva_lock);
	return handle;
}

void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	mutex_lock(&sva_lock);
	if (refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
		iommu_sva_free_pasid(bond->mm);
		kfree(bond);
	}
	mutex_unlock(&sva_lock);
}

u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
	struct arm_smmu_bond *bond = sva_to_bond(handle);

	return bond->mm->pasid;
}

bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature).
	 * We're not even pretending to support AArch32 here. Abort if the MMU
	 * outputs addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
{
	return false;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID and IOPF support are mandatory for the moment */
	return master->ssid_bits && arm_smmu_iopf_supported(master);
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return 0;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
	/*
	 * Some MMU notifiers may still be waiting to be freed, using
	 * arm_smmu_mmu_notifier_free(). Wait for them.
	 */
	mmu_notifier_synchronize();
}