// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
#include <linux/string_choices.h>
#include <kunit/visibility.h>
#include <uapi/linux/iommufd.h>

#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"

static bool disable_msipolling;
module_param(disable_msipolling, bool, 0444);
MODULE_PARM_DESC(disable_msipolling,
	"Disable MSI-based polling for CMD_SYNC completion.");

static struct iommu_ops arm_smmu_ops;
static struct iommu_dirty_ops arm_smmu_dirty_ops;

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

#define NUM_ENTRY_QWORDS 8
static_assert(sizeof(struct arm_smmu_ste) == NUM_ENTRY_QWORDS * sizeof(u64));
static_assert(sizeof(struct arm_smmu_cd) == NUM_ENTRY_QWORDS * sizeof(u64));

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
DEFINE_MUTEX(arm_smmu_asid_lock);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
	{ 0, NULL},
};

static const char * const event_str[] = {
	[EVT_ID_BAD_STREAMID_CONFIG] = "C_BAD_STREAMID",
	[EVT_ID_STE_FETCH_FAULT] = "F_STE_FETCH",
	[EVT_ID_BAD_STE_CONFIG] = "C_BAD_STE",
	[EVT_ID_STREAM_DISABLED_FAULT] = "F_STREAM_DISABLED",
	[EVT_ID_BAD_SUBSTREAMID_CONFIG] = "C_BAD_SUBSTREAMID",
	[EVT_ID_CD_FETCH_FAULT] = "F_CD_FETCH",
	[EVT_ID_BAD_CD_CONFIG] = "C_BAD_CD",
	[EVT_ID_TRANSLATION_FAULT] = "F_TRANSLATION",
	[EVT_ID_ADDR_SIZE_FAULT] = "F_ADDR_SIZE",
	[EVT_ID_ACCESS_FAULT] = "F_ACCESS",
	[EVT_ID_PERMISSION_FAULT] = "F_PERMISSION",
	[EVT_ID_VMS_FETCH_FAULT] = "F_VMS_FETCH",
};

static const char * const event_class_str[] = {
	[0] = "CD fetch",
	[1] = "Stage 1 translation table fetch",
	[2] = "Input address caused fault",
	[3] = "Reserved",
};

static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
{
	u32 space, prod, cons;

	prod = Q_IDX(q, q->prod);
	cons = Q_IDX(q, q->cons);

	if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
		space = (1 << q->max_n_shift) - (prod - cons);
	else
		space = cons - prod;

	return space >= n;
}

static bool queue_full(struct arm_smmu_ll_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_ll_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
{
	return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
		(Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
	       ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
		(Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
}
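/*
 * Worked example (illustrative numbers, not from the spec): for a queue
 * with max_n_shift == 8 (256 entries), the low 8 bits of prod/cons are
 * the index and bit 8 is the wrap flag. With prod == 0x105 (index 5,
 * wrapped once) and cons == 0x0fb (index 251, not wrapped), the wrap
 * bits differ, so queue_has_space() computes space = 251 - 5 = 246 free
 * slots. queue_full() is true only when the indices match but the wrap
 * bits differ; queue_empty() when both match.
 */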
static void queue_sync_cons_out(struct arm_smmu_queue *q)
{
	/*
	 * Ensure that all CPU accesses (reads and writes) to the queue
	 * are complete before we update the cons pointer.
	 */
	__iomb();
	writel_relaxed(q->llq.cons, q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_ll_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
}

static void queue_sync_cons_ovf(struct arm_smmu_queue *q)
{
	struct arm_smmu_ll_queue *llq = &q->llq;

	if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
		return;

	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
		    Q_IDX(llq, llq->cons);
	queue_sync_cons_out(q);
}

static int queue_sync_prod_in(struct arm_smmu_queue *q)
{
	u32 prod;
	int ret = 0;

	/*
	 * We can't use the _relaxed() variant here, as we must prevent
	 * speculative reads of the queue before we have determined that
	 * prod has indeed moved.
	 */
	prod = readl(q->prod_reg);

	if (Q_OVF(prod) != Q_OVF(q->llq.prod))
		ret = -EOVERFLOW;

	q->llq.prod = prod;
	return ret;
}

static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;

	return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
}

static void queue_poll_init(struct arm_smmu_device *smmu,
			    struct arm_smmu_queue_poll *qp)
{
	qp->delay = 1;
	qp->spin_cnt = 0;
	qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
}

static int queue_poll(struct arm_smmu_queue_poll *qp)
{
	if (ktime_compare(ktime_get(), qp->timeout) > 0)
		return -ETIMEDOUT;

	if (qp->wfe) {
		wfe();
	} else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) {
		cpu_relax();
	} else {
		udelay(qp->delay);
		qp->delay *= 2;
		qp->spin_cnt = 0;
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static void queue_read(u64 *dst, __le64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(&q->llq))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
	queue_inc_cons(&q->llq);
	queue_sync_cons_out(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
	cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
		break;
	case CMDQ_OP_CFGI_CD:
		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid);
		fallthrough;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
		break;
	case CMDQ_OP_CFGI_CD_ALL:
		cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		fallthrough;
	case CMDQ_OP_TLBI_EL2_VA:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
		cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
		fallthrough;
	case CMDQ_OP_TLBI_NH_ALL:
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
		break;
	case CMDQ_OP_TLBI_EL2_ASID:
		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
		break;
	case CMDQ_OP_ATC_INV:
		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid);
		cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid);
		cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size);
		cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
		cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
		case PRI_RESP_FAIL:
		case PRI_RESP_SUCC:
			break;
		default:
			return -EINVAL;
		}
		cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
		break;
	case CMDQ_OP_RESUME:
		cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_SID, ent->resume.sid);
		cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_RESP, ent->resume.resp);
		cmd[1] |= FIELD_PREP(CMDQ_RESUME_1_STAG, ent->resume.stag);
		break;
	case CMDQ_OP_CMD_SYNC:
		if (ent->sync.msiaddr) {
			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
			cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
		} else {
			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
		}
		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu,
					       struct arm_smmu_cmdq_ent *ent)
{
	struct arm_smmu_cmdq *cmdq = NULL;

	if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq)
		cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);

	return cmdq ?: &smmu->cmdq;
}

static bool arm_smmu_cmdq_needs_busy_polling(struct arm_smmu_device *smmu,
					     struct arm_smmu_cmdq *cmdq)
{
	if (cmdq == &smmu->cmdq)
		return false;

	return smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV;
}

static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
					 struct arm_smmu_cmdq *cmdq, u32 prod)
{
	struct arm_smmu_queue *q = &cmdq->q;
	struct arm_smmu_cmdq_ent ent = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	/*
	 * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI
	 * payload, so the write will zero the entire command on that platform.
	 */
	if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
		ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
				   q->ent_dwords * 8;
	}

	arm_smmu_cmdq_build_cmd(cmd, &ent);
	if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
		u64p_replace_bits(cmd, CMDQ_SYNC_0_CS_NONE, CMDQ_SYNC_0_CS);
}
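/*
 * Illustrative arithmetic (assuming 16-byte command entries, i.e.
 * CMDQ_ENT_DWORDS == 2): the CMD_SYNC at index 3 lives at byte offset
 * 3 * 2 * 8 = 48 from base_dma. Pointing the MSI doorbell at the entry
 * itself makes the SMMU zero the first 4 bytes of that CMD_SYNC on
 * completion, which is exactly what __arm_smmu_cmdq_poll_until_msi()
 * polls for below.
 */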
void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
			      struct arm_smmu_cmdq *cmdq)
{
	static const char * const cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
		[CMDQ_ERR_CERROR_ATC_INV_IDX]	= "ATC invalidate timeout",
	};
	struct arm_smmu_queue *q = &cmdq->q;

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
		return;
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ATC_INV_IDX:
		/*
		 * ATC Invalidation Completion timeout. CONS is still pointing
		 * at the CMD_SYNC. Attempt to complete other pending commands
		 * by repeating the CMD_SYNC, though we might well end up back
		 * here since the ATC invalidation may still be pending.
		 */
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	arm_smmu_cmdq_build_cmd(cmd, &cmd_sync);
	if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
		u64p_replace_bits(cmd, CMDQ_SYNC_0_CS_NONE, CMDQ_SYNC_0_CS);

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	__arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq);
}

/*
 * Command queue locking.
 * This is a form of bastardised rwlock with the following major changes:
 *
 * - The only LOCK routines are exclusive_trylock() and shared_lock().
 *   Neither have barrier semantics, and instead provide only a control
 *   dependency.
 *
 * - The UNLOCK routines are supplemented with shared_tryunlock(), which
 *   fails if the caller appears to be the last lock holder (yes, this is
 *   racy). All successful UNLOCK routines have RELEASE semantics.
 */
static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
{
	int val;

	/*
	 * We can try to avoid the cmpxchg() loop by simply incrementing the
	 * lock counter. When held in exclusive state, the lock counter is set
	 * to INT_MIN so these increments won't hurt as the value will remain
	 * negative.
	 */
	if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
		return;

	do {
		val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
	} while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
}

static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
{
	(void)atomic_dec_return_release(&cmdq->lock);
}

static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
{
	if (atomic_read(&cmdq->lock) == 1)
		return false;

	arm_smmu_cmdq_shared_unlock(cmdq);
	return true;
}

#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)		\
({									\
	bool __ret;							\
	local_irq_save(flags);						\
	__ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN);	\
	if (!__ret)							\
		local_irq_restore(flags);				\
	__ret;								\
})

#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags)		\
({									\
	atomic_set_release(&cmdq->lock, 0);				\
	local_irq_restore(flags);					\
})
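/*
 * Lock-state sketch (illustrative): cmdq->lock == 0 means unlocked,
 * N > 0 means N shared holders, INT_MIN means exclusively held. A
 * shared_lock() racing with an exclusive holder still increments the
 * counter, but the result stays negative, so it falls into the
 * atomic_cond_read_relaxed() loop until the owner releases with 0.
 */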
/*
 * Command queue insertion.
 * This is made fiddly by our attempts to achieve some sort of scalability
 * since there is one queue shared amongst all of the CPUs in the system. If
 * you like mixed-size concurrency, dependency ordering and relaxed atomics,
 * then you'll *love* this monstrosity.
 *
 * The basic idea is to split the queue up into ranges of commands that are
 * owned by a given CPU; the owner may not have written all of the commands
 * itself, but is responsible for advancing the hardware prod pointer when
 * the time comes. The algorithm is roughly:
 *
 *	1. Allocate some space in the queue. At this point we also discover
 *	   whether the head of the queue is currently owned by another CPU,
 *	   or whether we are the owner.
 *
 *	2. Write our commands into our allocated slots in the queue.
 *
 *	3. Mark our slots as valid in arm_smmu_cmdq.valid_map.
 *
 *	4. If we are an owner:
 *		a. Wait for the previous owner to finish.
 *		b. Mark the queue head as unowned, which tells us the range
 *		   that we are responsible for publishing.
 *		c. Wait for all commands in our owned range to become valid.
 *		d. Advance the hardware prod pointer.
 *		e. Tell the next owner we've finished.
 *
 *	5. If we are inserting a CMD_SYNC (we may or may not have been an
 *	   owner), then we need to stick around until it has completed:
 *		a. If we have MSIs, the SMMU can write back into the CMD_SYNC
 *		   to clear the first 4 bytes.
 *		b. Otherwise, we spin waiting for the hardware cons pointer to
 *		   advance past our command.
 *
 * The devil is in the details, particularly the use of locking for handling
 * SYNC completion and freeing up space in the queue before we think that it is
 * full.
 */
static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
					       u32 sprod, u32 eprod, bool set)
{
	u32 swidx, sbidx, ewidx, ebidx;
	struct arm_smmu_ll_queue llq = {
		.max_n_shift	= cmdq->q.llq.max_n_shift,
		.prod		= sprod,
	};

	ewidx = BIT_WORD(Q_IDX(&llq, eprod));
	ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;

	while (llq.prod != eprod) {
		unsigned long mask;
		atomic_long_t *ptr;
		u32 limit = BITS_PER_LONG;

		swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
		sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;

		ptr = &cmdq->valid_map[swidx];

		if ((swidx == ewidx) && (sbidx < ebidx))
			limit = ebidx;

		mask = GENMASK(limit - 1, sbidx);

		/*
		 * The valid bit is the inverse of the wrap bit. This means
		 * that a zero-initialised queue is invalid and, after marking
		 * all entries as valid, they become invalid again when we
		 * wrap.
		 */
		if (set) {
			atomic_long_xor(mask, ptr);
		} else { /* Poll */
			unsigned long valid;

			valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
			atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid);
		}

		llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
	}
}
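/*
 * Illustrative wrap scenario: on the first lap Q_WRP() of the producer
 * index is 0, so the poll side waits for (VAL & mask) == mask, i.e. the
 * bits the xor in the set path flipped from 0 to 1. After the queue
 * wraps, Q_WRP() is non-zero, ULONG_MAX + 1 overflows to 0 and the poll
 * waits for the same bits to read as 0 again.
 */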
/* Mark all entries in the range [sprod, eprod) as valid */
static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
					u32 sprod, u32 eprod)
{
	__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
}

/* Wait for all entries in the range [sprod, eprod) to become valid */
static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
					 u32 sprod, u32 eprod)
{
	__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
}

/* Wait for the command queue to become non-full */
static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
					     struct arm_smmu_cmdq *cmdq,
					     struct arm_smmu_ll_queue *llq)
{
	unsigned long flags;
	struct arm_smmu_queue_poll qp;
	int ret = 0;

	/*
	 * Try to update our copy of cons by grabbing exclusive cmdq access. If
	 * that fails, spin until somebody else updates it for us.
	 */
	if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
		WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
		arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
		llq->val = READ_ONCE(cmdq->q.llq.val);
		return 0;
	}

	queue_poll_init(smmu, &qp);
	do {
		llq->val = READ_ONCE(cmdq->q.llq.val);
		if (!queue_full(llq))
			break;

		ret = queue_poll(&qp);
	} while (!ret);

	return ret;
}

/*
 * Wait until the SMMU signals a CMD_SYNC completion MSI.
 * Must be called with the cmdq lock held in some capacity.
 */
static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
					  struct arm_smmu_cmdq *cmdq,
					  struct arm_smmu_ll_queue *llq)
{
	int ret = 0;
	struct arm_smmu_queue_poll qp;
	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));

	queue_poll_init(smmu, &qp);

	/*
	 * The MSI won't generate an event, since it's being written back
	 * into the command queue.
	 */
	qp.wfe = false;
	smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp)));
	llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
	return ret;
}

/*
 * Wait until the SMMU cons index passes llq->prod.
 * Must be called with the cmdq lock held in some capacity.
 */
static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
					       struct arm_smmu_cmdq *cmdq,
					       struct arm_smmu_ll_queue *llq)
{
	struct arm_smmu_queue_poll qp;
	u32 prod = llq->prod;
	int ret = 0;

	queue_poll_init(smmu, &qp);
	llq->val = READ_ONCE(cmdq->q.llq.val);
	do {
		if (queue_consumed(llq, prod))
			break;

		ret = queue_poll(&qp);

		/*
		 * This needs to be a readl() so that our subsequent call
		 * to arm_smmu_cmdq_shared_tryunlock() can fail accurately.
		 *
		 * Specifically, we need to ensure that we observe all
		 * shared_lock()s by other CMD_SYNCs that share our owner,
		 * so that a failing call to tryunlock() means that we're
		 * the last one out and therefore we can safely advance
		 * cmdq->q.llq.cons. Roughly speaking:
		 *
		 * CPU 0		CPU1			CPU2 (us)
		 *
		 * if (sync)
		 *	shared_lock();
		 *
		 * dma_wmb();
		 * set_valid_map();
		 *
		 *			if (owner) {
		 *				poll_valid_map();
		 *				<control dependency>
		 *				writel(prod_reg);
		 *
		 *						readl(cons_reg);
		 *						tryunlock();
		 *
		 * Requires us to see CPU 0's shared_lock() acquisition.
		 */
		llq->cons = readl(cmdq->q.cons_reg);
	} while (!ret);

	return ret;
}

static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
					 struct arm_smmu_cmdq *cmdq,
					 struct arm_smmu_ll_queue *llq)
{
	if (smmu->options & ARM_SMMU_OPT_MSIPOLL &&
	    !arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
		return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);

	return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
}

static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
					u32 prod, int n)
{
	int i;
	struct arm_smmu_ll_queue llq = {
		.max_n_shift	= cmdq->q.llq.max_n_shift,
		.prod		= prod,
	};

	for (i = 0; i < n; ++i) {
		u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS];

		prod = queue_inc_prod_n(&llq, i);
		queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
	}
}

/*
 * This is the actual insertion function, and provides the following
 * ordering guarantees to callers:
 *
 * - There is a dma_wmb() before publishing any commands to the queue.
 *   This can be relied upon to order prior writes to data structures
 *   in memory (such as a CD or an STE) before the command.
 *
 * - On completion of a CMD_SYNC, there is a control dependency.
 *   This can be relied upon to order subsequent writes to memory (e.g.
 *   freeing an IOVA) after completion of the CMD_SYNC.
 *
 * - Command insertion is totally ordered, so if two CPUs each race to
 *   insert their own list of commands then all of the commands from one
 *   CPU will appear before any of the commands from the other CPU.
 */
int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
				struct arm_smmu_cmdq *cmdq, u64 *cmds, int n,
				bool sync)
{
	u64 cmd_sync[CMDQ_ENT_DWORDS];
	u32 prod;
	unsigned long flags;
	bool owner;
	struct arm_smmu_ll_queue llq, head;
	int ret = 0;

	llq.max_n_shift = cmdq->q.llq.max_n_shift;

	/* 1. Allocate some space in the queue */
	local_irq_save(flags);
	llq.val = READ_ONCE(cmdq->q.llq.val);
	do {
		u64 old;

		while (!queue_has_space(&llq, n + sync)) {
			local_irq_restore(flags);
			if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
				dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
			local_irq_save(flags);
		}

		head.cons = llq.cons;
		head.prod = queue_inc_prod_n(&llq, n + sync) |
			    CMDQ_PROD_OWNED_FLAG;

		old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
		if (old == llq.val)
			break;

		llq.val = old;
	} while (1);
	owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
	head.prod &= ~CMDQ_PROD_OWNED_FLAG;
	llq.prod &= ~CMDQ_PROD_OWNED_FLAG;

	/*
	 * 2. Write our commands into the queue
	 * Dependency ordering from the cmpxchg() loop above.
	 */
	arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
	if (sync) {
		prod = queue_inc_prod_n(&llq, n);
		arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, cmdq, prod);
		queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);

		/*
		 * In order to determine completion of our CMD_SYNC, we must
		 * ensure that the queue can't wrap twice without us noticing.
		 * We achieve that by taking the cmdq lock as shared before
		 * marking our slot as valid.
		 */
		arm_smmu_cmdq_shared_lock(cmdq);
	}

	/* 3. Mark our slots as valid, ensuring commands are visible first */
	dma_wmb();
	arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);

	/* 4. If we are the owner, take control of the SMMU hardware */
	if (owner) {
		/* a. Wait for previous owner to finish */
		atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);

		/* b. Stop gathering work by clearing the owned flag */
		prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG,
						   &cmdq->q.llq.atomic.prod);
		prod &= ~CMDQ_PROD_OWNED_FLAG;

		/*
		 * c. Wait for any gathered work to be written to the queue.
		 * Note that we read our own entries so that we have the control
		 * dependency required by (d).
		 */
		arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);

		/*
		 * d. Advance the hardware prod pointer
		 * Control dependency ordering from the entries becoming valid.
		 */
		writel_relaxed(prod, cmdq->q.prod_reg);

		/*
		 * e. Tell the next owner we're done
		 * Make sure we've updated the hardware first, so that we don't
		 * race to update prod and potentially move it backwards.
		 */
		atomic_set_release(&cmdq->owner_prod, prod);
	}

	/* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
	if (sync) {
		llq.prod = queue_inc_prod_n(&llq, n);
		ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
		if (ret) {
			dev_err_ratelimited(smmu->dev,
					    "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
					    llq.prod,
					    readl_relaxed(cmdq->q.prod_reg),
					    readl_relaxed(cmdq->q.cons_reg));
		}

		/*
		 * Try to unlock the cmdq lock. This will fail if we're the last
		 * reader, in which case we can safely update cmdq->q.llq.cons
		 */
		if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
			WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
			arm_smmu_cmdq_shared_unlock(cmdq);
		}
	}

	local_irq_restore(flags);
	return ret;
}

static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				     struct arm_smmu_cmdq_ent *ent,
				     bool sync)
{
	u64 cmd[CMDQ_ENT_DWORDS];

	if (unlikely(arm_smmu_cmdq_build_cmd(cmd, ent))) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return -EINVAL;
	}

	return arm_smmu_cmdq_issue_cmdlist(
		smmu, arm_smmu_get_cmdq(smmu, ent), cmd, 1, sync);
}

static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				   struct arm_smmu_cmdq_ent *ent)
{
	return __arm_smmu_cmdq_issue_cmd(smmu, ent, false);
}

static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
					     struct arm_smmu_cmdq_ent *ent)
{
	return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
}

static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
				     struct arm_smmu_cmdq_batch *cmds,
				     struct arm_smmu_cmdq_ent *ent)
{
	cmds->num = 0;
	cmds->cmdq = arm_smmu_get_cmdq(smmu, ent);
}

static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_batch *cmds,
				    struct arm_smmu_cmdq_ent *cmd)
{
	bool unsupported_cmd = !arm_smmu_cmdq_supports_cmd(cmds->cmdq, cmd);
	bool force_sync = (cmds->num == CMDQ_BATCH_ENTRIES - 1) &&
			  (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC);
	int index;

	if (force_sync || unsupported_cmd) {
		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
					    cmds->num, true);
		arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
	}

	if (cmds->num == CMDQ_BATCH_ENTRIES) {
		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
					    cmds->num, false);
		arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
	}

	index = cmds->num * CMDQ_ENT_DWORDS;
	if (unlikely(arm_smmu_cmdq_build_cmd(&cmds->cmds[index], cmd))) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 cmd->opcode);
		return;
	}

	cmds->num++;
}

static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
				      struct arm_smmu_cmdq_batch *cmds)
{
	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
					   cmds->num, true);
}
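/*
 * Typical batch usage (sketch, mirroring arm_smmu_sync_cd() below):
 * initialise a batch for a representative command, add one variant per
 * SID, then submit once; the submit issues the whole list with a
 * trailing CMD_SYNC:
 *
 *	struct arm_smmu_cmdq_batch cmds;
 *
 *	arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd);
 *	for (i = 0; i < master->num_streams; i++) {
 *		cmd.cfgi.sid = master->streams[i].id;
 *		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
 *	}
 *	arm_smmu_cmdq_batch_submit(smmu, &cmds);
 */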
static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
				   struct iommu_page_response *resp)
{
	struct arm_smmu_cmdq_ent cmd = {0};
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	int sid = master->streams[0].id;

	if (WARN_ON(!master->stall_enabled))
		return;

	cmd.opcode		= CMDQ_OP_RESUME;
	cmd.resume.sid		= sid;
	cmd.resume.stag		= resp->grpid;
	switch (resp->code) {
	case IOMMU_PAGE_RESP_INVALID:
	case IOMMU_PAGE_RESP_FAILURE:
		cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
		break;
	case IOMMU_PAGE_RESP_SUCCESS:
		cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
		break;
	default:
		break;
	}

	arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
	/*
	 * Don't send a SYNC, it doesn't do anything for RESUME or PRI_RESP.
	 * RESUME consumption guarantees that the stalled transaction will be
	 * terminated... at some point in the future. PRI_RESP is fire and
	 * forget.
	 */
}

/* Context descriptor manipulation functions */
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= smmu->features & ARM_SMMU_FEAT_E2H ?
			CMDQ_OP_TLBI_EL2_ASID : CMDQ_OP_TLBI_NH_ASID,
		.tlbi.asid = asid,
	};

	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}

/*
 * Based on the value of ent, report which bits of the STE the HW will access.
 * It would be nice if this was complete according to the spec, but minimally
 * it has to capture the bits this driver uses.
 */
VISIBLE_IF_KUNIT
void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
{
	unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent[0]));

	used_bits[0] = cpu_to_le64(STRTAB_STE_0_V);
	if (!(ent[0] & cpu_to_le64(STRTAB_STE_0_V)))
		return;

	used_bits[0] |= cpu_to_le64(STRTAB_STE_0_CFG);

	/* S1 translates */
	if (cfg & BIT(0)) {
		used_bits[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
					    STRTAB_STE_0_S1CTXPTR_MASK |
					    STRTAB_STE_0_S1CDMAX);
		used_bits[1] |=
			cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
				    STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
				    STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
				    STRTAB_STE_1_EATS);
		used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);

		/*
		 * See 13.5 Summary of attribute/permission configuration fields
		 * for the SHCFG behavior.
		 */
		if (FIELD_GET(STRTAB_STE_1_S1DSS, le64_to_cpu(ent[1])) ==
		    STRTAB_STE_1_S1DSS_BYPASS)
			used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
	}

	/* S2 translates */
	if (cfg & BIT(1)) {
		used_bits[1] |=
			cpu_to_le64(STRTAB_STE_1_S2FWB | STRTAB_STE_1_EATS |
				    STRTAB_STE_1_SHCFG);
		used_bits[2] |=
			cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
				    STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
				    STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2S |
				    STRTAB_STE_2_S2R);
		used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
	}

	if (cfg == STRTAB_STE_0_CFG_BYPASS)
		used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);

/*
 * Figure out if we can do a hitless update of entry to become target. Returns a
 * bit mask where 1 indicates that qword needs to be set disruptively.
 * unused_update is an intermediate value of entry that has unused bits set to
 * their new values.
 */
static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
				    const __le64 *entry, const __le64 *target,
				    __le64 *unused_update)
{
	__le64 target_used[NUM_ENTRY_QWORDS] = {};
	__le64 cur_used[NUM_ENTRY_QWORDS] = {};
	u8 used_qword_diff = 0;
	unsigned int i;

	writer->ops->get_used(entry, cur_used);
	writer->ops->get_used(target, target_used);

	for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
		/*
		 * Check that masks are up to date; the make functions are not
		 * allowed to set a bit to 1 if the used function doesn't say it
		 * is used.
		 */
		WARN_ON_ONCE(target[i] & ~target_used[i]);

		/* Bits can change because they are not currently being used */
		unused_update[i] = (entry[i] & cur_used[i]) |
				   (target[i] & ~cur_used[i]);
		/*
		 * Each bit indicates that a used bit in a qword needs to be
		 * changed after unused_update is applied.
		 */
		if ((unused_update[i] & target_used[i]) != target[i])
			used_qword_diff |= 1 << i;
	}
	return used_qword_diff;
}

static bool entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry,
		      const __le64 *target, unsigned int start,
		      unsigned int len)
{
	bool changed = false;
	unsigned int i;

	for (i = start; len != 0; len--, i++) {
		if (entry[i] != target[i]) {
			WRITE_ONCE(entry[i], target[i]);
			changed = true;
		}
	}

	if (changed)
		writer->ops->sync(writer);
	return changed;
}

/*
 * Update the STE/CD to the target configuration. The transition from the
 * current entry to the target entry takes place over multiple steps that
 * attempt to make the transition hitless if possible. This function takes care
 * not to create a situation where the HW can perceive a corrupted entry. HW is
 * only required to have 64 bit atomicity with stores from the CPU, while
 * entries are many 64 bit values big.
 *
 * The difference between the current value and the target value is analyzed to
 * determine which of three updates are required - disruptive, hitless or no
 * change.
 *
 * In the most general case we can make any update in three steps:
 *  - Disrupting the entry (V=0)
 *  - Fill now unused qwords, except qword 0 which contains V
 *  - Make qword 0 have the final value and valid (V=1) with a single 64
 *    bit store
 *
 * However this disrupts the HW while it is happening. There are several
 * interesting cases where a STE/CD can be updated without disturbing the HW
 * because only a small number of bits are changing (S1DSS, CONFIG, etc) or
 * because the used bits don't intersect. We can detect this by calculating how
 * many 64 bit values need update after adjusting the unused bits and skip the
 * V=0 process. This relies on the IGNORED behavior described in the
 * specification.
 */
VISIBLE_IF_KUNIT
void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
			  const __le64 *target)
{
	__le64 unused_update[NUM_ENTRY_QWORDS];
	u8 used_qword_diff;

	used_qword_diff =
		arm_smmu_entry_qword_diff(writer, entry, target, unused_update);
	if (hweight8(used_qword_diff) == 1) {
		/*
		 * Only one qword needs its used bits to be changed. This is a
		 * hitless update, update all bits the current STE/CD is
		 * ignoring to their new values, then update a single "critical
		 * qword" to change the STE/CD and finally 0 out any bits that
		 * are now unused in the target configuration.
		 */
		unsigned int critical_qword_index = ffs(used_qword_diff) - 1;

		/*
		 * Skip writing unused bits in the critical qword since we'll be
		 * writing it in the next step anyways. This can save a sync
		 * when the only change is in that qword.
		 */
		unused_update[critical_qword_index] =
			entry[critical_qword_index];
		entry_set(writer, entry, unused_update, 0, NUM_ENTRY_QWORDS);
		entry_set(writer, entry, target, critical_qword_index, 1);
		entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
	} else if (used_qword_diff) {
		/*
		 * At least two qwords need their in-use bits to be changed. This
		 * requires a breaking update: zero the V bit, write all qwords
		 * but 0, then set qword 0.
		 */
		unused_update[0] = 0;
		entry_set(writer, entry, unused_update, 0, 1);
		entry_set(writer, entry, target, 1, NUM_ENTRY_QWORDS - 1);
		entry_set(writer, entry, target, 0, 1);
	} else {
		/*
		 * No in-use bit changed. Sanity check that all unused bits are 0
		 * in the entry. The target was already sanity checked by
		 * arm_smmu_entry_qword_diff().
		 */
		WARN_ON_ONCE(
			entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
	}
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
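/*
 * Illustrative transitions (assumed, following the used-bit rules above):
 * flipping only S1DSS in a live STE confines the used-bit change to
 * qword 1, so used_qword_diff == BIT(1) and the update is hitless - one
 * 64-bit store to the critical qword. Switching an STE from bypass to S1
 * translation changes used bits in at least qword 0 and qword 1, so two
 * diff bits are set and the breaking V=0/V=1 sequence runs instead.
 */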
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
			     int ssid, bool leaf)
{
	size_t i;
	struct arm_smmu_cmdq_batch cmds;
	struct arm_smmu_device *smmu = master->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_CD,
		.cfgi	= {
			.ssid	= ssid,
			.leaf	= leaf,
		},
	};

	arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd);
	for (i = 0; i < master->num_streams; i++) {
		cmd.cfgi.sid = master->streams[i].id;
		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
	}

	arm_smmu_cmdq_batch_submit(smmu, &cmds);
}

static void arm_smmu_write_cd_l1_desc(struct arm_smmu_cdtab_l1 *dst,
				      dma_addr_t l2ptr_dma)
{
	u64 val = (l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | CTXDESC_L1_DESC_V;

	/* The HW has 64 bit atomicity with stores to the L2 CD table */
	WRITE_ONCE(dst->l2ptr, cpu_to_le64(val));
}

static dma_addr_t arm_smmu_cd_l1_get_desc(const struct arm_smmu_cdtab_l1 *src)
{
	return le64_to_cpu(src->l2ptr) & CTXDESC_L1_DESC_L2PTR_MASK;
}

struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
					u32 ssid)
{
	struct arm_smmu_cdtab_l2 *l2;
	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;

	if (!arm_smmu_cdtab_allocated(cd_table))
		return NULL;

	if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
		return &cd_table->linear.table[ssid];

	l2 = cd_table->l2.l2ptrs[arm_smmu_cdtab_l1_idx(ssid)];
	if (!l2)
		return NULL;
	return &l2->cds[arm_smmu_cdtab_l2_idx(ssid)];
}
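/*
 * Index-split example (illustrative, assuming CTXDESC_L2_ENTRIES == 1024):
 * ssid 0x1234 (4660) resolves to L1 index 4660 / 1024 == 4 and L2 index
 * 4660 % 1024 == 564, i.e. l2ptrs[4]->cds[564].
 */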
static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
						 u32 ssid)
{
	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
	struct arm_smmu_device *smmu = master->smmu;

	might_sleep();
	iommu_group_mutex_assert(master->dev);

	if (!arm_smmu_cdtab_allocated(cd_table)) {
		if (arm_smmu_alloc_cd_tables(master))
			return NULL;
	}

	if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
		unsigned int idx = arm_smmu_cdtab_l1_idx(ssid);
		struct arm_smmu_cdtab_l2 **l2ptr = &cd_table->l2.l2ptrs[idx];

		if (!*l2ptr) {
			dma_addr_t l2ptr_dma;

			*l2ptr = dma_alloc_coherent(smmu->dev, sizeof(**l2ptr),
						    &l2ptr_dma, GFP_KERNEL);
			if (!*l2ptr)
				return NULL;

			arm_smmu_write_cd_l1_desc(&cd_table->l2.l1tab[idx],
						  l2ptr_dma);
			/* An invalid L1CD can be cached */
			arm_smmu_sync_cd(master, ssid, false);
		}
	}
	return arm_smmu_get_cd_ptr(master, ssid);
}

struct arm_smmu_cd_writer {
	struct arm_smmu_entry_writer writer;
	unsigned int ssid;
};

VISIBLE_IF_KUNIT
void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits)
{
	used_bits[0] = cpu_to_le64(CTXDESC_CD_0_V);
	if (!(ent[0] & cpu_to_le64(CTXDESC_CD_0_V)))
		return;
	memset(used_bits, 0xFF, sizeof(struct arm_smmu_cd));

	/*
	 * If EPD0 is set by the make function it means
	 * T0SZ/TG0/IR0/OR0/SH0/TTB0 are IGNORED
	 */
	if (ent[0] & cpu_to_le64(CTXDESC_CD_0_TCR_EPD0)) {
		used_bits[0] &= ~cpu_to_le64(
			CTXDESC_CD_0_TCR_T0SZ | CTXDESC_CD_0_TCR_TG0 |
			CTXDESC_CD_0_TCR_IRGN0 | CTXDESC_CD_0_TCR_ORGN0 |
			CTXDESC_CD_0_TCR_SH0);
		used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
	}
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used);

static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer)
{
	struct arm_smmu_cd_writer *cd_writer =
		container_of(writer, struct arm_smmu_cd_writer, writer);

	arm_smmu_sync_cd(writer->master, cd_writer->ssid, true);
}

static const struct arm_smmu_entry_writer_ops arm_smmu_cd_writer_ops = {
	.sync = arm_smmu_cd_writer_sync_entry,
	.get_used = arm_smmu_get_cd_used,
};

void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
			     struct arm_smmu_cd *cdptr,
			     const struct arm_smmu_cd *target)
{
	bool target_valid = target->data[0] & cpu_to_le64(CTXDESC_CD_0_V);
	bool cur_valid = cdptr->data[0] & cpu_to_le64(CTXDESC_CD_0_V);
	struct arm_smmu_cd_writer cd_writer = {
		.writer = {
			.ops = &arm_smmu_cd_writer_ops,
			.master = master,
		},
		.ssid = ssid,
	};

	if (ssid != IOMMU_NO_PASID && cur_valid != target_valid) {
		if (cur_valid)
			master->cd_table.used_ssids--;
		else
			master->cd_table.used_ssids++;
	}

	arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data);
}

void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
			 struct arm_smmu_master *master,
			 struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
	const struct io_pgtable_cfg *pgtbl_cfg =
		&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr =
		&pgtbl_cfg->arm_lpae_s1_cfg.tcr;

	memset(target, 0, sizeof(*target));

	target->data[0] = cpu_to_le64(
		FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
		FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
		FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
		FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
		FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
#ifdef __BIG_ENDIAN
		CTXDESC_CD_0_ENDI |
#endif
		CTXDESC_CD_0_TCR_EPD1 |
		CTXDESC_CD_0_V |
		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
		CTXDESC_CD_0_AA64 |
		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
		CTXDESC_CD_0_R |
		CTXDESC_CD_0_A |
		CTXDESC_CD_0_ASET |
		FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
		);

	/* To enable dirty flag update, set both Access flag and dirty state update */
	if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_HD)
		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HA |
					       CTXDESC_CD_0_TCR_HD);

	target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
				      CTXDESC_CD_1_TTB0_MASK);
	target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd);

void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
{
	struct arm_smmu_cd target = {};
	struct arm_smmu_cd *cdptr;

	if (!arm_smmu_cdtab_allocated(&master->cd_table))
		return;
	cdptr = arm_smmu_get_cd_ptr(master, ssid);
	if (WARN_ON(!cdptr))
		return;
	arm_smmu_write_cd_entry(master, ssid, cdptr, &target);
}

static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
{
	int ret;
	size_t l1size;
	size_t max_contexts;
	struct arm_smmu_device *smmu = master->smmu;
	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;

	cd_table->s1cdmax = master->ssid_bits;
	max_contexts = 1 << cd_table->s1cdmax;

	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
	    max_contexts <= CTXDESC_L2_ENTRIES) {
		cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
		cd_table->linear.num_ents = max_contexts;

		l1size = max_contexts * sizeof(struct arm_smmu_cd);
		cd_table->linear.table = dma_alloc_coherent(smmu->dev, l1size,
							    &cd_table->cdtab_dma,
							    GFP_KERNEL);
		if (!cd_table->linear.table)
			return -ENOMEM;
	} else {
		cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
		cd_table->l2.num_l1_ents =
			DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES);

		cd_table->l2.l2ptrs = kcalloc(cd_table->l2.num_l1_ents,
					      sizeof(*cd_table->l2.l2ptrs),
					      GFP_KERNEL);
		if (!cd_table->l2.l2ptrs)
			return -ENOMEM;

		l1size = cd_table->l2.num_l1_ents * sizeof(struct arm_smmu_cdtab_l1);
		cd_table->l2.l1tab = dma_alloc_coherent(smmu->dev, l1size,
							&cd_table->cdtab_dma,
							GFP_KERNEL);
		/* Check the L1 table allocation itself, not l2ptrs again */
		if (!cd_table->l2.l1tab) {
			ret = -ENOMEM;
			goto err_free_l2ptrs;
		}
	}
	return 0;

err_free_l2ptrs:
	kfree(cd_table->l2.l2ptrs);
	cd_table->l2.l2ptrs = NULL;
	return ret;
}
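/*
 * Sizing example (illustrative, assuming CTXDESC_L2_ENTRIES == 1024 and
 * 64-byte CDs): ssid_bits == 10 gives 1024 contexts, which fits the
 * linear format in a single 64KiB table; ssid_bits == 16 gives 65536
 * contexts and the 2-level format with DIV_ROUND_UP(65536, 1024) == 64
 * L1 descriptors, each L2 table being allocated lazily on first use in
 * arm_smmu_alloc_cd_ptr().
 */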
static void arm_smmu_free_cd_tables(struct arm_smmu_master *master)
{
	int i;
	struct arm_smmu_device *smmu = master->smmu;
	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;

	if (cd_table->s1fmt != STRTAB_STE_0_S1FMT_LINEAR) {
		for (i = 0; i < cd_table->l2.num_l1_ents; i++) {
			if (!cd_table->l2.l2ptrs[i])
				continue;

			dma_free_coherent(smmu->dev,
					  sizeof(*cd_table->l2.l2ptrs[i]),
					  cd_table->l2.l2ptrs[i],
					  arm_smmu_cd_l1_get_desc(&cd_table->l2.l1tab[i]));
		}
		kfree(cd_table->l2.l2ptrs);

		dma_free_coherent(smmu->dev,
				  cd_table->l2.num_l1_ents *
					  sizeof(struct arm_smmu_cdtab_l1),
				  cd_table->l2.l1tab, cd_table->cdtab_dma);
	} else {
		dma_free_coherent(smmu->dev,
				  cd_table->linear.num_ents *
					  sizeof(struct arm_smmu_cd),
				  cd_table->linear.table, cd_table->cdtab_dma);
	}
}

/* Stream table manipulation functions */
static void arm_smmu_write_strtab_l1_desc(struct arm_smmu_strtab_l1 *dst,
					  dma_addr_t l2ptr_dma)
{
	u64 val = 0;

	val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, STRTAB_SPLIT + 1);
	val |= l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;

	/* The HW has 64 bit atomicity with stores to the L2 STE table */
	WRITE_ONCE(dst->l2ptr, cpu_to_le64(val));
}
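/*
 * SPAN encoding example (illustrative, assuming STRTAB_SPLIT == 8): each
 * L2 table covers 2^8 == 256 STEs, and the descriptor's SPAN field is
 * written as STRTAB_SPLIT + 1 == 9, encoding a span of 2^(9-1) entries.
 */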
struct arm_smmu_ste_writer {
	struct arm_smmu_entry_writer writer;
	u32 sid;
};

static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
{
	struct arm_smmu_ste_writer *ste_writer =
		container_of(writer, struct arm_smmu_ste_writer, writer);
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= ste_writer->sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd);
}

static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
	.sync = arm_smmu_ste_writer_sync_entry,
	.get_used = arm_smmu_get_ste_used,
};

static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
			       struct arm_smmu_ste *ste,
			       const struct arm_smmu_ste *target)
{
	struct arm_smmu_device *smmu = master->smmu;
	struct arm_smmu_ste_writer ste_writer = {
		.writer = {
			.ops = &arm_smmu_ste_writer_ops,
			.master = master,
		},
		.sid = sid,
	};

	arm_smmu_write_entry(&ste_writer.writer, ste->data, target->data);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
		struct arm_smmu_cmdq_ent
			prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
					 .prefetch = {
						 .sid = sid,
					 } };

		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
	}
}

void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
	memset(target, 0, sizeof(*target));
	target->data[0] = cpu_to_le64(
		STRTAB_STE_0_V |
		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_abort_ste);

VISIBLE_IF_KUNIT
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
			      struct arm_smmu_ste *target)
{
	memset(target, 0, sizeof(*target));
	target->data[0] = cpu_to_le64(
		STRTAB_STE_0_V |
		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));

	if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
		target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
							 STRTAB_STE_1_SHCFG_INCOMING));
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste);

VISIBLE_IF_KUNIT
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
			       struct arm_smmu_master *master, bool ats_enabled,
			       unsigned int s1dss)
{
	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
	struct arm_smmu_device *smmu = master->smmu;

	memset(target, 0, sizeof(*target));
	target->data[0] = cpu_to_le64(
		STRTAB_STE_0_V |
		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
		FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
		(cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
		FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));

	target->data[1] = cpu_to_le64(
		FIELD_PREP(STRTAB_STE_1_S1DSS, s1dss) |
		FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
		FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
		FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
		((smmu->features & ARM_SMMU_FEAT_STALLS &&
		  !master->stall_enabled) ?
			 STRTAB_STE_1_S1STALLD :
			 0) |
		FIELD_PREP(STRTAB_STE_1_EATS,
			   ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));

	if ((smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) &&
	    s1dss == STRTAB_STE_1_S1DSS_BYPASS)
		target->data[1] |= cpu_to_le64(FIELD_PREP(
			STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));

	if (smmu->features & ARM_SMMU_FEAT_E2H) {
		/*
		 * To support BTM the streamworld needs to match the
		 * configuration of the CPU so that the ASID broadcasts are
		 * properly matched. This means either S/NS-EL2-E2H (hypervisor)
		 * or NS-EL1 (guest). Since an SVA domain can be installed in a
		 * PASID this should always use a BTM compatible configuration
		 * if the HW supports it.
		 */
		target->data[1] |= cpu_to_le64(
			FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2));
	} else {
		target->data[1] |= cpu_to_le64(
			FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));

		/*
		 * VMID 0 is reserved for stage-2 bypass EL1 STEs, see
		 * arm_smmu_domain_alloc_id()
		 */
		target->data[2] =
			cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
	}
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste);

void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
				 struct arm_smmu_master *master,
				 struct arm_smmu_domain *smmu_domain,
				 bool ats_enabled)
{
	struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
	const struct io_pgtable_cfg *pgtbl_cfg =
		&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
		&pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	u64 vtcr_val;
	struct arm_smmu_device *smmu = master->smmu;

	memset(target, 0, sizeof(*target));
	target->data[0] = cpu_to_le64(
		STRTAB_STE_0_V |
		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS));

	target->data[1] = cpu_to_le64(
		FIELD_PREP(STRTAB_STE_1_EATS,
			   ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));

	if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_S2FWB)
		target->data[1] |= cpu_to_le64(STRTAB_STE_1_S2FWB);
	if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
		target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
							  STRTAB_STE_1_SHCFG_INCOMING));

	vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
		   FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
		   FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
		   FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
		   FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
		   FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
		   FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
	target->data[2] = cpu_to_le64(
		FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
		FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) |
		STRTAB_STE_2_S2AA64 |
#ifdef __BIG_ENDIAN
		STRTAB_STE_2_S2ENDI |
#endif
		STRTAB_STE_2_S2PTW |
		(master->stall_enabled ? STRTAB_STE_2_S2S : 0) |
		STRTAB_STE_2_S2R);

	target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
				      STRTAB_STE_3_S2TTB_MASK);
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s2_domain_ste);

/*
 * This can safely directly manipulate the STE memory without a sync sequence
 * because the STE table has not been installed in the SMMU yet.
 */
static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
				       unsigned int nent)
{
	unsigned int i;

	for (i = 0; i < nent; ++i) {
		arm_smmu_make_abort_ste(strtab);
		strtab++;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	dma_addr_t l2ptr_dma;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l2 **l2table;

	l2table = &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)];
	if (*l2table)
		return 0;

	*l2table = dmam_alloc_coherent(smmu->dev, sizeof(**l2table),
				       &l2ptr_dma, GFP_KERNEL);
	if (!*l2table) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_initial_stes((*l2table)->stes,
				   ARRAY_SIZE((*l2table)->stes));
	arm_smmu_write_strtab_l1_desc(&cfg->l2.l1tab[arm_smmu_strtab_l1_idx(sid)],
				      l2ptr_dma);
	return 0;
}

static int arm_smmu_streams_cmp_key(const void *lhs, const struct rb_node *rhs)
{
	struct arm_smmu_stream *stream_rhs =
		rb_entry(rhs, struct arm_smmu_stream, node);
	const u32 *sid_lhs = lhs;

	if (*sid_lhs < stream_rhs->id)
		return -1;
	if (*sid_lhs > stream_rhs->id)
		return 1;
	return 0;
}

static int arm_smmu_streams_cmp_node(struct rb_node *lhs,
				     const struct rb_node *rhs)
{
	return arm_smmu_streams_cmp_key(
		&rb_entry(lhs, struct arm_smmu_stream, node)->id, rhs);
}

static struct arm_smmu_master *
arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
{
	struct rb_node *node;

	lockdep_assert_held(&smmu->streams_mutex);

	node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key);
	if (!node)
		return NULL;
	return rb_entry(node, struct arm_smmu_stream, node)->master;
}

/* IRQ and event handlers */
static void arm_smmu_decode_event(struct arm_smmu_device *smmu, u64 *raw,
				  struct arm_smmu_event *event)
{
	struct arm_smmu_master *master;

	event->id = FIELD_GET(EVTQ_0_ID, raw[0]);
	event->sid = FIELD_GET(EVTQ_0_SID, raw[0]);
	event->ssv = FIELD_GET(EVTQ_0_SSV, raw[0]);
	event->ssid = event->ssv ? FIELD_GET(EVTQ_0_SSID, raw[0]) : IOMMU_NO_PASID;
	event->privileged = FIELD_GET(EVTQ_1_PnU, raw[1]);
	event->instruction = FIELD_GET(EVTQ_1_InD, raw[1]);
	event->s2 = FIELD_GET(EVTQ_1_S2, raw[1]);
	event->read = FIELD_GET(EVTQ_1_RnW, raw[1]);
	event->stag = FIELD_GET(EVTQ_1_STAG, raw[1]);
	event->stall = FIELD_GET(EVTQ_1_STALL, raw[1]);
	event->class = FIELD_GET(EVTQ_1_CLASS, raw[1]);
	event->iova = FIELD_GET(EVTQ_2_ADDR, raw[2]);
	event->ipa = raw[3] & EVTQ_3_IPA;
	event->fetch_addr = raw[3] & EVTQ_3_FETCH_ADDR;
	event->ttrnw = FIELD_GET(EVTQ_1_TT_READ, raw[1]);
	event->class_tt = false;
	event->dev = NULL;

	if (event->id == EVT_ID_PERMISSION_FAULT)
		event->class_tt = (event->class == EVTQ_1_CLASS_TT);

	mutex_lock(&smmu->streams_mutex);
	master = arm_smmu_find_master(smmu, event->sid);
	if (master)
		event->dev = get_device(master->dev);
	mutex_unlock(&smmu->streams_mutex);
}

static int arm_smmu_handle_event(struct arm_smmu_device *smmu,
				 struct arm_smmu_event *event)
{
	int ret = 0;
	u32 perm = 0;
	struct arm_smmu_master *master;
	struct iopf_fault fault_evt = { };
	struct iommu_fault *flt = &fault_evt.fault;

	switch (event->id) {
	case EVT_ID_TRANSLATION_FAULT:
	case EVT_ID_ADDR_SIZE_FAULT:
	case EVT_ID_ACCESS_FAULT:
	case EVT_ID_PERMISSION_FAULT:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!event->stall)
		return -EOPNOTSUPP;

	if (event->read)
		perm |= IOMMU_FAULT_PERM_READ;
	else
		perm |= IOMMU_FAULT_PERM_WRITE;

	if (event->instruction)
		perm |= IOMMU_FAULT_PERM_EXEC;

	if (event->privileged)
		perm |= IOMMU_FAULT_PERM_PRIV;

	flt->type = IOMMU_FAULT_PAGE_REQ;
	flt->prm = (struct iommu_fault_page_request) {
		.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
		.grpid = event->stag,
		.perm = perm,
		.addr = event->iova,
	};

	if (event->ssv) {
		flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
		flt->prm.pasid = event->ssid;
	}

	mutex_lock(&smmu->streams_mutex);
	master = arm_smmu_find_master(smmu, event->sid);
	if (!master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = iommu_report_device_fault(master->dev, &fault_evt);
out_unlock:
	mutex_unlock(&smmu->streams_mutex);
	return ret;
}
dev_name((e)->dev) : "(unassigned sid)" 1889 1890 static void arm_smmu_dump_event(struct arm_smmu_device *smmu, u64 *raw, 1891 struct arm_smmu_event *evt, 1892 struct ratelimit_state *rs) 1893 { 1894 if (!__ratelimit(rs)) 1895 return; 1896 1897 arm_smmu_dump_raw_event(smmu, raw, evt); 1898 1899 switch (evt->id) { 1900 case EVT_ID_TRANSLATION_FAULT: 1901 case EVT_ID_ADDR_SIZE_FAULT: 1902 case EVT_ID_ACCESS_FAULT: 1903 case EVT_ID_PERMISSION_FAULT: 1904 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x iova: %#llx ipa: %#llx", 1905 ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt), 1906 evt->sid, evt->ssid, evt->iova, evt->ipa); 1907 1908 dev_err(smmu->dev, "%s %s %s %s \"%s\"%s%s stag: %#x", 1909 evt->privileged ? "priv" : "unpriv", 1910 evt->instruction ? "inst" : "data", 1911 str_read_write(evt->read), 1912 evt->s2 ? "s2" : "s1", event_class_str[evt->class], 1913 evt->class_tt ? (evt->ttrnw ? " ttd_read" : " ttd_write") : "", 1914 evt->stall ? " stall" : "", evt->stag); 1915 1916 break; 1917 1918 case EVT_ID_STE_FETCH_FAULT: 1919 case EVT_ID_CD_FETCH_FAULT: 1920 case EVT_ID_VMS_FETCH_FAULT: 1921 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x fetch_addr: %#llx", 1922 ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt), 1923 evt->sid, evt->ssid, evt->fetch_addr); 1924 1925 break; 1926 1927 default: 1928 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x", 1929 ARM_SMMU_LOG_EVT_STR(evt), ARM_SMMU_LOG_CLIENT(evt), 1930 evt->sid, evt->ssid); 1931 } 1932 } 1933 1934 static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) 1935 { 1936 u64 evt[EVTQ_ENT_DWORDS]; 1937 struct arm_smmu_event event = {0}; 1938 struct arm_smmu_device *smmu = dev; 1939 struct arm_smmu_queue *q = &smmu->evtq.q; 1940 struct arm_smmu_ll_queue *llq = &q->llq; 1941 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, 1942 DEFAULT_RATELIMIT_BURST); 1943 1944 do { 1945 while (!queue_remove_raw(q, evt)) { 1946 arm_smmu_decode_event(smmu, evt, &event); 1947 if (arm_smmu_handle_event(smmu, &event)) 1948 arm_smmu_dump_event(smmu, evt, &event, &rs); 1949 1950 put_device(event.dev); 1951 cond_resched(); 1952 } 1953 1954 /* 1955 * Not much we can do on overflow, so scream and pretend we're 1956 * trying harder. 1957 */ 1958 if (queue_sync_prod_in(q) == -EOVERFLOW) 1959 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); 1960 } while (!queue_empty(llq)); 1961 1962 /* Sync our overflow flag, as we believe we're up to speed */ 1963 queue_sync_cons_ovf(q); 1964 return IRQ_HANDLED; 1965 } 1966 1967 static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt) 1968 { 1969 u32 sid, ssid; 1970 u16 grpid; 1971 bool ssv, last; 1972 1973 sid = FIELD_GET(PRIQ_0_SID, evt[0]); 1974 ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]); 1975 ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : IOMMU_NO_PASID; 1976 last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]); 1977 grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]); 1978 1979 dev_info(smmu->dev, "unexpected PRI request received:\n"); 1980 dev_info(smmu->dev, 1981 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n", 1982 sid, ssid, grpid, last ? "L" : "", 1983 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un", 1984 evt[0] & PRIQ_0_PERM_READ ? "R" : "", 1985 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "", 1986 evt[0] & PRIQ_0_PERM_EXEC ? 
"X" : "", 1987 evt[1] & PRIQ_1_ADDR_MASK); 1988 1989 if (last) { 1990 struct arm_smmu_cmdq_ent cmd = { 1991 .opcode = CMDQ_OP_PRI_RESP, 1992 .substream_valid = ssv, 1993 .pri = { 1994 .sid = sid, 1995 .ssid = ssid, 1996 .grpid = grpid, 1997 .resp = PRI_RESP_DENY, 1998 }, 1999 }; 2000 2001 arm_smmu_cmdq_issue_cmd(smmu, &cmd); 2002 } 2003 } 2004 2005 static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) 2006 { 2007 struct arm_smmu_device *smmu = dev; 2008 struct arm_smmu_queue *q = &smmu->priq.q; 2009 struct arm_smmu_ll_queue *llq = &q->llq; 2010 u64 evt[PRIQ_ENT_DWORDS]; 2011 2012 do { 2013 while (!queue_remove_raw(q, evt)) 2014 arm_smmu_handle_ppr(smmu, evt); 2015 2016 if (queue_sync_prod_in(q) == -EOVERFLOW) 2017 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); 2018 } while (!queue_empty(llq)); 2019 2020 /* Sync our overflow flag, as we believe we're up to speed */ 2021 queue_sync_cons_ovf(q); 2022 return IRQ_HANDLED; 2023 } 2024 2025 static int arm_smmu_device_disable(struct arm_smmu_device *smmu); 2026 2027 static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) 2028 { 2029 u32 gerror, gerrorn, active; 2030 struct arm_smmu_device *smmu = dev; 2031 2032 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR); 2033 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN); 2034 2035 active = gerror ^ gerrorn; 2036 if (!(active & GERROR_ERR_MASK)) 2037 return IRQ_NONE; /* No errors pending */ 2038 2039 dev_warn(smmu->dev, 2040 "unexpected global error reported (0x%08x), this could be serious\n", 2041 active); 2042 2043 if (active & GERROR_SFM_ERR) { 2044 dev_err(smmu->dev, "device has entered Service Failure Mode!\n"); 2045 arm_smmu_device_disable(smmu); 2046 } 2047 2048 if (active & GERROR_MSI_GERROR_ABT_ERR) 2049 dev_warn(smmu->dev, "GERROR MSI write aborted\n"); 2050 2051 if (active & GERROR_MSI_PRIQ_ABT_ERR) 2052 dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); 2053 2054 if (active & GERROR_MSI_EVTQ_ABT_ERR) 2055 dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); 2056 2057 if (active & GERROR_MSI_CMDQ_ABT_ERR) 2058 dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); 2059 2060 if (active & GERROR_PRIQ_ABT_ERR) 2061 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n"); 2062 2063 if (active & GERROR_EVTQ_ABT_ERR) 2064 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n"); 2065 2066 if (active & GERROR_CMDQ_ERR) 2067 arm_smmu_cmdq_skip_err(smmu); 2068 2069 writel(gerror, smmu->base + ARM_SMMU_GERRORN); 2070 return IRQ_HANDLED; 2071 } 2072 2073 static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev) 2074 { 2075 struct arm_smmu_device *smmu = dev; 2076 2077 arm_smmu_evtq_thread(irq, dev); 2078 if (smmu->features & ARM_SMMU_FEAT_PRI) 2079 arm_smmu_priq_thread(irq, dev); 2080 2081 return IRQ_HANDLED; 2082 } 2083 2084 static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev) 2085 { 2086 arm_smmu_gerror_handler(irq, dev); 2087 return IRQ_WAKE_THREAD; 2088 } 2089 2090 static void 2091 arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, 2092 struct arm_smmu_cmdq_ent *cmd) 2093 { 2094 size_t log2_span; 2095 size_t span_mask; 2096 /* ATC invalidates are always on 4096-bytes pages */ 2097 size_t inval_grain_shift = 12; 2098 unsigned long page_start, page_end; 2099 2100 /* 2101 * ATS and PASID: 2102 * 2103 * If substream_valid is clear, the PCIe TLP is sent without a PASID 2104 * prefix. 
	 * In that case all ATC entries within the address range are
	 * invalidated, including those that were requested with a PASID! There
	 * is no way to invalidate only entries without PASID.
	 *
	 * When using STRTAB_STE_1_S1DSS_SSID0 (reserving CD 0 for non-PASID
	 * traffic), translation requests without PASID create ATC entries
	 * without PASID, which must be invalidated with substream_valid clear.
	 * This has the unpleasant side-effect of invalidating all PASID-tagged
	 * ATC entries within the address range.
	 */
	*cmd = (struct arm_smmu_cmdq_ent) {
		.opcode			= CMDQ_OP_ATC_INV,
		.substream_valid	= (ssid != IOMMU_NO_PASID),
		.atc.ssid		= ssid,
	};

	if (!size) {
		cmd->atc.size = ATC_INV_SIZE_ALL;
		return;
	}

	page_start	= iova >> inval_grain_shift;
	page_end	= (iova + size - 1) >> inval_grain_shift;

	/*
	 * In an ATS Invalidate Request, the address must be aligned on the
	 * range size, which must be a power of two number of page sizes. We
	 * thus have to choose between grossly over-invalidating the region, or
	 * splitting the invalidation into multiple commands. For simplicity
	 * we'll go with the first solution, but should refine it in the future
	 * if multiple commands are shown to be more efficient.
	 *
	 * Find the smallest power of two that covers the range. The most
	 * significant differing bit between the start and end addresses,
	 * fls(start ^ end), indicates the required span. For example:
	 *
	 * We want to invalidate pages [8; 11]. This is already the ideal range:
	 *		x = 0b1000 ^ 0b1011 = 0b11
	 *		span = 1 << fls(x) = 4
	 *
	 * To invalidate pages [7; 10], we need to invalidate [0; 15]:
	 *		x = 0b0111 ^ 0b1010 = 0b1101
	 *		span = 1 << fls(x) = 16
	 */
	log2_span	= fls_long(page_start ^ page_end);
	span_mask	= (1ULL << log2_span) - 1;

	page_start	&= ~span_mask;

	cmd->atc.addr	= page_start << inval_grain_shift;
	cmd->atc.size	= log2_span;
}
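/*
 * Worked example of the encoding above (illustrative numbers only):
 * invalidating 16KiB at IOVA 0x7000 gives pages [7; 10], which rounds up
 * to the 16-page span [0; 15], so the single command carries
 * cmd->atc.addr == 0x0 and cmd->atc.size == 4, i.e. 2^4 * 4KiB = 64KiB
 * invalidated, trading over-invalidation for a single request.
 */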
static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
				   ioasid_t ssid)
{
	int i;
	struct arm_smmu_cmdq_ent cmd;
	struct arm_smmu_cmdq_batch cmds;

	arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);

	arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd);
	for (i = 0; i < master->num_streams; i++) {
		cmd.atc.sid = master->streams[i].id;
		arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
	}

	return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
}

int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
			    unsigned long iova, size_t size)
{
	struct arm_smmu_master_domain *master_domain;
	int i;
	unsigned long flags;
	struct arm_smmu_cmdq_ent cmd = {
		.opcode = CMDQ_OP_ATC_INV,
	};
	struct arm_smmu_cmdq_batch cmds;

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
		return 0;

	/*
	 * Ensure that we've completed prior invalidation of the main TLBs
	 * before we read 'nr_ats_masters' in case of a concurrent call to
	 * arm_smmu_enable_ats():
	 *
	 *	// unmap()			// arm_smmu_enable_ats()
	 *	TLBI+SYNC			atomic_inc(&nr_ats_masters);
	 *	smp_mb();			[...]
	 *	atomic_read(&nr_ats_masters);	pci_enable_ats() // writel()
	 *
	 * Ensures that we always see the incremented 'nr_ats_masters' count if
	 * ATS was enabled at the PCI device before completion of the TLBI.
	 */
	smp_mb();
	if (!atomic_read(&smmu_domain->nr_ats_masters))
		return 0;

	arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd);

	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master_domain, &smmu_domain->devices,
			    devices_elm) {
		struct arm_smmu_master *master = master_domain->master;

		if (!master->ats_enabled)
			continue;

		if (master_domain->nested_ats_flush) {
			/*
			 * If a S2 used as a nesting parent is changed we have
			 * no option but to completely flush the ATC.
			 */
			arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd);
		} else {
			arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size,
						&cmd);
		}

		for (i = 0; i < master->num_streams; i++) {
			cmd.atc.sid = master->streams[i].id;
			arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
		}
	}
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);

	return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
}

/* IO_PGTABLE API */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	/*
	 * NOTE: when io-pgtable is in non-strict mode, we may get here with
	 * PTEs previously cleared by unmaps on the current CPU not yet visible
	 * to the SMMU. We are relying on the dma_wmb() implicit during cmd
	 * insertion to guarantee those are observed before the TLBI. Do be
	 * careful, 007.
	 */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid);
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
	}
	arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
}

static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
				     unsigned long iova, size_t size,
				     size_t granule,
				     struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long end = iova + size, num_pages = 0, tg = 0;
	size_t inv_range = granule;
	struct arm_smmu_cmdq_batch cmds;

	if (!size)
		return;

	if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
		/* Get the leaf page size */
		tg = __ffs(smmu_domain->domain.pgsize_bitmap);

		num_pages = size >> tg;

		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
		cmd->tlbi.tg = (tg - 10) / 2;

		/*
		 * Determine what level the granule is at. For non-leaf, both
		 * io-pgtable and SVA pass a nominal last-level granule because
		 * they don't know what level(s) actually apply, so ignore that
		 * and leave TTL=0. However for various errata reasons we still
		 * want to use a range command, so avoid the SVA corner case
		 * where both scale and num could be 0 as well.
		 */
		if (cmd->tlbi.leaf)
			cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
		else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
			num_pages++;
	}

	arm_smmu_cmdq_batch_init(smmu, &cmds, cmd);

	while (iova < end) {
		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
			/*
			 * On each iteration of the loop, the range is 5 bits
			 * worth of the aligned size remaining.
			 * The range in pages is:
			 *
			 * range = (num_pages & (0x1f << __ffs(num_pages)))
			 */
			unsigned long scale, num;

			/* Determine the power of 2 multiple number of pages */
			scale = __ffs(num_pages);
			cmd->tlbi.scale = scale;

			/* Determine how many chunks of 2^scale size we have */
			num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
			cmd->tlbi.num = num - 1;

			/* range is num * 2^scale * pgsize */
			inv_range = num << (scale + tg);

			/* Clear out the lower order bits for the next iteration */
			num_pages -= num << scale;
		}

		cmd->tlbi.addr = iova;
		arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
		iova += inv_range;
	}
	arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
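/*
 * Worked example of the decomposition above (illustrative numbers only):
 * with 4KiB pages (tg = 12) and num_pages = 33 (0b100001), the loop emits
 * two range commands:
 *
 *	1st: scale = __ffs(33) = 0, num = (33 >> 0) & 0x1f = 1
 *	     ->  1 * 2^0 pages =   4KiB invalidated, num_pages = 32
 *	2nd: scale = __ffs(32) = 5, num = (32 >> 5) & 0x1f = 1
 *	     ->  1 * 2^5 pages = 128KiB invalidated, num_pages = 0
 *
 * i.e. each command covers the next set bits of num_pages (the field
 * itself holds num - 1), never more than 31 chunks of 2^scale pages at a
 * time.
 */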
static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
					  size_t granule, bool leaf,
					  struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
				  CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}
	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);

	if (smmu_domain->nest_parent) {
		/*
		 * When the S2 domain changes all the nested S1 ASIDs have to be
		 * flushed too.
		 */
		cmd.opcode = CMDQ_OP_TLBI_NH_ALL;
		arm_smmu_cmdq_issue_cmd_with_sync(smmu_domain->smmu, &cmd);
	}

	/*
	 * Unfortunately, this can't be leaf-only since we may have
	 * zapped an entire table.
	 */
	arm_smmu_atc_inv_domain(smmu_domain, iova, size);
}

void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
				 size_t granule, bool leaf,
				 struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
2374 CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA, 2375 .tlbi = { 2376 .asid = asid, 2377 .leaf = leaf, 2378 }, 2379 }; 2380 2381 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); 2382 } 2383 2384 static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather, 2385 unsigned long iova, size_t granule, 2386 void *cookie) 2387 { 2388 struct arm_smmu_domain *smmu_domain = cookie; 2389 struct iommu_domain *domain = &smmu_domain->domain; 2390 2391 iommu_iotlb_gather_add_page(domain, gather, iova, granule); 2392 } 2393 2394 static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, 2395 size_t granule, void *cookie) 2396 { 2397 arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie); 2398 } 2399 2400 static const struct iommu_flush_ops arm_smmu_flush_ops = { 2401 .tlb_flush_all = arm_smmu_tlb_inv_context, 2402 .tlb_flush_walk = arm_smmu_tlb_inv_walk, 2403 .tlb_add_page = arm_smmu_tlb_inv_page_nosync, 2404 }; 2405 2406 static bool arm_smmu_dbm_capable(struct arm_smmu_device *smmu) 2407 { 2408 u32 features = (ARM_SMMU_FEAT_HD | ARM_SMMU_FEAT_COHERENCY); 2409 2410 return (smmu->features & features) == features; 2411 } 2412 2413 /* IOMMU API */ 2414 static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap) 2415 { 2416 struct arm_smmu_master *master = dev_iommu_priv_get(dev); 2417 2418 switch (cap) { 2419 case IOMMU_CAP_CACHE_COHERENCY: 2420 /* Assume that a coherent TCU implies coherent TBUs */ 2421 return master->smmu->features & ARM_SMMU_FEAT_COHERENCY; 2422 case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: 2423 return arm_smmu_master_canwbs(master); 2424 case IOMMU_CAP_NOEXEC: 2425 case IOMMU_CAP_DEFERRED_FLUSH: 2426 return true; 2427 case IOMMU_CAP_DIRTY_TRACKING: 2428 return arm_smmu_dbm_capable(master->smmu); 2429 default: 2430 return false; 2431 } 2432 } 2433 2434 static bool arm_smmu_enforce_cache_coherency(struct iommu_domain *domain) 2435 { 2436 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 2437 struct arm_smmu_master_domain *master_domain; 2438 unsigned long flags; 2439 bool ret = true; 2440 2441 spin_lock_irqsave(&smmu_domain->devices_lock, flags); 2442 list_for_each_entry(master_domain, &smmu_domain->devices, 2443 devices_elm) { 2444 if (!arm_smmu_master_canwbs(master_domain->master)) { 2445 ret = false; 2446 break; 2447 } 2448 } 2449 smmu_domain->enforce_cache_coherency = ret; 2450 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); 2451 return ret; 2452 } 2453 2454 struct arm_smmu_domain *arm_smmu_domain_alloc(void) 2455 { 2456 struct arm_smmu_domain *smmu_domain; 2457 2458 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); 2459 if (!smmu_domain) 2460 return ERR_PTR(-ENOMEM); 2461 2462 INIT_LIST_HEAD(&smmu_domain->devices); 2463 spin_lock_init(&smmu_domain->devices_lock); 2464 2465 return smmu_domain; 2466 } 2467 2468 static void arm_smmu_domain_free_paging(struct iommu_domain *domain) 2469 { 2470 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 2471 struct arm_smmu_device *smmu = smmu_domain->smmu; 2472 2473 free_io_pgtable_ops(smmu_domain->pgtbl_ops); 2474 2475 /* Free the ASID or VMID */ 2476 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { 2477 /* Prevent SVA from touching the CD while we're freeing it */ 2478 mutex_lock(&arm_smmu_asid_lock); 2479 xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); 2480 mutex_unlock(&arm_smmu_asid_lock); 2481 } else { 2482 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; 2483 if (cfg->vmid) 2484 ida_free(&smmu->vmid_map, cfg->vmid); 2485 } 2486 2487 
kfree(smmu_domain); 2488 } 2489 2490 static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu, 2491 struct arm_smmu_domain *smmu_domain) 2492 { 2493 int ret; 2494 u32 asid = 0; 2495 struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; 2496 2497 /* Prevent SVA from modifying the ASID until it is written to the CD */ 2498 mutex_lock(&arm_smmu_asid_lock); 2499 ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain, 2500 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); 2501 cd->asid = (u16)asid; 2502 mutex_unlock(&arm_smmu_asid_lock); 2503 return ret; 2504 } 2505 2506 static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu, 2507 struct arm_smmu_domain *smmu_domain) 2508 { 2509 int vmid; 2510 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; 2511 2512 /* Reserve VMID 0 for stage-2 bypass STEs */ 2513 vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1, 2514 GFP_KERNEL); 2515 if (vmid < 0) 2516 return vmid; 2517 2518 cfg->vmid = (u16)vmid; 2519 return 0; 2520 } 2521 2522 static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, 2523 struct arm_smmu_device *smmu, u32 flags) 2524 { 2525 int ret; 2526 enum io_pgtable_fmt fmt; 2527 struct io_pgtable_cfg pgtbl_cfg; 2528 struct io_pgtable_ops *pgtbl_ops; 2529 int (*finalise_stage_fn)(struct arm_smmu_device *smmu, 2530 struct arm_smmu_domain *smmu_domain); 2531 bool enable_dirty = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; 2532 2533 pgtbl_cfg = (struct io_pgtable_cfg) { 2534 .pgsize_bitmap = smmu->pgsize_bitmap, 2535 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, 2536 .tlb = &arm_smmu_flush_ops, 2537 .iommu_dev = smmu->dev, 2538 }; 2539 2540 switch (smmu_domain->stage) { 2541 case ARM_SMMU_DOMAIN_S1: { 2542 unsigned long ias = (smmu->features & 2543 ARM_SMMU_FEAT_VAX) ? 
52 : 48; 2544 2545 pgtbl_cfg.ias = min_t(unsigned long, ias, VA_BITS); 2546 pgtbl_cfg.oas = smmu->ias; 2547 if (enable_dirty) 2548 pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_ARM_HD; 2549 fmt = ARM_64_LPAE_S1; 2550 finalise_stage_fn = arm_smmu_domain_finalise_s1; 2551 break; 2552 } 2553 case ARM_SMMU_DOMAIN_S2: 2554 if (enable_dirty) 2555 return -EOPNOTSUPP; 2556 pgtbl_cfg.ias = smmu->ias; 2557 pgtbl_cfg.oas = smmu->oas; 2558 fmt = ARM_64_LPAE_S2; 2559 finalise_stage_fn = arm_smmu_domain_finalise_s2; 2560 if ((smmu->features & ARM_SMMU_FEAT_S2FWB) && 2561 (flags & IOMMU_HWPT_ALLOC_NEST_PARENT)) 2562 pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_ARM_S2FWB; 2563 break; 2564 default: 2565 return -EINVAL; 2566 } 2567 2568 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); 2569 if (!pgtbl_ops) 2570 return -ENOMEM; 2571 2572 smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; 2573 smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; 2574 smmu_domain->domain.geometry.force_aperture = true; 2575 if (enable_dirty && smmu_domain->stage == ARM_SMMU_DOMAIN_S1) 2576 smmu_domain->domain.dirty_ops = &arm_smmu_dirty_ops; 2577 2578 ret = finalise_stage_fn(smmu, smmu_domain); 2579 if (ret < 0) { 2580 free_io_pgtable_ops(pgtbl_ops); 2581 return ret; 2582 } 2583 2584 smmu_domain->pgtbl_ops = pgtbl_ops; 2585 smmu_domain->smmu = smmu; 2586 return 0; 2587 } 2588 2589 static struct arm_smmu_ste * 2590 arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) 2591 { 2592 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; 2593 2594 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { 2595 /* Two-level walk */ 2596 return &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)] 2597 ->stes[arm_smmu_strtab_l2_idx(sid)]; 2598 } else { 2599 /* Simple linear lookup */ 2600 return &cfg->linear.table[sid]; 2601 } 2602 } 2603 2604 void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master, 2605 const struct arm_smmu_ste *target) 2606 { 2607 int i, j; 2608 struct arm_smmu_device *smmu = master->smmu; 2609 2610 master->cd_table.in_ste = 2611 FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(target->data[0])) == 2612 STRTAB_STE_0_CFG_S1_TRANS; 2613 master->ste_ats_enabled = 2614 FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(target->data[1])) == 2615 STRTAB_STE_1_EATS_TRANS; 2616 2617 for (i = 0; i < master->num_streams; ++i) { 2618 u32 sid = master->streams[i].id; 2619 struct arm_smmu_ste *step = 2620 arm_smmu_get_step_for_sid(smmu, sid); 2621 2622 /* Bridged PCI devices may end up with duplicated IDs */ 2623 for (j = 0; j < i; j++) 2624 if (master->streams[j].id == sid) 2625 break; 2626 if (j < i) 2627 continue; 2628 2629 arm_smmu_write_ste(master, sid, step, target); 2630 } 2631 } 2632 2633 static bool arm_smmu_ats_supported(struct arm_smmu_master *master) 2634 { 2635 struct device *dev = master->dev; 2636 struct arm_smmu_device *smmu = master->smmu; 2637 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 2638 2639 if (!(smmu->features & ARM_SMMU_FEAT_ATS)) 2640 return false; 2641 2642 if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS)) 2643 return false; 2644 2645 return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)); 2646 } 2647 2648 static void arm_smmu_enable_ats(struct arm_smmu_master *master) 2649 { 2650 size_t stu; 2651 struct pci_dev *pdev; 2652 struct arm_smmu_device *smmu = master->smmu; 2653 2654 /* Smallest Translation Unit: log2 of the smallest supported granule */ 2655 stu = __ffs(smmu->pgsize_bitmap); 2656 pdev = to_pci_dev(master->dev); 2657 2658 /* 2659 * ATC invalidation of PASID 0 
causes the entire ATC to be flushed. 2660 */ 2661 arm_smmu_atc_inv_master(master, IOMMU_NO_PASID); 2662 if (pci_enable_ats(pdev, stu)) 2663 dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); 2664 } 2665 2666 static int arm_smmu_enable_pasid(struct arm_smmu_master *master) 2667 { 2668 int ret; 2669 int features; 2670 int num_pasids; 2671 struct pci_dev *pdev; 2672 2673 if (!dev_is_pci(master->dev)) 2674 return -ENODEV; 2675 2676 pdev = to_pci_dev(master->dev); 2677 2678 features = pci_pasid_features(pdev); 2679 if (features < 0) 2680 return features; 2681 2682 num_pasids = pci_max_pasids(pdev); 2683 if (num_pasids <= 0) 2684 return num_pasids; 2685 2686 ret = pci_enable_pasid(pdev, features); 2687 if (ret) { 2688 dev_err(&pdev->dev, "Failed to enable PASID\n"); 2689 return ret; 2690 } 2691 2692 master->ssid_bits = min_t(u8, ilog2(num_pasids), 2693 master->smmu->ssid_bits); 2694 return 0; 2695 } 2696 2697 static void arm_smmu_disable_pasid(struct arm_smmu_master *master) 2698 { 2699 struct pci_dev *pdev; 2700 2701 if (!dev_is_pci(master->dev)) 2702 return; 2703 2704 pdev = to_pci_dev(master->dev); 2705 2706 if (!pdev->pasid_enabled) 2707 return; 2708 2709 master->ssid_bits = 0; 2710 pci_disable_pasid(pdev); 2711 } 2712 2713 static struct arm_smmu_master_domain * 2714 arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain, 2715 struct arm_smmu_master *master, 2716 ioasid_t ssid, bool nested_ats_flush) 2717 { 2718 struct arm_smmu_master_domain *master_domain; 2719 2720 lockdep_assert_held(&smmu_domain->devices_lock); 2721 2722 list_for_each_entry(master_domain, &smmu_domain->devices, 2723 devices_elm) { 2724 if (master_domain->master == master && 2725 master_domain->ssid == ssid && 2726 master_domain->nested_ats_flush == nested_ats_flush) 2727 return master_domain; 2728 } 2729 return NULL; 2730 } 2731 2732 /* 2733 * If the domain uses the smmu_domain->devices list return the arm_smmu_domain 2734 * structure, otherwise NULL. These domains track attached devices so they can 2735 * issue invalidations. 2736 */ 2737 static struct arm_smmu_domain * 2738 to_smmu_domain_devices(struct iommu_domain *domain) 2739 { 2740 /* The domain can be NULL only when processing the first attach */ 2741 if (!domain) 2742 return NULL; 2743 if ((domain->type & __IOMMU_DOMAIN_PAGING) || 2744 domain->type == IOMMU_DOMAIN_SVA) 2745 return to_smmu_domain(domain); 2746 if (domain->type == IOMMU_DOMAIN_NESTED) 2747 return to_smmu_nested_domain(domain)->vsmmu->s2_parent; 2748 return NULL; 2749 } 2750 2751 static void arm_smmu_remove_master_domain(struct arm_smmu_master *master, 2752 struct iommu_domain *domain, 2753 ioasid_t ssid) 2754 { 2755 struct arm_smmu_domain *smmu_domain = to_smmu_domain_devices(domain); 2756 struct arm_smmu_master_domain *master_domain; 2757 bool nested_ats_flush = false; 2758 unsigned long flags; 2759 2760 if (!smmu_domain) 2761 return; 2762 2763 if (domain->type == IOMMU_DOMAIN_NESTED) 2764 nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats; 2765 2766 spin_lock_irqsave(&smmu_domain->devices_lock, flags); 2767 master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid, 2768 nested_ats_flush); 2769 if (master_domain) { 2770 list_del(&master_domain->devices_elm); 2771 kfree(master_domain); 2772 if (master->ats_enabled) 2773 atomic_dec(&smmu_domain->nr_ats_masters); 2774 } 2775 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); 2776 } 2777 2778 /* 2779 * Start the sequence to attach a domain to a master. 
 * The sequence contains three steps:
 *  arm_smmu_attach_prepare()
 *  arm_smmu_install_ste_for_dev()
 *  arm_smmu_attach_commit()
 *
 * If prepare succeeds then the sequence must be completed. The STE installed
 * must set the STE.EATS field according to state.ats_enabled.
 *
 * If the device supports ATS then this determines if EATS should be enabled
 * in the STE, and starts sequencing EATS disable if required.
 *
 * This sequence orders the changes to STE.EATS and to the PCI ATS config
 * space so that PCI ATS is only ever enabled while STE.EATS is enabled.
 *
 * new_domain can be a non-paging domain. In this case ATS will not be enabled,
 * and invalidations won't be tracked.
 */
int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
			    struct iommu_domain *new_domain)
{
	struct arm_smmu_master *master = state->master;
	struct arm_smmu_master_domain *master_domain;
	struct arm_smmu_domain *smmu_domain =
		to_smmu_domain_devices(new_domain);
	unsigned long flags;

	/*
	 * arm_smmu_share_asid() must not see two domains pointing to the same
	 * arm_smmu_master_domain contents otherwise it could randomly write one
	 * or the other to the CD.
	 */
	lockdep_assert_held(&arm_smmu_asid_lock);

	if (smmu_domain || state->cd_needs_ats) {
		/*
		 * The SMMU does not support enabling ATS with bypass/abort.
		 * When the STE is in bypass (STE.Config[2:0] == 0b100), ATS
		 * Translation Requests and Translated transactions are denied
		 * as though ATS is disabled for the stream (STE.EATS == 0b00),
		 * causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events
		 * (IHI0070Ea 5.2 Stream Table Entry).
		 *
		 * However, if we have installed a CD table and are using S1DSS
		 * then ATS will work in S1DSS bypass. See "13.6.4 Full ATS
		 * skipping stage 1".
		 *
		 * Disable ATS if we are going to create a normal 0b100 bypass
		 * STE.
		 */
		state->ats_enabled = !state->disable_ats &&
				     arm_smmu_ats_supported(master);
	}

	if (smmu_domain) {
		master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL);
		if (!master_domain)
			return -ENOMEM;
		master_domain->master = master;
		master_domain->ssid = state->ssid;
		if (new_domain->type == IOMMU_DOMAIN_NESTED)
			master_domain->nested_ats_flush =
				to_smmu_nested_domain(new_domain)->enable_ats;

		/*
		 * During prepare we want the current smmu_domain and new
		 * smmu_domain to be in the devices list before we change any
		 * HW. This ensures that both domains will send ATS
		 * invalidations to the master until we are done.
		 *
		 * It is tempting to make this list only track masters that are
		 * using ATS, but arm_smmu_share_asid() also uses this to change
		 * the ASID of a domain, unrelated to ATS.
		 *
		 * Notice if we are re-attaching the same domain then the list
		 * will have two identical entries and commit will remove only
		 * one of them.
		 */
		spin_lock_irqsave(&smmu_domain->devices_lock, flags);
		if (smmu_domain->enforce_cache_coherency &&
		    !arm_smmu_master_canwbs(master)) {
			spin_unlock_irqrestore(&smmu_domain->devices_lock,
					       flags);
			kfree(master_domain);
			return -EINVAL;
		}

		if (state->ats_enabled)
			atomic_inc(&smmu_domain->nr_ats_masters);
		list_add(&master_domain->devices_elm, &smmu_domain->devices);
		spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
	}

	if (!state->ats_enabled && master->ats_enabled) {
		pci_disable_ats(to_pci_dev(master->dev));
		/*
		 * This is probably overkill, but the config write for disabling
		 * ATS should complete before the STE is configured to generate
		 * UR to avoid AER noise.
		 */
		wmb();
	}
	return 0;
}
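/*
 * Illustrative sketch of the three-step sequence (see arm_smmu_attach_dev()
 * for a real caller; 'target' stands for whatever STE the caller builds):
 *
 *	state.master = master;
 *	ret = arm_smmu_attach_prepare(&state, new_domain);
 *	if (ret)
 *		return ret;
 *	... write the CD table entry and/or build the target STE ...
 *	arm_smmu_install_ste_for_dev(master, &target);
 *	arm_smmu_attach_commit(&state);
 */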
/*
 * Commit is done after the STE/CD are configured with the EATS setting. It
 * completes synchronizing the PCI device's ATC and finishes manipulating the
 * smmu_domain->devices list.
 */
void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
{
	struct arm_smmu_master *master = state->master;

	lockdep_assert_held(&arm_smmu_asid_lock);

	if (state->ats_enabled && !master->ats_enabled) {
		arm_smmu_enable_ats(master);
	} else if (state->ats_enabled && master->ats_enabled) {
		/*
		 * The translation has changed, flush the ATC. At this point the
		 * SMMU is translating for the new domain and both the old and
		 * new domain will issue invalidations.
		 */
		arm_smmu_atc_inv_master(master, state->ssid);
	} else if (!state->ats_enabled && master->ats_enabled) {
		/* ATS is being switched off, invalidate the entire ATC */
		arm_smmu_atc_inv_master(master, IOMMU_NO_PASID);
	}
	master->ats_enabled = state->ats_enabled;

	arm_smmu_remove_master_domain(master, state->old_domain, state->ssid);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_ste target;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_attach_state state = {
		.old_domain = iommu_get_domain_for_dev(dev),
		.ssid = IOMMU_NO_PASID,
	};
	struct arm_smmu_master *master;
	struct arm_smmu_cd *cdptr;

	if (!fwspec)
		return -ENOENT;

	state.master = master = dev_iommu_priv_get(dev);
	smmu = master->smmu;

	if (smmu_domain->smmu != smmu)
		return -EINVAL;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
		if (!cdptr)
			return -ENOMEM;
	} else if (arm_smmu_ssids_in_use(&master->cd_table))
		return -EBUSY;

	/*
	 * Prevent arm_smmu_share_asid() from trying to change the ASID
	 * of either the old or new domain while we are working on it.
	 * This allows the STE and the smmu_domain->devices list to
	 * be inconsistent during this routine.
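	 * (For example, when re-attaching the same domain the devices list
	 * briefly holds two identical entries; arm_smmu_attach_commit()
	 * removes one of them.)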
2949 */ 2950 mutex_lock(&arm_smmu_asid_lock); 2951 2952 ret = arm_smmu_attach_prepare(&state, domain); 2953 if (ret) { 2954 mutex_unlock(&arm_smmu_asid_lock); 2955 return ret; 2956 } 2957 2958 switch (smmu_domain->stage) { 2959 case ARM_SMMU_DOMAIN_S1: { 2960 struct arm_smmu_cd target_cd; 2961 2962 arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); 2963 arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr, 2964 &target_cd); 2965 arm_smmu_make_cdtable_ste(&target, master, state.ats_enabled, 2966 STRTAB_STE_1_S1DSS_SSID0); 2967 arm_smmu_install_ste_for_dev(master, &target); 2968 break; 2969 } 2970 case ARM_SMMU_DOMAIN_S2: 2971 arm_smmu_make_s2_domain_ste(&target, master, smmu_domain, 2972 state.ats_enabled); 2973 arm_smmu_install_ste_for_dev(master, &target); 2974 arm_smmu_clear_cd(master, IOMMU_NO_PASID); 2975 break; 2976 } 2977 2978 arm_smmu_attach_commit(&state); 2979 mutex_unlock(&arm_smmu_asid_lock); 2980 return 0; 2981 } 2982 2983 static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain, 2984 struct device *dev, ioasid_t id, 2985 struct iommu_domain *old) 2986 { 2987 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 2988 struct arm_smmu_master *master = dev_iommu_priv_get(dev); 2989 struct arm_smmu_device *smmu = master->smmu; 2990 struct arm_smmu_cd target_cd; 2991 2992 if (smmu_domain->smmu != smmu) 2993 return -EINVAL; 2994 2995 if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) 2996 return -EINVAL; 2997 2998 /* 2999 * We can read cd.asid outside the lock because arm_smmu_set_pasid() 3000 * will fix it 3001 */ 3002 arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); 3003 return arm_smmu_set_pasid(master, to_smmu_domain(domain), id, 3004 &target_cd, old); 3005 } 3006 3007 static void arm_smmu_update_ste(struct arm_smmu_master *master, 3008 struct iommu_domain *sid_domain, 3009 bool ats_enabled) 3010 { 3011 unsigned int s1dss = STRTAB_STE_1_S1DSS_TERMINATE; 3012 struct arm_smmu_ste ste; 3013 3014 if (master->cd_table.in_ste && master->ste_ats_enabled == ats_enabled) 3015 return; 3016 3017 if (sid_domain->type == IOMMU_DOMAIN_IDENTITY) 3018 s1dss = STRTAB_STE_1_S1DSS_BYPASS; 3019 else 3020 WARN_ON(sid_domain->type != IOMMU_DOMAIN_BLOCKED); 3021 3022 /* 3023 * Change the STE into a cdtable one with SID IDENTITY/BLOCKED behavior 3024 * using s1dss if necessary. If the cd_table is already installed then 3025 * the S1DSS is correct and this will just update the EATS. Otherwise it 3026 * installs the entire thing. This will be hitless. 
	 */
	arm_smmu_make_cdtable_ste(&ste, master, ats_enabled, s1dss);
	arm_smmu_install_ste_for_dev(master, &ste);
}

int arm_smmu_set_pasid(struct arm_smmu_master *master,
		       struct arm_smmu_domain *smmu_domain, ioasid_t pasid,
		       struct arm_smmu_cd *cd, struct iommu_domain *old)
{
	struct iommu_domain *sid_domain = iommu_get_domain_for_dev(master->dev);
	struct arm_smmu_attach_state state = {
		.master = master,
		.ssid = pasid,
		.old_domain = old,
	};
	struct arm_smmu_cd *cdptr;
	int ret;

	/* The core code validates pasid */

	if (smmu_domain->smmu != master->smmu)
		return -EINVAL;

	if (!master->cd_table.in_ste &&
	    sid_domain->type != IOMMU_DOMAIN_IDENTITY &&
	    sid_domain->type != IOMMU_DOMAIN_BLOCKED)
		return -EINVAL;

	cdptr = arm_smmu_alloc_cd_ptr(master, pasid);
	if (!cdptr)
		return -ENOMEM;

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_attach_prepare(&state, &smmu_domain->domain);
	if (ret)
		goto out_unlock;

	/*
	 * We don't want to obtain the asid_lock too early, so fix up the
	 * caller-set ASID under the lock in case it changed.
	 */
	cd->data[0] &= ~cpu_to_le64(CTXDESC_CD_0_ASID);
	cd->data[0] |= cpu_to_le64(
		FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->cd.asid));

	arm_smmu_write_cd_entry(master, pasid, cdptr, cd);
	arm_smmu_update_ste(master, sid_domain, state.ats_enabled);

	arm_smmu_attach_commit(&state);

out_unlock:
	mutex_unlock(&arm_smmu_asid_lock);
	return ret;
}

static int arm_smmu_blocking_set_dev_pasid(struct iommu_domain *new_domain,
					   struct device *dev, ioasid_t pasid,
					   struct iommu_domain *old_domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(old_domain);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	mutex_lock(&arm_smmu_asid_lock);
	arm_smmu_clear_cd(master, pasid);
	if (master->ats_enabled)
		arm_smmu_atc_inv_master(master, pasid);
	arm_smmu_remove_master_domain(master, &smmu_domain->domain, pasid);
	mutex_unlock(&arm_smmu_asid_lock);

	/*
	 * When the last user of the CD table goes away downgrade the STE back
	 * to a non-cd_table one.
	 */
	if (!arm_smmu_ssids_in_use(&master->cd_table)) {
		struct iommu_domain *sid_domain =
			iommu_get_domain_for_dev(master->dev);

		if (sid_domain->type == IOMMU_DOMAIN_IDENTITY ||
		    sid_domain->type == IOMMU_DOMAIN_BLOCKED)
			sid_domain->ops->attach_dev(sid_domain, dev);
	}
	return 0;
}

static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
				    struct device *dev,
				    struct arm_smmu_ste *ste,
				    unsigned int s1dss)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_attach_state state = {
		.master = master,
		.old_domain = iommu_get_domain_for_dev(dev),
		.ssid = IOMMU_NO_PASID,
	};

	/*
	 * Do not allow any ASID to be changed while we are working on the STE,
	 * otherwise we could miss invalidations.
	 */
	mutex_lock(&arm_smmu_asid_lock);

	/*
	 * If the CD table is not in use we can use the provided STE, otherwise
	 * we use a cdtable STE with the provided S1DSS.
	 */
	if (arm_smmu_ssids_in_use(&master->cd_table)) {
		/*
		 * If a CD table has to be present then we need to run with ATS
		 * on because we have to assume a PASID is using ATS.
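		 * (state.cd_needs_ats below is what makes
		 * arm_smmu_attach_prepare() evaluate ATS support even though
		 * the new domain is a non-paging one.)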
For 3137 * IDENTITY this will setup things so that S1DSS=bypass which 3138 * follows the explanation in "13.6.4 Full ATS skipping stage 1" 3139 * and allows for ATS on the RID to work. 3140 */ 3141 state.cd_needs_ats = true; 3142 arm_smmu_attach_prepare(&state, domain); 3143 arm_smmu_make_cdtable_ste(ste, master, state.ats_enabled, s1dss); 3144 } else { 3145 arm_smmu_attach_prepare(&state, domain); 3146 } 3147 arm_smmu_install_ste_for_dev(master, ste); 3148 arm_smmu_attach_commit(&state); 3149 mutex_unlock(&arm_smmu_asid_lock); 3150 3151 /* 3152 * This has to be done after removing the master from the 3153 * arm_smmu_domain->devices to avoid races updating the same context 3154 * descriptor from arm_smmu_share_asid(). 3155 */ 3156 arm_smmu_clear_cd(master, IOMMU_NO_PASID); 3157 } 3158 3159 static int arm_smmu_attach_dev_identity(struct iommu_domain *domain, 3160 struct device *dev) 3161 { 3162 struct arm_smmu_ste ste; 3163 struct arm_smmu_master *master = dev_iommu_priv_get(dev); 3164 3165 arm_smmu_make_bypass_ste(master->smmu, &ste); 3166 arm_smmu_attach_dev_ste(domain, dev, &ste, STRTAB_STE_1_S1DSS_BYPASS); 3167 return 0; 3168 } 3169 3170 static const struct iommu_domain_ops arm_smmu_identity_ops = { 3171 .attach_dev = arm_smmu_attach_dev_identity, 3172 }; 3173 3174 static struct iommu_domain arm_smmu_identity_domain = { 3175 .type = IOMMU_DOMAIN_IDENTITY, 3176 .ops = &arm_smmu_identity_ops, 3177 }; 3178 3179 static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain, 3180 struct device *dev) 3181 { 3182 struct arm_smmu_ste ste; 3183 3184 arm_smmu_make_abort_ste(&ste); 3185 arm_smmu_attach_dev_ste(domain, dev, &ste, 3186 STRTAB_STE_1_S1DSS_TERMINATE); 3187 return 0; 3188 } 3189 3190 static const struct iommu_domain_ops arm_smmu_blocked_ops = { 3191 .attach_dev = arm_smmu_attach_dev_blocked, 3192 .set_dev_pasid = arm_smmu_blocking_set_dev_pasid, 3193 }; 3194 3195 static struct iommu_domain arm_smmu_blocked_domain = { 3196 .type = IOMMU_DOMAIN_BLOCKED, 3197 .ops = &arm_smmu_blocked_ops, 3198 }; 3199 3200 static struct iommu_domain * 3201 arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags, 3202 const struct iommu_user_data *user_data) 3203 { 3204 struct arm_smmu_master *master = dev_iommu_priv_get(dev); 3205 struct arm_smmu_device *smmu = master->smmu; 3206 const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | 3207 IOMMU_HWPT_ALLOC_PASID | 3208 IOMMU_HWPT_ALLOC_NEST_PARENT; 3209 struct arm_smmu_domain *smmu_domain; 3210 int ret; 3211 3212 if (flags & ~PAGING_FLAGS) 3213 return ERR_PTR(-EOPNOTSUPP); 3214 if (user_data) 3215 return ERR_PTR(-EOPNOTSUPP); 3216 3217 smmu_domain = arm_smmu_domain_alloc(); 3218 if (IS_ERR(smmu_domain)) 3219 return ERR_CAST(smmu_domain); 3220 3221 switch (flags) { 3222 case 0: 3223 /* Prefer S1 if available */ 3224 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) 3225 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; 3226 else 3227 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; 3228 break; 3229 case IOMMU_HWPT_ALLOC_NEST_PARENT: 3230 if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) { 3231 ret = -EOPNOTSUPP; 3232 goto err_free; 3233 } 3234 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; 3235 smmu_domain->nest_parent = true; 3236 break; 3237 case IOMMU_HWPT_ALLOC_DIRTY_TRACKING: 3238 case IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_PASID: 3239 case IOMMU_HWPT_ALLOC_PASID: 3240 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) { 3241 ret = -EOPNOTSUPP; 3242 goto err_free; 3243 } 3244 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; 3245 break; 3246 default: 3247 ret = 
-EOPNOTSUPP; 3248 goto err_free; 3249 } 3250 3251 smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED; 3252 smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops; 3253 ret = arm_smmu_domain_finalise(smmu_domain, smmu, flags); 3254 if (ret) 3255 goto err_free; 3256 return &smmu_domain->domain; 3257 3258 err_free: 3259 kfree(smmu_domain); 3260 return ERR_PTR(ret); 3261 } 3262 3263 static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, 3264 phys_addr_t paddr, size_t pgsize, size_t pgcount, 3265 int prot, gfp_t gfp, size_t *mapped) 3266 { 3267 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; 3268 3269 if (!ops) 3270 return -ENODEV; 3271 3272 return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped); 3273 } 3274 3275 static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova, 3276 size_t pgsize, size_t pgcount, 3277 struct iommu_iotlb_gather *gather) 3278 { 3279 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 3280 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; 3281 3282 if (!ops) 3283 return 0; 3284 3285 return ops->unmap_pages(ops, iova, pgsize, pgcount, gather); 3286 } 3287 3288 static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) 3289 { 3290 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 3291 3292 if (smmu_domain->smmu) 3293 arm_smmu_tlb_inv_context(smmu_domain); 3294 } 3295 3296 static void arm_smmu_iotlb_sync(struct iommu_domain *domain, 3297 struct iommu_iotlb_gather *gather) 3298 { 3299 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 3300 3301 if (!gather->pgsize) 3302 return; 3303 3304 arm_smmu_tlb_inv_range_domain(gather->start, 3305 gather->end - gather->start + 1, 3306 gather->pgsize, true, smmu_domain); 3307 } 3308 3309 static phys_addr_t 3310 arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 3311 { 3312 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; 3313 3314 if (!ops) 3315 return 0; 3316 3317 return ops->iova_to_phys(ops, iova); 3318 } 3319 3320 static struct platform_driver arm_smmu_driver; 3321 3322 static 3323 struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode) 3324 { 3325 struct device *dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); 3326 3327 put_device(dev); 3328 return dev ? 
dev_get_drvdata(dev) : NULL; 3329 } 3330 3331 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) 3332 { 3333 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) 3334 return arm_smmu_strtab_l1_idx(sid) < smmu->strtab_cfg.l2.num_l1_ents; 3335 return sid < smmu->strtab_cfg.linear.num_ents; 3336 } 3337 3338 static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid) 3339 { 3340 /* Check the SIDs are in range of the SMMU and our stream table */ 3341 if (!arm_smmu_sid_in_range(smmu, sid)) 3342 return -ERANGE; 3343 3344 /* Ensure l2 strtab is initialised */ 3345 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) 3346 return arm_smmu_init_l2_strtab(smmu, sid); 3347 3348 return 0; 3349 } 3350 3351 static int arm_smmu_insert_master(struct arm_smmu_device *smmu, 3352 struct arm_smmu_master *master) 3353 { 3354 int i; 3355 int ret = 0; 3356 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); 3357 3358 master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams), 3359 GFP_KERNEL); 3360 if (!master->streams) 3361 return -ENOMEM; 3362 master->num_streams = fwspec->num_ids; 3363 3364 mutex_lock(&smmu->streams_mutex); 3365 for (i = 0; i < fwspec->num_ids; i++) { 3366 struct arm_smmu_stream *new_stream = &master->streams[i]; 3367 u32 sid = fwspec->ids[i]; 3368 3369 new_stream->id = sid; 3370 new_stream->master = master; 3371 3372 ret = arm_smmu_init_sid_strtab(smmu, sid); 3373 if (ret) 3374 break; 3375 3376 /* Insert into SID tree */ 3377 if (rb_find_add(&new_stream->node, &smmu->streams, 3378 arm_smmu_streams_cmp_node)) { 3379 dev_warn(master->dev, "stream %u already in tree\n", 3380 sid); 3381 ret = -EINVAL; 3382 break; 3383 } 3384 } 3385 3386 if (ret) { 3387 for (i--; i >= 0; i--) 3388 rb_erase(&master->streams[i].node, &smmu->streams); 3389 kfree(master->streams); 3390 } 3391 mutex_unlock(&smmu->streams_mutex); 3392 3393 return ret; 3394 } 3395 3396 static void arm_smmu_remove_master(struct arm_smmu_master *master) 3397 { 3398 int i; 3399 struct arm_smmu_device *smmu = master->smmu; 3400 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); 3401 3402 if (!smmu || !master->streams) 3403 return; 3404 3405 mutex_lock(&smmu->streams_mutex); 3406 for (i = 0; i < fwspec->num_ids; i++) 3407 rb_erase(&master->streams[i].node, &smmu->streams); 3408 mutex_unlock(&smmu->streams_mutex); 3409 3410 kfree(master->streams); 3411 } 3412 3413 static struct iommu_device *arm_smmu_probe_device(struct device *dev) 3414 { 3415 int ret; 3416 struct arm_smmu_device *smmu; 3417 struct arm_smmu_master *master; 3418 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 3419 3420 if (WARN_ON_ONCE(dev_iommu_priv_get(dev))) 3421 return ERR_PTR(-EBUSY); 3422 3423 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); 3424 if (!smmu) 3425 return ERR_PTR(-ENODEV); 3426 3427 master = kzalloc(sizeof(*master), GFP_KERNEL); 3428 if (!master) 3429 return ERR_PTR(-ENOMEM); 3430 3431 master->dev = dev; 3432 master->smmu = smmu; 3433 dev_iommu_priv_set(dev, master); 3434 3435 ret = arm_smmu_insert_master(smmu, master); 3436 if (ret) 3437 goto err_free_master; 3438 3439 device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits); 3440 master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits); 3441 3442 /* 3443 * Note that PASID must be enabled before, and disabled after ATS: 3444 * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register 3445 * 3446 * Behavior is undefined if this bit is Set and the value of the PASID 3447 * Enable, Execute Requested Enable, or Privileged 
Mode Requested bits 3448 * are changed. 3449 */ 3450 arm_smmu_enable_pasid(master); 3451 3452 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB)) 3453 master->ssid_bits = min_t(u8, master->ssid_bits, 3454 CTXDESC_LINEAR_CDMAX); 3455 3456 if ((smmu->features & ARM_SMMU_FEAT_STALLS && 3457 device_property_read_bool(dev, "dma-can-stall")) || 3458 smmu->features & ARM_SMMU_FEAT_STALL_FORCE) 3459 master->stall_enabled = true; 3460 3461 if (dev_is_pci(dev)) { 3462 unsigned int stu = __ffs(smmu->pgsize_bitmap); 3463 3464 pci_prepare_ats(to_pci_dev(dev), stu); 3465 } 3466 3467 return &smmu->iommu; 3468 3469 err_free_master: 3470 kfree(master); 3471 return ERR_PTR(ret); 3472 } 3473 3474 static void arm_smmu_release_device(struct device *dev) 3475 { 3476 struct arm_smmu_master *master = dev_iommu_priv_get(dev); 3477 3478 if (WARN_ON(arm_smmu_master_sva_enabled(master))) 3479 iopf_queue_remove_device(master->smmu->evtq.iopf, dev); 3480 3481 /* Put the STE back to what arm_smmu_init_strtab() sets */ 3482 if (dev->iommu->require_direct) 3483 arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev); 3484 else 3485 arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev); 3486 3487 arm_smmu_disable_pasid(master); 3488 arm_smmu_remove_master(master); 3489 if (arm_smmu_cdtab_allocated(&master->cd_table)) 3490 arm_smmu_free_cd_tables(master); 3491 kfree(master); 3492 } 3493 3494 static int arm_smmu_read_and_clear_dirty(struct iommu_domain *domain, 3495 unsigned long iova, size_t size, 3496 unsigned long flags, 3497 struct iommu_dirty_bitmap *dirty) 3498 { 3499 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); 3500 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; 3501 3502 return ops->read_and_clear_dirty(ops, iova, size, flags, dirty); 3503 } 3504 3505 static int arm_smmu_set_dirty_tracking(struct iommu_domain *domain, 3506 bool enabled) 3507 { 3508 /* 3509 * Always enabled and the dirty bitmap is cleared prior to 3510 * set_dirty_tracking(). 3511 */ 3512 return 0; 3513 } 3514 3515 static struct iommu_group *arm_smmu_device_group(struct device *dev) 3516 { 3517 struct iommu_group *group; 3518 3519 /* 3520 * We don't support devices sharing stream IDs other than PCI RID 3521 * aliases, since the necessary ID-to-device lookup becomes rather 3522 * impractical given a potential sparse 32-bit stream ID space. 
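 * (For example, PCI functions that alias to the same Requester ID share
 * one iommu_group via pci_device_group(), while each platform device gets
 * a group of its own from generic_device_group().)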
3523 */ 3524 if (dev_is_pci(dev)) 3525 group = pci_device_group(dev); 3526 else 3527 group = generic_device_group(dev); 3528 3529 return group; 3530 } 3531 3532 static int arm_smmu_of_xlate(struct device *dev, 3533 const struct of_phandle_args *args) 3534 { 3535 return iommu_fwspec_add_ids(dev, args->args, 1); 3536 } 3537 3538 static void arm_smmu_get_resv_regions(struct device *dev, 3539 struct list_head *head) 3540 { 3541 struct iommu_resv_region *region; 3542 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 3543 3544 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, 3545 prot, IOMMU_RESV_SW_MSI, GFP_KERNEL); 3546 if (!region) 3547 return; 3548 3549 list_add_tail(®ion->list, head); 3550 3551 iommu_dma_get_resv_regions(dev, head); 3552 } 3553 3554 static int arm_smmu_dev_enable_feature(struct device *dev, 3555 enum iommu_dev_features feat) 3556 { 3557 struct arm_smmu_master *master = dev_iommu_priv_get(dev); 3558 3559 if (!master) 3560 return -ENODEV; 3561 3562 switch (feat) { 3563 case IOMMU_DEV_FEAT_IOPF: 3564 if (!arm_smmu_master_iopf_supported(master)) 3565 return -EINVAL; 3566 if (master->iopf_enabled) 3567 return -EBUSY; 3568 master->iopf_enabled = true; 3569 return 0; 3570 case IOMMU_DEV_FEAT_SVA: 3571 if (!arm_smmu_master_sva_supported(master)) 3572 return -EINVAL; 3573 if (arm_smmu_master_sva_enabled(master)) 3574 return -EBUSY; 3575 return arm_smmu_master_enable_sva(master); 3576 default: 3577 return -EINVAL; 3578 } 3579 } 3580 3581 static int arm_smmu_dev_disable_feature(struct device *dev, 3582 enum iommu_dev_features feat) 3583 { 3584 struct arm_smmu_master *master = dev_iommu_priv_get(dev); 3585 3586 if (!master) 3587 return -EINVAL; 3588 3589 switch (feat) { 3590 case IOMMU_DEV_FEAT_IOPF: 3591 if (!master->iopf_enabled) 3592 return -EINVAL; 3593 if (master->sva_enabled) 3594 return -EBUSY; 3595 master->iopf_enabled = false; 3596 return 0; 3597 case IOMMU_DEV_FEAT_SVA: 3598 if (!arm_smmu_master_sva_enabled(master)) 3599 return -EINVAL; 3600 return arm_smmu_master_disable_sva(master); 3601 default: 3602 return -EINVAL; 3603 } 3604 } 3605 3606 /* 3607 * HiSilicon PCIe tune and trace device can be used to trace TLP headers on the 3608 * PCIe link and save the data to memory by DMA. The hardware is restricted to 3609 * use identity mapping only. 
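 * arm_smmu_def_domain_type() below therefore steers it into an identity
 * domain by returning IOMMU_DOMAIN_IDENTITY for the matching PCI IDs.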
static int arm_smmu_dev_enable_feature(struct device *dev,
				       enum iommu_dev_features feat)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	if (!master)
		return -ENODEV;

	switch (feat) {
	case IOMMU_DEV_FEAT_IOPF:
		if (!arm_smmu_master_iopf_supported(master))
			return -EINVAL;
		if (master->iopf_enabled)
			return -EBUSY;
		master->iopf_enabled = true;
		return 0;
	case IOMMU_DEV_FEAT_SVA:
		if (!arm_smmu_master_sva_supported(master))
			return -EINVAL;
		if (arm_smmu_master_sva_enabled(master))
			return -EBUSY;
		return arm_smmu_master_enable_sva(master);
	default:
		return -EINVAL;
	}
}

static int arm_smmu_dev_disable_feature(struct device *dev,
					enum iommu_dev_features feat)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);

	if (!master)
		return -EINVAL;

	switch (feat) {
	case IOMMU_DEV_FEAT_IOPF:
		if (!master->iopf_enabled)
			return -EINVAL;
		if (master->sva_enabled)
			return -EBUSY;
		master->iopf_enabled = false;
		return 0;
	case IOMMU_DEV_FEAT_SVA:
		if (!arm_smmu_master_sva_enabled(master))
			return -EINVAL;
		return arm_smmu_master_disable_sva(master);
	default:
		return -EINVAL;
	}
}

/*
 * HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
 * PCIe link and save the data to memory by DMA. The hardware is restricted to
 * use identity mapping only.
 */
#define IS_HISI_PTT_DEVICE(pdev)	((pdev)->vendor == PCI_VENDOR_ID_HUAWEI && \
					 (pdev)->device == 0xa12e)

static int arm_smmu_def_domain_type(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_HISI_PTT_DEVICE(pdev))
			return IOMMU_DOMAIN_IDENTITY;
	}

	return 0;
}

static struct iommu_ops arm_smmu_ops = {
	.identity_domain	= &arm_smmu_identity_domain,
	.blocked_domain		= &arm_smmu_blocked_domain,
	.capable		= arm_smmu_capable,
	.hw_info		= arm_smmu_hw_info,
	.domain_alloc_sva	= arm_smmu_sva_domain_alloc,
	.domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags,
	.probe_device		= arm_smmu_probe_device,
	.release_device		= arm_smmu_release_device,
	.device_group		= arm_smmu_device_group,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.dev_enable_feat	= arm_smmu_dev_enable_feature,
	.dev_disable_feat	= arm_smmu_dev_disable_feature,
	.page_response		= arm_smmu_page_response,
	.def_domain_type	= arm_smmu_def_domain_type,
	.viommu_alloc		= arm_vsmmu_alloc,
	.user_pasid_table	= 1,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
	.owner			= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev		= arm_smmu_attach_dev,
		.enforce_cache_coherency = arm_smmu_enforce_cache_coherency,
		.set_dev_pasid		= arm_smmu_s1_set_dev_pasid,
		.map_pages		= arm_smmu_map_pages,
		.unmap_pages		= arm_smmu_unmap_pages,
		.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
		.iotlb_sync		= arm_smmu_iotlb_sync,
		.iova_to_phys		= arm_smmu_iova_to_phys,
		.free			= arm_smmu_domain_free_paging,
	}
};

static struct iommu_dirty_ops arm_smmu_dirty_ops = {
	.read_and_clear_dirty	= arm_smmu_read_and_clear_dirty,
	.set_dirty_tracking	= arm_smmu_set_dirty_tracking,
};

/* Probing and initialisation functions */
int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
			    struct arm_smmu_queue *q, void __iomem *page,
			    unsigned long prod_off, unsigned long cons_off,
			    size_t dwords, const char *name)
{
	size_t qsz;
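	/*
	 * A queue must be naturally aligned and power-of-two sized. If the
	 * coherent allocation fails, halve the number of entries until the
	 * queue fits in a single page. Illustrative numbers: with
	 * CMDQ_ENT_DWORDS == 2 and max_n_shift == 8, qsz works out as
	 * 256 entries * 2 dwords * 8 bytes == 4096 bytes.
	 */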
	do {
		qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
		q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
					      GFP_KERNEL);
		if (q->base || qsz < PAGE_SIZE)
			break;

		q->llq.max_n_shift--;
	} while (1);

	if (!q->base) {
		dev_err(smmu->dev,
			"failed to allocate queue (0x%zx bytes) for %s\n",
			qsz, name);
		return -ENOMEM;
	}

	if (!WARN_ON(q->base_dma & (qsz - 1))) {
		dev_info(smmu->dev, "allocated %u entries for %s\n",
			 1 << q->llq.max_n_shift, name);
	}

	q->prod_reg	= page + prod_off;
	q->cons_reg	= page + cons_off;
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
	q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);

	q->llq.prod = q->llq.cons = 0;
	return 0;
}

int arm_smmu_cmdq_init(struct arm_smmu_device *smmu,
		       struct arm_smmu_cmdq *cmdq)
{
	unsigned int nents = 1 << cmdq->q.llq.max_n_shift;

	atomic_set(&cmdq->owner_prod, 0);
	atomic_set(&cmdq->lock, 0);

	cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
							      GFP_KERNEL);
	if (!cmdq->valid_map)
		return -ENOMEM;

	return 0;
}

static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
				      ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS,
				      CMDQ_ENT_DWORDS, "cmdq");
	if (ret)
		return ret;

	ret = arm_smmu_cmdq_init(smmu, &smmu->cmdq);
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1,
				      ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS,
				      EVTQ_ENT_DWORDS, "evtq");
	if (ret)
		return ret;

	if ((smmu->features & ARM_SMMU_FEAT_SVA) &&
	    (smmu->features & ARM_SMMU_FEAT_STALLS)) {
		smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
		if (!smmu->evtq.iopf)
			return -ENOMEM;
	}

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
				       ARM_SMMU_PRIQ_PROD, ARM_SMMU_PRIQ_CONS,
				       PRIQ_ENT_DWORDS, "priq");
}
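/*
 * Sizing sketch (illustrative, assuming STRTAB_SPLIT == 8 and an 8-byte L1
 * descriptor): with 16-bit stream IDs the last SID index is 255, so we
 * allocate 256 L1 descriptors (2KiB) here and defer each 256-entry L2 table
 * until a stream in its range is actually probed.
 */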
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	u32 l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	unsigned int last_sid_idx =
		arm_smmu_strtab_l1_idx((1ULL << smmu->sid_bits) - 1);

	/* Calculate the L1 size, capped to the SIDSIZE. */
	cfg->l2.num_l1_ents = min(last_sid_idx + 1, STRTAB_MAX_L1_ENTRIES);
	if (cfg->l2.num_l1_ents <= last_sid_idx)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 ilog2(cfg->l2.num_l1_ents * STRTAB_NUM_L2_STES),
			 smmu->sid_bits);

	l1size = cfg->l2.num_l1_ents * sizeof(struct arm_smmu_strtab_l1);
	cfg->l2.l1tab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->l2.l1_dma,
					    GFP_KERNEL);
	if (!cfg->l2.l1tab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			l1size);
		return -ENOMEM;
	}

	cfg->l2.l2ptrs = devm_kcalloc(smmu->dev, cfg->l2.num_l1_ents,
				      sizeof(*cfg->l2.l2ptrs), GFP_KERNEL);
	if (!cfg->l2.l2ptrs)
		return -ENOMEM;

	return 0;
}

static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * sizeof(struct arm_smmu_ste);
	cfg->linear.table = dmam_alloc_coherent(smmu->dev, size,
						&cfg->linear.ste_dma,
						GFP_KERNEL);
	if (!cfg->linear.table) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->linear.num_ents = 1 << smmu->sid_bits;

	arm_smmu_init_initial_stes(cfg->linear.table, cfg->linear.num_ents);
	return 0;
}

static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);
	if (ret)
		return ret;

	ida_init(&smmu->vmid_map);

	return 0;
}

static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	mutex_init(&smmu->streams_mutex);
	smmu->streams = RB_ROOT;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	ret = arm_smmu_init_strtab(smmu);
	if (ret)
		return ret;

	if (smmu->impl_ops && smmu->impl_ops->init_structures)
		return smmu->impl_ops->init_structures(smmu);

	return 0;
}

static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}

/* GBPA is "special" */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);

	if (ret)
		dev_err(smmu->dev, "GBPA not responding to update\n");
	return ret;
}

static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_device_msi_free_irqs_all(dev);
}
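/*
 * Each wired-interrupt source can instead be backed by an MSI: program the
 * doorbell address from the msi_msg into IRQ_CFG0, the payload into IRQ_CFG1
 * and the memory attributes into IRQ_CFG2; the SMMU then writes the payload
 * to the doorbell to signal the event.
 */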
static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}

static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	if (!dev->msi.domain) {
		dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
		return;
	}

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
		return;
	}

	smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
	smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX);
	smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);

	/* Add callback to free MSIs on teardown */
	devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
}
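/*
 * The evtq and priq handlers are requested as threaded irqs since they may
 * need to sleep (e.g. when resolving page requests); gerror is handled
 * entirely in hard-irq context.
 */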
static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
{
	int irq, ret;

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	} else {
		dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	} else {
		dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
		} else {
			dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
		}
	}
}

static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	irq = smmu->combined_irq;
	if (irq) {
		/*
		 * Cavium ThunderX2 implementation doesn't support unique irq
		 * lines. Use a single irq line for all the SMMUv3 interrupts.
		 */
		ret = devm_request_threaded_irq(smmu->dev, irq,
					arm_smmu_combined_irq_handler,
					arm_smmu_combined_irq_thread,
					IRQF_ONESHOT,
					"arm-smmu-v3-combined-irq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable combined irq\n");
	} else
		arm_smmu_setup_unique_irqs(smmu);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}

static void arm_smmu_write_strtab(struct arm_smmu_device *smmu)
{
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	dma_addr_t dma;
	u32 reg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		reg = FIELD_PREP(STRTAB_BASE_CFG_FMT,
				 STRTAB_BASE_CFG_FMT_2LVL) |
		      FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE,
				 ilog2(cfg->l2.num_l1_ents) + STRTAB_SPLIT) |
		      FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
		dma = cfg->l2.l1_dma;
	} else {
		reg = FIELD_PREP(STRTAB_BASE_CFG_FMT,
				 STRTAB_BASE_CFG_FMT_LINEAR) |
		      FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
		dma = cfg->linear.ste_dma;
	}
	writeq_relaxed((dma & STRTAB_BASE_ADDR_MASK) | STRTAB_BASE_RA,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(reg, smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
}
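/*
 * Bring the SMMU up in a known state: disable it, configure the table and
 * queue memory attributes, install the stream table and queues, invalidate
 * any stale configuration and TLB state, then progressively enable the
 * command, event and PRI queues before finally setting SMMUEN.
 */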
static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN) {
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
	}

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (random crap) */
	reg = CR2_PTM | CR2_RECINVSID;

	if (smmu->features & ARM_SMMU_FEAT_E2H)
		reg |= CR2_E2H;

	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	arm_smmu_write_strtab(smmu);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.llq.prod,
			       smmu->page1 + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.llq.cons,
			       smmu->page1 + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	if (smmu->features & ARM_SMMU_FEAT_ATS) {
		enables |= CR0_ATSCHK;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable ATS check\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	if (is_kdump_kernel())
		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);

	/* Enable the SMMU interface */
	enables |= CR0_SMMUEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}
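	/*
	 * Finally, give any implementation-specific logic (e.g. the Tegra241
	 * CMDQV extension) a chance to complete its own reset.
	 */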
	if (smmu->impl_ops && smmu->impl_ops->device_reset) {
		ret = smmu->impl_ops->device_reset(smmu);
		if (ret) {
			dev_err(smmu->dev, "failed to reset impl\n");
			return ret;
		}
	}

	return 0;
}
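/*
 * IIDR identifies the exact implementation; use it to mask out features that
 * are broken by known errata on specific Arm MMU products.
 */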
#define IIDR_IMPLEMENTER_ARM		0x43b
#define IIDR_PRODUCTID_ARM_MMU_600	0x483
#define IIDR_PRODUCTID_ARM_MMU_700	0x487

static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	unsigned int implementer, productid, variant, revision;

	reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
	implementer = FIELD_GET(IIDR_IMPLEMENTER, reg);
	productid = FIELD_GET(IIDR_PRODUCTID, reg);
	variant = FIELD_GET(IIDR_VARIANT, reg);
	revision = FIELD_GET(IIDR_REVISION, reg);

	switch (implementer) {
	case IIDR_IMPLEMENTER_ARM:
		switch (productid) {
		case IIDR_PRODUCTID_ARM_MMU_600:
			/* Arm erratum 1076982 */
			if (variant == 0 && revision <= 2)
				smmu->features &= ~ARM_SMMU_FEAT_SEV;
			/* Arm erratum 1209401 */
			if (variant < 2)
				smmu->features &= ~ARM_SMMU_FEAT_NESTING;
			break;
		case IIDR_PRODUCTID_ARM_MMU_700:
			/* Arm erratum 2812531 */
			smmu->features &= ~ARM_SMMU_FEAT_BTM;
			smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC;
			/* Arm errata 2268618, 2812531 */
			smmu->features &= ~ARM_SMMU_FEAT_NESTING;
			break;
		}
		break;
	}
}

static void arm_smmu_get_httu(struct arm_smmu_device *smmu, u32 reg)
{
	u32 fw_features = smmu->features & (ARM_SMMU_FEAT_HA | ARM_SMMU_FEAT_HD);
	u32 hw_features = 0;

	switch (FIELD_GET(IDR0_HTTU, reg)) {
	case IDR0_HTTU_ACCESS_DIRTY:
		hw_features |= ARM_SMMU_FEAT_HD;
		fallthrough;
	case IDR0_HTTU_ACCESS:
		hw_features |= ARM_SMMU_FEAT_HA;
	}

	if (smmu->dev->of_node)
		smmu->features |= hw_features;
	else if (hw_features != fw_features)
		/* ACPI IORT sets the HTTU bits */
		dev_warn(smmu->dev,
			 "IDR0.HTTU features(0x%x) overridden by FW configuration (0x%x)\n",
			 hw_features, fw_features);
}

static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (FIELD_GET(IDR0_TTENDIAN, reg)) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI) {
		smmu->features |= ARM_SMMU_FEAT_MSI;
		if (coherent && !disable_msipolling)
			smmu->options |= ARM_SMMU_OPT_MSIPOLL;
	}

	if (reg & IDR0_HYP) {
		smmu->features |= ARM_SMMU_FEAT_HYP;
		if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
			smmu->features |= ARM_SMMU_FEAT_E2H;
	}

	arm_smmu_get_httu(smmu, reg);

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
			 str_true_false(coherent));

	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		fallthrough;
	case IDR0_STALL_MODEL_STALL:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (FIELD_GET(IDR0_TTF, reg)) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		fallthrough;
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	if (reg & IDR1_ATTR_TYPES_OVR)
		smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR;

	/* Queue sizes, capped to ensure natural alignment */
	smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_CMDQS, reg));
	if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
		/*
		 * We don't support splitting up batches, so one batch of
		 * commands plus an extra sync needs to fit inside the command
		 * queue. There's also no way we can handle the weird alignment
		 * restrictions on the base pointer for a unit-length queue.
		 */
		dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
			CMDQ_BATCH_ENTRIES);
		return -ENXIO;
	}

	smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_EVTQS, reg));
	smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
					     FIELD_GET(IDR1_PRIQS, reg));

	/* SID/SSID sizes */
	smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
	smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
	smmu->iommu.max_pasids = 1UL << smmu->ssid_bits;

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR3 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
	if (FIELD_GET(IDR3_RIL, reg))
		smmu->features |= ARM_SMMU_FEAT_RANGE_INV;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	/* Input address size */
	if (FIELD_GET(IDR5_VAX, reg) == IDR5_VAX_52_BIT)
		smmu->features |= ARM_SMMU_FEAT_VAX;

	/* Output address size */
	switch (FIELD_GET(IDR5_OAS, reg)) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	case IDR5_OAS_52_BIT:
		smmu->oas = 52;
		smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
		break;
	default:
		dev_info(smmu->dev,
			 "unknown output address size. Truncating to 48-bit\n");
		fallthrough;
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) &&
	    (smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu->features |= ARM_SMMU_FEAT_NESTING;

	arm_smmu_device_iidr_probe(smmu);

	if (arm_smmu_sva_supported(smmu))
		smmu->features |= ARM_SMMU_FEAT_SVA;

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}
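/*
 * Firmware probing: the SMMU is described either by an ACPI IORT node or by
 * a devicetree node. Either description may override what the ID registers
 * advertise (e.g. coherency and HTTU), so it is parsed before the hardware
 * probe above is trusted.
 */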
#ifdef CONFIG_ACPI
#ifdef CONFIG_TEGRA241_CMDQV
static void acpi_smmu_dsdt_probe_tegra241_cmdqv(struct acpi_iort_node *node,
						struct arm_smmu_device *smmu)
{
	const char *uid = kasprintf(GFP_KERNEL, "%u", node->identifier);
	struct acpi_device *adev;

	/* Look for an NVDA200C node whose _UID matches the SMMU node ID */
	adev = acpi_dev_get_first_match_dev("NVDA200C", uid, -1);
	if (adev) {
		/* Tegra241 CMDQV driver is responsible for put_device() */
		smmu->impl_dev = &adev->dev;
		smmu->options |= ARM_SMMU_OPT_TEGRA241_CMDQV;
		dev_info(smmu->dev, "found companion CMDQV device: %s\n",
			 dev_name(smmu->impl_dev));
	}
	kfree(uid);
}
#else
static void acpi_smmu_dsdt_probe_tegra241_cmdqv(struct acpi_iort_node *node,
						struct arm_smmu_device *smmu)
{
}
#endif

static int acpi_smmu_iort_probe_model(struct acpi_iort_node *node,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu =
		(struct acpi_iort_smmu_v3 *)node->node_data;

	switch (iort_smmu->model) {
	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
		break;
	case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
		break;
	case ACPI_IORT_SMMU_V3_GENERIC:
		/*
		 * Tegra241 implementation stores its SMMU options and impl_dev
		 * in DSDT. Thus, go through the ACPI tables unconditionally.
		 */
		acpi_smmu_dsdt_probe_tegra241_cmdqv(node, smmu);
		break;
	}

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
	return 0;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	switch (FIELD_GET(ACPI_IORT_SMMU_V3_HTTU_OVERRIDE, iort_smmu->flags)) {
	case IDR0_HTTU_ACCESS_DIRTY:
		smmu->features |= ARM_SMMU_FEAT_HD;
		fallthrough;
	case IDR0_HTTU_ACCESS:
		smmu->features |= ARM_SMMU_FEAT_HA;
	}

	return acpi_smmu_iort_probe_model(node, smmu);
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}
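/*
 * SMMUv3 exposes two 64K register pages; implementations with the
 * PAGE0_REGS_ONLY quirk (e.g. Cavium CN99xx above) fold everything into
 * page 0, so only the first 64K is claimed and mapped for them.
 */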
static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
		return SZ_64K;
	else
		return SZ_128K;
}

static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
				      resource_size_t size)
{
	struct resource res = DEFINE_RES_MEM(start, size);

	return devm_ioremap_resource(dev, &res);
}
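/*
 * Firmware may describe reserved memory regions (RMRs) whose streams must
 * keep translating, typically for devices that are live at handover; give
 * each such SID a bypass STE before the SMMU is reset and enabled.
 */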
static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
{
	struct list_head rmr_list;
	struct iommu_resv_region *e;

	INIT_LIST_HEAD(&rmr_list);
	iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);

	list_for_each_entry(e, &rmr_list, list) {
		struct iommu_iort_rmr_data *rmr;
		int ret, i;

		rmr = container_of(e, struct iommu_iort_rmr_data, rr);
		for (i = 0; i < rmr->num_sids; i++) {
			ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
			if (ret) {
				dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
					rmr->sids[i]);
				continue;
			}

			/*
			 * STE table is not programmed to HW, see
			 * arm_smmu_initial_bypass_stes()
			 */
			arm_smmu_make_bypass_ste(smmu,
				arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
		}
	}

	iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
}

static void arm_smmu_impl_remove(void *data)
{
	struct arm_smmu_device *smmu = data;

	if (smmu->impl_ops && smmu->impl_ops->device_remove)
		smmu->impl_ops->device_remove(smmu);
}

/*
 * Probe all the compiled-in implementations. Each one checks to see if it
 * matches this HW and if so returns a devm_krealloc'd arm_smmu_device which
 * replaces the caller's. Otherwise the original is returned, or an ERR_PTR
 * on failure.
 */
static struct arm_smmu_device *arm_smmu_impl_probe(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *new_smmu = ERR_PTR(-ENODEV);
	int ret;

	if (smmu->impl_dev && (smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV))
		new_smmu = tegra241_cmdqv_probe(smmu);

	if (new_smmu == ERR_PTR(-ENODEV))
		return smmu;
	if (IS_ERR(new_smmu))
		return new_smmu;

	ret = devm_add_action_or_reset(new_smmu->dev, arm_smmu_impl_remove,
				       new_smmu);
	if (ret)
		return ERR_PTR(ret);
	return new_smmu;
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return -ENOMEM;
	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
	}
	if (ret)
		return ret;

	smmu = arm_smmu_impl_probe(smmu);
	if (IS_ERR(smmu))
		return PTR_ERR(smmu);

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	if (resource_size(res) < arm_smmu_resource_size(smmu)) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	/*
	 * Don't map the IMPLEMENTATION DEFINED regions, since they may contain
	 * the PMCG registers which are reserved by the PMU driver.
	 */
	smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	if (arm_smmu_resource_size(smmu) > SZ_64K) {
		smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
					       ARM_SMMU_REG_SZ);
		if (IS_ERR(smmu->page1))
			return PTR_ERR(smmu->page1);
	} else {
		smmu->page1 = smmu->base;
	}

	/* Interrupt lines */

	irq = platform_get_irq_byname_optional(pdev, "combined");
	if (irq > 0)
		smmu->combined_irq = irq;
	else {
		irq = platform_get_irq_byname_optional(pdev, "eventq");
		if (irq > 0)
			smmu->evtq.q.irq = irq;

		irq = platform_get_irq_byname_optional(pdev, "priq");
		if (irq > 0)
			smmu->priq.q.irq = irq;

		irq = platform_get_irq_byname_optional(pdev, "gerror");
		if (irq > 0)
			smmu->gerr_irq = irq;
	}
	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		goto err_free_iopf;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Check for RMRs and install bypass STEs if any */
	arm_smmu_rmr_install_bypass_ste(smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu);
	if (ret)
		goto err_disable;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		goto err_disable;

	ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		goto err_free_sysfs;
	}

	return 0;

err_free_sysfs:
	iommu_device_sysfs_remove(&smmu->iommu);
err_disable:
	arm_smmu_device_disable(smmu);
err_free_iopf:
	iopf_queue_free(smmu->evtq.iopf);
	return ret;
}

static void arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);
	arm_smmu_device_disable(smmu);
	iopf_queue_free(smmu->evtq.iopf);
	ida_destroy(&smmu->vmid_map);
}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
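/*
 * SVA may still have mmu_notifier callbacks in flight when the driver is
 * unregistered; synchronize with them before the module text can go away.
 */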
static void arm_smmu_driver_unregister(struct platform_driver *drv)
{
	arm_smmu_sva_notifier_synchronize();
	platform_driver_unregister(drv);
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu-v3",
		.of_match_table		= arm_smmu_of_match,
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_driver(arm_smmu_driver, platform_driver_register,
	      arm_smmu_driver_unregister);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
MODULE_ALIAS("platform:arm-smmu-v3");
MODULE_LICENSE("GPL v2");