Lines matching "ats-supported"
1 // SPDX-License-Identifier: GPL-2.0
8 #include "arm-smmu-v3.h"
19 return ERR_PTR(-ENOMEM);
21 base_idr = master->smmu->base + ARM_SMMU_IDR0;
23 info->idr[i] = readl_relaxed(base_idr + i);
24 info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR);
25 info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR);
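
The three register reads above fill the iommu_hw_info_arm_smmuv3 report (IDR0-IDR5, IIDR, AIDR) that is handed back to userspace. Below is a minimal userspace sketch of fetching it through IOMMU_GET_HW_INFO; the structure and constant names come from the iommufd uAPI headers, while the helper itself is hypothetical.

/* Hypothetical userspace helper, not part of the file above. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int get_smmu_idrs(int iommufd, __u32 dev_id,
                         struct iommu_hw_info_arm_smmuv3 *out)
{
        struct iommu_hw_info cmd = {
                .size = sizeof(cmd),
                .dev_id = dev_id,              /* device bound to iommufd */
                .data_len = sizeof(*out),
                .data_uptr = (uintptr_t)out,   /* kernel copies idr[], iidr, aidr here */
        };

        if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
                return -1;
        /* out_data_type reports which layout was actually written */
        return cmd.out_data_type == IOMMU_HW_INFO_TYPE_ARM_SMMUV3 ? 0 : -1;
}
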
38 target, master, nested_domain->vsmmu->s2_parent, ats_enabled);
40 target->data[0] = cpu_to_le64(STRTAB_STE_0_V |
43 target->data[0] |= nested_domain->ste[0] &
45 target->data[1] |= nested_domain->ste[1];
47 target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
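
A short note on where the merged STE's fields come from, sketched from the fragment above together with the surrounding driver code (the elided lines force STE.Config to the S1+S2 encoding):

/*
 * word 0: V set, Config forced to the nested S1+S2 encoding, then the
 *         guest-supplied S1ContextPtr/S1Fmt/S1CDMax OR'd in with the
 *         Config bits masked out;
 * word 1: guest stage-1 attributes OR'd on top, plus MEV so similar
 *         event records may be merged;
 * remaining words: stage-2 fields taken from the host-owned S2 parent
 *         via arm_smmu_make_s2_domain_ste().
 */
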
53 * - Non-valid STE
54 * - Abort STE
55 * - Bypass STE (install the S2, no CD table)
56 * - CD table STE (install the S2 and the userspace CD table)
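
The vSTE words behind that list follow the architectural STE layout (V at bit 0, Config at bits [3:1], S1ContextPtr in word 0). A minimal VMM-side sketch of encoding the bypass and CD-table variants; the macros and helpers here are illustrative, not taken from this driver.

/* Illustrative VMM-side helpers; field layout per the SMMUv3 architecture. */
#include <stdint.h>

#define VSTE_V           (1ULL << 0)           /* STE.V */
#define VSTE_CFG_SHIFT   1                     /* STE.Config, bits [3:1] */
#define VSTE_CFG_BYPASS  0x4ULL                /* stage 1 bypass */
#define VSTE_CFG_S1      0x5ULL                /* stage 1 translate via CD table */

/* Bypass vSTE: DMA goes straight through to the host-installed S2. */
static void vste_bypass(uint64_t vste[2])
{
        vste[0] = VSTE_V | (VSTE_CFG_BYPASS << VSTE_CFG_SHIFT);
        vste[1] = 0;
}

/* CD-table vSTE: point stage 1 at a guest-owned CD table (64-byte aligned IPA). */
static void vste_cd_table(uint64_t vste[2], uint64_t cd_table_ipa)
{
        vste[0] = VSTE_V | (VSTE_CFG_S1 << VSTE_CFG_SHIFT) |
                  (cd_table_ipa & ~0x3fULL);   /* S1ContextPtr */
        vste[1] = 0;                           /* S1DSS/EATS etc. left at defaults */
}
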
63 FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0]));
66 * Userspace can request a non-valid STE through the nesting interface.
70 if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V)))
80 nested_domain->vsmmu->s2_parent,
97 iommu_group_mutex_assert(state->master->dev);
99 ret = iommufd_viommu_get_vdev_id(&nested_domain->vsmmu->core,
100 state->master->dev, &vsid);
106 return -ENOMEM;
107 vmaster->vsmmu = nested_domain->vsmmu;
108 vmaster->vsid = vsid;
109 state->vmaster = vmaster;
116 struct arm_smmu_master *master = state->master;
118 mutex_lock(&master->smmu->streams_mutex);
119 kfree(master->vmaster);
120 master->vmaster = state->vmaster;
121 mutex_unlock(&master->smmu->streams_mutex);
145 if (nested_domain->vsmmu->smmu != master->smmu)
146 return -EINVAL;
147 if (arm_smmu_ssids_in_use(&master->cd_table))
148 return -EBUSY;
152 * The VM has to control the actual ATS state at the PCI device because
154 * think ATS is on it will not generate ATC flushes and the ATC will
155 * become incoherent. Since we can't access the actual virtual PCI ATS
159 state.disable_ats = !nested_domain->enable_ats;
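
Restated as a sketch, the rule in the comment above: ATS may only stay on at the device when the guest's vSTE asked for it, because the guest is the party that issues ATC invalidations. The helper below is illustrative, not driver code.

#include <stdbool.h>

static bool host_may_enable_ats(bool master_ats_capable, bool vste_wants_ats)
{
        /*
         * A guest that believes ATS is off never sends ATC_INV, so leaving
         * ATS enabled at the device would let the ATC go incoherent.
         */
        return master_ats_capable && vste_wants_ats;
}
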
190 if (!(arg->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) {
191 memset(arg->ste, 0, sizeof(arg->ste));
196 if ((arg->ste[0] & ~STRTAB_STE_0_NESTING_ALLOWED) ||
197 (arg->ste[1] & ~STRTAB_STE_1_NESTING_ALLOWED))
198 return -EIO;
200 cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(arg->ste[0]));
203 return -EIO;
206 * Only Full ATS or ATS UR is supported
209 eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1]));
210 arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS);
212 return -EIO;
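
For reference, the architectural STE.EATS encodings that this validation distinguishes, restated as a sketch (the driver names these with STRTAB_STE_1_EATS_* constants):

#include <stdbool.h>

/* Sketch of the accepted STE.EATS values (architectural encodings). */
static bool vste_eats_allowed(unsigned int eats)
{
        return eats == 0x0 ||   /* ATS disabled: Translation Requests get UR */
               eats == 0x1;     /* full ATS */
        /* 0x2 (split-stage ATS) does not pass the check above. */
}
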
230 return ERR_PTR(-EOPNOTSUPP);
243 return ERR_PTR(-ENOMEM);
245 nested_domain->domain.type = IOMMU_DOMAIN_NESTED;
246 nested_domain->domain.ops = &arm_smmu_nested_ops;
247 nested_domain->enable_ats = enable_ats;
248 nested_domain->vsmmu = vsmmu;
249 nested_domain->ste[0] = arg.ste[0];
250 nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS);
252 return &nested_domain->domain;
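
This allocation path is reached from userspace via IOMMU_HWPT_ALLOC with the vSTE packed into the driver-specific data. A rough usage sketch; the structure and constant names are from the iommufd uAPI headers and the wrapper itself is hypothetical.

/* Hypothetical userspace helper, not part of the file above. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alloc_nested_hwpt(int iommufd, __u32 dev_id, __u32 viommu_id,
                             const struct iommu_hwpt_arm_smmuv3 *vste)
{
        struct iommu_hwpt_alloc cmd = {
                .size = sizeof(cmd),
                .dev_id = dev_id,
                .pt_id = viommu_id,     /* parent object is the vSMMU */
                .data_type = IOMMU_HWPT_DATA_ARM_SMMUV3,
                .data_len = sizeof(*vste),
                .data_uptr = (uintptr_t)vste,
        };

        if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
                return -1;
        return (int)cmd.out_hwpt_id;    /* nested HWPT to attach the device to */
}
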
261 xa_lock(&vsmmu->core.vdevs);
262 dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);
264 ret = -EIO;
271 *sid = master->streams[0].id;
273 xa_unlock(&vsmmu->core.vdevs);
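
The vSID-to-device association consumed by this lookup is created by userspace ahead of time with IOMMU_VDEVICE_ALLOC, passing the guest StreamID as virt_id. A rough sketch; the structure fields are from the iommufd uAPI headers, the wrapper is hypothetical.

/* Hypothetical userspace helper, not part of the file above. */
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int bind_vsid(int iommufd, __u32 viommu_id, __u32 dev_id, __u64 vsid)
{
        struct iommu_vdevice_alloc cmd = {
                .size = sizeof(cmd),
                .viommu_id = viommu_id, /* the vSMMU object */
                .dev_id = dev_id,       /* physical device bound to iommufd */
                .virt_id = vsid,        /* the StreamID the guest uses */
        };

        if (ioctl(iommufd, IOMMU_VDEVICE_ALLOC, &cmd))
                return -1;
        return (int)cmd.out_vdevice_id; /* handle for a later IOMMU_DESTROY */
}
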
296 cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
297 cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);
299 switch (cmd->cmd[0] & CMDQ_0_OP) {
302 cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
303 FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
304 cmd->cmd[1] = 0;
310 cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
311 cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
316 u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);
319 return -EIO;
320 cmd->cmd[0] &= ~CMDQ_CFGI_0_SID;
321 cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid);
325 return -EIO;
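
A worked example (sketch) of the rewrite done in this switch, using the architectural command layout: opcode in bits [7:0], StreamID in bits [63:32] of word 0, VMID in bits [47:32] for TLBI commands.

/*
 * Guest forwards CMD_ATC_INV (opcode 0x40) for its vSID 0x5:
 *
 *      cmd[0] = (0x5ULL << 32) | 0x40;
 *
 * After conversion, the vSID has been replaced with the physical StreamID
 * found through the vDEVICE mapping, while TLBI commands instead get the
 * host-owned VMID forced into word 0 so a guest can never invalidate
 * another VM's translations.
 */
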
334 struct arm_smmu_device *smmu = vsmmu->smmu;
341 cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
343 return -ENOMEM;
345 end = cmds + array->entry_num;
362 if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1)
366 ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd,
367 cur - last, true);
369 cur--;
375 array->entry_num = cur - cmds;
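
A short note on the loop above, as a sketch:

/*
 * Each user command is converted in place, then issued to the hardware
 * command queue in runs capped by CMDQ_BATCH_ENTRIES plus one final
 * partial run; on any failure the walk stops early so array->entry_num
 * reports how many entries were actually handled.
 */
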
397 return ERR_PTR(-EOPNOTSUPP);
399 if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
400 return ERR_PTR(-EOPNOTSUPP);
402 if (s2_parent->smmu != master->smmu)
403 return ERR_PTR(-EINVAL);
410 if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
411 return ERR_PTR(-EOPNOTSUPP);
417 * ever required, even for PCI No-Snoop. S2FWB means the S1 can't make
418 * things non-coherent using the memattr, but No-Snoop behavior is not
422 !(smmu->features & ARM_SMMU_FEAT_S2FWB))
423 return ERR_PTR(-EOPNOTSUPP);
430 vsmmu->smmu = smmu;
431 vsmmu->s2_parent = s2_parent;
433 vsmmu->vmid = s2_parent->s2_cfg.vmid;
435 return &vsmmu->core;
443 lockdep_assert_held(&vmaster->vsmmu->smmu->streams_mutex);
446 FIELD_PREP(EVTQ_0_SID, vmaster->vsid));
450 return iommufd_viommu_report_event(&vmaster->vsmmu->core,
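
The report path swaps the physical StreamID for the registered vSID before handing the record to the vEVENTQ. Restated as a sketch from the visible fragment (EVTQ_0_SID is the StreamID field in event word 0):

/*
 *      evt0 = (evt0 & ~EVTQ_0_SID) | FIELD_PREP(EVTQ_0_SID, vmaster->vsid);
 *
 * so the guest sees faults and other events in its own StreamID space,
 * and the record is then queued to the vIOMMU's event queue for userspace
 * to deliver into the VM.
 */
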