// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */

#include <uapi/linux/iommufd.h>

#include "arm-smmu-v3.h"

void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type)
{
        struct arm_smmu_master *master = dev_iommu_priv_get(dev);
        struct iommu_hw_info_arm_smmuv3 *info;
        u32 __iomem *base_idr;
        unsigned int i;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);

        base_idr = master->smmu->base + ARM_SMMU_IDR0;
        for (i = 0; i <= 5; i++)
                info->idr[i] = readl_relaxed(base_idr + i);
        info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR);
        info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR);

        *length = sizeof(*info);
        *type = IOMMU_HW_INFO_TYPE_ARM_SMMUV3;

        return info;
}

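/*
 * Build the physical STE for a vSTE that points at a guest CD table: start
 * from the host-owned S2 STE and overlay the S1 fields that userspace
 * provided, while the Config field is forced to the nested (S1 + S2) value.
 */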
static void arm_smmu_make_nested_cd_table_ste(
        struct arm_smmu_ste *target, struct arm_smmu_master *master,
        struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
        arm_smmu_make_s2_domain_ste(
                target, master, nested_domain->vsmmu->s2_parent, ats_enabled);

        target->data[0] = cpu_to_le64(STRTAB_STE_0_V |
                                      FIELD_PREP(STRTAB_STE_0_CFG,
                                                 STRTAB_STE_0_CFG_NESTED));
        target->data[0] |= nested_domain->ste[0] &
                           ~cpu_to_le64(STRTAB_STE_0_CFG);
        target->data[1] |= nested_domain->ste[1];
        /* Merge events for DoS mitigations on eventq */
        target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
}

/*
 * Create a physical STE from the virtual STE that userspace provided when it
 * created the nested domain. Using the vSTE, userspace can request:
 * - Non-valid STE
 * - Abort STE
 * - Bypass STE (install the S2, no CD table)
 * - CD table STE (install the S2 and the userspace CD table)
 */
static void arm_smmu_make_nested_domain_ste(
        struct arm_smmu_ste *target, struct arm_smmu_master *master,
        struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
        unsigned int cfg =
                FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0]));

        /*
         * Userspace can request a non-valid STE through the nesting interface.
         * We relay that into an abort physical STE with the intention that
         * C_BAD_STE for this SID can be generated to userspace.
         */
        if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V)))
                cfg = STRTAB_STE_0_CFG_ABORT;

        switch (cfg) {
        case STRTAB_STE_0_CFG_S1_TRANS:
                arm_smmu_make_nested_cd_table_ste(target, master, nested_domain,
                                                  ats_enabled);
                break;
        case STRTAB_STE_0_CFG_BYPASS:
                arm_smmu_make_s2_domain_ste(target, master,
                                            nested_domain->vsmmu->s2_parent,
                                            ats_enabled);
                break;
        case STRTAB_STE_0_CFG_ABORT:
        default:
                arm_smmu_make_abort_ste(target);
                break;
        }
}

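/*
 * A vmaster ties a master to its vSMMU instance and the virtual SID that
 * userspace assigned to the device, so that events on this master can later
 * be forwarded to the VM. It is allocated in the prepare phase and published
 * under streams_mutex in the commit phase, replacing and freeing any previous
 * vmaster.
 */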
int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
                                    struct arm_smmu_nested_domain *nested_domain)
{
        struct arm_smmu_vmaster *vmaster;
        unsigned long vsid;
        int ret;

        iommu_group_mutex_assert(state->master->dev);

        ret = iommufd_viommu_get_vdev_id(&nested_domain->vsmmu->core,
                                         state->master->dev, &vsid);
        if (ret)
                return ret;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (!vmaster)
                return -ENOMEM;
        vmaster->vsmmu = nested_domain->vsmmu;
        vmaster->vsid = vsid;
        state->vmaster = vmaster;

        return 0;
}

void arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state)
{
        struct arm_smmu_master *master = state->master;

        mutex_lock(&master->smmu->streams_mutex);
        kfree(master->vmaster);
        master->vmaster = state->vmaster;
        mutex_unlock(&master->smmu->streams_mutex);
}

void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master)
{
        struct arm_smmu_attach_state state = { .master = master };

        arm_smmu_attach_commit_vmaster(&state);
}

static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
                                      struct device *dev)
{
        struct arm_smmu_nested_domain *nested_domain =
                to_smmu_nested_domain(domain);
        struct arm_smmu_master *master = dev_iommu_priv_get(dev);
        struct arm_smmu_attach_state state = {
                .master = master,
                .old_domain = iommu_get_domain_for_dev(dev),
                .ssid = IOMMU_NO_PASID,
        };
        struct arm_smmu_ste ste;
        int ret;

        if (nested_domain->vsmmu->smmu != master->smmu)
                return -EINVAL;
        if (arm_smmu_ssids_in_use(&master->cd_table))
                return -EBUSY;

        mutex_lock(&arm_smmu_asid_lock);
        /*
         * The VM has to control the actual ATS state at the PCI device because
         * we forward the invalidations directly from the VM. If the VM doesn't
         * think ATS is on it will not generate ATC flushes and the ATC will
         * become incoherent. Since we can't access the actual virtual PCI ATS
         * config bit here, base this off the EATS value in the STE. If the
         * EATS is set then the VM must generate ATC flushes.
         */
        state.disable_ats = !nested_domain->enable_ats;
        ret = arm_smmu_attach_prepare(&state, domain);
        if (ret) {
                mutex_unlock(&arm_smmu_asid_lock);
                return ret;
        }

        arm_smmu_make_nested_domain_ste(&ste, master, nested_domain,
                                        state.ats_enabled);
        arm_smmu_install_ste_for_dev(master, &ste);
        arm_smmu_attach_commit(&state);
        mutex_unlock(&arm_smmu_asid_lock);
        return 0;
}

static void arm_smmu_domain_nested_free(struct iommu_domain *domain)
{
        kfree(to_smmu_nested_domain(domain));
}

static const struct iommu_domain_ops arm_smmu_nested_ops = {
        .attach_dev = arm_smmu_attach_dev_nested,
        .free = arm_smmu_domain_nested_free,
};

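/*
 * Sanity check the vSTE copied from userspace. A non-valid vSTE is normalized
 * to all zeros; otherwise only the abort, bypass and S1-translate
 * configurations are accepted, and only the bits covered by
 * STRTAB_STE_*_NESTING_ALLOWED may be set. The EATS field is consumed here to
 * decide whether ATS is enabled and then cleared; the physical EATS value is
 * chosen when the real STE is built.
 */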
static int arm_smmu_validate_vste(struct iommu_hwpt_arm_smmuv3 *arg,
                                  bool *enable_ats)
{
        unsigned int eats;
        unsigned int cfg;

        if (!(arg->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) {
                memset(arg->ste, 0, sizeof(arg->ste));
                return 0;
        }

        /* EIO is reserved for invalid STE data. */
        if ((arg->ste[0] & ~STRTAB_STE_0_NESTING_ALLOWED) ||
            (arg->ste[1] & ~STRTAB_STE_1_NESTING_ALLOWED))
                return -EIO;

        cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(arg->ste[0]));
        if (cfg != STRTAB_STE_0_CFG_ABORT && cfg != STRTAB_STE_0_CFG_BYPASS &&
            cfg != STRTAB_STE_0_CFG_S1_TRANS)
                return -EIO;

        /*
         * Only Full ATS or ATS UR is supported.
         * The EATS field will be set by arm_smmu_make_nested_domain_ste().
         */
        eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1]));
        arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS);
        if (eats != STRTAB_STE_1_EATS_ABT && eats != STRTAB_STE_1_EATS_TRANS)
                return -EIO;

        if (cfg == STRTAB_STE_0_CFG_S1_TRANS)
                *enable_ats = (eats == STRTAB_STE_1_EATS_TRANS);
        return 0;
}

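/*
 * Allocate an IOMMU_DOMAIN_NESTED for IOMMU_HWPT_DATA_ARM_SMMUV3. The vSTE
 * from userspace is validated and stored in the domain; the physical STE is
 * only built later, at attach time, once the S2 parent and the ATS state of
 * the master are known.
 */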
static struct iommu_domain *
arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
                              const struct iommu_user_data *user_data)
{
        struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
        struct arm_smmu_nested_domain *nested_domain;
        struct iommu_hwpt_arm_smmuv3 arg;
        bool enable_ats = false;
        int ret;

        if (flags)
                return ERR_PTR(-EOPNOTSUPP);

        ret = iommu_copy_struct_from_user(&arg, user_data,
                                          IOMMU_HWPT_DATA_ARM_SMMUV3, ste);
        if (ret)
                return ERR_PTR(ret);

        ret = arm_smmu_validate_vste(&arg, &enable_ats);
        if (ret)
                return ERR_PTR(ret);

        nested_domain = kzalloc(sizeof(*nested_domain), GFP_KERNEL_ACCOUNT);
        if (!nested_domain)
                return ERR_PTR(-ENOMEM);

        nested_domain->domain.type = IOMMU_DOMAIN_NESTED;
        nested_domain->domain.ops = &arm_smmu_nested_ops;
        nested_domain->enable_ats = enable_ats;
        nested_domain->vsmmu = vsmmu;
        nested_domain->ste[0] = arg.ste[0];
        nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS);

        return &nested_domain->domain;
}

static int arm_vsmmu_vsid_to_sid(struct arm_vsmmu *vsmmu, u32 vsid, u32 *sid)
{
        struct arm_smmu_master *master;
        struct device *dev;
        int ret = 0;

        xa_lock(&vsmmu->core.vdevs);
        dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);
        if (!dev) {
                ret = -EIO;
                goto unlock;
        }
        master = dev_iommu_priv_get(dev);

        /* At this moment, iommufd only supports PCI devices with a single SID */
        if (sid)
                *sid = master->streams[0].id;
unlock:
        xa_unlock(&vsmmu->core.vdevs);
        return ret;
}

/* This is basically iommu_viommu_arm_smmuv3_invalidate in u64 for conversion */
struct arm_vsmmu_invalidation_cmd {
        union {
                u64 cmd[2];
                struct iommu_viommu_arm_smmuv3_invalidate ucmd;
        };
};

/*
 * Convert, in place, the raw invalidation command into an internal format that
 * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are
 * stored in CPU endian.
 *
 * Enforce the VMID or SID on the command.
 */
static int arm_vsmmu_convert_user_cmd(struct arm_vsmmu *vsmmu,
                                      struct arm_vsmmu_invalidation_cmd *cmd)
{
        /* Commands are le64 stored in u64 */
        cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
        cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);

        switch (cmd->cmd[0] & CMDQ_0_OP) {
        case CMDQ_OP_TLBI_NSNH_ALL:
                /* Convert to NH_ALL */
                cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
                              FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
                cmd->cmd[1] = 0;
                break;
        case CMDQ_OP_TLBI_NH_VA:
        case CMDQ_OP_TLBI_NH_VAA:
        case CMDQ_OP_TLBI_NH_ALL:
        case CMDQ_OP_TLBI_NH_ASID:
                cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
                cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
                break;
        case CMDQ_OP_ATC_INV:
        case CMDQ_OP_CFGI_CD:
        case CMDQ_OP_CFGI_CD_ALL: {
                u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);

                if (arm_vsmmu_vsid_to_sid(vsmmu, vsid, &sid))
                        return -EIO;
                cmd->cmd[0] &= ~CMDQ_CFGI_0_SID;
                cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid);
                break;
        }
        default:
                return -EIO;
        }
        return 0;
}

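/*
 * Process a batch of invalidation commands from the guest. Each command is
 * converted in place (its VMID and SID fields are rewritten to the host
 * values) and the commands are issued to the main command queue in batches.
 * array->entry_num is updated to the number of commands actually consumed so
 * a failure reports how far the batch progressed.
 */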
static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
                                      struct iommu_user_data_array *array)
{
        struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
        struct arm_smmu_device *smmu = vsmmu->smmu;
        struct arm_vsmmu_invalidation_cmd *last;
        struct arm_vsmmu_invalidation_cmd *cmds;
        struct arm_vsmmu_invalidation_cmd *cur;
        struct arm_vsmmu_invalidation_cmd *end;
        int ret;

        cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
        if (!cmds)
                return -ENOMEM;
        cur = cmds;
        end = cmds + array->entry_num;

        static_assert(sizeof(*cmds) == 2 * sizeof(u64));
        ret = iommu_copy_struct_from_full_user_array(
                cmds, sizeof(*cmds), array,
                IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3);
        if (ret)
                goto out;

        last = cmds;
        while (cur != end) {
                ret = arm_vsmmu_convert_user_cmd(vsmmu, cur);
                if (ret)
                        goto out;

                /* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */
                cur++;
                if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1)
                        continue;

                /* FIXME always uses the main cmdq rather than trying to group by type */
                ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd,
                                                  cur - last, true);
                if (ret) {
                        cur--;
                        goto out;
                }
                last = cur;
        }
out:
        array->entry_num = cur - cmds;
        kfree(cmds);
        return ret;
}

static const struct iommufd_viommu_ops arm_vsmmu_ops = {
        .alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
        .cache_invalidate = arm_vsmmu_cache_invalidate,
};

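/*
 * Entry point for IOMMU_VIOMMU_TYPE_ARM_SMMUV3. The vSMMU shares the VMID of
 * its S2 parent domain and requires a nesting-capable SMMU that can keep VM
 * DMA coherent (CANWBS or S2FWB), as checked below.
 */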
struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
                                       struct iommu_domain *parent,
                                       struct iommufd_ctx *ictx,
                                       unsigned int viommu_type)
{
        struct arm_smmu_device *smmu =
                iommu_get_iommu_dev(dev, struct arm_smmu_device, iommu);
        struct arm_smmu_master *master = dev_iommu_priv_get(dev);
        struct arm_smmu_domain *s2_parent = to_smmu_domain(parent);
        struct arm_vsmmu *vsmmu;

        if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
                return ERR_PTR(-EOPNOTSUPP);

        if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
                return ERR_PTR(-EOPNOTSUPP);

        if (s2_parent->smmu != master->smmu)
                return ERR_PTR(-EINVAL);

        /*
         * FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW
         * defect is needed to determine if arm_vsmmu_cache_invalidate() needs
         * any change to remove this.
         */
        if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
                return ERR_PTR(-EOPNOTSUPP);

        /*
         * Must support some way to prevent the VM from bypassing the cache
         * because VFIO currently does not do any cache maintenance. canwbs
         * indicates the device is fully coherent and no cache maintenance is
         * ever required, even for PCI No-Snoop. S2FWB means the S1 can't make
         * things non-coherent using the memattr, but No-Snoop behavior is not
         * affected.
         */
        if (!arm_smmu_master_canwbs(master) &&
            !(smmu->features & ARM_SMMU_FEAT_S2FWB))
                return ERR_PTR(-EOPNOTSUPP);

        vsmmu = iommufd_viommu_alloc(ictx, struct arm_vsmmu, core,
                                     &arm_vsmmu_ops);
        if (IS_ERR(vsmmu))
                return ERR_CAST(vsmmu);

        vsmmu->smmu = smmu;
        vsmmu->s2_parent = s2_parent;
        /* FIXME Move VMID allocation from the S2 domain allocation to here */
        vsmmu->vmid = s2_parent->s2_cfg.vmid;

        return &vsmmu->core;
}

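/*
 * Forward an event queue entry to the VM. The physical StreamID in the event
 * record is replaced with the virtual SID before the record is queued on the
 * vSMMU's vEVENTQ.
 */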
int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt)
{
        struct iommu_vevent_arm_smmuv3 vevt;
        int i;

        lockdep_assert_held(&vmaster->vsmmu->smmu->streams_mutex);

        vevt.evt[0] = cpu_to_le64((evt[0] & ~EVTQ_0_SID) |
                                  FIELD_PREP(EVTQ_0_SID, vmaster->vsid));
        for (i = 1; i < EVTQ_ENT_DWORDS; i++)
                vevt.evt[i] = cpu_to_le64(evt[i]);

        return iommufd_viommu_report_event(&vmaster->vsmmu->core,
                                           IOMMU_VEVENTQ_TYPE_ARM_SMMUV3, &vevt,
                                           sizeof(vevt));
}

MODULE_IMPORT_NS("IOMMUFD");