// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */

#include <uapi/linux/iommufd.h>

#include "arm-smmu-v3.h"

void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct iommu_hw_info_arm_smmuv3 *info;
	u32 __iomem *base_idr;
	unsigned int i;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	base_idr = master->smmu->base + ARM_SMMU_IDR0;
	for (i = 0; i <= 5; i++)
		info->idr[i] = readl_relaxed(base_idr + i);
	info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR);
	info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR);

	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_ARM_SMMUV3;

	return info;
}
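/*
 * Usage sketch (illustrative only, not part of this driver): userspace
 * fetches the register snapshot above through the iommufd IOMMU_GET_HW_INFO
 * ioctl. "iommufd" and "dev_id" are assumed to be an open /dev/iommu fd and
 * a device already bound via VFIO's iommufd cdev interface;
 * emulate_vsmmu_idr() is a hypothetical VMM helper:
 *
 *	struct iommu_hw_info_arm_smmuv3 smmu = {};
 *	struct iommu_hw_info cmd = {
 *		.size = sizeof(cmd),
 *		.dev_id = dev_id,
 *		.data_len = sizeof(smmu),
 *		.data_uptr = (uint64_t)(uintptr_t)&smmu,
 *	};
 *
 *	if (!ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd) &&
 *	    cmd.out_data_type == IOMMU_HW_INFO_TYPE_ARM_SMMUV3)
 *		emulate_vsmmu_idr(smmu.idr, smmu.iidr, smmu.aidr);
 */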
static const struct iommufd_viommu_ops arm_vsmmu_ops = {
};

struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
				       struct iommu_domain *parent,
				       struct iommufd_ctx *ictx,
				       unsigned int viommu_type)
{
	struct arm_smmu_device *smmu =
		iommu_get_iommu_dev(dev, struct arm_smmu_device, iommu);
	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
	struct arm_smmu_domain *s2_parent = to_smmu_domain(parent);
	struct arm_vsmmu *vsmmu;

	if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
		return ERR_PTR(-EOPNOTSUPP);

	if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
		return ERR_PTR(-EOPNOTSUPP);

	if (s2_parent->smmu != master->smmu)
		return ERR_PTR(-EINVAL);

	/*
	 * Must support some way to prevent the VM from bypassing the cache
	 * because VFIO currently does not do any cache maintenance. canwbs
	 * indicates the device is fully coherent and no cache maintenance is
	 * ever required, even for PCI No-Snoop.
	 */
	if (!arm_smmu_master_canwbs(master))
		return ERR_PTR(-EOPNOTSUPP);

	vsmmu = iommufd_viommu_alloc(ictx, struct arm_vsmmu, core,
				     &arm_vsmmu_ops);
	if (IS_ERR(vsmmu))
		return ERR_CAST(vsmmu);

	vsmmu->smmu = smmu;
	vsmmu->s2_parent = s2_parent;
	/* FIXME Move VMID allocation from the S2 domain allocation to here */
	vsmmu->vmid = s2_parent->s2_cfg.vmid;

	return &vsmmu->core;
}
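/*
 * Usage sketch (illustrative only, not part of this driver): a VMM
 * instantiates this vSMMU object with the iommufd IOMMU_VIOMMU_ALLOC ioctl,
 * passing the bound device and a nesting-parent stage-2 HWPT allocated
 * earlier with the IOMMU_HWPT_ALLOC_NEST_PARENT flag. "iommufd", "dev_id"
 * and "s2_hwpt_id" are assumed to exist from that setup:
 *
 *	struct iommu_viommu_alloc cmd = {
 *		.size = sizeof(cmd),
 *		.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3,
 *		.dev_id = dev_id,
 *		.hwpt_id = s2_hwpt_id,
 *	};
 *
 *	if (!ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &cmd))
 *		viommu_id = cmd.out_viommu_id;
 *
 * The returned viommu_id can then be used as the parent object when the VMM
 * creates vDEVICE objects or guest-owned nested stage-1 domains.
 */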