xref: /linux/drivers/iommu/intel/nested.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
// SPDX-License-Identifier: GPL-2.0
/*
 * nested.c - nested mode translation support
 *
 * Copyright (C) 2023 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 *         Jacob Pan <jacob.jun.pan@linux.intel.com>
 *         Yi Liu <yi.l.liu@intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

#include "iommu.h"
#include "pasid.h"

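/*
 * Attach @dev to the nested domain @domain: verify that the parent
 * stage-2 domain is compatible with the device's IOMMU, bind the
 * domain to that IOMMU, and program the device's IOMMU_NO_PASID
 * entry for nested translation.
 */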
static int intel_nested_attach_dev(struct iommu_domain *domain,
				   struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu = info->iommu;
	unsigned long flags;
	int ret = 0;

	if (info->domain)
		device_block_translation(dev);

	if (iommu->agaw < dmar_domain->s2_domain->agaw) {
		dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
		return -ENODEV;
	}

	/*
	 * A stage-1 domain cannot work alone; it is nested on an s2_domain.
	 * The s2_domain will be used in nested translation, hence it must
	 * be compatible with this IOMMU.
	 */
	ret = prepare_domain_attach_device(&dmar_domain->s2_domain->domain, dev);
	if (ret) {
		dev_err_ratelimited(dev, "s2 domain is not compatible\n");
		return ret;
	}

	ret = domain_attach_iommu(dmar_domain, iommu);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to attach domain to iommu\n");
		return ret;
	}

	ret = intel_pasid_setup_nested(iommu, dev,
				       IOMMU_NO_PASID, dmar_domain);
	if (ret) {
		domain_detach_iommu(dmar_domain, iommu);
		dev_err_ratelimited(dev, "Failed to setup pasid entry\n");
		return ret;
	}

	info->domain = dmar_domain;
	spin_lock_irqsave(&dmar_domain->lock, flags);
	list_add(&info->link, &dmar_domain->devices);
	spin_unlock_irqrestore(&dmar_domain->lock, flags);

	return 0;
}

static void intel_nested_domain_free(struct iommu_domain *domain)
{
	kfree(to_dmar_domain(domain));
}

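/*
 * Domain ops for nested domains. Mappings live in the guest-owned
 * stage-1 page table and the parent stage-2 domain, so only attach
 * and free are implemented here.
 */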
static const struct iommu_domain_ops intel_nested_domain_ops = {
	.attach_dev		= intel_nested_attach_dev,
	.free			= intel_nested_domain_free,
};

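/*
 * Allocate a nested (stage-1) domain on top of @parent, which must be a
 * stage-2 domain created as a nested parent. @user_data carries the
 * guest stage-1 configuration (struct iommu_hwpt_vtd_s1), including the
 * stage-1 page table address.
 */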
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
					       const struct iommu_user_data *user_data)
{
	struct dmar_domain *s2_domain = to_dmar_domain(parent);
	struct iommu_hwpt_vtd_s1 vtd;
	struct dmar_domain *domain;
	int ret;

	/* Must be a nested domain */
	if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->ops != intel_iommu_ops.default_domain_ops ||
	    !s2_domain->nested_parent)
		return ERR_PTR(-EINVAL);

	ret = iommu_copy_struct_from_user(&vtd, user_data,
					  IOMMU_HWPT_DATA_VTD_S1, __reserved);
	if (ret)
		return ERR_PTR(ret);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL_ACCOUNT);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->use_first_level = true;
	domain->s2_domain = s2_domain;
	domain->s1_pgtbl = vtd.pgtbl_addr;
	domain->s1_cfg = vtd;
	domain->domain.ops = &intel_nested_domain_ops;
	domain->domain.type = IOMMU_DOMAIN_NESTED;
	INIT_LIST_HEAD(&domain->devices);
	INIT_LIST_HEAD(&domain->dev_pasids);
	spin_lock_init(&domain->lock);
	xa_init(&domain->iommu_array);

	return &domain->domain;
}