// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2025 Advanced Micro Devices, Inc.
 */

#define dev_fmt(fmt)	"AMD-Vi: " fmt

#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "amd_iommu.h"

static const struct iommu_domain_ops nested_domain_ops;

static inline struct nested_domain *to_ndomain(struct iommu_domain *dom)
{
	return container_of(dom, struct nested_domain, domain);
}

/*
 * Validate the guest DTE to make sure that the configuration of the host
 * (v1) and guest (v2) page tables is valid when allocating a nested domain.
 */
static int validate_gdte_nested(struct iommu_hwpt_amd_guest *gdte)
{
	u32 gpt_level = FIELD_GET(DTE_GPT_LEVEL_MASK, gdte->dte[2]);

	/* Must be zero: Mode, Host-TRP */
	if (FIELD_GET(DTE_MODE_MASK, gdte->dte[0]) != 0 ||
	    FIELD_GET(DTE_HOST_TRP, gdte->dte[0]) != 0)
		return -EINVAL;

	/* GCR3 TRP must be non-zero if both V and GV are set */
	if (FIELD_GET(DTE_FLAG_V, gdte->dte[0]) == 1 &&
	    FIELD_GET(DTE_FLAG_GV, gdte->dte[0]) == 1 &&
	    FIELD_GET(DTE_GCR3_14_12, gdte->dte[0]) == 0 &&
	    FIELD_GET(DTE_GCR3_30_15, gdte->dte[1]) == 0 &&
	    FIELD_GET(DTE_GCR3_51_31, gdte->dte[1]) == 0)
		return -EINVAL;

	/* Valid Guest Paging Mode values are 0 (4-level) and 1 (5-level) */
	if (gpt_level != GUEST_PGTABLE_4_LEVEL &&
	    gpt_level != GUEST_PGTABLE_5_LEVEL)
		return -EINVAL;

	/* GLX selects the number of GCR3 table levels; GLX = 3 is reserved */
	if (FIELD_GET(DTE_GLX, gdte->dte[0]) == 3)
		return -EINVAL;

	/*
	 * Check the host capability before accepting a 5-level
	 * Guest Paging Mode.
	 */
	if (gpt_level == GUEST_PGTABLE_5_LEVEL &&
	    amd_iommu_gpt_level < PAGE_MODE_5_LEVEL)
		return -EOPNOTSUPP;

	return 0;
}
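
/*
 * Illustrative sketch only, not used by the driver: compose a guest DTE
 * that would pass validate_gdte_nested() above, built with the same field
 * masks. The inputs are hypothetical; in practice the guest composes this
 * DTE and the host merely validates it. Assumes @gcr3_tbl_pa is the
 * non-zero, 4K-aligned address of a single-level GCR3 table (GLX = 0).
 */
static void __maybe_unused example_build_gdte(struct iommu_hwpt_amd_guest *gdte,
					      u64 gcr3_tbl_pa, u16 domid)
{
	memset(gdte, 0, sizeof(*gdte));

	/* V=1 and GV=1; Mode and Host-TRP stay zero, as required above */
	gdte->dte[0] = DTE_FLAG_V | DTE_FLAG_GV |
		       FIELD_PREP(DTE_GCR3_14_12, gcr3_tbl_pa >> 12);

	/* The GCR3 table root pointer is split across DTE words 0 and 1 */
	gdte->dte[1] = FIELD_PREP(DTE_GCR3_30_15, gcr3_tbl_pa >> 15) |
		       FIELD_PREP(DTE_GCR3_51_31, gcr3_tbl_pa >> 31) |
		       FIELD_PREP(DTE_DOMID_MASK, domid);

	/* 4-level guest page table (Guest Paging Mode 0) */
	gdte->dte[2] = FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
}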

/*
 * This function is assigned to struct iommufd_viommu_ops.alloc_domain_nested()
 * during the call to struct iommu_ops.viommu_init().
 */
struct iommu_domain *
amd_iommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
			      const struct iommu_user_data *user_data)
{
	int ret;
	struct nested_domain *ndom;
	struct amd_iommu_viommu *aviommu =
		container_of(viommu, struct amd_iommu_viommu, core);

	if (user_data->type != IOMMU_HWPT_DATA_AMD_GUEST)
		return ERR_PTR(-EOPNOTSUPP);

	ndom = kzalloc(sizeof(*ndom), GFP_KERNEL);
	if (!ndom)
		return ERR_PTR(-ENOMEM);

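	/*
	 * "dte" names the last field of struct iommu_hwpt_amd_guest; the
	 * helper uses it to derive the minimum structure size userspace
	 * must have provided for the copy to succeed.
	 */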
	ret = iommu_copy_struct_from_user(&ndom->gdte, user_data,
					  IOMMU_HWPT_DATA_AMD_GUEST,
					  dte);
	if (ret)
		goto out_err;

	ret = validate_gdte_nested(&ndom->gdte);
	if (ret)
		goto out_err;

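	/* The guest-assigned domain ID is carried in guest DTE word 1 */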
	ndom->gdom_id = FIELD_GET(DTE_DOMID_MASK, ndom->gdte.dte[1]);
	ndom->domain.ops = &nested_domain_ops;
	ndom->domain.type = IOMMU_DOMAIN_NESTED;
	ndom->viommu = aviommu;

	return &ndom->domain;
out_err:
	kfree(ndom);
	return ERR_PTR(ret);
}
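
/*
 * Hedged sketch of the wiring described in the comment above
 * amd_iommu_alloc_domain_nested(): the driver's viommu_init()
 * implementation is expected to install an ops table along these lines.
 * The table name here is hypothetical; the real one is defined next to
 * viommu_init() elsewhere in the driver, alongside its other callbacks:
 *
 *	static const struct iommufd_viommu_ops example_viommu_ops = {
 *		.alloc_domain_nested	= amd_iommu_alloc_domain_nested,
 *	};
 */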

static void nested_domain_free(struct iommu_domain *dom)
{
	struct nested_domain *ndom = to_ndomain(dom);

	kfree(ndom);
}

static const struct iommu_domain_ops nested_domain_ops = {
	.free = nested_domain_free,
};