/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 *
 * Intel VT-d Second Stage 5/4 level page table
 *
 * This is described in
 *   Section "3.7 Second-Stage Translation"
 *   Section "9.8 Second-Stage Paging Entries"
 *
 * Of the "Intel Virtualization Technology for Directed I/O Architecture
 * Specification".
 *
 * The named levels in the spec map to the pts->level as:
 *   Table/SS-PTE - 0
 *   Directory/SS-PDE - 1
 *   Directory Ptr/SS-PDPTE - 2
 *   PML4/SS-PML4E - 3
 *   PML5/SS-PML5E - 4
 */
#ifndef __GENERIC_PT_FMT_VTDSS_H
#define __GENERIC_PT_FMT_VTDSS_H

#include "defs_vtdss.h"
#include "../pt_defs.h"

#include <linux/bitfield.h>
#include <linux/container_of.h>
#include <linux/log2.h>

enum {
	PT_MAX_OUTPUT_ADDRESS_LG2 = 52,
	PT_MAX_VA_ADDRESS_LG2 = 57,
	PT_ITEM_WORD_SIZE = sizeof(u64),
	PT_MAX_TOP_LEVEL = 4,
	PT_GRANULE_LG2SZ = 12,
	PT_TABLEMEM_LG2SZ = 12,

	/* SSPTPTR is 4k aligned and limited by HAW */
	PT_TOP_PHYS_MASK = GENMASK_ULL(63, 12),
};

/* Shared descriptor bits */
enum {
	VTDSS_FMT_R = BIT(0),
	VTDSS_FMT_W = BIT(1),
	VTDSS_FMT_A = BIT(8),
	VTDSS_FMT_D = BIT(9),
	VTDSS_FMT_SNP = BIT(11),
	VTDSS_FMT_OA = GENMASK_ULL(51, 12),
};

/* PDPTE/PDE */
enum {
	VTDSS_FMT_PS = BIT(7),
};

#define common_to_vtdss_pt(common_ptr) \
	container_of_const(common_ptr, struct pt_vtdss, common)
#define to_vtdss_pt(pts) common_to_vtdss_pt((pts)->range->common)

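/*
 * A non-leaf entry holds the physical address of the next level table in the
 * OA field, stored as the address divided by the 4k table size.
 */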
static inline pt_oaddr_t vtdss_pt_table_pa(const struct pt_state *pts)
{
	return oalog2_mul(FIELD_GET(VTDSS_FMT_OA, pts->entry),
			  PT_TABLEMEM_LG2SZ);
}
#define pt_table_pa vtdss_pt_table_pa

static inline pt_oaddr_t vtdss_pt_entry_oa(const struct pt_state *pts)
{
	return oalog2_mul(FIELD_GET(VTDSS_FMT_OA, pts->entry),
			  PT_GRANULE_LG2SZ);
}
#define pt_entry_oa vtdss_pt_entry_oa

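/*
 * Leaf (OA) entries can exist at the SS-PTE, SS-PDE and SS-PDPTE levels,
 * giving 4K, 2M and 1G page sizes. Higher levels only point at lower level
 * tables.
 */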
static inline bool vtdss_pt_can_have_leaf(const struct pt_state *pts)
{
	return pts->level <= 2;
}
#define pt_can_have_leaf vtdss_pt_can_have_leaf

static inline unsigned int vtdss_pt_num_items_lg2(const struct pt_state *pts)
{
	return PT_TABLEMEM_LG2SZ - ilog2(sizeof(u64));
}
#define pt_num_items_lg2 vtdss_pt_num_items_lg2

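/*
 * Read the entry and classify it: zero is empty, a level 0 entry or an entry
 * with PS set at a level that allows leaves is an output address, anything
 * else points to a lower level table.
 */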
static inline enum pt_entry_type vtdss_pt_load_entry_raw(struct pt_state *pts)
{
	const u64 *tablep = pt_cur_table(pts, u64);
	u64 entry;

	pts->entry = entry = READ_ONCE(tablep[pts->index]);
	if (!entry)
		return PT_ENTRY_EMPTY;
	if (pts->level == 0 ||
	    (vtdss_pt_can_have_leaf(pts) && (pts->entry & VTDSS_FMT_PS)))
		return PT_ENTRY_OA;
	return PT_ENTRY_TABLE;
}
#define pt_load_entry_raw vtdss_pt_load_entry_raw

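/*
 * Write a leaf entry mapping oa using the descriptor bits carried in attrs.
 * Above level 0 the PS bit marks the entry as a page rather than a table
 * pointer.
 */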
static inline void
vtdss_pt_install_leaf_entry(struct pt_state *pts, pt_oaddr_t oa,
			    unsigned int oasz_lg2,
			    const struct pt_write_attrs *attrs)
{
	u64 *tablep = pt_cur_table(pts, u64);
	u64 entry;

	if (!pt_check_install_leaf_args(pts, oa, oasz_lg2))
		return;

	entry = FIELD_PREP(VTDSS_FMT_OA, log2_div(oa, PT_GRANULE_LG2SZ)) |
		attrs->descriptor_bits;
	if (pts->level != 0)
		entry |= VTDSS_FMT_PS;

	WRITE_ONCE(tablep[pts->index], entry);
	pts->entry = entry;
}
#define pt_install_leaf_entry vtdss_pt_install_leaf_entry

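/*
 * Install a pointer to a lower level table. Non-leaf entries are written with
 * both R and W set so that the effective permission comes from the leaf
 * entries alone.
 */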
static inline bool vtdss_pt_install_table(struct pt_state *pts,
					  pt_oaddr_t table_pa,
					  const struct pt_write_attrs *attrs)
{
	u64 entry;

	entry = VTDSS_FMT_R | VTDSS_FMT_W |
		FIELD_PREP(VTDSS_FMT_OA, log2_div(table_pa, PT_GRANULE_LG2SZ));
	return pt_table_install64(pts, entry);
}
#define pt_install_table vtdss_pt_install_table

static inline void vtdss_pt_attr_from_entry(const struct pt_state *pts,
					    struct pt_write_attrs *attrs)
{
	attrs->descriptor_bits = pts->entry &
				 (VTDSS_FMT_R | VTDSS_FMT_W | VTDSS_FMT_SNP);
}
#define pt_attr_from_entry vtdss_pt_attr_from_entry

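/*
 * Dirty tracking: when A/D support is enabled the HW sets the D bit on a
 * write through a leaf entry. The helpers below test, clear and set it.
 */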
static inline bool vtdss_pt_entry_is_write_dirty(const struct pt_state *pts)
{
	u64 *tablep = pt_cur_table(pts, u64) + pts->index;

	return READ_ONCE(*tablep) & VTDSS_FMT_D;
}
#define pt_entry_is_write_dirty vtdss_pt_entry_is_write_dirty

static inline void vtdss_pt_entry_make_write_clean(struct pt_state *pts)
{
	u64 *tablep = pt_cur_table(pts, u64) + pts->index;

	WRITE_ONCE(*tablep, READ_ONCE(*tablep) & ~(u64)VTDSS_FMT_D);
}
#define pt_entry_make_write_clean vtdss_pt_entry_make_write_clean

static inline bool vtdss_pt_entry_make_write_dirty(struct pt_state *pts)
{
	u64 *tablep = pt_cur_table(pts, u64) + pts->index;
	u64 new = pts->entry | VTDSS_FMT_D;

	return try_cmpxchg64(tablep, &pts->entry, new);
}
#define pt_entry_make_write_dirty vtdss_pt_entry_make_write_dirty

static inline unsigned int vtdss_pt_max_sw_bit(struct pt_common *common)
{
	return 10;
}
#define pt_max_sw_bit vtdss_pt_max_sw_bit

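/*
 * Map a software bit number onto bits the spec leaves Ignored in every
 * second-stage entry: SW bit 0 is bit 10, SW bits 1-9 are bits 52-60 and SW
 * bit 10 is bit 63.
 */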
static inline u64 vtdss_pt_sw_bit(unsigned int bitnr)
{
	if (__builtin_constant_p(bitnr) && bitnr > 10)
		BUILD_BUG();

	/* Bits marked Ignored in the specification */
	switch (bitnr) {
	case 0:
		return BIT(10);
	case 1 ... 9:
		return BIT_ULL((bitnr - 1) + 52);
	case 10:
		return BIT_ULL(63);
	/* Some bits in 9-3 are available in some entries */
	default:
		PT_WARN_ON(true);
		return 0;
	}
}
#define pt_sw_bit vtdss_pt_sw_bit

/* --- iommu */
#include <linux/generic_pt/iommu.h>
#include <linux/iommu.h>

#define pt_iommu_table pt_iommu_vtdss

/* The common struct is in the per-format common struct */
static inline struct pt_common *common_from_iommu(struct pt_iommu *iommu_table)
{
	return &container_of(iommu_table, struct pt_iommu_table, iommu)
			->vtdss_pt.common;
}

static inline struct pt_iommu *iommu_from_common(struct pt_common *common)
{
	return &container_of(common, struct pt_iommu_table, vtdss_pt.common)
			->iommu;
}

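/*
 * Convert the IOMMU_READ/IOMMU_WRITE protection flags into the descriptor
 * bits that will be written into leaf entries.
 */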
static inline int vtdss_pt_iommu_set_prot(struct pt_common *common,
					  struct pt_write_attrs *attrs,
					  unsigned int iommu_prot)
{
	u64 pte = 0;

	/*
	 * VTDSS does not have a present bit, so we tell if any entry is
	 * present by checking for R or W.
	 */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return -EINVAL;

	if (iommu_prot & IOMMU_READ)
		pte |= VTDSS_FMT_R;
	if (iommu_prot & IOMMU_WRITE)
		pte |= VTDSS_FMT_W;
	if (pt_feature(common, PT_FEAT_VTDSS_FORCE_COHERENCE))
		pte |= VTDSS_FMT_SNP;

	if (pt_feature(common, PT_FEAT_VTDSS_FORCE_WRITEABLE) &&
	    !(iommu_prot & IOMMU_WRITE)) {
		pr_err_ratelimited(
			"Read-only mapping is disallowed on the domain which serves as the parent in a nested configuration, due to HW errata (ERRATA_772415_SPR17)\n");
		return -EINVAL;
	}

	attrs->descriptor_bits = pte;
	return 0;
}
#define pt_iommu_set_prot vtdss_pt_iommu_set_prot

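/*
 * The starting top level selects the table depth: top_level 2, 3 and 4 give
 * 3, 4 and 5 level tables respectively, matching the level names in the
 * header comment.
 */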
static inline int vtdss_pt_iommu_fmt_init(struct pt_iommu_vtdss *iommu_table,
					  const struct pt_iommu_vtdss_cfg *cfg)
{
	struct pt_vtdss *table = &iommu_table->vtdss_pt;

	if (cfg->top_level > 4 || cfg->top_level < 2)
		return -EOPNOTSUPP;

	pt_top_set_level(&table->common, cfg->top_level);
	return 0;
}
#define pt_iommu_fmt_init vtdss_pt_iommu_fmt_init

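/*
 * Report the values the driver needs to program the HW: the 4k aligned
 * physical address of the top table for SSPTPTR and the address width
 * encoding for the AW field.
 */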
static inline void
vtdss_pt_iommu_fmt_hw_info(struct pt_iommu_vtdss *table,
			   const struct pt_range *top_range,
			   struct pt_iommu_vtdss_hw_info *info)
{
	info->ssptptr = virt_to_phys(top_range->top_table);
	PT_WARN_ON(info->ssptptr & ~PT_TOP_PHYS_MASK);
	/*
	 * top_level = 2 = 3 level table aw=1
	 * top_level = 3 = 4 level table aw=2
	 * top_level = 4 = 5 level table aw=3
	 */
	info->aw = top_range->top_level - 1;
}
#define pt_iommu_fmt_hw_info vtdss_pt_iommu_fmt_hw_info

#if defined(GENERIC_PT_KUNIT)
static const struct pt_iommu_vtdss_cfg vtdss_kunit_fmt_cfgs[] = {
	[0] = { .common.hw_max_vasz_lg2 = 39, .top_level = 2 },
	[1] = { .common.hw_max_vasz_lg2 = 48, .top_level = 3 },
	[2] = { .common.hw_max_vasz_lg2 = 57, .top_level = 4 },
};
#define kunit_fmt_cfgs vtdss_kunit_fmt_cfgs
enum { KUNIT_FMT_FEATURES = BIT(PT_FEAT_VTDSS_FORCE_WRITEABLE) };
#endif
#endif