/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016 Anish Gupta (anish@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _AMDVI_PRIV_H_
#define _AMDVI_PRIV_H_

#include <contrib/dev/acpica/include/acpi.h>

#define	BIT(n)			(1ULL << (n))
/* Return the value of bits [n:m], where n and m (n >= m) are bit positions. */
#define	REG_BITS(x, n, m)	(((x) >> (m)) &			\
				((1 << (((n) - (m)) + 1)) - 1))
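/*
 * Usage sketch (illustrative only, not part of the driver API): REG_BITS()
 * shifts the requested field down to bit 0 and masks off everything above it.
 * For example, extracting bits [26:24] of a hypothetical 32-bit register:
 *
 *	uint32_t reg = 0x05000000;
 *	uint32_t field = REG_BITS(reg, 26, 24);		(field == 0x5)
 */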

/*
 * IOMMU PCI capability.
 */
#define	AMDVI_PCI_CAP_IOTLB	BIT(0)	/* IOTLB is supported. */
#define	AMDVI_PCI_CAP_HT	BIT(1)	/* HyperTransport tunnel support. */
#define	AMDVI_PCI_CAP_NPCACHE	BIT(2)	/* Not present page cached. */
#define	AMDVI_PCI_CAP_EFR	BIT(3)	/* Extended features. */
#define	AMDVI_PCI_CAP_EXT	BIT(4)	/* Miscellaneous information reg. */

/*
 * IOMMU extended features.
 */
#define	AMDVI_EX_FEA_PREFSUP	BIT(0)	/* Prefetch command support. */
#define	AMDVI_EX_FEA_PPRSUP	BIT(1)	/* PPR support. */
#define	AMDVI_EX_FEA_XTSUP	BIT(2)	/* Reserved. */
#define	AMDVI_EX_FEA_NXSUP	BIT(3)	/* No-execute. */
#define	AMDVI_EX_FEA_GTSUP	BIT(4)	/* Guest translation support. */
#define	AMDVI_EX_FEA_EFRW	BIT(5)	/* Reserved. */
#define	AMDVI_EX_FEA_IASUP	BIT(6)	/* Invalidate all command supp. */
#define	AMDVI_EX_FEA_GASUP	BIT(7)	/* Guest APIC or AVIC support. */
#define	AMDVI_EX_FEA_HESUP	BIT(8)	/* Hardware Error. */
#define	AMDVI_EX_FEA_PCSUP	BIT(9)	/* Performance counters support. */
/* XXX: add more EFR bits. */
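/*
 * Illustrative sketch: the capability and EFR bits above are meant to be
 * tested with simple mask checks.  Assuming a softc whose pci_cap and
 * ext_feature fields have been filled in (see struct amdvi_softc below),
 * a feature test looks like this; "use_invalidate_all" is a hypothetical
 * flag for the example.
 *
 *	if ((softc->pci_cap & AMDVI_PCI_CAP_EFR) != 0 &&
 *	    (softc->ext_feature & AMDVI_EX_FEA_IASUP) != 0)
 *		use_invalidate_all = true;
 */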

/*
 * Device table entry or DTE.
 * NOTE: Must be 256 bits (32 bytes) in size and 32-byte aligned.
 */
struct amdvi_dte {
	uint32_t dt_valid:1;		/* Device Table valid. */
	uint32_t pt_valid:1;		/* Page translation valid. */
	uint16_t :7;			/* Reserved[8:2] */
	uint8_t	 pt_level:3;		/* Paging level, 0 to disable. */
	uint64_t pt_base:40;		/* Page table root pointer. */
	uint8_t  :3;			/* Reserved[54:52] */
	uint8_t	 gv_valid:1;		/* Revision 2, GVA to SPA. */
	uint8_t	 gv_level:2;		/* Revision 2, GLX level. */
	uint8_t	 gv_cr3_lsb:3;		/* Revision 2, GCR3[14:12] */
	uint8_t	 read_allow:1;		/* I/O read enabled. */
	uint8_t	 write_allow:1;		/* I/O write enabled. */
	uint8_t  :1;			/* Reserved[63] */
	uint16_t domain_id:16;		/* Domain ID */
	uint16_t gv_cr3_lsb2:16;	/* Revision 2, GCR3[30:15] */
	uint8_t	 iotlb_enable:1;	/* Device supports IOTLB. */
	uint8_t	 sup_second_io_fault:1;	/* Suppress subsequent I/O faults. */
	uint8_t	 sup_all_io_fault:1;	/* Suppress all I/O page faults. */
	uint8_t	 IOctl:2;		/* Port I/O control. */
	uint8_t	 iotlb_cache_disable:1;	/* IOTLB cache hints. */
	uint8_t	 snoop_disable:1;	/* Snoop disable. */
	uint8_t	 allow_ex:1;		/* Allow exclusion. */
	uint8_t	 sysmgmt:2;		/* System management message. */
	uint8_t  :1;			/* Reserved[106] */
	uint32_t gv_cr3_msb:21;		/* Revision 2, GCR3[51:31] */
	uint8_t	 intmap_valid:1;	/* Interrupt map valid. */
	uint8_t	 intmap_len:4;		/* Interrupt map table length. */
	uint8_t	 intmap_ign:1;		/* Ignore unmapped interrupts. */
	uint64_t intmap_base:46;	/* IntMap base. */
	uint8_t  :4;			/* Reserved[183:180] */
	uint8_t	 init_pass:1;		/* INIT pass through or PT */
	uint8_t	 extintr_pass:1;	/* External Interrupt PT */
	uint8_t	 nmi_pass:1;		/* NMI PT */
	uint8_t  :1;			/* Reserved[187] */
	uint8_t	 intr_ctrl:2;		/* Interrupt control */
	uint8_t	 lint0_pass:1;		/* LINT0 PT */
	uint8_t	 lint1_pass:1;		/* LINT1 PT */
	uint64_t :64;			/* Reserved[255:192] */
} __attribute__((__packed__));
CTASSERT(sizeof(struct amdvi_dte) == 32);
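/*
 * Illustrative sketch (simplified, not the driver's exact setup code): a
 * minimal host-translation DTE points at an I/O page-table root and grants
 * read/write access.  "pt_root_pa" and the 4-level depth are assumptions
 * made for the example; pt_base holds physical address bits [51:12].
 *
 *	struct amdvi_dte dte;
 *
 *	memset(&dte, 0, sizeof(dte));
 *	dte.dt_valid = 1;			(entry is valid)
 *	dte.pt_valid = 1;			(translation info is valid)
 *	dte.pt_level = 4;			(4-level I/O page table)
 *	dte.pt_base = pt_root_pa >> 12;		(root pointer, bits [51:12])
 *	dte.read_allow = 1;
 *	dte.write_allow = 1;
 *	dte.domain_id = domain->id;
 */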

/*
 * IOMMU command entry.
 */
struct amdvi_cmd {
	uint32_t 	word0;
	uint32_t 	word1:28;
	uint8_t		opcode:4;
	uint64_t 	addr;
} __attribute__((__packed__));

/* Command opcodes. */
#define	AMDVI_CMP_WAIT_OPCODE	0x1	/* Completion wait. */
#define	AMDVI_INVD_DTE_OPCODE	0x2	/* Invalidate device table entry. */
#define	AMDVI_INVD_PAGE_OPCODE	0x3	/* Invalidate pages. */
#define	AMDVI_INVD_IOTLB_OPCODE	0x4	/* Invalidate IOTLB pages. */
#define	AMDVI_INVD_INTR_OPCODE	0x5	/* Invalidate Interrupt table. */
#define	AMDVI_PREFETCH_PAGES_OPCODE	0x6	/* Prefetch IOMMU pages. */
#define	AMDVI_COMP_PPR_OPCODE	0x7	/* Complete PPR request. */
#define	AMDVI_INV_ALL_OPCODE	0x8	/* Invalidate all. */

/* Completion wait attributes. */
#define	AMDVI_CMP_WAIT_STORE	BIT(0)	/* Write back data. */
#define	AMDVI_CMP_WAIT_INTR	BIT(1)	/* Completion wait interrupt. */
#define	AMDVI_CMP_WAIT_FLUSH	BIT(2)	/* Flush queue. */
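/*
 * Illustrative sketch: a COMPLETION_WAIT command with the store attribute
 * asks the IOMMU to write "data" to physical address "cmp_pa" once all prior
 * commands have completed.  One plausible packing of the 128-bit entry above
 * is shown (low store-address bits plus attribute bits in word0, high address
 * bits in word1, store data in addr); cmp_pa and data are assumptions made
 * for the example, not names used by the driver.
 *
 *	struct amdvi_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = AMDVI_CMP_WAIT_OPCODE;
 *	cmd.word0 = (uint32_t)(cmp_pa & ~0x7UL) | AMDVI_CMP_WAIT_STORE;
 *	cmd.word1 = (uint32_t)(cmp_pa >> 32);
 *	cmd.addr = data;
 */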

/* Invalidate page. */
#define	AMDVI_INVD_PAGE_S	BIT(0)	/* Invalidation size. */
#define	AMDVI_INVD_PAGE_PDE	BIT(1)	/* Invalidate PDE. */
#define	AMDVI_INVD_PAGE_GN_GVA	BIT(2)	/* GPA or GVA. */

#define	AMDVI_INVD_PAGE_ALL_ADDR	(0x7FFFFFFFFFFFFULL << 12)

/* Invalidate IOTLB. */
#define	AMDVI_INVD_IOTLB_S	BIT(0)	/* Invalidation size 4k or addr */
#define	AMDVI_INVD_IOTLB_GN_GVA	BIT(2)	/* GPA or GVA. */

#define	AMDVI_INVD_IOTLB_ALL_ADDR	(0x7FFFFFFFFFFFFULL << 12)
/* XXX: add more command entries. */
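/*
 * Illustrative sketch: invalidating every page of a domain uses the
 * INVALIDATE_IOMMU_PAGES opcode with the "all" address encoding and the size
 * bit set.  This is a simplified example of how such a command might be
 * assembled; the exact placement of the domain ID in word1 is an assumption,
 * not something spelled out in this header.
 *
 *	struct amdvi_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = AMDVI_INVD_PAGE_OPCODE;
 *	cmd.word1 = domain_id;
 *	cmd.addr = AMDVI_INVD_PAGE_ALL_ADDR | AMDVI_INVD_PAGE_S |
 *	    AMDVI_INVD_PAGE_PDE;
 */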

/*
 * IOMMU event entry.
 */
struct amdvi_event {
	uint16_t 	devid;
	uint16_t 	pasid_hi;
	uint16_t 	pasid_domid;	/* PASID low or DomainID */
	uint16_t 	flag:12;
	uint8_t		opcode:4;
	uint64_t 	addr;
} __attribute__((__packed__));
CTASSERT(sizeof(struct amdvi_event) == 16);

/* Various event types. */
#define	AMDVI_EVENT_INVALID_DTE		0x1
#define	AMDVI_EVENT_PFAULT		0x2
#define	AMDVI_EVENT_DTE_HW_ERROR	0x3
#define	AMDVI_EVENT_PAGE_HW_ERROR	0x4
#define	AMDVI_EVENT_ILLEGAL_CMD		0x5
#define	AMDVI_EVENT_CMD_HW_ERROR	0x6
#define	AMDVI_EVENT_IOTLB_TIMEOUT	0x7
#define	AMDVI_EVENT_INVALID_DTE_REQ	0x8
#define	AMDVI_EVENT_INVALID_PPR_REQ	0x9
#define	AMDVI_EVENT_COUNTER_ZERO	0xA

#define	AMDVI_EVENT_FLAG_MASK		0x1FF	/* Mask for event flags. */
#define	AMDVI_EVENT_FLAG_TYPE(x)	(((x) >> 9) & 0x3)
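/*
 * Illustrative sketch: decoding an event log entry.  The opcode selects the
 * event type and the flag bits qualify it; for an I/O page fault the faulting
 * address is in addr and the requester ID in devid.  The function name and
 * printf format are examples only.
 *
 *	static void
 *	amdvi_example_decode_event(const struct amdvi_event *ev)
 *	{
 *		if (ev->opcode == AMDVI_EVENT_PFAULT)
 *			printf("IO page fault: devid 0x%x domain 0x%x "
 *			    "addr 0x%jx flags 0x%x\n", ev->devid,
 *			    ev->pasid_domid, (uintmax_t)ev->addr,
 *			    ev->flag & AMDVI_EVENT_FLAG_MASK);
 *	}
 */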

/*
 * IOMMU control block.
 */
struct amdvi_ctrl {
	struct {
		uint16_t size:9;
		uint16_t :3;
		uint64_t base:40;	/* Devtable register base. */
		uint16_t :12;
	} dte;
	struct {
		uint16_t :12;
		uint64_t base:40;
		uint8_t  :4;
		uint8_t	 len:4;
		uint8_t  :4;
	} cmd;
	struct {
		uint16_t :12;
		uint64_t base:40;
		uint8_t  :4;
		uint8_t	 len:4;
		uint8_t  :4;
	} event;
	uint16_t control :13;
	uint64_t	 :51;
	struct {
		uint8_t	 enable:1;
		uint8_t	 allow:1;
		uint16_t :10;
		uint64_t base:40;
		uint16_t :12;
		uint16_t :12;
		uint64_t limit:40;
		uint16_t :12;
	} excl;
	/*
	 * Revision 2 only.
	 */
	uint64_t ex_feature;
	struct {
		uint16_t :12;
		uint64_t base:40;
		uint8_t  :4;
		uint8_t	 len:4;
		uint8_t  :4;
	} ppr;
	uint64_t first_event;
	uint64_t second_event;
	uint64_t event_status;
	/* Revision 2 only, end. */
	uint8_t	 pad1[0x1FA8];		/* Padding. */
	uint32_t cmd_head:19;
	uint64_t :45;
	uint32_t cmd_tail:19;
	uint64_t :45;
	uint32_t evt_head:19;
	uint64_t :45;
	uint32_t evt_tail:19;
	uint64_t :45;
	uint32_t status:19;
	uint64_t :45;
	uint64_t pad2;
	uint8_t  :4;
	uint16_t ppr_head:15;
	uint64_t :45;
	uint8_t  :4;
	uint16_t ppr_tail:15;
	uint64_t :45;
	uint8_t	 pad3[0x1FC0];		/* Padding. */

	/* XXX: More for rev2. */
} __attribute__((__packed__));
CTASSERT(offsetof(struct amdvi_ctrl, pad1) == 0x58);
CTASSERT(offsetof(struct amdvi_ctrl, pad2) == 0x2028);
CTASSERT(offsetof(struct amdvi_ctrl, pad3) == 0x2040);
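/*
 * Illustrative sketch: submitting a command through the MMIO-mapped control
 * block.  The head/tail registers are treated here as byte offsets into the
 * command buffer; the producer copies the entry at the tail and then advances
 * cmd_tail so the IOMMU consumes it.  This is a simplification, not the
 * driver's exact ring logic; "cmd" is an entry built as in the sketches above.
 *
 *	struct amdvi_ctrl *ctrl = softc->ctrl;
 *	uint32_t tail = ctrl->cmd_tail;
 *
 *	memcpy((uint8_t *)softc->cmd + tail, &cmd, sizeof(cmd));
 *	ctrl->cmd_tail = (tail + sizeof(cmd)) %
 *	    (softc->cmd_max * sizeof(struct amdvi_cmd));
 */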

#define	AMDVI_MMIO_V1_SIZE	(4 * PAGE_SIZE)	/* v1 size */
/*
 * AMD IOMMU v2 size, including event counters.
 */
#define	AMDVI_MMIO_V2_SIZE	(8 * PAGE_SIZE)

CTASSERT(sizeof(struct amdvi_ctrl) == 0x4000);
CTASSERT(sizeof(struct amdvi_ctrl) == AMDVI_MMIO_V1_SIZE);

/* IVHD flag */
#define	IVHD_FLAG_HTT		BIT(0)	/* HyperTransport tunnel. */
#define	IVHD_FLAG_PPW		BIT(1)	/* Pass posted write. */
#define	IVHD_FLAG_RPPW		BIT(2)	/* Response pass posted write. */
#define	IVHD_FLAG_ISOC		BIT(3)	/* Isoc support. */
#define	IVHD_FLAG_IOTLB		BIT(4)	/* IOTLB support. */
#define	IVHD_FLAG_COH		BIT(5)	/* Coherent control, default 1. */
#define	IVHD_FLAG_PFS		BIT(6)	/* Prefetch IOMMU pages. */
#define	IVHD_FLAG_PPRS		BIT(7)	/* Peripheral page request support. */

/* IVHD device entry data setting. */
#define	IVHD_DEV_LINT0_PASS	BIT(6)	/* LINT0 interrupts. */
#define	IVHD_DEV_LINT1_PASS	BIT(7)	/* LINT1 interrupts. */

/* Bit[5:4] for System Mgmt. Bit3 is reserved. */
#define	IVHD_DEV_INIT_PASS	BIT(0)	/* INIT */
#define	IVHD_DEV_EXTINTR_PASS	BIT(1)	/* ExtInt */
#define	IVHD_DEV_NMI_PASS	BIT(2)	/* NMI */

/* IVHD 8-byte extended data settings. */
#define	IVHD_DEV_EXT_ATS_DISABLE	BIT(31)	/* Disable ATS */

/* IOMMU control register. */
#define	AMDVI_CTRL_EN		BIT(0)	/* IOMMU enable. */
#define	AMDVI_CTRL_HTT		BIT(1)	/* HyperTransport tunnel enable. */
#define	AMDVI_CTRL_ELOG		BIT(2)	/* Event log enable. */
#define	AMDVI_CTRL_ELOGINT	BIT(3)	/* Event log interrupt. */
#define	AMDVI_CTRL_COMINT	BIT(4)	/* Completion wait interrupt. */
#define	AMDVI_CTRL_PPW		BIT(8)	/* Pass posted writes. */
#define	AMDVI_CTRL_RPPW		BIT(9)	/* Response pass posted writes. */
#define	AMDVI_CTRL_COH		BIT(10)	/* Coherent. */
#define	AMDVI_CTRL_ISOC		BIT(11)	/* Isochronous. */
#define	AMDVI_CTRL_CMD		BIT(12)	/* Command buffer enable. */
#define	AMDVI_CTRL_PPRLOG	BIT(13)	/* PPR log enable. */
#define	AMDVI_CTRL_PPRINT	BIT(14)	/* PPR interrupt enable. */
#define	AMDVI_CTRL_PPREN	BIT(15)	/* PPR enable. */
#define	AMDVI_CTRL_GTE		BIT(16)	/* Guest translation enable. */
#define	AMDVI_CTRL_GAE		BIT(17)	/* Guest APIC enable. */

/* Invalidation timeout. */
#define	AMDVI_CTRL_INV_NO_TO	0	/* No timeout. */
#define	AMDVI_CTRL_INV_TO_1ms	1	/* 1 ms */
#define	AMDVI_CTRL_INV_TO_10ms	2	/* 10 ms */
#define	AMDVI_CTRL_INV_TO_100ms	3	/* 100 ms */
#define	AMDVI_CTRL_INV_TO_1S	4	/* 1 second */
#define	AMDVI_CTRL_INV_TO_10S	5	/* 10 seconds */
#define	AMDVI_CTRL_INV_TO_100S	6	/* 100 seconds */
/*
 * Max number of PCI devices:
 * 256 buses x 32 slots/devices x 8 functions.
 */
#define	PCI_NUM_DEV_MAX		0x10000

/* Maximum number of domains supported by the IOMMU. */
#define	AMDVI_MAX_DOMAIN	(BIT(16) - 1)
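/*
 * Illustrative sketch: the 0x10000 device IDs come from the standard PCI
 * requester ID (RID) encoding, bus[15:8], slot[7:3], function[2:0], so a BDF
 * maps to a device-table index as shown.  "device_table" is a hypothetical
 * name for the array of DTEs used in this example.
 *
 *	uint16_t rid = (bus << 8) | (slot << 3) | func;	(0 <= rid < PCI_NUM_DEV_MAX)
 *	struct amdvi_dte *dte = &device_table[rid];
 */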

/*
 * IOMMU Page Table attributes.
 */
#define	AMDVI_PT_PRESENT	BIT(0)
#define	AMDVI_PT_COHERENT	BIT(60)
#define	AMDVI_PT_READ		BIT(61)
#define	AMDVI_PT_WRITE		BIT(62)

#define	AMDVI_PT_RW		(AMDVI_PT_READ | AMDVI_PT_WRITE)
#define	AMDVI_PT_MASK		0xFFFFFFFFFF000UL /* Only [51:12] for PA */

#define	AMDVI_PD_LEVEL_SHIFT	9
#define	AMDVI_PD_SUPER(x)	(((x) >> AMDVI_PD_LEVEL_SHIFT) == 7)
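/*
 * Illustrative sketch: a page-directory/page-table entry combines the
 * physical address bits [51:12] with the attribute bits above; the next-level
 * code sits in bits [11:9] (hence AMDVI_PD_LEVEL_SHIFT), where 0 marks a
 * final translation and 7 a large "super" page, which AMDVI_PD_SUPER() tests.
 * "pa" and "level" are assumed inputs for the example.
 *
 *	uint64_t pte;
 *
 *	pte = (pa & AMDVI_PT_MASK) |
 *	    ((uint64_t)level << AMDVI_PD_LEVEL_SHIFT) |
 *	    AMDVI_PT_PRESENT | AMDVI_PT_RW;
 */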
/*
 * IOMMU Status, offset 0x2020.
 */
#define	AMDVI_STATUS_EV_OF		BIT(0)	/* Event overflow. */
#define	AMDVI_STATUS_EV_INTR		BIT(1)	/* Event interrupt. */
/* Completion wait command completed. */
#define	AMDVI_STATUS_CMP		BIT(2)

#define	IVRS_CTRL_RID			1	/* MMIO RID */

/* ACPI IVHD */
struct ivhd_dev_cfg {
	uint32_t start_id;
	uint32_t end_id;
	uint8_t	 data;			/* Device configuration. */
	bool	 enable_ats;		/* ATS enabled for the device. */
	int	 ats_qlen;		/* ATS invalidation queue depth. */
};

struct amdvi_domain {
	uint64_t *ptp;			/* Highest level page table */
	int	ptp_level;		/* Level of page tables */
	u_int	id;			/* Domain id */
	SLIST_ENTRY (amdvi_domain) next;
};

/*
 * Different types of IVHD.
 * XXX: Use AcpiIvrsType once new IVHD types are available.
 */
enum IvrsType
{
	IVRS_TYPE_HARDWARE_LEGACY = ACPI_IVRS_TYPE_HARDWARE1,
					/* Legacy without EFR support. */
	IVRS_TYPE_HARDWARE_EFR	  = ACPI_IVRS_TYPE_HARDWARE2,
					/* With EFR support. */
	IVRS_TYPE_HARDWARE_MIXED  = 0x40, /* Mixed with EFR support. */
};

/*
 * AMD IOMMU softc.
 */
struct amdvi_softc {
	struct amdvi_ctrl *ctrl;	/* Control area. */
	device_t 	dev;		/* IOMMU device. */
	enum IvrsType	ivhd_type;	/* IOMMU IVHD type. */
	bool		iotlb;		/* IOTLB supported by IOMMU */
	struct amdvi_cmd *cmd;		/* Command descriptor area. */
	int 		cmd_max;	/* Max number of commands. */
	uint64_t	cmp_data;	/* Command completion write back. */
	struct amdvi_event *event;	/* Event descriptor area. */
	struct resource *event_res;	/* Event interrupt resource. */
	void   		*event_tag;	/* Event interrupt tag. */
	int		event_max;	/* Max number of events. */
	int		event_irq;
	int		event_rid;
	/* ACPI various flags. */
	uint32_t 	ivhd_flag;	/* ACPI IVHD flag. */
	uint32_t 	ivhd_feature;	/* ACPI v1 Reserved or v2 attribute. */
	uint64_t 	ext_feature;	/* IVHD EFR */
	/* PCI related. */
	uint16_t 	cap_off;	/* PCI Capability offset. */
	uint8_t		pci_cap;	/* PCI capability. */
	uint16_t 	pci_seg;	/* IOMMU PCI domain/segment. */
	uint16_t 	pci_rid;	/* PCI BDF of IOMMU */
	/* Device range under this IOMMU. */
	uint16_t 	start_dev_rid;	/* First device under this IOMMU. */
	uint16_t 	end_dev_rid;	/* Last device under this IOMMU. */

	/* BIOS provided device configuration for end points. */
	struct 		ivhd_dev_cfg dev_cfg[10];
	int		dev_cfg_cnt;

	/* Software statistics. */
	uint64_t 	event_intr_cnt;	/* Total event INTR count. */
	uint64_t 	total_cmd;	/* Total number of commands. */
};

int	amdvi_setup_hw(struct amdvi_softc *softc);
int	amdvi_teardown_hw(struct amdvi_softc *softc);
#endif /* _AMDVI_PRIV_H_ */