/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019-2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_ARM64_IOMMU_SMMUVAR_H_
#define	_ARM64_IOMMU_SMMUVAR_H_

#include <arm64/iommu/iommu_pmap.h>

#define	SMMU_DEVSTR		"ARM System Memory Management Unit"
#define	SMMU_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	SMMU_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx)

DECLARE_CLASS(smmu_driver);

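/*
 * Per-controller state, one per SMMU instance.  It embeds the generic
 * iommu_unit and keeps the list of translation domains created on this
 * controller.
 */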
struct smmu_unit {
	struct iommu_unit		iommu;
	LIST_HEAD(, smmu_domain)	domain_list;
	LIST_ENTRY(smmu_unit)		next;
	device_t			dev;
	intptr_t			xref;
};

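/*
 * A translation domain (one I/O address space).  It owns the stage-1 page
 * tables (struct smmu_pmap), the memory backing the Context Descriptor,
 * the 16-bit ASID used when invalidating TLB entries for this domain, and
 * the list of device contexts attached to it.
 */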
struct smmu_domain {
	struct iommu_domain		iodom;
	LIST_HEAD(, smmu_ctx)		ctx_list;
	LIST_ENTRY(smmu_domain)		next;
	u_int				entries_cnt;
	struct smmu_cd			*cd;
	struct smmu_pmap		p;
	uint16_t			asid;
};

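/*
 * Per-device context: the attachment of one device, identified by its
 * StreamID (sid), to a domain.  A bypass context lets the stream's DMA
 * pass through untranslated instead of being mapped through page tables.
 */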
struct smmu_ctx {
	struct iommu_ctx		ioctx;
	struct smmu_domain		*domain;
	LIST_ENTRY(smmu_ctx)		next;
	device_t			dev;
	bool				bypass;
	int				sid;
	uint16_t			vendor;
	uint16_t			device;
};

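/*
 * Software copy of a queue's producer and consumer indices.  The union
 * lets both 32-bit indices be loaded or stored as one 64-bit value.
 */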
struct smmu_queue_local_copy {
	union {
		uint64_t val;
		struct {
			uint32_t prod;
			uint32_t cons;
		};
	};
};

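/*
 * Memory backing a Context Descriptor, which points the SMMU at a
 * domain's translation tables and configuration.
 */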
struct smmu_cd {
	vm_paddr_t		paddr;
	vm_size_t		size;
	void			*vaddr;
};

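/*
 * One of the in-memory circular queues shared with the hardware (command,
 * event or PRI queue).  prod_off and cons_off are the offsets of the
 * queue's producer and consumer registers, size_log2 is the log2 of the
 * number of entries, and base is the value programmed into the queue base
 * register.
 */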
struct smmu_queue {
	struct smmu_queue_local_copy lc;
	vm_paddr_t		paddr;
	void			*vaddr;
	uint32_t		prod_off;
	uint32_t		cons_off;
	int			size_log2;
	uint64_t		base;
};

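/*
 * Decoded form of a command queue entry.  The opcode selects which member
 * of the union is valid: TLB invalidation (tlbi), configuration cache
 * invalidation (cfgi), configuration prefetch (prefetch) or command
 * synchronization (sync).
 */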
struct smmu_cmdq_entry {
	uint8_t opcode;
	union {
		struct {
			uint16_t asid;
			uint16_t vmid;
			vm_offset_t addr;
			bool leaf;
		} tlbi;
		struct {
			uint32_t sid;
			uint32_t ssid;
			bool leaf;
		} cfgi;
		struct {
			uint32_t sid;
		} prefetch;
		struct {
			uint64_t msiaddr;
		} sync;
	};
};

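/*
 * A level-1 descriptor of the two-level stream table.  span is the Span
 * field written to the level-1 stream table descriptor (it encodes the
 * size of the level-2 table); va, pa and size describe the level-2 table
 * itself.
 */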
struct l1_desc {
	uint8_t		span;
	size_t		size;
	void		*va;
	vm_paddr_t	pa;
};

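/*
 * Stream table state: the values programmed into the stream table base
 * and configuration registers, plus the array of level-1 descriptors when
 * the two-level format is in use.
 */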
struct smmu_strtab {
	void		*vaddr;
	uint64_t	base;
	uint32_t	base_cfg;
	uint32_t	num_l1_entries;
	struct l1_desc	*l1;
};

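/*
 * Per-instance driver state: bus resources and interrupt handlers,
 * capabilities read from the ID registers (address sizes, ASID/VMID/
 * StreamID/SubstreamID widths, supported page sizes, feature flags),
 * the three hardware queues, the stream table and the ASID allocator.
 */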
struct smmu_softc {
	device_t		dev;
	struct resource		*res[5];
	void			*intr_cookie[3];
	uint32_t		ias; /* Intermediate address size, bits */
	uint32_t		oas; /* Output (physical) address size, bits */
	uint32_t		asid_bits;
	uint32_t		vmid_bits;
	uint32_t		sid_bits;
	uint32_t		ssid_bits;
	uint32_t		pgsizes;
	uint32_t		features;
#define	SMMU_FEATURE_2_LVL_STREAM_TABLE		(1 << 0)
#define	SMMU_FEATURE_2_LVL_CD			(1 << 1)
#define	SMMU_FEATURE_TT_LE			(1 << 2)
#define	SMMU_FEATURE_TT_BE			(1 << 3)
#define	SMMU_FEATURE_SEV			(1 << 4)
#define	SMMU_FEATURE_MSI			(1 << 5)
#define	SMMU_FEATURE_HYP			(1 << 6)
#define	SMMU_FEATURE_ATS			(1 << 7)
#define	SMMU_FEATURE_PRI			(1 << 8)
#define	SMMU_FEATURE_STALL_FORCE		(1 << 9)
#define	SMMU_FEATURE_STALL			(1 << 10)
#define	SMMU_FEATURE_S1P			(1 << 11)
#define	SMMU_FEATURE_S2P			(1 << 12)
#define	SMMU_FEATURE_VAX			(1 << 13)
#define	SMMU_FEATURE_COHERENCY			(1 << 14)
#define	SMMU_FEATURE_RANGE_INV			(1 << 15)
	struct smmu_queue		cmdq;
	struct smmu_queue		evtq;
	struct smmu_queue		priq;
	struct smmu_strtab		strtab;
	int				sync;
	struct mtx			sc_mtx;
	bitstr_t			*asid_set;
	int				asid_set_size;
	struct mtx			asid_set_mutex;
	struct smmu_unit		unit;
	uintptr_t			xref;
};

MALLOC_DECLARE(M_SMMU);

/* Device methods */
int smmu_attach(device_t dev);
int smmu_detach(device_t dev);

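/*
 * smmu_ctx_lookup_by_sid() returns the context attached to this SMMU
 * instance for the given StreamID.  smmu_quirks_check() is a hook for
 * platform-specific workarounds; from its signature it is assumed to
 * decide whether an event record for the given StreamID, event ID and
 * input address should be ignored.
 */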
struct smmu_ctx *smmu_ctx_lookup_by_sid(device_t dev, u_int sid);
bool smmu_quirks_check(device_t dev, u_int sid, uint8_t event_id,
    uintptr_t input_addr);

#endif /* _ARM64_IOMMU_SMMUVAR_H_ */