xref: /freebsd/sys/arm64/iommu/smmuvar.h (revision 3750ccefb8629a08890bfbae894dd6bc6a7483b4)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019-2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_ARM64_IOMMU_SMMUVAR_H_
#define	_ARM64_IOMMU_SMMUVAR_H_

#include <arm64/iommu/iommu_pmap.h>

#define	SMMU_DEVSTR		"ARM System Memory Management Unit"
#define	SMMU_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	SMMU_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx)

DECLARE_CLASS(smmu_driver);

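/*
 * Per-controller state: wraps the generic iommu_unit and tracks the
 * domains created on this SMMU instance.
 */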
struct smmu_unit {
	struct iommu_unit		iommu;
	LIST_HEAD(, smmu_domain)	domain_list;
	LIST_ENTRY(smmu_unit)		next;
	device_t			dev;
	intptr_t			xref;
};

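/*
 * A translation domain: the generic iommu_domain plus the contexts attached
 * to it, its context descriptor, the stage-1 page tables (smmu_pmap) and the
 * ASID that tags its TLB entries.
 */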
struct smmu_domain {
	struct iommu_domain		iodom;
	LIST_HEAD(, smmu_ctx)		ctx_list;
	LIST_ENTRY(smmu_domain)		next;
	u_int				entries_cnt;
	struct smmu_cd			*cd;
	struct smmu_pmap		p;
	uint16_t			asid;
};

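/*
 * Per-device context within a domain: the device's stream ID, its
 * vendor/device IDs, and whether translation is bypassed for it.
 */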
struct smmu_ctx {
	struct iommu_ctx		ioctx;
	struct smmu_domain		*domain;
	LIST_ENTRY(smmu_ctx)		next;
	device_t			dev;
	bool				bypass;
	int				sid;
	uint16_t			vendor;
	uint16_t			device;
};

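/*
 * Software copy of a queue's producer and consumer indices; the union lets
 * both be read or written as a single 64-bit value.
 */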
struct smmu_queue_local_copy {
	union {
		uint64_t val;
		struct {
			uint32_t prod;
			uint32_t cons;
		};
	};
};

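/* A context descriptor: physical and kernel virtual address of its memory. */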
struct smmu_cd {
	vm_paddr_t paddr;
	void *vaddr;
};

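/*
 * One circular queue shared with the hardware (command, event or PRI):
 * its DMA memory, the offsets of the PROD/CONS registers, the log2 queue
 * size and the value programmed into the queue base register.
 */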
struct smmu_queue {
	struct smmu_queue_local_copy lc;
	vm_paddr_t paddr;
	void *vaddr;
	uint32_t prod_off;
	uint32_t cons_off;
	int size_log2;
	uint64_t base;
};

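/*
 * Decoded form of a command queue entry, filled in by the driver before it
 * is encoded into the command queue: TLB invalidation, configuration
 * (STE/CD) invalidation, prefetch and sync commands.
 */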
struct smmu_cmdq_entry {
	uint8_t opcode;
	union {
		struct {
			uint16_t asid;
			uint16_t vmid;
			vm_offset_t addr;
			bool leaf;
		} tlbi;
		struct {
			uint32_t sid;
			uint32_t ssid;
			bool leaf;
		} cfgi;
		struct {
			uint32_t sid;
		} prefetch;
		struct {
			uint64_t msiaddr;
		} sync;
	};
};

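/*
 * Bookkeeping for one level-1 stream table entry: the descriptor's span and
 * the level-2 array it points to.
 */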
struct l1_desc {
	uint8_t		span;
	void		*va;
	vm_paddr_t	pa;
};

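/*
 * Stream table state: the table memory, the values for the base and config
 * registers, and the level-1 descriptors used with the 2-level format.
 */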
struct smmu_strtab {
	void		*vaddr;
	uint64_t	base;
	uint32_t	base_cfg;
	uint32_t	num_l1_entries;
	struct l1_desc	*l1;
};

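/*
 * Per-device software context: bus resources and interrupt handlers,
 * hardware capabilities, the three queues, the stream table and the ASID
 * allocator state.
 */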
struct smmu_softc {
	device_t		dev;
	struct resource		*res[5];
	void			*intr_cookie[3];
	uint32_t		ias; /* Intermediate Physical Address size (bits) */
	uint32_t		oas; /* Physical Address size (bits) */
	uint32_t		asid_bits;
	uint32_t		vmid_bits;
	uint32_t		sid_bits;
	uint32_t		ssid_bits;
	uint32_t		pgsizes;
	uint32_t		features;
#define	SMMU_FEATURE_2_LVL_STREAM_TABLE		(1 << 0)
#define	SMMU_FEATURE_2_LVL_CD			(1 << 1)
#define	SMMU_FEATURE_TT_LE			(1 << 2)
#define	SMMU_FEATURE_TT_BE			(1 << 3)
#define	SMMU_FEATURE_SEV			(1 << 4)
#define	SMMU_FEATURE_MSI			(1 << 5)
#define	SMMU_FEATURE_HYP			(1 << 6)
#define	SMMU_FEATURE_ATS			(1 << 7)
#define	SMMU_FEATURE_PRI			(1 << 8)
#define	SMMU_FEATURE_STALL_FORCE		(1 << 9)
#define	SMMU_FEATURE_STALL			(1 << 10)
#define	SMMU_FEATURE_S1P			(1 << 11)
#define	SMMU_FEATURE_S2P			(1 << 12)
#define	SMMU_FEATURE_VAX			(1 << 13)
#define	SMMU_FEATURE_COHERENCY			(1 << 14)
#define	SMMU_FEATURE_RANGE_INV			(1 << 15)
	struct smmu_queue		cmdq;
	struct smmu_queue		evtq;
	struct smmu_queue		priq;
	struct smmu_strtab		strtab;
	int				sync;
	struct mtx			sc_mtx;
	bitstr_t			*asid_set;
	int				asid_set_size;
	struct mtx			asid_set_mutex;
	struct smmu_unit		unit;
	uintptr_t			xref;
};

MALLOC_DECLARE(M_SMMU);

/* Device methods */
int smmu_attach(device_t dev);
int smmu_detach(device_t dev);

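/*
 * Event-handling helpers: smmu_ctx_lookup_by_sid() returns the context
 * registered for a given stream ID; smmu_quirks_check() reports whether a
 * recorded event (stream ID, event ID, input address) matches a known
 * hardware quirk.
 */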
struct smmu_ctx *smmu_ctx_lookup_by_sid(device_t dev, u_int sid);
bool smmu_quirks_check(device_t dev, u_int sid, uint8_t event_id,
    uintptr_t input_addr);

#endif /* _ARM64_IOMMU_SMMUVAR_H_ */