xref: /titanic_44/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c (revision cd21e7c548ae2a3b5e522244bf798f2a6b4ba02d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
25  */
26 
27 #include <sys/sunddi.h>
28 #include <sys/sunndi.h>
29 #include <sys/iommulib.h>
30 #include <sys/amd_iommu.h>
31 #include <sys/pci_cap.h>
32 #include <sys/bootconf.h>
33 #include <sys/ddidmareq.h>
34 
35 #include "amd_iommu_impl.h"
36 #include "amd_iommu_acpi.h"
37 #include "amd_iommu_page_tables.h"
38 
39 static int amd_iommu_fini(amd_iommu_t *iommu, int type);
40 static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
41 static void amd_iommu_stop(amd_iommu_t *iommu);
42 
43 static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
44 static int amd_iommu_allochdl(iommulib_handle_t handle,
45     dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
46     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
47 static int amd_iommu_freehdl(iommulib_handle_t handle,
48     dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
49 static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
50     dev_info_t *rdip, ddi_dma_handle_t dma_handle,
51     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
52     uint_t *ccountp);
53 static int amd_iommu_unbindhdl(iommulib_handle_t handle,
54     dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
55 static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
56     dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
57     size_t len, uint_t cache_flags);
58 static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
59     dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
60     off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
61     uint_t *ccountp);
62 static int amd_iommu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
63     dev_info_t *rdip, ddi_dma_handle_t dma_handle,
64     struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao);
65 static int amd_iommu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
66     dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao);
67 
68 static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
69     ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);
70 
71 extern void *device_arena_alloc(size_t size, int vm_flag);
72 extern void device_arena_free(void * vaddr, size_t size);
73 
74 ddi_dma_attr_t amd_iommu_dma_attr = {
75 	DMA_ATTR_V0,
76 	0U,				/* dma_attr_addr_lo */
77 	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
78 	0xffffffffU,			/* dma_attr_count_max */
79 	(uint64_t)4096,			/* dma_attr_align */
80 	1,				/* dma_attr_burstsizes */
81 	64,				/* dma_attr_minxfer */
82 	0xffffffffU,			/* dma_attr_maxxfer */
83 	0xffffffffU,			/* dma_attr_seg */
84 	1,				/* dma_attr_sgllen, variable */
85 	64,				/* dma_attr_granular */
86 	0				/* dma_attr_flags */
87 };
88 
89 ddi_device_acc_attr_t amd_iommu_devacc = {
90 	DDI_DEVICE_ATTR_V0,
91 	DDI_NEVERSWAP_ACC,
92 	DDI_STRICTORDER_ACC
93 };
94 
95 struct iommulib_ops amd_iommulib_ops = {
96 	IOMMU_OPS_VERSION,
97 	AMD_IOMMU,
98 	"AMD IOMMU Vers. 1",
99 	NULL,
100 	amd_iommu_probe,
101 	amd_iommu_allochdl,
102 	amd_iommu_freehdl,
103 	amd_iommu_bindhdl,
104 	amd_iommu_unbindhdl,
105 	amd_iommu_sync,
106 	amd_iommu_win,
107 	amd_iommu_mapobject,
108 	amd_iommu_unmapobject,
109 };
110 
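/*
 * Protects the IOMMU page tables; taken around the map/unmap
 * operations in map_current_window() and unmap_current_window() below.
 */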
111 static kmutex_t amd_iommu_pgtable_lock;
112 
113 static int
114 amd_iommu_register(amd_iommu_t *iommu)
115 {
116 	dev_info_t *dip = iommu->aiomt_dip;
117 	const char *driver = ddi_driver_name(dip);
118 	int instance = ddi_get_instance(dip);
119 	iommulib_ops_t *iommulib_ops;
120 	iommulib_handle_t handle;
121 	const char *f = "amd_iommu_register";
122 
123 	iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);
124 
125 	*iommulib_ops = amd_iommulib_ops;
126 
127 	iommulib_ops->ilops_data = (void *)iommu;
128 	iommu->aiomt_iommulib_ops = iommulib_ops;
129 
130 	if (iommulib_iommu_register(dip, iommulib_ops, &handle)
131 	    != DDI_SUCCESS) {
132 		cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
133 		    "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
134 		kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
135 		return (DDI_FAILURE);
136 	}
137 
138 	iommu->aiomt_iommulib_handle = handle;
139 
140 	return (DDI_SUCCESS);
141 }
142 
143 static int
144 amd_iommu_unregister(amd_iommu_t *iommu)
145 {
146 	if (iommu->aiomt_iommulib_handle == NULL) {
147 		/* we never registered */
148 		return (DDI_SUCCESS);
149 	}
150 
151 	if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
152 	    != DDI_SUCCESS) {
153 		return (DDI_FAILURE);
154 	}
155 
156 	kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
157 	iommu->aiomt_iommulib_ops = NULL;
158 	iommu->aiomt_iommulib_handle = NULL;
159 
160 	return (DDI_SUCCESS);
161 }
162 
163 static int
164 amd_iommu_setup_passthru(amd_iommu_t *iommu)
165 {
166 	gfx_entry_t *gfxp;
167 	dev_info_t *dip;
168 
169 	/*
170 	 * Setup passthru mapping for "special" devices
171 	 */
172 	amd_iommu_set_passthru(iommu, NULL);
173 
174 	for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
175 		gfxp->g_ref++;
176 		dip = gfxp->g_dip;
177 		if (dip) {
178 			amd_iommu_set_passthru(iommu, dip);
179 		}
180 		gfxp->g_ref--;
181 	}
182 
183 	return (DDI_SUCCESS);
184 }
185 
186 static int
187 amd_iommu_start(amd_iommu_t *iommu)
188 {
189 	dev_info_t *dip = iommu->aiomt_dip;
190 	int instance = ddi_get_instance(dip);
191 	const char *driver = ddi_driver_name(dip);
192 	amd_iommu_acpi_ivhd_t *hinfop;
193 	const char *f = "amd_iommu_start";
194 
195 	hinfop = amd_iommu_lookup_all_ivhd();
196 
197 	/*
198 	 * Disable HT tunnel translation.
199 	 * XXX use ACPI
200 	 */
201 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
202 	    AMD_IOMMU_HT_TUN_ENABLE, 0);
203 
204 	if (hinfop) {
205 		if (amd_iommu_debug) {
206 			cmn_err(CE_NOTE,
207 			    "amd_iommu: using ACPI for CTRL registers");
208 		}
209 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
210 		    AMD_IOMMU_ISOC, hinfop->ach_Isoc);
211 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
212 		    AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
213 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
214 		    AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
215 	}
216 
217 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
218 	    AMD_IOMMU_INVTO, 5);
219 
220 
221 	/*
222 	 * The Device table entry bit 0 (V) controls whether the device
223 	 * table entry is valid for address translation and Device table
224 	 * entry bit 128 (IV) controls whether interrupt remapping is valid.
225 	 * Setting both to zero yields pass-thru. Since this table is
226 	 * zeroed on allocation, the default is therefore pass-thru
227 	 * once the IOMMU is enabled.
228 	 */
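	/*
	 * Note: amd_iommu_setup_tables_and_buffers() has already set
	 * V=1/TV=0 in every entry, so untranslated accesses fault rather
	 * than silently passing through, with SE set to keep those
	 * faults from flooding the event log.
	 */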
229 
230 	/* Finally enable the IOMMU ... */
231 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
232 	    AMD_IOMMU_ENABLE, 1);
233 
234 	if (amd_iommu_debug) {
235 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
236 		    "Successfully started AMD IOMMU", f, driver, instance,
237 		    iommu->aiomt_idx);
238 	}
239 	cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
240 	    instance, iommu->aiomt_idx);
241 
242 	return (DDI_SUCCESS);
243 }
244 
245 static void
246 amd_iommu_stop(amd_iommu_t *iommu)
247 {
248 	dev_info_t *dip = iommu->aiomt_dip;
249 	int instance = ddi_get_instance(dip);
250 	const char *driver = ddi_driver_name(dip);
251 	const char *f = "amd_iommu_stop";
252 
253 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
254 	    AMD_IOMMU_ENABLE, 0);
255 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
256 	    AMD_IOMMU_EVENTINT_ENABLE, 0);
257 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
258 	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
259 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
260 	    AMD_IOMMU_EVENTLOG_ENABLE, 0);
261 
262 	/*
263 	 * Disable translation on HT tunnel traffic
264 	 */
265 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
266 	    AMD_IOMMU_HT_TUN_ENABLE, 0);
267 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
268 	    AMD_IOMMU_CMDBUF_ENABLE, 0);
269 
270 	cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
271 	    "Successfully stopped AMD IOMMU", f, driver, instance,
272 	    iommu->aiomt_idx);
273 }
274 
275 static int
276 amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
277 {
278 	dev_info_t *dip = iommu->aiomt_dip;
279 	int instance = ddi_get_instance(dip);
280 	const char *driver = ddi_driver_name(dip);
281 	uint32_t dma_bufsz;
282 	caddr_t addr;
283 	uint32_t sz;
284 	uint32_t p2sz;
285 	int i;
286 	uint64_t *dentry;
287 	int err;
288 	const char *f = "amd_iommu_setup_tables_and_buffers";
289 
290 	/*
291 	 * We will put the Device Table, Command Buffer and
292 	 * Event Log in contiguous memory. Allocate the maximum
293 	 * size allowed for such structures:
294 	 * Device Table:  256b * 64K = 32B * 64K
295 	 * Command Buffer: 128b * 32K = 16B * 32K
296 	 * Event Log:  128b * 32K = 16B * 32K
297 	 */
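	/*
	 * Worked out: 32B * 64K = 2MB for the Device Table plus
	 * 16B * 32K = 512KB each for the Command Buffer and Event Log,
	 * i.e. 3MB in total.
	 */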
298 	iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
299 	iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
300 	iommu->aiomt_eventlog_sz =
301 	    (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;
302 
303 	dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
304 	    + iommu->aiomt_eventlog_sz;
305 
306 	/*
307 	 * Alloc a DMA handle.
308 	 */
309 	err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
310 	    DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
311 	if (err != DDI_SUCCESS) {
312 		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
313 		    "AMD IOMMU tables and buffers", f, driver, instance);
314 		return (DDI_FAILURE);
315 	}
316 
317 	/*
318 	 * Alloc memory for tables and buffers
319 	 * XXX remove cast to size_t
320 	 */
321 	err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
322 	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
323 	    DDI_DMA_SLEEP,  NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
324 	    (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
325 	if (err != DDI_SUCCESS) {
326 		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
327 		    "to AMD IOMMU tables and buffers", f, driver, instance);
328 		iommu->aiomt_dma_bufva = NULL;
329 		iommu->aiomt_dma_mem_realsz = 0;
330 		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
331 		iommu->aiomt_dmahdl = NULL;
332 		return (DDI_FAILURE);
333 	}
334 
335 	/*
336 	 * The VA must be 4K aligned and >= table size
337 	 */
338 	ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
339 	    AMD_IOMMU_TABLE_ALIGN) == 0);
340 	ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);
341 
342 	/*
343 	 * Now bind the handle
344 	 */
345 	err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
346 	    iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
347 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
348 	    NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
349 	if (err != DDI_DMA_MAPPED) {
350 		cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
351 		    "to AMD IOMMU tables and buffers. bufrealsz=%p",
352 		    f, driver, instance,
353 		    (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
354 		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
355 		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
356 		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
357 		iommu->aiomt_buf_dma_ncookie = 0;
358 		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
359 		iommu->aiomt_dma_mem_hdl = NULL;
360 		iommu->aiomt_dma_bufva = NULL;
361 		iommu->aiomt_dma_mem_realsz = 0;
362 		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
363 		iommu->aiomt_dmahdl = NULL;
364 		return (DDI_FAILURE);
365 	}
366 
367 	/*
368 	 * We assume the DMA engine on the IOMMU is capable of handling the
369 	 * whole table buffer in a single cookie. If not and multiple cookies
370 	 * are needed we fail.
371 	 */
372 	if (iommu->aiomt_buf_dma_ncookie != 1) {
373 		cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
374 		    "cookies for DMA to AMD IOMMU tables and buffers. "
375 		    "#cookies=%u", f, driver, instance,
376 		    iommu->aiomt_buf_dma_ncookie);
377 		(void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
378 		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
379 		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
380 		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
381 		iommu->aiomt_buf_dma_ncookie = 0;
382 		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
383 		iommu->aiomt_dma_mem_hdl = NULL;
384 		iommu->aiomt_dma_bufva = NULL;
385 		iommu->aiomt_dma_mem_realsz = 0;
386 		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
387 		iommu->aiomt_dmahdl = NULL;
388 		return (DDI_FAILURE);
389 	}
390 
391 	/*
392 	 * The address in the cookie must be 4K aligned and >= table size
393 	 */
394 	ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
395 	    & AMD_IOMMU_TABLE_ALIGN) == 0);
396 	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
397 	    <= iommu->aiomt_dma_mem_realsz);
398 	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);
399 
400 	/*
401 	 * Setup the device table pointers in the iommu struct as
402 	 * well as the IOMMU device table register
403 	 */
404 	iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
405 	bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);
406 
407 	/*
408 	 * Set V=1 and TV=0, so any inadvertent pass-thrus cause
409 	 * page faults. Also set the SE bit so we aren't swamped with
410 	 * page fault messages.
411 	 */
412 	for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
413 		/*LINTED*/
414 		dentry = (uint64_t *)&iommu->aiomt_devtbl
415 		    [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
416 		AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
417 		AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
418 	}
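	/*
	 * Each 256-bit entry spans four uint64_t words: V and TV live in
	 * dentry[0], while the SE bit set above sits in the second
	 * 64-bit word, hence the &dentry[1] addressing.
	 */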
419 
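	/*
	 * The base/size registers work in units of 4K pages: the base
	 * address is programmed as a page frame number (hence the >> 12
	 * below) and the size as (number of 4K pages - 1), which must
	 * fit in the 9-bit DEVTABSIZE field per the ASSERT.
	 */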
420 	addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
421 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
422 	    AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
423 	sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
424 	ASSERT(sz <= ((1 << 9) - 1));
425 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
426 	    AMD_IOMMU_DEVTABSIZE, sz);
427 
428 	/*
429 	 * Setup the command buffer pointers
430 	 */
431 	iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
432 	    iommu->aiomt_devtbl_sz;
433 	bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
434 	addr += iommu->aiomt_devtbl_sz;
435 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
436 	    AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);
437 
438 	p2sz = AMD_IOMMU_CMDBUF_SZ;
439 	ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
440 	    p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
441 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
442 	    AMD_IOMMU_COMLEN, p2sz);
443 	/*LINTED*/
444 	iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
445 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
446 	    AMD_IOMMU_CMDHEADPTR, 0);
447 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
448 	    AMD_IOMMU_CMDTAILPTR, 0);
449 
450 	/*
451 	 * Setup the event log pointers
452 	 */
453 	iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
454 	    iommu->aiomt_cmdbuf_sz;
455 	bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
456 	addr += iommu->aiomt_cmdbuf_sz;
457 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
458 	    AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
459 	p2sz = AMD_IOMMU_EVENTLOG_SZ;
460 	ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
461 	    p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
462 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
463 	    AMD_IOMMU_EVENTLEN, p2sz);
464 	/*LINTED*/
465 	iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
466 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
467 	    AMD_IOMMU_EVENTHEADPTR, 0);
468 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
469 	    AMD_IOMMU_EVENTTAILPTR, 0);
470 
471 	/* dma sync so device sees this init */
472 	SYNC_FORDEV(iommu->aiomt_dmahdl);
473 
474 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
475 		cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
476 		    "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
477 	}
478 
479 	return (DDI_SUCCESS);
480 }
481 
482 static void
483 amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
484 {
485 	dev_info_t *dip = iommu->aiomt_dip;
486 	int instance = ddi_get_instance(dip);
487 	const char *driver = ddi_driver_name(dip);
488 	const char *f = "amd_iommu_teardown_tables_and_buffers";
489 
490 	iommu->aiomt_eventlog = NULL;
491 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
492 	    AMD_IOMMU_EVENTBASE, 0);
493 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
494 	    AMD_IOMMU_EVENTLEN, 0);
495 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
496 	    AMD_IOMMU_EVENTHEADPTR, 0);
497 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
498 	    AMD_IOMMU_EVENTTAILPTR, 0);
499 
500 
501 	iommu->aiomt_cmdbuf = NULL;
502 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
503 	    AMD_IOMMU_COMBASE, 0);
504 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
505 	    AMD_IOMMU_COMLEN, 0);
506 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
507 	    AMD_IOMMU_CMDHEADPTR, 0);
508 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
509 	    AMD_IOMMU_CMDTAILPTR, 0);
510 
511 
512 	iommu->aiomt_devtbl = NULL;
513 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
514 	    AMD_IOMMU_DEVTABBASE, 0);
515 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
516 	    AMD_IOMMU_DEVTABSIZE, 0);
517 
518 	if (iommu->aiomt_dmahdl == NULL || type == AMD_IOMMU_QUIESCE)
519 		return;
520 
521 	/* Unbind the handle */
522 	if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
523 		cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
524 		    "%p for IOMMU idx=%d", f, driver, instance,
525 		    (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
526 	}
527 	iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
528 	iommu->aiomt_buf_dma_cookie.dmac_size = 0;
529 	iommu->aiomt_buf_dma_cookie.dmac_type = 0;
530 	iommu->aiomt_buf_dma_ncookie = 0;
531 
532 	/* Free the table memory allocated for DMA */
533 	ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
534 	iommu->aiomt_dma_mem_hdl = NULL;
535 	iommu->aiomt_dma_bufva = NULL;
536 	iommu->aiomt_dma_mem_realsz = 0;
537 
538 	/* Free the DMA handle */
539 	ddi_dma_free_handle(&iommu->aiomt_dmahdl);
540 	iommu->aiomt_dmahdl = NULL;
541 }
542 
543 static void
544 amd_iommu_enable_interrupts(amd_iommu_t *iommu)
545 {
546 	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
547 	    AMD_IOMMU_CMDBUF_RUN) == 0);
548 	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
549 	    AMD_IOMMU_EVENT_LOG_RUN) == 0);
550 
551 	/* Head and tail pointers must already be set up prior to */
552 	/* enabling the command buffer and event logging below */
553 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
554 	    AMD_IOMMU_CMDBUF_ENABLE, 1);
555 	/* No interrupts for completion wait - too heavyweight; use polling */
556 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
557 	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
558 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
559 	    AMD_IOMMU_EVENTLOG_ENABLE, 1);
560 	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
561 	    AMD_IOMMU_EVENTINT_ENABLE, 1);
562 }
563 
564 static int
565 amd_iommu_setup_exclusion(amd_iommu_t *iommu)
566 {
567 	amd_iommu_acpi_ivmd_t *minfop;
568 
569 	minfop = amd_iommu_lookup_all_ivmd();
570 
571 	if (minfop && minfop->acm_ExclRange == 1) {
572 		cmn_err(CE_NOTE, "Programming exclusion range");
573 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
574 		    AMD_IOMMU_EXCL_BASE_ADDR,
575 		    minfop->acm_ivmd_phys_start >> 12);
576 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
577 		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
578 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
579 		    AMD_IOMMU_EXCL_BASE_EXEN, 1);
580 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
581 		    AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
582 		    minfop->acm_ivmd_phys_len) >> 12);
583 	} else {
584 		if (amd_iommu_debug) {
585 			cmn_err(CE_NOTE, "Skipping exclusion range");
586 		}
587 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
588 		    AMD_IOMMU_EXCL_BASE_ADDR, 0);
589 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
590 		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
591 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
592 		    AMD_IOMMU_EXCL_BASE_EXEN, 0);
593 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
594 		    AMD_IOMMU_EXCL_LIM, 0);
595 	}
596 
597 	return (DDI_SUCCESS);
598 }
599 
600 static void
601 amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
602 {
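	/*
	 * Reprogramming from the ACPI-derived state doubles as teardown:
	 * with no IVMD exclusion range present, setup clears the base
	 * and limit registers and leaves exclusion (EXEN) disabled.
	 */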
603 	(void) amd_iommu_setup_exclusion(iommu);
604 }
605 
606 static uint_t
607 amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
608 {
609 	/*LINTED*/
610 	amd_iommu_t *iommu = (amd_iommu_t *)arg1;
611 	dev_info_t *dip = iommu->aiomt_dip;
612 	int instance = ddi_get_instance(dip);
613 	const char *driver = ddi_driver_name(dip);
614 	const char *f = "amd_iommu_intr_handler";
615 
616 	ASSERT(arg1);
617 	ASSERT(arg2 == NULL);
618 
619 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
620 		cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
621 		    f, driver, instance, iommu->aiomt_idx);
622 	}
623 
624 	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
625 	    AMD_IOMMU_EVENT_LOG_INT) == 1) {
626 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
627 			cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
628 			    "Event Log Interrupt", f, driver, instance,
629 			    iommu->aiomt_idx);
630 		}
631 		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
632 		WAIT_SEC(1);
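		/*
		 * The status bits appear to be write-one-to-clear;
		 * writing 1 below acknowledges the event log interrupt.
		 */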
633 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
634 		    AMD_IOMMU_EVENT_LOG_INT, 1);
635 		return (DDI_INTR_CLAIMED);
636 	}
637 
638 	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
639 	    AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
640 		cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
641 		    "Event Overflow Interrupt", f, driver, instance,
642 		    iommu->aiomt_idx);
643 		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
644 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
645 		    AMD_IOMMU_EVENT_LOG_INT, 1);
646 		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
647 		    AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
648 		return (DDI_INTR_CLAIMED);
649 	}
650 
651 	return (DDI_INTR_UNCLAIMED);
652 }
653 
654 
655 static int
656 amd_iommu_setup_interrupts(amd_iommu_t *iommu)
657 {
658 	dev_info_t *dip = iommu->aiomt_dip;
659 	int instance = ddi_get_instance(dip);
660 	const char *driver = ddi_driver_name(dip);
661 	int intrcap0;
662 	int intrcapN;
663 	int type;
664 	int err;
665 	int req;
666 	int avail;
667 	int p2req;
668 	int actual;
669 	int i;
670 	int j;
671 	const char *f = "amd_iommu_setup_interrupts";
672 
673 	if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
674 		cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
675 		    "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
676 		return (DDI_FAILURE);
677 	}
678 
679 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
680 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
681 		    "Interrupt types supported = 0x%x", f, driver, instance,
682 		    iommu->aiomt_idx, type);
683 	}
684 
685 	/*
686 	 * for now we only support MSI
687 	 */
688 	if ((type & DDI_INTR_TYPE_MSI) == 0) {
689 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
690 		    "MSI interrupts not supported. Failing init.",
691 		    f, driver, instance, iommu->aiomt_idx);
692 		return (DDI_FAILURE);
693 	}
694 
695 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
696 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
697 		    f, driver, instance, iommu->aiomt_idx);
698 	}
699 
700 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
701 	if (err != DDI_SUCCESS) {
702 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
703 		    "ddi_intr_get_nintrs failed err = %d",
704 		    f, driver, instance, iommu->aiomt_idx, err);
705 		return (DDI_FAILURE);
706 	}
707 
708 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
709 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
710 		    "MSI number of interrupts requested: %d",
711 		    f, driver, instance, iommu->aiomt_idx, req);
712 	}
713 
714 	if (req == 0) {
715 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
716 		    "interrupts requested. Failing init", f,
717 		    driver, instance, iommu->aiomt_idx);
718 		return (DDI_FAILURE);
719 	}
720 
721 	err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
722 	if (err != DDI_SUCCESS) {
723 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
724 		    "ddi_intr_get_navail failed err = %d", f,
725 		    driver, instance, iommu->aiomt_idx, err);
726 		return (DDI_FAILURE);
727 	}
728 
729 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
730 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
731 		    "MSI number of interrupts available: %d",
732 		    f, driver, instance, iommu->aiomt_idx, avail);
733 	}
734 
735 	if (avail == 0) {
736 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
737 		    "interrupts available. Failing init", f,
738 		    driver, instance, iommu->aiomt_idx);
739 		return (DDI_FAILURE);
740 	}
741 
742 	if (avail < req) {
743 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
744 		    "interrupts: requested (%d) > available (%d). "
745 		    "Failing init", f, driver, instance, iommu->aiomt_idx,
746 		    req, avail);
747 		return (DDI_FAILURE);
748 	}
749 
750 	/* Allocate memory for DDI interrupt handles */
751 	iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
752 	iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
753 	    KM_SLEEP);
754 
755 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;
756 
757 	/* Convert req to a power of two as required by ddi_intr_alloc */
758 	p2req = 0;
759 	while (1<<p2req <= req)
760 		p2req++;
761 	p2req--;
762 	req = 1<<p2req;
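	/*
	 * For example, a hypothetical req of 3 exits the loop with
	 * p2req = 2; the decrement gives p2req = 1 and req = 2, i.e.
	 * the largest power of two <= the original request.
	 */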
763 
764 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
765 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
766 		    "MSI power of 2 number of interrupts: %d,%d",
767 		    f, driver, instance, iommu->aiomt_idx, p2req, req);
768 	}
769 
770 	err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
771 	    DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
772 	if (err != DDI_SUCCESS) {
773 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
774 		    "ddi_intr_alloc failed: err = %d",
775 		    f, driver, instance, iommu->aiomt_idx, err);
776 		amd_iommu_teardown_interrupts(iommu);
777 		return (DDI_FAILURE);
778 	}
779 
780 	iommu->aiomt_actual_intrs = actual;
781 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;
782 
783 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
784 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
785 		    "number of interrupts actually allocated %d",
786 		    f, driver, instance, iommu->aiomt_idx, actual);
787 	}
788 
789 	if (iommu->aiomt_actual_intrs < req) {
790 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
791 		    "ddi_intr_alloc failed: actual (%d) < req (%d)",
792 		    f, driver, instance, iommu->aiomt_idx,
793 		    iommu->aiomt_actual_intrs, req);
794 		amd_iommu_teardown_interrupts(iommu);
795 		return (DDI_FAILURE);
796 	}
797 
798 	for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
799 		if ((err = ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
800 		    amd_iommu_intr_handler, (void *)iommu, NULL))
801 		    != DDI_SUCCESS) {
802 			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
803 			    "ddi_intr_add_handler failed: intr = %d, err = %d",
804 			    f, driver, instance, iommu->aiomt_idx, i, err);
805 			for (j = 0; j < i; j++) {
806 				(void) ddi_intr_remove_handler(
807 				    iommu->aiomt_intr_htable[j]);
808 			}
809 			amd_iommu_teardown_interrupts(iommu);
810 			return (DDI_FAILURE);
811 		}
812 	}
813 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;
814 
815 	intrcap0 = intrcapN = -1;
816 	if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
817 	    != DDI_SUCCESS ||
818 	    ddi_intr_get_cap(
819 	    iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
820 	    != DDI_SUCCESS || intrcap0 != intrcapN) {
821 		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
822 		    "ddi_intr_get_cap failed or inconsistent cap among "
823 		    "interrupts: intrcap0 (%d) < intrcapN (%d)",
824 		    f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
825 		amd_iommu_teardown_interrupts(iommu);
826 		return (DDI_FAILURE);
827 	}
828 	iommu->aiomt_intr_cap = intrcap0;
829 
830 	if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
831 		/* Need to call block enable */
832 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
833 			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
834 			    "Need to call block enable",
835 			    f, driver, instance, iommu->aiomt_idx);
836 		}
837 		if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
838 		    iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
839 			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
840 			    "ddi_intr_block enable failed ", f, driver,
841 			    instance, iommu->aiomt_idx);
842 			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
843 			    iommu->aiomt_actual_intrs);
844 			amd_iommu_teardown_interrupts(iommu);
845 			return (DDI_FAILURE);
846 		}
847 	} else {
848 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
849 			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
850 			    "Need to call individual enable",
851 			    f, driver, instance, iommu->aiomt_idx);
852 		}
853 		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
854 			if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
855 			    != DDI_SUCCESS) {
856 				cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
857 				    "ddi_intr_enable failed: intr = %d", f,
858 				    driver, instance, iommu->aiomt_idx, i);
859 				for (j = 0; j < i; j++) {
860 					(void) ddi_intr_disable(
861 					    iommu->aiomt_intr_htable[j]);
862 				}
863 				amd_iommu_teardown_interrupts(iommu);
864 				return (DDI_FAILURE);
865 			}
866 		}
867 	}
868 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;
869 
870 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
871 		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
872 		    "Interrupts successfully %s enabled. # of interrupts = %d",
873 		    f, driver, instance, iommu->aiomt_idx,
874 		    (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
875 		    "(individually)", iommu->aiomt_actual_intrs);
876 	}
877 
878 	return (DDI_SUCCESS);
879 }
880 
881 static void
882 amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
883 {
884 	int i;
885 
886 	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
887 		if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
888 			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
889 			    iommu->aiomt_actual_intrs);
890 		} else {
891 			for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
892 				(void) ddi_intr_disable(
893 				    iommu->aiomt_intr_htable[i]);
894 			}
895 		}
896 	}
897 
898 	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
899 		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
900 			(void) ddi_intr_remove_handler(
901 			    iommu->aiomt_intr_htable[i]);
902 		}
903 	}
904 
905 	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
906 		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
907 			(void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
908 		}
909 	}
910 	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
911 		kmem_free(iommu->aiomt_intr_htable,
912 		    iommu->aiomt_intr_htable_sz);
913 	}
914 	iommu->aiomt_intr_htable = NULL;
915 	iommu->aiomt_intr_htable_sz = 0;
916 	iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
917 }
918 
919 static amd_iommu_t *
920 amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
921     uint16_t cap_base)
922 {
923 	amd_iommu_t *iommu;
924 	int instance = ddi_get_instance(dip);
925 	const char *driver = ddi_driver_name(dip);
926 	uint32_t caphdr;
927 	uint32_t low_addr32;
928 	uint32_t hi_addr32;
929 	uint32_t range;
930 	uint32_t misc;
931 	uint64_t pgoffset;
932 	amd_iommu_acpi_global_t *global;
933 	amd_iommu_acpi_ivhd_t *hinfop;
934 	int bus, device, func;
935 	const char *f = "amd_iommu_init";
936 
937 	low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
938 	    AMD_IOMMU_CAP_ADDR_LOW_OFF);
939 	if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
940 		cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
941 		    "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
942 		    instance, idx);
943 		return (NULL);
944 	}
945 
946 	iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
947 	mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
948 	mutex_enter(&iommu->aiomt_mutex);
949 
950 	mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
951 	mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);
952 
953 	iommu->aiomt_dip = dip;
954 	iommu->aiomt_idx = idx;
955 
956 	if (acpica_get_bdf(iommu->aiomt_dip, &bus, &device, &func)
957 	    != DDI_SUCCESS) {
958 		cmn_err(CE_WARN, "%s: %s%d: Failed to get BDF"
959 		    "Unable to use IOMMU unit idx=%d - skipping ...",
960 		    f, driver, instance, idx);
961 		return (NULL);
962 	}
963 
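	/*
	 * Standard PCI BDF packing: bus in bits 15:8, device in
	 * bits 7:3, function in bits 2:0.
	 */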
964 	iommu->aiomt_bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) |
965 	    (uint8_t)func;
966 
967 	/*
968 	 * Since everything in the capability block is locked and RO at this
969 	 * point, copy everything into the IOMMU struct
970 	 */
971 
972 	/* Get cap header */
973 	caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
974 	iommu->aiomt_cap_hdr = caphdr;
975 	iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
976 	    AMD_IOMMU_CAP_NPCACHE);
977 	iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);
978 
979 	global = amd_iommu_lookup_acpi_global();
980 	hinfop = amd_iommu_lookup_any_ivhd(iommu);
981 
982 	if (hinfop)
983 		iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
984 	else
985 		iommu->aiomt_iotlb =
986 		    AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);
987 
988 	iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
989 	iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
990 
991 	/*
992 	 * Get address of IOMMU control registers
993 	 */
994 	hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
995 	    AMD_IOMMU_CAP_ADDR_HI_OFF);
996 	iommu->aiomt_low_addr32 = low_addr32;
997 	iommu->aiomt_hi_addr32 = hi_addr32;
998 	low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;
999 
1000 	if (hinfop) {
1001 		iommu->aiomt_reg_pa =  hinfop->ach_IOMMU_reg_base;
1002 		ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
1003 	} else {
1004 		iommu->aiomt_reg_pa =  ((uint64_t)hi_addr32 << 32 | low_addr32);
1005 	}
1006 
1007 	/*
1008 	 * Get cap range reg
1009 	 */
1010 	range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
1011 	iommu->aiomt_range = range;
1012 	iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
1013 	    AMD_IOMMU_RNG_VALID);
1014 	if (iommu->aiomt_rng_valid) {
1015 		iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
1016 		    AMD_IOMMU_RNG_BUS);
1017 		iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
1018 		    AMD_IOMMU_FIRST_DEVFN);
1019 		iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
1020 		    AMD_IOMMU_LAST_DEVFN);
1021 	} else {
1022 		iommu->aiomt_rng_bus = 0;
1023 		iommu->aiomt_first_devfn = 0;
1024 		iommu->aiomt_last_devfn = 0;
1025 	}
1026 
1027 	if (hinfop)
1028 		iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
1029 	else
1030 		iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
1031 		    AMD_IOMMU_HT_UNITID);
1032 
1033 	/*
1034 	 * Get cap misc reg
1035 	 */
1036 	misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
1037 	iommu->aiomt_misc = misc;
1038 
1039 	if (global) {
1040 		iommu->aiomt_htatsresv = global->acg_HtAtsResv;
1041 		iommu->aiomt_vasize = global->acg_VAsize;
1042 		iommu->aiomt_pasize = global->acg_PAsize;
1043 	} else {
1044 		iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
1045 		    AMD_IOMMU_HT_ATSRSV);
1046 		iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
1047 		    AMD_IOMMU_VA_SIZE);
1048 		iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
1049 		    AMD_IOMMU_PA_SIZE);
1050 	}
1051 
1052 	if (hinfop) {
1053 		iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
1054 	} else {
1055 		iommu->aiomt_msinum =
1056 		    AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
1057 	}
1058 
1059 	/*
1060 	 * Set up mapping between control registers PA and VA
1061 	 */
1062 	pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
1063 	ASSERT(pgoffset == 0);
1064 	iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
1065 	iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);
1066 
1067 	iommu->aiomt_va = (uintptr_t)device_arena_alloc(
1068 	    ptob(iommu->aiomt_reg_pages), VM_SLEEP);
1069 	if (iommu->aiomt_va == 0) {
1070 		cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
1071 		    "control regs. Skipping IOMMU idx=%d", f, driver,
1072 		    instance, idx);
1073 		mutex_exit(&iommu->aiomt_mutex);
1074 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1075 		return (NULL);
1076 	}
1077 
1078 	hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
1079 	    iommu->aiomt_reg_size,
1080 	    mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
1081 	    | HAT_STRICTORDER, HAT_LOAD_LOCK);
1082 
1083 	iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;
1084 
1085 	/*
1086 	 * Setup the various control register's VA
1087 	 */
1088 	iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
1089 	    AMD_IOMMU_DEVTBL_REG_OFF;
1090 	iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
1091 	    AMD_IOMMU_CMDBUF_REG_OFF;
1092 	iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
1093 	    AMD_IOMMU_EVENTLOG_REG_OFF;
1094 	iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
1095 	    AMD_IOMMU_CTRL_REG_OFF;
1096 	iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
1097 	    AMD_IOMMU_EXCL_BASE_REG_OFF;
1098 	iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
1099 	    AMD_IOMMU_EXCL_LIM_REG_OFF;
1100 	iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
1101 	    AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
1102 	iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
1103 	    AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
1104 	iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
1105 	    AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
1106 	iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
1107 	    AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
1108 	iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
1109 	    AMD_IOMMU_STATUS_REG_OFF;
1110 
1111 
1112 	/*
1113 	 * Setup the DEVICE table, CMD buffer, and LOG buffer in
1114 	 * memory and setup DMA access to this memory location
1115 	 */
1116 	if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
1117 		mutex_exit(&iommu->aiomt_mutex);
1118 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1119 		return (NULL);
1120 	}
1121 
1122 	if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
1123 		mutex_exit(&iommu->aiomt_mutex);
1124 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1125 		return (NULL);
1126 	}
1127 
1128 	amd_iommu_enable_interrupts(iommu);
1129 
1130 	if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
1131 		mutex_exit(&iommu->aiomt_mutex);
1132 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1133 		return (NULL);
1134 	}
1135 
1136 	/*
1137 	 * need to setup domain table before gfx bypass
1138 	 */
1139 	amd_iommu_init_page_tables(iommu);
1140 
1141 	/*
1142 	 * Set pass-thru for special devices like IOAPIC and HPET
1143 	 *
1144 	 * Also, gfx devices don't use DDI for DMA. No need to register
1145 	 * before setting up gfx passthru
1146 	 */
1147 	if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
1148 		mutex_exit(&iommu->aiomt_mutex);
1149 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1150 		return (NULL);
1151 	}
1152 
1153 	/* Initialize device table entries based on ACPI settings */
1154 	if (amd_iommu_acpi_init_devtbl(iommu) !=  DDI_SUCCESS) {
1155 		cmn_err(CE_WARN, "%s: %s%d: Can't initialize device table",
1156 		    f, driver, instance);
1157 		mutex_exit(&iommu->aiomt_mutex);
1158 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1159 		return (NULL);
1160 	}
1161 
1162 	if (amd_iommu_start(iommu) != DDI_SUCCESS) {
1163 		mutex_exit(&iommu->aiomt_mutex);
1164 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1165 		return (NULL);
1166 	}
1167 
1168 	/* xxx register/start race  */
1169 	if (amd_iommu_register(iommu) != DDI_SUCCESS) {
1170 		mutex_exit(&iommu->aiomt_mutex);
1171 		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
1172 		return (NULL);
1173 	}
1174 
1175 	if (amd_iommu_debug) {
1176 		cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
1177 		    instance, idx);
1178 	}
1179 
1180 	return (iommu);
1181 }
1182 
1183 static int
1184 amd_iommu_fini(amd_iommu_t *iommu, int type)
1185 {
1186 	int idx = iommu->aiomt_idx;
1187 	dev_info_t *dip = iommu->aiomt_dip;
1188 	int instance = ddi_get_instance(dip);
1189 	const char *driver = ddi_driver_name(dip);
1190 	const char *f = "amd_iommu_fini";
1191 
1192 	if (type == AMD_IOMMU_TEARDOWN) {
1193 		mutex_enter(&iommu->aiomt_mutex);
1194 		if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
1195 			cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
1196 			    "idx = %d", f, driver, instance, idx);
1197 			return (DDI_FAILURE);
1198 		}
1199 	}
1200 
1201 	amd_iommu_stop(iommu);
1202 
1203 	if (type == AMD_IOMMU_TEARDOWN) {
1204 		amd_iommu_fini_page_tables(iommu);
1205 		amd_iommu_teardown_interrupts(iommu);
1206 		amd_iommu_teardown_exclusion(iommu);
1207 	}
1208 
1209 	amd_iommu_teardown_tables_and_buffers(iommu, type);
1210 
1211 	if (type == AMD_IOMMU_QUIESCE)
1212 		return (DDI_SUCCESS);
1213 
1214 	if (iommu->aiomt_va != NULL) {
1215 		hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
1216 		    iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
1217 		device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
1218 		    ptob(iommu->aiomt_reg_pages));
1219 		iommu->aiomt_va = NULL;
1220 		iommu->aiomt_reg_va = NULL;
1221 	}
1222 	mutex_destroy(&iommu->aiomt_eventlock);
1223 	mutex_destroy(&iommu->aiomt_cmdlock);
1224 	mutex_exit(&iommu->aiomt_mutex);
1225 	mutex_destroy(&iommu->aiomt_mutex);
1226 	kmem_free(iommu, sizeof (amd_iommu_t));
1227 
1228 	cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
1229 	    f, driver, instance, idx);
1230 
1231 	return (DDI_SUCCESS);
1232 }
1233 
1234 int
1235 amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
1236 {
1237 	int instance = ddi_get_instance(dip);
1238 	const char *driver = ddi_driver_name(dip);
1239 	ddi_acc_handle_t handle;
1240 	uint8_t base_class;
1241 	uint8_t sub_class;
1242 	uint8_t prog_class;
1243 	int idx;
1244 	uint32_t id;
1245 	uint16_t cap_base;
1246 	uint32_t caphdr;
1247 	uint8_t cap_type;
1248 	uint8_t cap_id;
1249 	amd_iommu_t *iommu;
1250 	const char *f = "amd_iommu_setup";
1251 
1252 	ASSERT(instance >= 0);
1253 	ASSERT(driver);
1254 
1255 	/* First setup PCI access to config space */
1256 
1257 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
1258 		cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
1259 		    f, driver, instance);
1260 		return (DDI_FAILURE);
1261 	}
1262 
1263 	/*
1264 	 * The AMD IOMMU is part of an independent PCI function. There may be
1265 	 * more than one IOMMU in that PCI function
1266 	 */
1267 	base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
1268 	sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
1269 	prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);
1270 
1271 	if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
1272 	    prog_class != AMD_IOMMU_PCI_PROG_IF) {
1273 		cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
1274 		    "subclass(0x%x)/programming interface(0x%x)", f, driver,
1275 		    instance, base_class, sub_class, prog_class);
1276 		pci_config_teardown(&handle);
1277 		return (DDI_FAILURE);
1278 	}
1279 
1280 	/*
1281 	 * Find and initialize all IOMMU units in this function
1282 	 */
1283 	for (idx = 0; ; idx++) {
1284 		if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
1285 			break;
1286 
1287 		/* check if cap ID is secure device cap id */
1288 		if (id != PCI_CAP_ID_SECURE_DEV) {
1289 			if (amd_iommu_debug) {
1290 				cmn_err(CE_NOTE,
1291 				    "%s: %s%d: skipping IOMMU: idx(0x%x) "
1292 				    "cap ID (0x%x) != secure dev capid (0x%x)",
1293 				    f, driver, instance, idx, id,
1294 				    PCI_CAP_ID_SECURE_DEV);
1295 			}
1296 			continue;
1297 		}
1298 
1299 		/* check if cap type is IOMMU cap type */
1300 		caphdr = PCI_CAP_GET32(handle, 0, cap_base,
1301 		    AMD_IOMMU_CAP_HDR_OFF);
1302 		cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
1303 		cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
1304 
1305 		if (cap_type != AMD_IOMMU_CAP) {
1306 			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
1307 			    "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
1308 			    driver, instance, idx, cap_type, AMD_IOMMU_CAP);
1309 			continue;
1310 		}
1311 		ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
1312 		ASSERT(cap_id == id);
1313 
1314 		iommu = amd_iommu_init(dip, handle, idx, cap_base);
1315 		if (iommu == NULL) {
1316 			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
1317 			    "failed to init IOMMU", f,
1318 			    driver, instance, idx);
1319 			continue;
1320 		}
1321 
1322 		if (statep->aioms_iommu_start == NULL) {
1323 			statep->aioms_iommu_start = iommu;
1324 		} else {
1325 			statep->aioms_iommu_end->aiomt_next = iommu;
1326 		}
1327 		statep->aioms_iommu_end = iommu;
1328 
1329 		statep->aioms_nunits++;
1330 	}
1331 
1332 	pci_config_teardown(&handle);
1333 
1334 	if (amd_iommu_debug) {
1335 		cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
1336 		    f, driver, instance, (void *)statep, statep->aioms_nunits);
1337 	}
1338 
1339 	return (DDI_SUCCESS);
1340 }
1341 
1342 int
1343 amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type)
1344 {
1345 	int instance = ddi_get_instance(dip);
1346 	const char *driver = ddi_driver_name(dip);
1347 	amd_iommu_t *iommu, *next_iommu;
1348 	int teardown;
1349 	int error = DDI_SUCCESS;
1350 	const char *f = "amd_iommu_teardown";
1351 
1352 	teardown = 0;
1353 	for (iommu = statep->aioms_iommu_start; iommu;
1354 	    iommu = next_iommu) {
1355 		ASSERT(statep->aioms_nunits > 0);
1356 		next_iommu = iommu->aiomt_next;
1357 		if (amd_iommu_fini(iommu, type) != DDI_SUCCESS) {
1358 			error = DDI_FAILURE;
1359 			continue;
1360 		}
1361 		statep->aioms_nunits--;
1362 		teardown++;
1363 	}
1364 
1365 	cmn_err(CE_NOTE, "%s: %s%d: state=%p: toredown %d units. "
1366 	    "%d units left", f, driver, instance, (void *)statep,
1367 	    teardown, statep->aioms_nunits);
1368 
1369 	return (error);
1370 }
1371 
1372 dev_info_t *
1373 amd_iommu_pci_dip(dev_info_t *rdip, const char *path)
1374 {
1375 	dev_info_t *pdip;
1376 	const char *driver = ddi_driver_name(rdip);
1377 	int instance = ddi_get_instance(rdip);
1378 	const char *f = "amd_iommu_pci_dip";
1379 
1380 	/* Hold rdip so it and its parents don't go away */
1381 	ndi_hold_devi(rdip);
1382 
1383 	if (ddi_is_pci_dip(rdip))
1384 		return (rdip);
1385 
1386 	pdip = rdip;
1387 	while (pdip = ddi_get_parent(pdip)) {
1388 		if (ddi_is_pci_dip(pdip)) {
1389 			ndi_hold_devi(pdip);
1390 			ndi_rele_devi(rdip);
1391 			return (pdip);
1392 		}
1393 	}
1394 
1395 	cmn_err(
1396 #ifdef	DEBUG
1397 	    CE_PANIC,
1398 #else
1399 	    CE_WARN,
1400 #endif	/* DEBUG */
1401 	    "%s: %s%d dip = %p has no PCI parent, path = %s",
1402 	    f, driver, instance, (void *)rdip, path);
1403 
1404 	ndi_rele_devi(rdip);
1405 
1406 	return (NULL);
1407 }
1408 
1409 /* Interface with IOMMULIB */
1410 /*ARGSUSED*/
1411 static int
1412 amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
1413 {
1414 	const char *driver = ddi_driver_name(rdip);
1415 	char *s;
1416 	int bus, device, func, bdf;
1417 	amd_iommu_acpi_ivhd_t *hinfop;
1418 	dev_info_t *pci_dip;
1419 	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
1420 	const char *f = "amd_iommu_probe";
1421 	int instance = ddi_get_instance(iommu->aiomt_dip);
1422 	const char *idriver = ddi_driver_name(iommu->aiomt_dip);
1423 	char *path, *pathp;
1424 
1425 	if (amd_iommu_disable_list) {
1426 		s = strstr(amd_iommu_disable_list, driver);
1427 		if (s == NULL)
1428 			return (DDI_SUCCESS);
1429 		if (s == amd_iommu_disable_list || *(s - 1) == ':') {
1430 			s += strlen(driver);
1431 			if (*s == '\0' || *s == ':') {
1432 				amd_iommu_set_passthru(iommu, rdip);
1433 				return (DDI_FAILURE);
1434 			}
1435 		}
1436 	}
1437 
1438 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1439 	if ((pathp = ddi_pathname(rdip, path)) == NULL)
1440 		pathp = "<unknown>";
1441 
1442 	pci_dip = amd_iommu_pci_dip(rdip, path);
1443 	if (pci_dip == NULL) {
1444 		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
1445 		    "for rdip=%p, path = %s",
1446 		    f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
1447 		    pathp);
1448 		kmem_free(path, MAXPATHLEN);
1449 		return (DDI_FAILURE);
1450 	}
1451 
1452 	if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
1453 		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get BDF "
1454 		    "for rdip=%p, path = %s",
1455 		    f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
1456 		    pathp);
1457 		kmem_free(path, MAXPATHLEN);
1458 		return (DDI_FAILURE);
1459 	}
1460 	kmem_free(path, MAXPATHLEN);
1461 
1462 	/*
1463 	 * See whether device is described by IVRS as being managed
1464 	 * by this IOMMU
1465 	 */
1466 	bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;
1467 	hinfop = amd_iommu_lookup_ivhd(bdf);
1468 	if (hinfop && hinfop->ach_IOMMU_deviceid == iommu->aiomt_bdf)
1469 		return (DDI_SUCCESS);
1470 
1471 	return (DDI_FAILURE);
1472 }
1473 
1474 /*ARGSUSED*/
1475 static int
1476 amd_iommu_allochdl(iommulib_handle_t handle,
1477     dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1478     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
1479 {
1480 	return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
1481 	    arg, dma_handlep));
1482 }
1483 
1484 /*ARGSUSED*/
1485 static int
1486 amd_iommu_freehdl(iommulib_handle_t handle,
1487     dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
1488 {
1489 	return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
1490 }
1491 
1492 /*ARGSUSED*/
1493 static int
1494 map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
1495     struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
1496     int km_flags)
1497 {
1498 	const char *driver = ddi_driver_name(iommu->aiomt_dip);
1499 	int instance = ddi_get_instance(iommu->aiomt_dip);
1500 	int idx = iommu->aiomt_idx;
1501 	int i;
1502 	uint64_t start_va;
1503 	char *path;
1504 	int error = DDI_FAILURE;
1505 	const char *f = "map_current_window";
1506 
1507 	path = kmem_alloc(MAXPATHLEN, km_flags);
1508 	if (path == NULL) {
1509 		return (DDI_DMA_NORESOURCES);
1510 	}
1511 
1512 	(void) ddi_pathname(rdip, path);
1513 	mutex_enter(&amd_iommu_pgtable_lock);
1514 
1515 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
1516 		cmn_err(CE_NOTE, "%s: %s%d: idx=%d Attempting to get cookies "
1517 		    "from handle for device %s",
1518 		    f, driver, instance, idx, path);
1519 	}
1520 
1521 	start_va = 0;
1522 	for (i = 0; i < ccount; i++) {
1523 		if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
1524 		    cookie_array[i].dmac_cookie_addr,
1525 		    cookie_array[i].dmac_size,
1526 		    AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
1527 			break;
1528 		}
1529 		cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
1530 		cookie_array[i].dmac_type = 0;
1531 	}
1532 
1533 	if (i != ccount) {
1534 		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
1535 		    "for device %s", f, driver, instance, idx, i, path);
1536 		(void) unmap_current_window(iommu, rdip, cookie_array,
1537 		    ccount, i, 1);
1538 		goto out;
1539 	}
1540 
1541 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
1542 		cmn_err(CE_NOTE, "%s: return SUCCESS", f);
1543 	}
1544 
1545 	error = DDI_DMA_MAPPED;
1546 out:
1547 	mutex_exit(&amd_iommu_pgtable_lock);
1548 	kmem_free(path, MAXPATHLEN);
1549 	return (error);
1550 }
1551 
1552 /*ARGSUSED*/
1553 static int
1554 unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
1555     ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
1556 {
1557 	const char *driver = ddi_driver_name(iommu->aiomt_dip);
1558 	int instance = ddi_get_instance(iommu->aiomt_dip);
1559 	int idx = iommu->aiomt_idx;
1560 	int i;
1561 	int error = DDI_FAILURE;
1562 	char *path;
1563 	int pathfree;
1564 	const char *f = "unmap_current_window";
1565 
1566 	if (!locked)
1567 		mutex_enter(&amd_iommu_pgtable_lock);
1568 
1569 	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
1570 	if (path) {
1571 		(void) ddi_pathname(rdip, path);
1572 		pathfree = 1;
1573 	} else {
1574 		path = "<path-mem-alloc-failed>";
1575 		pathfree = 0;
1576 	}
1577 
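	/* A caller passing ncookies == -1 asks to unmap all ccount cookies */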
1578 	if (ncookies == -1)
1579 		ncookies = ccount;
1580 
1581 	for (i = 0; i < ncookies; i++) {
1582 		if (amd_iommu_unmap_va(iommu, rdip,
1583 		    cookie_array[i].dmac_cookie_addr,
1584 		    cookie_array[i].dmac_size,
1585 		    AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
1586 			break;
1587 		}
1588 	}
1589 
1590 	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
1591 	    != DDI_SUCCESS) {
1592 		cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
1593 		    f, path);
1594 	}
1595 
1596 	if (i != ncookies) {
1597 		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
1598 		    "for device %s", f, driver, instance, idx, i, path);
1599 		error = DDI_FAILURE;
1600 		goto out;
1601 	}
1602 
1603 	error = DDI_SUCCESS;
1604 
1605 out:
1606 	if (pathfree)
1607 		kmem_free(path, MAXPATHLEN);
1608 	if (!locked)
1609 		mutex_exit(&amd_iommu_pgtable_lock);
1610 	return (error);
1611 }
1612 
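/*
 * DDI DMA bind entry point.  The underlying bind generates the list of
 * physical cookies; each cookie is then remapped through the IOMMU by
 * map_current_window() and the rewritten (virtual) cookies are set back
 * on the handle.  Any failure unwinds the underlying bind.
 */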
/*ARGSUSED*/
static int
amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	int dma_error = DDI_DMA_NOMAPPING;
	int error;
	char *path;
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	ddi_dma_impl_t *hp;
	ddi_dma_attr_t *attrp;
	int km_flags;
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_bindhdl";

	dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
	    dmareq, cookiep, ccountp);

	if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
		return (dma_error);

	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);

	path = kmem_alloc(MAXPATHLEN, km_flags);
	if (path) {
		(void) ddi_pathname(rdip, path);
	} else {
		dma_error = DDI_DMA_NORESOURCES;
		goto unbind;
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
		cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
		    f, path,
		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
		    *ccountp);
	}

	if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
	    &cookie_array, &ccount)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %s", f, driver, instance, path);
		dma_error = error;
		goto unbind;
	}

	hp = (ddi_dma_impl_t *)dma_handle;
	attrp = &hp->dmai_attr;

	error = map_current_window(iommu, rdip, attrp, dmareq,
	    cookie_array, ccount, km_flags);
	if (error != DDI_SUCCESS) {
		dma_error = error;
		goto unbind;
	}

	if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
	    cookie_array, ccount)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
		    "for device %s", f, driver, instance, path);
		dma_error = error;
		goto unbind;
	}

	*cookiep = cookie_array[0];

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
		cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
		    f, path,
		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
		    *ccountp);
	}

	kmem_free(path, MAXPATHLEN);
	ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
	return (dma_error);
unbind:
	if (path)
		kmem_free(path, MAXPATHLEN);
	(void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
	return (dma_error);
}

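/*
 * DDI DMA unbind entry point.  The remapped cookies are retrieved and
 * detached from the handle, the underlying unbind is performed, and the
 * IOMMU translations for the current window are torn down.
 */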
/*ARGSUSED*/
static int
amd_iommu_unbindhdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
{
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int error = DDI_FAILURE;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_unbindhdl";

	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
		    f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
		    "for dip=%p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
	} else {
		error = DDI_SUCCESS;
	}
out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
	return (error);
}

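/*
 * DDI DMA sync entry point.  The remapped cookies are detached from the
 * handle around the call to the underlying sync routine and reattached
 * afterwards, presumably so the root nexus sees the handle in its
 * pre-remap state.
 */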
/*ARGSUSED*/
static int
amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
    size_t len, uint_t cache_flags)
{
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int error;
	const char *f = "amd_iommu_sync";

	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		ASSERT(cookie_array == NULL);
		cmn_err(CE_WARN, "%s: Cannot get cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Cannot clear cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
	    len, cache_flags);

	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
	    ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Cannot set cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
	} else {
		cookie_array = NULL;
		ccount = 0;
	}

out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
	return (error);
}

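/*
 * DDI DMA window entry point.  The cookies of the current window are
 * detached from the handle, the underlying window switch is performed,
 * the old window's IOMMU translations are torn down, and the new
 * window's cookies are then remapped.  Since the original ddi_dma_req
 * is not available here, a synthetic read/write request is used for
 * the remap.
 */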
/*ARGSUSED*/
static int
amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	int error = DDI_FAILURE;
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int km_flags;
	ddi_dma_impl_t *hp;
	ddi_dma_attr_t *attrp;
	struct ddi_dma_req sdmareq = {0};
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_win";

	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);

	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
	    offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed switch windows for dip=%p",
		    f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	(void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);

	if (cookie_array) {
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
		cookie_array = NULL;
		ccount = 0;
	}

	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	hp = (ddi_dma_impl_t *)dma_handle;
	attrp = &hp->dmai_attr;

	sdmareq.dmar_flags = DDI_DMA_RDWR;
	error = map_current_window(iommu, rdip, attrp, &sdmareq,
	    cookie_array, ccount, km_flags);

	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
	    ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	*cookiep = cookie_array[0];

	return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);

	return (error);
}

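/*
 * DMA object (un)mapping entry points are not implemented; both stubs
 * simply report DDI_ENOTSUP.
 */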
/*ARGSUSED*/
static int
amd_iommu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao)
{
	return (DDI_ENOTSUP);
}

/*ARGSUSED*/
static int
amd_iommu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
{
	return (DDI_ENOTSUP);
}

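/*
 * Read a bit field out of a 64-bit IOMMU register by fetching the two
 * 32-bit halves separately into a split_t (presumably a union of one
 * uint64_t and two uint32_ts, defined in amd_iommu_impl.h), apparently
 * to avoid a single 64-bit access to the hardware register.
 */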
uint64_t
amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
{
	split_t s;
	uint32_t *ptr32 = (uint32_t *)regp;
	uint64_t *s64p = &(s.u64);

	s.u32[0] = ptr32[0];
	s.u32[1] = ptr32[1];

	return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
}

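/*
 * Read-modify-write counterpart of the workaround above: the register
 * is read as two 32-bit halves, the requested field is updated in the
 * local copy, and the merged 64-bit value is stored back and returned.
 */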
uint64_t
amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
{
	split_t s;
	uint32_t *ptr32 = (uint32_t *)regp;
	uint64_t *s64p = &(s.u64);

	s.u32[0] = ptr32[0];
	s.u32[1] = ptr32[1];

	AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);

	*regp = s.u64;

	return (s.u64);
}

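/*
 * Consume IOMMU-related properties from the boot environment:
 * "amd-iommu" ("no"/"false" disables the IOMMU entirely) and
 * "amd-iommu-disable-list" (drivers exempted from IOMMU translation).
 */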
void
amd_iommu_read_boot_props(void)
{
	char *propval;

	/*
	 * If the "amd-iommu" boot property is set to "no" or "false",
	 * disable the AMD IOMMU.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
		if (strcmp(propval, "no") == 0 ||
		    strcmp(propval, "false") == 0) {
			amd_iommu_disable = 1;
		}
		ddi_prop_free(propval);
	}

	/*
	 * Copy the list of drivers for which the IOMMU is disabled by
	 * the user.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
	    == DDI_SUCCESS) {
		amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
		    KM_SLEEP);
		(void) strcpy(amd_iommu_disable_list, propval);
		ddi_prop_free(propval);
	}
}

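/*
 * The same two knobs, sourced from the driver's .conf file instead of
 * the boot environment (hence DDI_PROP_NOTPROM); note that only the
 * value "no" disables the IOMMU here.
 */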
void
amd_iommu_lookup_conf_props(dev_info_t *dip)
{
	char *disable;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
	    == DDI_PROP_SUCCESS) {
		if (strcmp(disable, "no") == 0) {
			amd_iommu_disable = 1;
		}
		ddi_prop_free(disable);
	}

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
	    &disable) == DDI_PROP_SUCCESS) {
		amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
		    KM_SLEEP);
		(void) strcpy(amd_iommu_disable_list, disable);
		ddi_prop_free(disable);
	}
}

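/*
 * Illustrative only: a hypothetical driver.conf fragment exercising the
 * two properties consumed above.  The driver name in the disable list is
 * made up, and the exact list syntax is handled by the parser for
 * amd_iommu_disable_list, which lives outside this file.
 *
 *	amd-iommu="no";
 *	amd-iommu-disable-list="xyzdrv";
 */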