/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/iommulib.h>
#include <sys/amd_iommu.h>
#include <sys/pci_cap.h>
#include <sys/bootconf.h>
#include <sys/ddidmareq.h>

#include "amd_iommu_impl.h"
#include "amd_iommu_acpi.h"
#include "amd_iommu_page_tables.h"

static int amd_iommu_fini(amd_iommu_t *iommu, int type);
static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
static void amd_iommu_stop(amd_iommu_t *iommu);

static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
static int amd_iommu_allochdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
static int amd_iommu_freehdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp);
static int amd_iommu_unbindhdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
    size_t len, uint_t cache_flags);
static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp);
static int amd_iommu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao);
static int amd_iommu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao);

static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
    ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);

extern void *device_arena_alloc(size_t size, int vm_flag);
extern void device_arena_free(void *vaddr, size_t size);

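/*
 * DMA attributes for the IOMMU's own in-memory structures (device table,
 * command buffer, event log): 64-bit addressable, 4K aligned, single
 * scatter/gather segment.  These constrain the allocation done in
 * amd_iommu_setup_tables_and_buffers() below.
 */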
ddi_dma_attr_t amd_iommu_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)4096,			/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	64,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	64,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

ddi_device_acc_attr_t amd_iommu_devacc = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

struct iommulib_ops amd_iommulib_ops = {
	IOMMU_OPS_VERSION,
	AMD_IOMMU,
	"AMD IOMMU Vers. 1",
	NULL,
	amd_iommu_probe,
	amd_iommu_allochdl,
	amd_iommu_freehdl,
	amd_iommu_bindhdl,
	amd_iommu_unbindhdl,
	amd_iommu_sync,
	amd_iommu_win,
	amd_iommu_mapobject,
	amd_iommu_unmapobject,
};

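/*
 * Serializes all IOMMU page-table map/unmap operations; shared across
 * every IOMMU unit in the system (see map_current_window() and
 * unmap_current_window() below).
 */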
static kmutex_t amd_iommu_pgtable_lock;

static int
amd_iommu_register(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	const char *driver = ddi_driver_name(dip);
	int instance = ddi_get_instance(dip);
	iommulib_ops_t *iommulib_ops;
	iommulib_handle_t handle;
	const char *f = "amd_iommu_register";

	iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);

	*iommulib_ops = amd_iommulib_ops;

	iommulib_ops->ilops_data = (void *)iommu;
	iommu->aiomt_iommulib_ops = iommulib_ops;

	if (iommulib_iommu_register(dip, iommulib_ops, &handle)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
		    "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
		kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
		return (DDI_FAILURE);
	}

	iommu->aiomt_iommulib_handle = handle;

	return (DDI_SUCCESS);
}

static int
amd_iommu_unregister(amd_iommu_t *iommu)
{
	if (iommu->aiomt_iommulib_handle == NULL) {
		/* we never registered */
		return (DDI_SUCCESS);
	}

	if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
	    != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
	iommu->aiomt_iommulib_ops = NULL;
	iommu->aiomt_iommulib_handle = NULL;

	return (DDI_SUCCESS);
}

static int
amd_iommu_setup_passthru(amd_iommu_t *iommu)
{
	gfx_entry_t *gfxp;
	dev_info_t *dip;

	/*
	 * Setup passthru mapping for "special" devices
	 */
	amd_iommu_set_passthru(iommu, NULL);

	for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
		gfxp->g_ref++;
		dip = gfxp->g_dip;
		if (dip) {
			amd_iommu_set_passthru(iommu, dip);
		}
		gfxp->g_ref--;
	}

	return (DDI_SUCCESS);
}

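/*
 * Program the control register and turn translation on.  ACPI IVHD
 * settings (Isoc, ResPassPW, PassPW), when present, override the
 * hardware defaults before the enable bit is set.
 */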
static int
amd_iommu_start(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	amd_iommu_acpi_ivhd_t *hinfop;
	const char *f = "amd_iommu_start";

	hinfop = amd_iommu_lookup_all_ivhd();

	/*
	 * Disable HT tunnel translation.
	 * XXX use ACPI
	 */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_HT_TUN_ENABLE, 0);

	if (hinfop) {
		if (amd_iommu_debug) {
			cmn_err(CE_NOTE,
			    "amd_iommu: using ACPI for CTRL registers");
		}
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
		    AMD_IOMMU_ISOC, hinfop->ach_Isoc);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
		    AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
		    AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
	}

	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_INVTO, 5);

	/*
	 * Device table entry bit 0 (V) controls whether the entry is
	 * valid for address translation, and bit 128 (IV) controls
	 * whether interrupt remapping is valid.  With both clear a
	 * device gets pass-thru treatment, so a zeroed device table
	 * gives pass-thru for everything once the IOMMU is enabled.
	 */

	/* Finally enable the IOMMU ... */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_ENABLE, 1);

	if (amd_iommu_debug) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "Successfully started AMD IOMMU", f, driver, instance,
		    iommu->aiomt_idx);
	}
	cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
	    instance, iommu->aiomt_idx);

	return (DDI_SUCCESS);
}

static void
amd_iommu_stop(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "amd_iommu_stop";

	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_EVENTINT_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_EVENTLOG_ENABLE, 0);

	/*
	 * Disable translation on HT tunnel traffic
	 */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_HT_TUN_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_CMDBUF_ENABLE, 0);

	cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
	    "Successfully stopped AMD IOMMU", f, driver, instance,
	    iommu->aiomt_idx);
}

static int
amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	uint32_t dma_bufsz;
	caddr_t addr;
	uint32_t sz;
	uint32_t p2sz;
	int i;
	uint64_t *dentry;
	int err;
	const char *f = "amd_iommu_setup_tables_and_buffers";

	/*
	 * We will put the Device Table, Command Buffer and
	 * Event Log in contiguous memory. Allocate the maximum
	 * size allowed for such structures:
	 * Device Table:   256b * 64K = 32B * 64K
	 * Command Buffer: 128b * 32K = 16B * 32K
	 * Event Log:      128b * 32K = 16B * 32K
	 */
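	/*
	 * Resulting layout (all offsets from aiomt_dma_bufva):
	 *   [0, devtbl_sz)				Device Table
	 *   [devtbl_sz, devtbl_sz + cmdbuf_sz)		Command Buffer
	 *   [devtbl_sz + cmdbuf_sz, dma_bufsz)		Event Log
	 */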
	iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
	iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
	iommu->aiomt_eventlog_sz =
	    (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;

	dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
	    + iommu->aiomt_eventlog_sz;

	/*
	 * Alloc a DMA handle.
	 */
	err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
	    DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
		    "AMD IOMMU tables and buffers", f, driver, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Alloc memory for tables and buffers
	 * XXX remove cast to size_t
	 */
	err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
	    &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP, NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
	    (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
		    "to AMD IOMMU tables and buffers", f, driver, instance);
		iommu->aiomt_dma_bufva = NULL;
		iommu->aiomt_dma_mem_realsz = 0;
		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
		iommu->aiomt_dmahdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * The VA must be 4K aligned and >= table size
	 */
	ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
	    AMD_IOMMU_TABLE_ALIGN) == 0);
	ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);

	/*
	 * Now bind the handle
	 */
	err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
	    iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
	    NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
	if (err != DDI_DMA_MAPPED) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
		    "to AMD IOMMU tables and buffers. bufrealsz=%p",
		    f, driver, instance,
		    (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
		iommu->aiomt_buf_dma_ncookie = 0;
		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
		iommu->aiomt_dma_mem_hdl = NULL;
		iommu->aiomt_dma_bufva = NULL;
		iommu->aiomt_dma_mem_realsz = 0;
		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
		iommu->aiomt_dmahdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * We assume the DMA engine on the IOMMU is capable of handling the
	 * whole table buffer in a single cookie. If not and multiple cookies
	 * are needed we fail.
	 */
	if (iommu->aiomt_buf_dma_ncookie != 1) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
		    "cookies for DMA to AMD IOMMU tables and buffers. "
		    "#cookies=%u", f, driver, instance,
		    iommu->aiomt_buf_dma_ncookie);
		(void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
		iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
		iommu->aiomt_buf_dma_cookie.dmac_size = 0;
		iommu->aiomt_buf_dma_cookie.dmac_type = 0;
		iommu->aiomt_buf_dma_ncookie = 0;
		ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
		iommu->aiomt_dma_mem_hdl = NULL;
		iommu->aiomt_dma_bufva = NULL;
		iommu->aiomt_dma_mem_realsz = 0;
		ddi_dma_free_handle(&iommu->aiomt_dmahdl);
		iommu->aiomt_dmahdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * The address in the cookie must be 4K aligned and >= table size
	 */
	ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
	    & AMD_IOMMU_TABLE_ALIGN) == 0);
	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
	    <= iommu->aiomt_dma_mem_realsz);
	ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);

	/*
	 * Setup the device table pointers in the iommu struct as
	 * well as the IOMMU device table register
	 */
	iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
	bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);

	/*
	 * Set V=1 and TV=0, so any inadvertent pass-thrus cause
	 * page faults. Also set the SE bit so we aren't swamped with
	 * page fault messages
	 */
	for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
		/*LINTED*/
		dentry = (uint64_t *)&iommu->aiomt_devtbl
		    [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
		AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
		AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
	}

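	/*
	 * Program the device table base and size registers.  The base is
	 * the 4K-page frame number of the table's PA; DEVTABSIZE is
	 * encoded as the number of 4K pages minus one, hence the
	 * (devtbl_sz >> 12) - 1 computation below.
	 */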
	addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
	    AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
	sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
	ASSERT(sz <= ((1 << 9) - 1));
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
	    AMD_IOMMU_DEVTABSIZE, sz);

	/*
	 * Setup the command buffer pointers
	 */
	iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
	    iommu->aiomt_devtbl_sz;
	bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
	addr += iommu->aiomt_devtbl_sz;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
	    AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);

	p2sz = AMD_IOMMU_CMDBUF_SZ;
	ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
	    p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
	    AMD_IOMMU_COMLEN, p2sz);
	/*LINTED*/
	iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
	    AMD_IOMMU_CMDHEADPTR, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
	    AMD_IOMMU_CMDTAILPTR, 0);

	/*
	 * Setup the event log pointers. The event log follows the
	 * command buffer in the contiguous allocation.
	 */
	iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
	    iommu->aiomt_cmdbuf_sz;
	bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
	addr += iommu->aiomt_cmdbuf_sz;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
	p2sz = AMD_IOMMU_EVENTLOG_SZ;
	ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
	    p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTLEN, p2sz);
	/*LINTED*/
	iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
	    AMD_IOMMU_EVENTHEADPTR, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
	    AMD_IOMMU_EVENTTAILPTR, 0);

	/* dma sync so device sees this init */
	SYNC_FORDEV(iommu->aiomt_dmahdl);

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
		cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
		    "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
	}

	return (DDI_SUCCESS);
}

static void
amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "amd_iommu_teardown_tables_and_buffers";

	iommu->aiomt_eventlog = NULL;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTBASE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
	    AMD_IOMMU_EVENTLEN, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
	    AMD_IOMMU_EVENTHEADPTR, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
	    AMD_IOMMU_EVENTTAILPTR, 0);

	iommu->aiomt_cmdbuf = NULL;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
	    AMD_IOMMU_COMBASE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
	    AMD_IOMMU_COMLEN, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
	    AMD_IOMMU_CMDHEADPTR, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
	    AMD_IOMMU_CMDTAILPTR, 0);

	iommu->aiomt_devtbl = NULL;
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
	    AMD_IOMMU_DEVTABBASE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
	    AMD_IOMMU_DEVTABSIZE, 0);

	if (iommu->aiomt_dmahdl == NULL || type == AMD_IOMMU_QUIESCE)
		return;

	/* Unbind the handle */
	if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
		    "%p for IOMMU idx=%d", f, driver, instance,
		    (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
	}
	iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
	iommu->aiomt_buf_dma_cookie.dmac_size = 0;
	iommu->aiomt_buf_dma_cookie.dmac_type = 0;
	iommu->aiomt_buf_dma_ncookie = 0;

	/* Free the table memory allocated for DMA */
	ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
	iommu->aiomt_dma_mem_hdl = NULL;
	iommu->aiomt_dma_bufva = NULL;
	iommu->aiomt_dma_mem_realsz = 0;

	/* Free the DMA handle */
	ddi_dma_free_handle(&iommu->aiomt_dmahdl);
	iommu->aiomt_dmahdl = NULL;
}

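/*
 * Despite its name, this routine also enables the command buffer and
 * the event log.  Completion-wait interrupts are left disabled; the
 * driver polls for command completion instead (see comments below).
 */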
static void
amd_iommu_enable_interrupts(amd_iommu_t *iommu)
{
	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
	    AMD_IOMMU_CMDBUF_RUN) == 0);
	ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
	    AMD_IOMMU_EVENT_LOG_RUN) == 0);

	/* Must be set prior to enabling command buffer */
	/* Must be set prior to enabling event logging */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_CMDBUF_ENABLE, 1);
	/* No interrupts for completion wait - too heavyweight. Use polling */
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_COMWAITINT_ENABLE, 0);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_EVENTLOG_ENABLE, 1);
	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
	    AMD_IOMMU_EVENTINT_ENABLE, 1);
}

static int
amd_iommu_setup_exclusion(amd_iommu_t *iommu)
{
	amd_iommu_acpi_ivmd_t *minfop;

	minfop = amd_iommu_lookup_all_ivmd();

	if (minfop && minfop->acm_ExclRange == 1) {
		cmn_err(CE_NOTE, "Programming exclusion range");
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_ADDR,
		    minfop->acm_ivmd_phys_start >> 12);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_EXEN, 1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
		    AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
		    minfop->acm_ivmd_phys_len) >> 12);
	} else {
		if (amd_iommu_debug) {
			cmn_err(CE_NOTE, "Skipping exclusion range");
		}
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_ADDR, 0);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_ALLOW, 1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
		    AMD_IOMMU_EXCL_BASE_EXEN, 0);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
		    AMD_IOMMU_EXCL_LIM, 0);
	}

	return (DDI_SUCCESS);
}

static void
amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
{
	(void) amd_iommu_setup_exclusion(iommu);
}

static uint_t
amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
{
	/*LINTED*/
	amd_iommu_t *iommu = (amd_iommu_t *)arg1;
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "amd_iommu_intr_handler";

	ASSERT(arg1);
	ASSERT(arg2 == NULL);

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
		    f, driver, instance, iommu->aiomt_idx);
	}

	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
	    AMD_IOMMU_EVENT_LOG_INT) == 1) {
		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
			cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
			    "Event Log Interrupt", f, driver, instance,
			    iommu->aiomt_idx);
		}
		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
		WAIT_SEC(1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
		    AMD_IOMMU_EVENT_LOG_INT, 1);
		return (DDI_INTR_CLAIMED);
	}

	if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
	    AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
		cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
		    "Event Overflow Interrupt", f, driver, instance,
		    iommu->aiomt_idx);
		(void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
		    AMD_IOMMU_EVENT_LOG_INT, 1);
		AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
		    AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
		return (DDI_INTR_CLAIMED);
	}

	return (DDI_INTR_UNCLAIMED);
}

static int
amd_iommu_setup_interrupts(amd_iommu_t *iommu)
{
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	int intrcap0;
	int intrcapN;
	int type;
	int err;
	int req;
	int avail;
	int p2req;
	int actual;
	int i;
	int j;
	const char *f = "amd_iommu_setup_interrupts";

	if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
		    "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "Interrupt types supported = 0x%x", f, driver, instance,
		    iommu->aiomt_idx, type);
	}

	/*
	 * for now we only support MSI
	 */
	if ((type & DDI_INTR_TYPE_MSI) == 0) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
		    "MSI interrupts not supported. Failing init.",
		    f, driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
		    f, driver, instance, iommu->aiomt_idx);
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
		    "ddi_intr_get_nintrs failed err = %d",
		    f, driver, instance, iommu->aiomt_idx, err);
		return (DDI_FAILURE);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "MSI number of interrupts requested: %d",
		    f, driver, instance, iommu->aiomt_idx, req);
	}

	if (req == 0) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
		    "interrupts requested. Failing init", f,
		    driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
		    "ddi_intr_get_navail failed err = %d", f,
		    driver, instance, iommu->aiomt_idx, err);
		return (DDI_FAILURE);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "MSI number of interrupts available: %d",
		    f, driver, instance, iommu->aiomt_idx, avail);
	}

	if (avail == 0) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
		    "interrupts available. Failing init", f,
		    driver, instance, iommu->aiomt_idx);
		return (DDI_FAILURE);
	}

	if (avail < req) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
		    "interrupts: requested (%d) > available (%d). "
		    "Failing init", f, driver, instance, iommu->aiomt_idx,
		    req, avail);
		return (DDI_FAILURE);
	}

	/* Allocate memory for DDI interrupt handles */
	iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
	iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
	    KM_SLEEP);

	iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;

	/*
	 * Round req down to the largest power of two <= req, as required
	 * by ddi_intr_alloc (e.g. req = 3 becomes 2).
	 */
	p2req = 0;
	while (1<<p2req <= req)
		p2req++;
	p2req--;
	req = 1<<p2req;

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "MSI power of 2 number of interrupts: %d,%d",
		    f, driver, instance, iommu->aiomt_idx, p2req, req);
	}

	err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
	    DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
		    "ddi_intr_alloc failed: err = %d",
		    f, driver, instance, iommu->aiomt_idx, err);
		amd_iommu_teardown_interrupts(iommu);
		return (DDI_FAILURE);
	}

	iommu->aiomt_actual_intrs = actual;
	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
		    "number of interrupts actually allocated %d",
		    f, driver, instance, iommu->aiomt_idx, actual);
	}

	if (iommu->aiomt_actual_intrs < req) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
		    "ddi_intr_alloc failed: actual (%d) < req (%d)",
		    f, driver, instance, iommu->aiomt_idx,
		    iommu->aiomt_actual_intrs, req);
		amd_iommu_teardown_interrupts(iommu);
		return (DDI_FAILURE);
	}

	for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
		err = ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
		    amd_iommu_intr_handler, (void *)iommu, NULL);
		if (err != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
			    "ddi_intr_add_handler failed: intr = %d, err = %d",
			    f, driver, instance, iommu->aiomt_idx, i, err);
			for (j = 0; j < i; j++) {
				(void) ddi_intr_remove_handler(
				    iommu->aiomt_intr_htable[j]);
			}
			amd_iommu_teardown_interrupts(iommu);
			return (DDI_FAILURE);
		}
	}
	iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;

	intrcap0 = intrcapN = -1;
	if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
	    != DDI_SUCCESS ||
	    ddi_intr_get_cap(
	    iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
	    != DDI_SUCCESS || intrcap0 != intrcapN) {
		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
		    "ddi_intr_get_cap failed or inconsistent cap among "
		    "interrupts: intrcap0 (%d) != intrcapN (%d)",
		    f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
		amd_iommu_teardown_interrupts(iommu);
		return (DDI_FAILURE);
	}
	iommu->aiomt_intr_cap = intrcap0;

	if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
		/* Need to call block enable */
		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
			    "Need to call block enable",
			    f, driver, instance, iommu->aiomt_idx);
		}
		if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
		    iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
			    "ddi_intr_block_enable failed", f, driver,
			    instance, iommu->aiomt_idx);
			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
			    iommu->aiomt_actual_intrs);
			amd_iommu_teardown_interrupts(iommu);
			return (DDI_FAILURE);
		}
	} else {
		if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
			cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
			    "Need to call individual enable",
			    f, driver, instance, iommu->aiomt_idx);
		}
		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
			if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
			    != DDI_SUCCESS) {
				cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
				    "ddi_intr_enable failed: intr = %d", f,
				    driver, instance, iommu->aiomt_idx, i);
				for (j = 0; j < i; j++) {
					(void) ddi_intr_disable(
					    iommu->aiomt_intr_htable[j]);
				}
				amd_iommu_teardown_interrupts(iommu);
				return (DDI_FAILURE);
			}
		}
	}
	iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
		cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
		    "Interrupts successfully %s enabled. # of interrupts = %d",
		    f, driver, instance, iommu->aiomt_idx,
		    (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
		    "(individually)", iommu->aiomt_actual_intrs);
	}

	return (DDI_SUCCESS);
}

static void
amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
{
	int i;

	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
		if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
			(void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
			    iommu->aiomt_actual_intrs);
		} else {
			for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
				(void) ddi_intr_disable(
				    iommu->aiomt_intr_htable[i]);
			}
		}
	}

	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
			(void) ddi_intr_remove_handler(
			    iommu->aiomt_intr_htable[i]);
		}
	}

	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
		for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
			(void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
		}
	}
	if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
		kmem_free(iommu->aiomt_intr_htable,
		    iommu->aiomt_intr_htable_sz);
	}
	iommu->aiomt_intr_htable = NULL;
	iommu->aiomt_intr_htable_sz = 0;
	iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
}

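/*
 * Bring up a single IOMMU unit: map its control registers, allocate and
 * program the device table, command buffer and event log, set up the
 * exclusion range and interrupts, initialize page tables and pass-thru
 * mappings, start translation and finally register with iommulib.
 * Returns NULL (after cleanup) if any step fails.
 */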
static amd_iommu_t *
amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
    uint16_t cap_base)
{
	amd_iommu_t *iommu;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	uint32_t caphdr;
	uint32_t low_addr32;
	uint32_t hi_addr32;
	uint32_t range;
	uint32_t misc;
	uint64_t pgoffset;
	amd_iommu_acpi_global_t *global;
	amd_iommu_acpi_ivhd_t *hinfop;
	int bus, device, func;
	const char *f = "amd_iommu_init";

	low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
	    AMD_IOMMU_CAP_ADDR_LOW_OFF);
	if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
		cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
		    "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
		    instance, idx);
		return (NULL);
	}

	iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
	mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_enter(&iommu->aiomt_mutex);

	mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);

	iommu->aiomt_dip = dip;
	iommu->aiomt_idx = idx;

	if (acpica_get_bdf(iommu->aiomt_dip, &bus, &device, &func)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Failed to get BDF. "
		    "Unable to use IOMMU unit idx=%d - skipping ...",
		    f, driver, instance, idx);
		return (NULL);
	}

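	/* Pack the IOMMU's own PCI address: 8-bit bus, 5-bit dev, 3-bit fn */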
	iommu->aiomt_bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) |
	    (uint8_t)func;

	/*
	 * Since everything in the capability block is locked and RO at this
	 * point, copy everything into the IOMMU struct
	 */

	/* Get cap header */
	caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
	iommu->aiomt_cap_hdr = caphdr;
	iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
	    AMD_IOMMU_CAP_NPCACHE);
	iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);

	global = amd_iommu_lookup_acpi_global();
	hinfop = amd_iommu_lookup_any_ivhd(iommu);

	if (hinfop)
		iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
	else
		iommu->aiomt_iotlb =
		    AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);

	iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
	iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);

	/*
	 * Get address of IOMMU control registers
	 */
	hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
	    AMD_IOMMU_CAP_ADDR_HI_OFF);
	iommu->aiomt_low_addr32 = low_addr32;
	iommu->aiomt_hi_addr32 = hi_addr32;
	low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;

	if (hinfop) {
		iommu->aiomt_reg_pa = hinfop->ach_IOMMU_reg_base;
		ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
	} else {
		iommu->aiomt_reg_pa = ((uint64_t)hi_addr32 << 32 | low_addr32);
	}

	/*
	 * Get cap range reg
	 */
	range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
	iommu->aiomt_range = range;
	iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
	    AMD_IOMMU_RNG_VALID);
	if (iommu->aiomt_rng_valid) {
		iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
		    AMD_IOMMU_RNG_BUS);
		iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
		    AMD_IOMMU_FIRST_DEVFN);
		iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
		    AMD_IOMMU_LAST_DEVFN);
	} else {
		iommu->aiomt_rng_bus = 0;
		iommu->aiomt_first_devfn = 0;
		iommu->aiomt_last_devfn = 0;
	}

	if (hinfop)
		iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
	else
		iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
		    AMD_IOMMU_HT_UNITID);

	/*
	 * Get cap misc reg
	 */
	misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
	iommu->aiomt_misc = misc;

	if (global) {
		iommu->aiomt_htatsresv = global->acg_HtAtsResv;
		iommu->aiomt_vasize = global->acg_VAsize;
		iommu->aiomt_pasize = global->acg_PAsize;
	} else {
		iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
		    AMD_IOMMU_HT_ATSRSV);
		iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
		    AMD_IOMMU_VA_SIZE);
		iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
		    AMD_IOMMU_PA_SIZE);
	}

	if (hinfop) {
		iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
	} else {
		iommu->aiomt_msinum =
		    AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
	}

	/*
	 * Set up mapping between control registers PA and VA
	 */
	pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
	ASSERT(pgoffset == 0);
	iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
	iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);

	iommu->aiomt_va = (uintptr_t)device_arena_alloc(
	    ptob(iommu->aiomt_reg_pages), VM_SLEEP);
	if (iommu->aiomt_va == 0) {
		cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
		    "control regs. Skipping IOMMU idx=%d", f, driver,
		    instance, idx);
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
		return (NULL);
	}

	hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
	    iommu->aiomt_reg_size,
	    mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
	    | HAT_STRICTORDER, HAT_LOAD_LOCK);

	iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;

	/*
	 * Setup the various control registers' VAs
	 */
	iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_DEVTBL_REG_OFF;
	iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_CMDBUF_REG_OFF;
	iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EVENTLOG_REG_OFF;
	iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_CTRL_REG_OFF;
	iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EXCL_BASE_REG_OFF;
	iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EXCL_LIM_REG_OFF;
	iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
	iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
	iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
	iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
	iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
	    AMD_IOMMU_STATUS_REG_OFF;

	/*
	 * Setup the DEVICE table, CMD buffer, and LOG buffer in
	 * memory and setup DMA access to this memory location
	 */
	if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
		return (NULL);
	}

	if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
		return (NULL);
	}

	amd_iommu_enable_interrupts(iommu);

	if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
		return (NULL);
	}

	/*
	 * need to setup domain table before gfx bypass
	 */
	amd_iommu_init_page_tables(iommu);

	/*
	 * Set pass-thru for special devices like IOAPIC and HPET
	 *
	 * Also, gfx devices don't use DDI for DMA. No need to register
	 * before setting up gfx passthru
	 */
	if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
		return (NULL);
	}

	/* Initialize device table entries based on ACPI settings */
	if (amd_iommu_acpi_init_devtbl(iommu) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Can't initialize device table",
		    f, driver, instance);
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
		return (NULL);
	}

	if (amd_iommu_start(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
		return (NULL);
	}

	/* xxx register/start race */
	if (amd_iommu_register(iommu) != DDI_SUCCESS) {
		mutex_exit(&iommu->aiomt_mutex);
		(void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
		return (NULL);
	}

	if (amd_iommu_debug) {
		cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
		    instance, idx);
	}

	return (iommu);
}

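/*
 * Tear down an IOMMU unit.  type is AMD_IOMMU_TEARDOWN for a full
 * detach, or AMD_IOMMU_QUIESCE, in which case only the hardware is
 * stopped and its registers cleared while kernel state is left intact.
 */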
static int
amd_iommu_fini(amd_iommu_t *iommu, int type)
{
	int idx = iommu->aiomt_idx;
	dev_info_t *dip = iommu->aiomt_dip;
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	const char *f = "amd_iommu_fini";

	if (type == AMD_IOMMU_TEARDOWN) {
		mutex_enter(&iommu->aiomt_mutex);
		if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
			cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
			    "idx = %d", f, driver, instance, idx);
			return (DDI_FAILURE);
		}
	}

	amd_iommu_stop(iommu);

	if (type == AMD_IOMMU_TEARDOWN) {
		amd_iommu_fini_page_tables(iommu);
		amd_iommu_teardown_interrupts(iommu);
		amd_iommu_teardown_exclusion(iommu);
	}

	amd_iommu_teardown_tables_and_buffers(iommu, type);

	if (type == AMD_IOMMU_QUIESCE)
		return (DDI_SUCCESS);

	if (iommu->aiomt_va != 0) {
		hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
		    iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
		device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
		    ptob(iommu->aiomt_reg_pages));
		iommu->aiomt_va = 0;
		iommu->aiomt_reg_va = 0;
	}
	mutex_destroy(&iommu->aiomt_eventlock);
	mutex_destroy(&iommu->aiomt_cmdlock);
	mutex_exit(&iommu->aiomt_mutex);
	mutex_destroy(&iommu->aiomt_mutex);
	kmem_free(iommu, sizeof (amd_iommu_t));

	cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
	    f, driver, instance, idx);

	return (DDI_SUCCESS);
}

int
amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
{
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	ddi_acc_handle_t handle;
	uint8_t base_class;
	uint8_t sub_class;
	uint8_t prog_class;
	int idx;
	uint32_t id;
	uint16_t cap_base;
	uint32_t caphdr;
	uint8_t cap_type;
	uint8_t cap_id;
	amd_iommu_t *iommu;
	const char *f = "amd_iommu_setup";

	ASSERT(instance >= 0);
	ASSERT(driver);

	/* First setup PCI access to config space */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
		    f, driver, instance);
		return (DDI_FAILURE);
	}

	/*
	 * The AMD IOMMU is part of an independent PCI function. There may be
	 * more than one IOMMU in that PCI function
	 */
	base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
	sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
	prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);

	if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
	    prog_class != AMD_IOMMU_PCI_PROG_IF) {
		cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
		    "subclass(0x%x)/programming interface(0x%x)", f, driver,
		    instance, base_class, sub_class, prog_class);
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	/*
	 * Find and initialize all IOMMU units in this function
	 */
	for (idx = 0; ; idx++) {
		if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
			break;

		/* check if cap ID is secure device cap id */
		if (id != PCI_CAP_ID_SECURE_DEV) {
			if (amd_iommu_debug) {
				cmn_err(CE_NOTE,
				    "%s: %s%d: skipping IOMMU: idx(0x%x) "
				    "cap ID (0x%x) != secure dev capid (0x%x)",
				    f, driver, instance, idx, id,
				    PCI_CAP_ID_SECURE_DEV);
			}
			continue;
		}

		/* check if cap type is IOMMU cap type */
		caphdr = PCI_CAP_GET32(handle, 0, cap_base,
		    AMD_IOMMU_CAP_HDR_OFF);
		cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
		cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);

		if (cap_type != AMD_IOMMU_CAP) {
			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
			    "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
			    driver, instance, idx, cap_type, AMD_IOMMU_CAP);
			continue;
		}
		ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
		ASSERT(cap_id == id);

		iommu = amd_iommu_init(dip, handle, idx, cap_base);
		if (iommu == NULL) {
			cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
			    "failed to init IOMMU", f,
			    driver, instance, idx);
			continue;
		}

		if (statep->aioms_iommu_start == NULL) {
			statep->aioms_iommu_start = iommu;
		} else {
			statep->aioms_iommu_end->aiomt_next = iommu;
		}
		statep->aioms_iommu_end = iommu;

		statep->aioms_nunits++;
	}

	pci_config_teardown(&handle);

	if (amd_iommu_debug) {
		cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
		    f, driver, instance, (void *)statep, statep->aioms_nunits);
	}

	return (DDI_SUCCESS);
}

int
amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type)
{
	int instance = ddi_get_instance(dip);
	const char *driver = ddi_driver_name(dip);
	amd_iommu_t *iommu, *next_iommu;
	int teardown;
	int error = DDI_SUCCESS;
	const char *f = "amd_iommu_teardown";

	teardown = 0;
	for (iommu = statep->aioms_iommu_start; iommu;
	    iommu = next_iommu) {
		ASSERT(statep->aioms_nunits > 0);
		next_iommu = iommu->aiomt_next;
		if (amd_iommu_fini(iommu, type) != DDI_SUCCESS) {
			error = DDI_FAILURE;
			continue;
		}
		statep->aioms_nunits--;
		teardown++;
	}

	cmn_err(CE_NOTE, "%s: %s%d: state=%p: tore down %d units. "
	    "%d units left", f, driver, instance, (void *)statep,
	    teardown, statep->aioms_nunits);

	return (error);
}

dev_info_t *
amd_iommu_pci_dip(dev_info_t *rdip, const char *path)
{
	dev_info_t *pdip;
	const char *driver = ddi_driver_name(rdip);
	int instance = ddi_get_instance(rdip);
	const char *f = "amd_iommu_pci_dip";

	/* Hold rdip so it and its parents don't go away */
	ndi_hold_devi(rdip);

	if (ddi_is_pci_dip(rdip))
		return (rdip);

	pdip = rdip;
	while ((pdip = ddi_get_parent(pdip)) != NULL) {
		if (ddi_is_pci_dip(pdip)) {
			ndi_hold_devi(pdip);
			ndi_rele_devi(rdip);
			return (pdip);
		}
	}

#ifdef DEBUG
	cmn_err(CE_PANIC, "%s: %s%d dip = %p has no PCI parent, path = %s",
	    f, driver, instance, (void *)rdip, path);
#else
	cmn_err(CE_WARN, "%s: %s%d dip = %p has no PCI parent, path = %s",
	    f, driver, instance, (void *)rdip, path);
	ndi_rele_devi(rdip);
#endif /* DEBUG */

	return (NULL);
}

/* Interface with IOMMULIB */
/*ARGSUSED*/
static int
amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
{
	const char *driver = ddi_driver_name(rdip);
	char *s;
	int bus, device, func, bdf;
	amd_iommu_acpi_ivhd_t *hinfop;
	dev_info_t *pci_dip;
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	const char *f = "amd_iommu_probe";
	int instance = ddi_get_instance(iommu->aiomt_dip);
	const char *idriver = ddi_driver_name(iommu->aiomt_dip);
	char *path, *pathp;

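	/*
	 * amd_iommu_disable_list is a colon-separated list of driver
	 * names; a device whose driver appears on the list is given a
	 * pass-thru mapping instead of being managed by this IOMMU.
	 */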
	if (amd_iommu_disable_list) {
		s = strstr(amd_iommu_disable_list, driver);
		if (s == NULL)
			return (DDI_SUCCESS);
		if (s == amd_iommu_disable_list || *(s - 1) == ':') {
			s += strlen(driver);
			if (*s == '\0' || *s == ':') {
				amd_iommu_set_passthru(iommu, rdip);
				return (DDI_FAILURE);
			}
		}
	}

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if ((pathp = ddi_pathname(rdip, path)) == NULL)
		pathp = "<unknown>";

	pci_dip = amd_iommu_pci_dip(rdip, path);
	if (pci_dip == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
		    "for rdip=%p, path = %s",
		    f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
		    pathp);
		kmem_free(path, MAXPATHLEN);
		return (DDI_FAILURE);
	}

	if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get BDF "
		    "for rdip=%p, path = %s",
		    f, idriver, instance, iommu->aiomt_idx, (void *)rdip,
		    pathp);
		kmem_free(path, MAXPATHLEN);
		return (DDI_FAILURE);
	}
	kmem_free(path, MAXPATHLEN);

	/*
	 * See whether device is described by IVRS as being managed
	 * by this IOMMU
	 */
	bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;
	hinfop = amd_iommu_lookup_ivhd(bdf);
	if (hinfop && hinfop->ach_IOMMU_deviceid == iommu->aiomt_bdf)
		return (DDI_SUCCESS);

	return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
amd_iommu_allochdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
{
	return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
	    arg, dma_handlep));
}

/*ARGSUSED*/
static int
amd_iommu_freehdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
{
	return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
}

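/*
 * Walk the cookie array for the current DMA window, map each cookie's
 * physical range into this IOMMU's virtual space, and overwrite the
 * cookie's address (and type) in place with the IOMMU virtual address.
 * On any failure the cookies mapped so far are unmapped again.
 */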
/*ARGSUSED*/
static int
map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
    int km_flags)
{
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	int idx = iommu->aiomt_idx;
	int i;
	uint64_t start_va;
	char *path;
	int error = DDI_FAILURE;
	const char *f = "map_current_window";

	path = kmem_alloc(MAXPATHLEN, km_flags);
	if (path == NULL) {
		return (DDI_DMA_NORESOURCES);
	}

	(void) ddi_pathname(rdip, path);
	mutex_enter(&amd_iommu_pgtable_lock);

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
		cmn_err(CE_NOTE, "%s: %s%d: idx=%d Attempting to get cookies "
		    "from handle for device %s",
		    f, driver, instance, idx, path);
	}

	start_va = 0;
	for (i = 0; i < ccount; i++) {
		if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
		    cookie_array[i].dmac_cookie_addr,
		    cookie_array[i].dmac_size,
		    AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
			break;
		}
		cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
		cookie_array[i].dmac_type = 0;
	}

	if (i != ccount) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
		    "for device %s", f, driver, instance, idx, i, path);
		(void) unmap_current_window(iommu, rdip, cookie_array,
		    ccount, i, 1);
		goto out;
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
		cmn_err(CE_NOTE, "%s: return SUCCESS", f);
	}

	error = DDI_DMA_MAPPED;
out:
	mutex_exit(&amd_iommu_pgtable_lock);
	kmem_free(path, MAXPATHLEN);
	return (error);
}

/*ARGSUSED*/
static int
unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
    ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
{
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	int idx = iommu->aiomt_idx;
	int i;
	int error = DDI_FAILURE;
	char *path;
	int pathfree;
	const char *f = "unmap_current_window";

	if (!locked)
		mutex_enter(&amd_iommu_pgtable_lock);

	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
	if (path) {
		(void) ddi_pathname(rdip, path);
		pathfree = 1;
	} else {
		path = "<path-mem-alloc-failed>";
		pathfree = 0;
	}

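	/* A cookie count of -1 means "unmap the whole window" */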
	if (ncookies == -1)
		ncookies = ccount;

	for (i = 0; i < ncookies; i++) {
		if (amd_iommu_unmap_va(iommu, rdip,
		    cookie_array[i].dmac_cookie_addr,
		    cookie_array[i].dmac_size,
		    AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
			break;
		}
	}

	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
		    f, path);
	}

	if (i != ncookies) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
		    "for device %s", f, driver, instance, idx, i, path);
		error = DDI_FAILURE;
		goto out;
	}

	error = DDI_SUCCESS;

out:
	if (pathfree)
		kmem_free(path, MAXPATHLEN);
	if (!locked)
		mutex_exit(&amd_iommu_pgtable_lock);
	return (error);
}

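/*
 * DMA bind: first let the root nexus bind the handle and build the
 * cookie list, then remap each cookie through this IOMMU and hand the
 * remapped cookies back to the framework.  On any failure the handle
 * is unbound again.
 */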
/*ARGSUSED*/
static int
amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	int dma_error = DDI_DMA_NOMAPPING;
	int error;
	char *path;
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	ddi_dma_impl_t *hp;
	ddi_dma_attr_t *attrp;
	int km_flags;
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_bindhdl";

	dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
	    dmareq, cookiep, ccountp);

	if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
		return (dma_error);

	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);

	path = kmem_alloc(MAXPATHLEN, km_flags);
	if (path) {
		(void) ddi_pathname(rdip, path);
	} else {
		dma_error = DDI_DMA_NORESOURCES;
		goto unbind;
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
		cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
		    f, path,
		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
		    *ccountp);
	}

	cookie_array = NULL;
	ccount = 0;
	if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
	    &cookie_array, &ccount)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %s", f, driver, instance, path);
		dma_error = error;
		goto unbind;
	}

	hp = (ddi_dma_impl_t *)dma_handle;
	attrp = &hp->dmai_attr;

	error = map_current_window(iommu, rdip, attrp, dmareq,
	    cookie_array, ccount, km_flags);
	if (error != DDI_SUCCESS) {
		dma_error = error;
		goto unbind;
	}

	if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
	    cookie_array, ccount)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
		    "for device %s", f, driver, instance, path);
		dma_error = error;
		goto unbind;
	}

	*cookiep = cookie_array[0];

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
		cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
		    f, path,
		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
		    *ccountp);
	}

	kmem_free(path, MAXPATHLEN);
	ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
	return (dma_error);
unbind:
	if (path)
		kmem_free(path, MAXPATHLEN);
	(void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
	return (dma_error);
}

/*ARGSUSED*/
static int
amd_iommu_unbindhdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
{
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int error = DDI_FAILURE;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_unbindhdl";

	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
		    f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
		    "for dip=%p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
	} else {
		error = DDI_SUCCESS;
	}
out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
	return (error);
}

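/*
 * DDI DMA sync entry point. The cookies set on the handle are saved
 * and cleared, the underlying sync operation is performed, and the
 * cookies are then restored on the handle.
 */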
/*ARGSUSED*/
static int
amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
    size_t len, uint_t cache_flags)
{
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int error;
	const char *f = "amd_iommu_sync";

	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		ASSERT(cookie_array == NULL);
		cmn_err(CE_WARN, "%s: Cannot get cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Cannot clear cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
	    len, cache_flags);

	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
	    ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Cannot set cookies "
		    "for device %p", f, (void *)rdip);
		error = DDI_FAILURE;
	} else {
		cookie_array = NULL;
		ccount = 0;
	}

out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
	return (error);
}

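/*
 * DDI DMA window entry point. Switching windows invalidates the
 * cookies of the current window, so the existing IOMMU mappings are
 * torn down and their cookie array freed, the underlying window
 * switch is performed, and the new window's cookies are remapped
 * through the IOMMU page tables with a synthesized RDWR request.
 */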
/*ARGSUSED*/
static int
amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	int error = DDI_FAILURE;
	amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
	ddi_dma_cookie_t *cookie_array = NULL;
	uint_t ccount = 0;
	int km_flags;
	ddi_dma_impl_t *hp;
	ddi_dma_attr_t *attrp;
	struct ddi_dma_req sdmareq = {0};
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "amd_iommu_win";

	km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);

	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
	    offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: failed switch windows for dip=%p",
		    f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	(void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);

	if (cookie_array) {
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
	}

	cookie_array = NULL;
	ccount = 0;
	if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
	    &ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	hp = (ddi_dma_impl_t *)dma_handle;
	attrp = &hp->dmai_attr;

	sdmareq.dmar_flags = DDI_DMA_RDWR;
	error = map_current_window(iommu, rdip, attrp, &sdmareq,
	    cookie_array, ccount, km_flags);

	if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
	    ccount) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
		    "for device %p", f, driver, instance, (void *)rdip);
		error = DDI_FAILURE;
		goto out;
	}

	*cookiep = cookie_array[0];

	return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
out:
	if (cookie_array)
		kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);

	return (error);
}

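/*
 * DVMA object mapping is not supported by this driver; both the
 * mapobject and unmapobject entry points fail with DDI_ENOTSUP.
 */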
/*ARGSUSED*/
static int
amd_iommu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao)
{
	return (DDI_ENOTSUP);
}

/*ARGSUSED*/
static int
amd_iommu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
{
	return (DDI_ENOTSUP);
}

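/*
 * Workaround accessor: read a 64-bit IOMMU register as two 32-bit
 * halves into a local split_t (presumably for hardware on which a
 * single 64-bit MMIO read is unreliable), then extract the requested
 * bit field from the reassembled 64-bit value.
 */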
uint64_t
amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
{
	split_t s;
	uint32_t *ptr32 = (uint32_t *)regp;
	uint64_t *s64p = &(s.u64);

	s.u32[0] = ptr32[0];
	s.u32[1] = ptr32[1];

	return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
}

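/*
 * Companion to amd_iommu_reg_get64_workaround(): read the register as
 * two 32-bit halves, update the requested bit field in the local
 * copy, write the full 64-bit value back, and return it.
 */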
uint64_t
amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
{
	split_t s;
	uint32_t *ptr32 = (uint32_t *)regp;
	uint64_t *s64p = &(s.u64);

	s.u32[0] = ptr32[0];
	s.u32[1] = ptr32[1];

	AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);

	*regp = s.u64;

	return (s.u64);
}

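/*
 * Consume the IOMMU-related boot properties from the root node:
 * "amd-iommu" (set to "no" or "false" to disable the IOMMU entirely,
 * e.g. "setprop amd-iommu no" in bootenv.rc) and
 * "amd-iommu-disable-list" (a list of drivers for which IOMMU
 * translation is to be disabled).
 */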
void
amd_iommu_read_boot_props(void)
{
	char *propval;

	/*
	 * If the "amd-iommu" boot property is set to "no" or "false",
	 * disable the AMD IOMMU.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
		if (strcmp(propval, "no") == 0 ||
		    strcmp(propval, "false") == 0) {
			amd_iommu_disable = 1;
		}
		ddi_prop_free(propval);
	}

	/*
	 * Copy the user-supplied list of drivers for which the IOMMU
	 * is disabled.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
	    == DDI_SUCCESS) {
		amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
		    KM_SLEEP);
		(void) strcpy(amd_iommu_disable_list, propval);
		ddi_prop_free(propval);
	}
}

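/*
 * Look up the same properties from the driver's .conf file (software
 * properties only, hence DDI_PROP_NOTPROM). Note that here only
 * "amd-iommu" = "no" disables the IOMMU, unlike the boot property,
 * which also accepts "false".
 */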
void
amd_iommu_lookup_conf_props(dev_info_t *dip)
{
	char *disable;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
	    == DDI_PROP_SUCCESS) {
		if (strcmp(disable, "no") == 0) {
			amd_iommu_disable = 1;
		}
		ddi_prop_free(disable);
	}

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
	    &disable) == DDI_PROP_SUCCESS) {
		amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
		    KM_SLEEP);
		(void) strcpy(amd_iommu_disable_list, disable);
		ddi_prop_free(disable);
	}
}