/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016, Anish Gupta (anish@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <machine/resource.h>
#include <machine/vmm.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/pci_cfgreg.h>

#include "ivhd_if.h"
#include "pcib_if.h"

#include "io/iommu.h"
#include "amdvi_priv.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, amdvi, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

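/*
 * Ring-buffer arithmetic for the command and event queues: 'a' is a byte
 * offset into the ring, 's' the entry size and 'm' the number of entries,
 * so the ring wraps at m * s bytes.  MOD_DEC relies on unsigned wraparound,
 * which is well defined here because m * s is a power of two, e.g. with
 * s = 16 and m = 256 (a 4KB ring):
 *
 *	MOD_INC(0x0ff0, 16, 256) == 0x0000
 *	MOD_DEC(0x0000, 16, 256) == 0x0ff0
 */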
#define MOD_INC(a, s, m)	(((a) + (s)) % ((m) * (s)))
#define MOD_DEC(a, s, m)	(((a) - (s)) % ((m) * (s)))

/* Print RID or device ID in PCI string format. */
#define RID2PCI_STR(d)	PCI_RID2BUS(d), PCI_RID2SLOT(d), PCI_RID2FUNC(d)
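
/*
 * For example, RID 0x0310 expands to bus 3, slot 2, function 0, matching
 * the "%d.%d.%d" conversions it is used with.
 */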

static void amdvi_dump_cmds(struct amdvi_softc *softc, int count);
static void amdvi_print_dev_cap(struct amdvi_softc *softc);

MALLOC_DEFINE(M_AMDVI, "amdvi", "amdvi");

extern device_t *ivhd_devs;

extern int ivhd_count;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, count, CTLFLAG_RDTUN, &ivhd_count,
    0, NULL);

static int amdvi_enable_user = 0;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, enable, CTLFLAG_RDTUN,
    &amdvi_enable_user, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi_enable", &amdvi_enable_user);

#ifdef AMDVI_ATS_ENABLE
/* XXX: ATS is not tested. */
static int amdvi_enable_iotlb = 1;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, iotlb_enabled, CTLFLAG_RDTUN,
    &amdvi_enable_iotlb, 0, NULL);
TUNABLE_INT("hw.vmm.enable_iotlb", &amdvi_enable_iotlb);
#endif

static int amdvi_host_ptp = 1;	/* Use page tables for host. */
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, host_ptp, CTLFLAG_RDTUN,
    &amdvi_host_ptp, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.host_ptp", &amdvi_host_ptp);

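/*
 * With 9 address bits translated per level and 4KB leaf pages, a level-N
 * table covers a (9 * N + 12)-bit address space; the default of 4 levels
 * covers the 48-bit guest-physical range, mirroring x86 long-mode paging.
 */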
/* Page table level used; must be <= level supported by h/w (v1 = 7). */
int amdvi_ptp_level = 4;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, ptp_level, CTLFLAG_RDTUN,
    &amdvi_ptp_level, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.ptp_level", &amdvi_ptp_level);

/* Disable fault event reporting. */
static int amdvi_disable_io_fault = 0;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, disable_io_fault, CTLFLAG_RDTUN,
    &amdvi_disable_io_fault, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.disable_io_fault", &amdvi_disable_io_fault);

static uint32_t amdvi_dom_id = 0;	/* 0 is reserved for host. */
SYSCTL_UINT(_hw_vmm_amdvi, OID_AUTO, domain_id, CTLFLAG_RD,
    &amdvi_dom_id, 0, NULL);
/*
 * Device table entry.
 * Bus(256) x Dev(32) x Fun(8) x DTE(256 bits or 32 bytes)
 * = 256 * 2 * PAGE_SIZE.
 */
static struct amdvi_dte amdvi_dte[PCI_NUM_DEV_MAX] __aligned(PAGE_SIZE);
CTASSERT(PCI_NUM_DEV_MAX == 0x10000);
CTASSERT(sizeof(amdvi_dte) == 0x200000);

static SLIST_HEAD (, amdvi_domain) dom_head;

static inline uint32_t
amdvi_pci_read(struct amdvi_softc *softc, int off)
{

	return (pci_cfgregread(softc->pci_seg, PCI_RID2BUS(softc->pci_rid),
	    PCI_RID2SLOT(softc->pci_rid), PCI_RID2FUNC(softc->pci_rid),
	    off, 4));
}

#ifdef AMDVI_ATS_ENABLE
/* XXX: Should be in pci.c */
/*
 * Check if a device has the ATS capability and it is enabled.
 * If ATS is absent or disabled, return (-1); otherwise return the
 * ATS queue length.
 */
static int
amdvi_find_ats_qlen(uint16_t devid)
{
	device_t dev;
	uint32_t off, cap;
	int qlen = -1;

	dev = pci_find_bsf(PCI_RID2BUS(devid), PCI_RID2SLOT(devid),
	    PCI_RID2FUNC(devid));

	if (!dev) {
		return (-1);
	}
#define PCIM_ATS_EN	BIT(31)

	if (pci_find_extcap(dev, PCIZ_ATS, &off) == 0) {
		cap = pci_read_config(dev, off + 4, 4);
		qlen = (cap & 0x1F);
		qlen = qlen ? qlen : 32;
		printf("AMD-Vi: PCI device %d.%d.%d ATS %s qlen=%d\n",
		    RID2PCI_STR(devid),
		    (cap & PCIM_ATS_EN) ? "enabled" : "disabled",
		    qlen);
		qlen = (cap & PCIM_ATS_EN) ? qlen : -1;
	}

	return (qlen);
}

/*
 * Check if an endpoint device supports device IOTLB or ATS.
 */
static inline bool
amdvi_dev_support_iotlb(struct amdvi_softc *softc, uint16_t devid)
{
	struct ivhd_dev_cfg *cfg;
	int qlen, i;
	bool pci_ats, ivhd_ats;

	qlen = amdvi_find_ats_qlen(devid);
	if (qlen < 0)
		return (false);

	KASSERT(softc, ("softc is NULL"));
	cfg = softc->dev_cfg;

	ivhd_ats = false;
	for (i = 0; i < softc->dev_cfg_cnt; i++) {
		if ((cfg->start_id <= devid) && (cfg->end_id >= devid)) {
			ivhd_ats = cfg->enable_ats;
			break;
		}
		cfg++;
	}

	pci_ats = true;		/* qlen >= 0 was checked above. */
	if (pci_ats != ivhd_ats)
		device_printf(softc->dev,
		    "BIOS bug: mismatch in ATS setting for %d.%d.%d, "
		    "ATS inv qlen = %d\n", RID2PCI_STR(devid), qlen);

	/* Ignore IVRS setting and respect PCI setting. */
	return (pci_ats);
}
#endif

/* Enable IOTLB support for IOMMU if it is supported. */
static inline void
amdvi_hw_enable_iotlb(struct amdvi_softc *softc)
{
#ifndef AMDVI_ATS_ENABLE
	softc->iotlb = false;
#else
	bool supported;

	supported = (softc->ivhd_flag & IVHD_FLAG_IOTLB) ? true : false;

	if (softc->pci_cap & AMDVI_PCI_CAP_IOTLB) {
		if (!supported)
			device_printf(softc->dev, "IOTLB disabled by BIOS.\n");

		if (supported && !amdvi_enable_iotlb) {
			device_printf(softc->dev, "IOTLB disabled by user.\n");
			supported = false;
		}
	} else
		supported = false;

	softc->iotlb = supported;

#endif
}

static int
amdvi_init_cmd(struct amdvi_softc *softc)
{
	struct amdvi_ctrl *ctrl = softc->ctrl;

	ctrl->cmd.len = 8;	/* Use 256 command buffer entries. */
	softc->cmd_max = 1 << ctrl->cmd.len;

	softc->cmd = malloc(sizeof(struct amdvi_cmd) *
	    softc->cmd_max, M_AMDVI, M_WAITOK | M_ZERO);

	if ((uintptr_t)softc->cmd & PAGE_MASK)
		panic("AMDVi: Command buffer not aligned on page boundary.");

	ctrl->cmd.base = vtophys(softc->cmd) / PAGE_SIZE;
	/*
	 * XXX: Reset the h/w pointers in case IOMMU is restarting,
	 * h/w doesn't clear these pointers based on empirical data.
	 */
	ctrl->cmd_tail = 0;
	ctrl->cmd_head = 0;

	return (0);
}

/*
 * Note: Update the tail pointer only after the command has been written,
 * since a tail pointer update causes h/w to execute new commands; see
 * section 3.3 of AMD IOMMU spec ver 2.0.
 */
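
/*
 * A typical submission, sketched from the helpers below:
 *
 *	cmd = amdvi_get_cmd_tail(softc);	(reserve and zero the tail slot)
 *	cmd->opcode = ...;			(fill in the command words)
 *	amdvi_update_cmd_tail(softc);		(publish; h/w starts fetching)
 */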
/* Get the command tail pointer w/o updating it. */
static struct amdvi_cmd *
amdvi_get_cmd_tail(struct amdvi_softc *softc)
{
	struct amdvi_ctrl *ctrl;
	struct amdvi_cmd *tail;

	KASSERT(softc, ("softc is NULL"));
	KASSERT(softc->cmd != NULL, ("cmd is NULL"));

	ctrl = softc->ctrl;
	KASSERT(ctrl != NULL, ("ctrl is NULL"));

	tail = (struct amdvi_cmd *)((uint8_t *)softc->cmd +
	    ctrl->cmd_tail);
	memset(tail, 0, sizeof(*tail));

	return (tail);
}

/*
 * Update the command tail pointer which will start command execution.
 */
static void
amdvi_update_cmd_tail(struct amdvi_softc *softc)
{
	struct amdvi_ctrl *ctrl;
	int size;

	size = sizeof(struct amdvi_cmd);
	KASSERT(softc->cmd != NULL, ("cmd is NULL"));

	ctrl = softc->ctrl;
	KASSERT(ctrl != NULL, ("ctrl is NULL"));

	ctrl->cmd_tail = MOD_INC(ctrl->cmd_tail, size, softc->cmd_max);
	softc->total_cmd++;

#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "cmd_tail: Tail:0x%x, Head:0x%x.\n",
	    ctrl->cmd_tail,
	    ctrl->cmd_head);
#endif

}

/*
 * Various commands supported by IOMMU.
 */

/* Completion wait command. */
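/*
 * COMPLETION_WAIT with the store bit set makes h/w write 'data' to the
 * physical address encoded in word0/word1 (softc->cmp_data here) once all
 * preceding commands in the ring have completed; amdvi_cmp_wait() polls
 * that location.
 */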
static void
amdvi_cmd_cmp(struct amdvi_softc *softc, const uint64_t data)
{
	struct amdvi_cmd *cmd;
	uint64_t pa;

	cmd = amdvi_get_cmd_tail(softc);

	pa = vtophys(&softc->cmp_data);
	cmd->opcode = AMDVI_CMP_WAIT_OPCODE;
	cmd->word0 = (pa & 0xFFFFFFF8) | AMDVI_CMP_WAIT_STORE;
	cmd->word1 = (pa >> 32) & 0xFFFFF;
	cmd->addr = data;

	amdvi_update_cmd_tail(softc);
}

/* Invalidate device table entry. */
static void
amdvi_cmd_inv_dte(struct amdvi_softc *softc, uint16_t devid)
{
	struct amdvi_cmd *cmd;

	cmd = amdvi_get_cmd_tail(softc);
	cmd->opcode = AMDVI_INVD_DTE_OPCODE;
	cmd->word0 = devid;
	amdvi_update_cmd_tail(softc);
#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "Invalidated DTE:0x%x\n", devid);
#endif
}

/* Invalidate IOMMU page, used for invalidation of a domain. */
static void
amdvi_cmd_inv_iommu_pages(struct amdvi_softc *softc, uint16_t domain_id,
    uint64_t addr, bool guest_nested,
    bool pde, bool page)
{
	struct amdvi_cmd *cmd;

	cmd = amdvi_get_cmd_tail(softc);

	cmd->opcode = AMDVI_INVD_PAGE_OPCODE;
	cmd->word1 = domain_id;
	/*
	 * Invalidate all addresses for this domain.
	 */
	cmd->addr = addr;
	cmd->addr |= pde ? AMDVI_INVD_PAGE_PDE : 0;
	cmd->addr |= page ? AMDVI_INVD_PAGE_S : 0;

	amdvi_update_cmd_tail(softc);
}

#ifdef AMDVI_ATS_ENABLE
/* Invalidate device IOTLB. */
static void
amdvi_cmd_inv_iotlb(struct amdvi_softc *softc, uint16_t devid)
{
	struct amdvi_cmd *cmd;
	int qlen;

	if (!softc->iotlb)
		return;

	qlen = amdvi_find_ats_qlen(devid);
	if (qlen < 0) {
		panic("AMDVI: Invalid ATS qlen(%d) for device %d.%d.%d\n",
		    qlen, RID2PCI_STR(devid));
	}
	cmd = amdvi_get_cmd_tail(softc);

#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "Invalidate IOTLB devID 0x%x"
	    " Qlen:%d\n", devid, qlen);
#endif
	cmd->opcode = AMDVI_INVD_IOTLB_OPCODE;
	cmd->word0 = devid;
	cmd->word1 = qlen;
	cmd->addr = AMDVI_INVD_IOTLB_ALL_ADDR |
	    AMDVI_INVD_IOTLB_S;
	amdvi_update_cmd_tail(softc);
}
#endif

#ifdef notyet	/* For Interrupt Remap. */
static void
amdvi_cmd_inv_intr_map(struct amdvi_softc *softc,
    uint16_t devid)
{
	struct amdvi_cmd *cmd;

	cmd = amdvi_get_cmd_tail(softc);
	cmd->opcode = AMDVI_INVD_INTR_OPCODE;
	cmd->word0 = devid;
	amdvi_update_cmd_tail(softc);
#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "Invalidate INTR map of devID 0x%x\n", devid);
#endif
}
#endif

/* Invalidate domain using INVALIDATE_IOMMU_PAGES command. */
static void
amdvi_inv_domain(struct amdvi_softc *softc, uint16_t domain_id)
{

	/*
	 * See section 3.3.3 of IOMMU spec rev 2.0, software note
	 * for invalidating domain.
	 */
	amdvi_cmd_inv_iommu_pages(softc, domain_id, AMDVI_INVD_PAGE_ALL_ADDR,
	    false, true, true);

#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "Invalidate domain:0x%x\n", domain_id);

#endif
}

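/*
 * Post a completion-wait that stores a marker value, then poll for the
 * marker with a 1ms delay per iteration (up to ~100ms) to detect that h/w
 * has drained the command ring.
 */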
static bool
amdvi_cmp_wait(struct amdvi_softc *softc)
{
#ifdef AMDVI_DEBUG_CMD
	struct amdvi_ctrl *ctrl = softc->ctrl;
#endif
	const uint64_t VERIFY = 0xA5A5;
	volatile uint64_t *read;
	int i;
	bool status;

	read = &softc->cmp_data;
	*read = 0;
	amdvi_cmd_cmp(softc, VERIFY);
	/* Wait for h/w to update completion data. */
	for (i = 0; i < 100 && (*read != VERIFY); i++) {
		DELAY(1000);	/* 1 ms */
	}
	status = (VERIFY == softc->cmp_data) ? true : false;

#ifdef AMDVI_DEBUG_CMD
	if (status)
		device_printf(softc->dev, "CMD completion DONE Tail:0x%x, "
		    "Head:0x%x, loop:%d.\n", ctrl->cmd_tail,
		    ctrl->cmd_head, i);
#endif
	return (status);
}

static void
amdvi_wait(struct amdvi_softc *softc)
{
	struct amdvi_ctrl *ctrl;
	int i;

	KASSERT(softc, ("softc is NULL"));

	ctrl = softc->ctrl;
	KASSERT(ctrl != NULL, ("ctrl is NULL"));
	/* Don't wait if h/w is not enabled. */
	if ((ctrl->control & AMDVI_CTRL_EN) == 0)
		return;

	for (i = 0; i < 10; i++) {
		if (amdvi_cmp_wait(softc))
			return;
	}

	device_printf(softc->dev, "Error: completion failed"
	    " tail:0x%x, head:0x%x.\n",
	    ctrl->cmd_tail, ctrl->cmd_head);
	/* Dump the last command. */
	amdvi_dump_cmds(softc, 1);
}

static void
amdvi_dump_cmds(struct amdvi_softc *softc, int count)
{
	struct amdvi_ctrl *ctrl;
	struct amdvi_cmd *cmd;
	int off, i;

	ctrl = softc->ctrl;
	device_printf(softc->dev, "Dump last %d command(s):\n", count);
	/*
	 * If h/w is stuck in completion, the culprit is the previous
	 * command; start dumping from the previous command onward.
	 */
	off = MOD_DEC(ctrl->cmd_head, sizeof(struct amdvi_cmd),
	    softc->cmd_max);
	for (i = 0; off != ctrl->cmd_tail && i < count; i++) {
		cmd = (struct amdvi_cmd *)((uint8_t *)softc->cmd + off);
		printf(" [CMD%d, off:0x%x] opcode= 0x%x 0x%x"
		    " 0x%x 0x%lx\n", i, off, cmd->opcode,
		    cmd->word0, cmd->word1, cmd->addr);
		off = MOD_INC(off, sizeof(struct amdvi_cmd), softc->cmd_max);
	}
}

static int
amdvi_init_event(struct amdvi_softc *softc)
{
	struct amdvi_ctrl *ctrl;

	ctrl = softc->ctrl;
	ctrl->event.len = 8;
	softc->event_max = 1 << ctrl->event.len;
	softc->event = malloc(sizeof(struct amdvi_event) *
	    softc->event_max, M_AMDVI, M_WAITOK | M_ZERO);
	if ((uintptr_t)softc->event & PAGE_MASK) {
		device_printf(softc->dev, "Event buffer not aligned on page.");
		return (EINVAL);
	}
	ctrl->event.base = vtophys(softc->event) / PAGE_SIZE;

	/* Reset the pointers. */
	ctrl->evt_head = 0;
	ctrl->evt_tail = 0;

	return (0);
}

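/*
 * The "%b" conversion below is the kernel's bit-field decoder: the first
 * character of the format string selects the numeric base (\020 = 16) and
 * each <bit-number><name> pair that follows names one bit, so a flag value
 * of 0x21 would print as "0x21<GN,RW>".
 */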
static inline void
amdvi_decode_evt_flag(uint16_t flag)
{

	flag &= AMDVI_EVENT_FLAG_MASK;
	printf(" 0x%b]\n", flag,
	    "\020"
	    "\001GN"
	    "\002NX"
	    "\003US"
	    "\004I"
	    "\005PR"
	    "\006RW"
	    "\007PE"
	    "\010RZ"
	    "\011TR"
	    );
}

/* See section 2.5.4 of AMD IOMMU spec ver 2.62. */
static inline void
amdvi_decode_evt_flag_type(uint8_t type)
{

	switch (AMDVI_EVENT_FLAG_TYPE(type)) {
	case 0:
		printf("RSVD\n");
		break;
	case 1:
		printf("Master Abort\n");
		break;
	case 2:
		printf("Target Abort\n");
		break;
	case 3:
		printf("Data Err\n");
		break;
	default:
		break;
	}
}

static void
amdvi_decode_inv_dte_evt(uint16_t devid, uint16_t domid, uint64_t addr,
    uint16_t flag)
{

	printf("\t[INVALID_DTE EVT: devId:0x%x DomId:0x%x"
	    " Addr:0x%lx",
	    devid, domid, addr);
	amdvi_decode_evt_flag(flag);
}

static void
amdvi_decode_pf_evt(uint16_t devid, uint16_t domid, uint64_t addr,
    uint16_t flag)
{

	printf("\t[IO_PAGE_FAULT EVT: devId:0x%x DomId:0x%x"
	    " Addr:0x%lx",
	    devid, domid, addr);
	amdvi_decode_evt_flag(flag);
}

static void
amdvi_decode_dte_hwerr_evt(uint16_t devid, uint16_t domid,
    uint64_t addr, uint16_t flag)
{

	printf("\t[DEV_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x"
	    " Addr:0x%lx", devid, domid, addr);
	amdvi_decode_evt_flag(flag);
	amdvi_decode_evt_flag_type(flag);
}

static void
amdvi_decode_page_hwerr_evt(uint16_t devid, uint16_t domid, uint64_t addr,
    uint16_t flag)
{

	printf("\t[PAGE_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x"
	    " Addr:0x%lx", devid, domid, addr);
	amdvi_decode_evt_flag(flag);
	amdvi_decode_evt_flag_type(flag);
}

static void
amdvi_decode_evt(struct amdvi_event *evt)
{
	struct amdvi_cmd *cmd;

	switch (evt->opcode) {
	case AMDVI_EVENT_INVALID_DTE:
		amdvi_decode_inv_dte_evt(evt->devid, evt->pasid_domid,
		    evt->addr, evt->flag);
		break;

	case AMDVI_EVENT_PFAULT:
		amdvi_decode_pf_evt(evt->devid, evt->pasid_domid,
		    evt->addr, evt->flag);
		break;

	case AMDVI_EVENT_DTE_HW_ERROR:
		amdvi_decode_dte_hwerr_evt(evt->devid, evt->pasid_domid,
		    evt->addr, evt->flag);
		break;

	case AMDVI_EVENT_PAGE_HW_ERROR:
		amdvi_decode_page_hwerr_evt(evt->devid, evt->pasid_domid,
		    evt->addr, evt->flag);
		break;

	case AMDVI_EVENT_ILLEGAL_CMD:
		/* FALL THROUGH */
	case AMDVI_EVENT_CMD_HW_ERROR:
		printf("\t[%s EVT]\n", (evt->opcode == AMDVI_EVENT_ILLEGAL_CMD) ?
		    "ILLEGAL CMD" : "CMD HW ERR");
		cmd = (struct amdvi_cmd *)PHYS_TO_DMAP(evt->addr);
		printf("\tCMD opcode= 0x%x 0x%x 0x%x 0x%lx\n",
		    cmd->opcode, cmd->word0, cmd->word1, cmd->addr);
		break;

	case AMDVI_EVENT_IOTLB_TIMEOUT:
		printf("\t[IOTLB_INV_TIMEOUT devid:0x%x addr:0x%lx]\n",
		    evt->devid, evt->addr);
		break;

	case AMDVI_EVENT_INVALID_DTE_REQ:
		printf("\t[INV_DTE devid:0x%x addr:0x%lx type:0x%x tr:%d]\n",
		    evt->devid, evt->addr, evt->flag >> 9,
		    (evt->flag >> 8) & 1);
		break;

	case AMDVI_EVENT_INVALID_PPR_REQ:
	case AMDVI_EVENT_COUNTER_ZERO:
		printf("AMD-Vi: v2 events.\n");
		break;

	default:
		printf("Unsupported AMD-Vi event:%d\n", evt->opcode);
	}
}

static void
amdvi_print_events(struct amdvi_softc *softc)
{
	struct amdvi_ctrl *ctrl;
	struct amdvi_event *event;
	int i, size;

	ctrl = softc->ctrl;
	size = sizeof(struct amdvi_event);
	for (i = 0; i < softc->event_max; i++) {
		event = &softc->event[ctrl->evt_head / size];
		if (!event->opcode)
			break;
		device_printf(softc->dev, "\t[Event%d: Head:0x%x Tail:0x%x]\n",
		    i, ctrl->evt_head, ctrl->evt_tail);
		amdvi_decode_evt(event);
		ctrl->evt_head = MOD_INC(ctrl->evt_head, size,
		    softc->event_max);
	}
}

static int
amdvi_init_dte(struct amdvi_softc *softc)
{
	struct amdvi_ctrl *ctrl;

	ctrl = softc->ctrl;
	ctrl->dte.base = vtophys(amdvi_dte) / PAGE_SIZE;
	ctrl->dte.size = 0x1FF;		/* 2MB device table. */

	return (0);
}

/*
 * Not all capabilities of the IOMMU are available in the ACPI IVHD flags
 * or EFR entry; read them directly from the device.
 */
static int
amdvi_print_pci_cap(device_t dev)
{
	struct amdvi_softc *softc;
	uint32_t off, cap;

	softc = device_get_softc(dev);
	off = softc->cap_off;

	/*
	 * Section 3.7.1 of IOMMU spec rev 2.0.
	 * Read capability from device.
	 */
	cap = amdvi_pci_read(softc, off);

	/* Make sure capability type[18:16] is 3. */
	KASSERT((((cap >> 16) & 0x7) == 0x3),
	    ("Not an IOMMU capability 0x%x@0x%x", cap, off));

	softc->pci_cap = cap >> 24;
	device_printf(softc->dev, "PCI cap 0x%x@0x%x feature:%b\n",
	    cap, off, softc->pci_cap,
	    "\20\1IOTLB\2HT\3NPCache\4EFR\5CapExt");

	return (0);
}

static void
amdvi_event_intr(void *arg)
{
	struct amdvi_softc *softc;
	struct amdvi_ctrl *ctrl;

	softc = (struct amdvi_softc *)arg;
	ctrl = softc->ctrl;
	device_printf(softc->dev, "EVT INTR %ld Status:0x%x"
	    " [EVT Head:0x%x Tail:0x%x]\n", softc->event_intr_cnt++,
	    ctrl->status, ctrl->evt_head, ctrl->evt_tail);
	printf(" [CMD Total 0x%lx] Tail:0x%x, Head:0x%x.\n",
	    softc->total_cmd, ctrl->cmd_tail, ctrl->cmd_head);

	amdvi_print_events(softc);
	ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR;
}

static void
amdvi_free_evt_intr_res(device_t dev)
{

	struct amdvi_softc *softc;
	device_t mmio_dev;

	softc = device_get_softc(dev);
	mmio_dev = softc->pci_dev;

	IVHD_TEARDOWN_INTR(mmio_dev);
}

static int
amdvi_alloc_intr_resources(struct amdvi_softc *softc)
{
	struct amdvi_ctrl *ctrl;
	device_t dev, mmio_dev;
	int err;

	dev = softc->dev;
	mmio_dev = softc->pci_dev;

	/* Clear interrupt status bits. */
	ctrl = softc->ctrl;
	ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR;

	err = IVHD_SETUP_INTR(mmio_dev, amdvi_event_intr, softc, "fault");
	if (err)
		device_printf(dev, "Interrupt setup failed on %s\n",
		    device_get_nameunit(mmio_dev));
	return (err);
}

static void
amdvi_print_dev_cap(struct amdvi_softc *softc)
{
	struct ivhd_dev_cfg *cfg;
	int i;

	cfg = softc->dev_cfg;
	for (i = 0; i < softc->dev_cfg_cnt; i++) {
		device_printf(softc->dev, "device [0x%x - 0x%x] "
		    "config:%b%s\n", cfg->start_id, cfg->end_id,
		    cfg->data,
		    "\020\001INIT\002ExtInt\003NMI"
		    "\007LINT0\010LINT1",
		    cfg->enable_ats ? "ATS enabled" : "");
		cfg++;
	}
}

static int
amdvi_handle_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct amdvi_softc *softc;
	int result, type, error = 0;

	softc = (struct amdvi_softc *)arg1;
	type = arg2;

	switch (type) {
	case 0:
		result = softc->ctrl->cmd_head;
		error = sysctl_handle_int(oidp, &result, 0,
		    req);
		break;
	case 1:
		result = softc->ctrl->cmd_tail;
		error = sysctl_handle_int(oidp, &result, 0,
		    req);
		break;
	case 2:
		result = softc->ctrl->evt_head;
		error = sysctl_handle_int(oidp, &result, 0,
		    req);
		break;
	case 3:
		result = softc->ctrl->evt_tail;
		error = sysctl_handle_int(oidp, &result, 0,
		    req);
		break;

	default:
		device_printf(softc->dev, "Unknown sysctl:%d\n", type);
	}

	return (error);
}

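/*
 * These per-device nodes hang off the driver's sysctl tree, so they
 * typically appear as dev.ivhd.<unit>.* (assuming the ivhd device name);
 * e.g. "sysctl dev.ivhd.0.command_head" reads the live command ring head.
 */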
static void
amdvi_add_sysctl(struct amdvi_softc *softc)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	device_t dev;

	dev = softc->dev;
	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "event_intr_count", CTLFLAG_RD,
	    &softc->event_intr_cnt, "Event interrupt count");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "command_count", CTLFLAG_RD,
	    &softc->total_cmd, "Command submitted count");
	SYSCTL_ADD_U16(ctx, child, OID_AUTO, "pci_rid", CTLFLAG_RD,
	    &softc->pci_rid, 0, "IOMMU RID");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_head",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 0,
	    amdvi_handle_sysctl, "IU", "Command head");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_tail",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 1,
	    amdvi_handle_sysctl, "IU", "Command tail");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_head",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 2,
	    amdvi_handle_sysctl, "IU", "Event head");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_tail",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 3,
	    amdvi_handle_sysctl, "IU", "Event tail");
}

int
amdvi_setup_hw(struct amdvi_softc *softc)
{
	device_t dev;
	int status;

	dev = softc->dev;

	amdvi_hw_enable_iotlb(softc);

	amdvi_print_dev_cap(softc);

	if ((status = amdvi_print_pci_cap(dev)) != 0) {
		device_printf(dev, "Couldn't read PCI capability.\n");
		return (status);
	}
	if ((status = amdvi_init_cmd(softc)) != 0) {
		device_printf(dev, "Couldn't configure command buffer.\n");
		return (status);
	}
	if ((status = amdvi_init_event(softc)) != 0) {
		device_printf(dev, "Couldn't configure event buffer.\n");
		return (status);
	}
	if ((status = amdvi_init_dte(softc)) != 0) {
		device_printf(dev, "Couldn't configure device table.\n");
		return (status);
	}
	if ((status = amdvi_alloc_intr_resources(softc)) != 0) {
		return (status);
	}
	amdvi_add_sysctl(softc);
	return (0);
}

int
amdvi_teardown_hw(struct amdvi_softc *softc)
{
	device_t dev;

	dev = softc->dev;

	/*
	 * Called after disable, h/w is stopped by now, free all the resources.
	 */
	amdvi_free_evt_intr_res(dev);

	if (softc->cmd)
		free(softc->cmd, M_AMDVI);

	if (softc->event)
		free(softc->event, M_AMDVI);

	return (0);
}

/*********** bhyve interfaces *********************/
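/*
 * Pass-through is off by default; set the loader tunable
 * hw.vmm.amdvi.enable=1 (e.g. in /boot/loader.conf) before vmm.ko loads
 * to turn it on.
 */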
static int
amdvi_init(void)
{
	if (!ivhd_count) {
		return (EIO);
	}
	if (!amdvi_enable_user) {
		printf("bhyve: Found %d AMD-Vi/IOMMU device(s), "
		    "use hw.vmm.amdvi.enable=1 to enable pass-through.\n",
		    ivhd_count);
		return (EINVAL);
	}
	return (0);
}

static void
amdvi_cleanup(void)
{
	/* Nothing. */
}

static uint16_t
amdvi_domainId(void)
{

	/*
	 * If we hit the maximum domain limit, roll over, skipping the
	 * host domain (0).
	 * XXX: make sure that this domain is not used.
	 */
	if (amdvi_dom_id == AMDVI_MAX_DOMAIN)
		amdvi_dom_id = 1;

	return ((uint16_t)amdvi_dom_id++);
}

static void
amdvi_do_inv_domain(uint16_t domain_id, bool create)
{
	struct amdvi_softc *softc;
	int i;

	for (i = 0; i < ivhd_count; i++) {
		softc = device_get_softc(ivhd_devs[i]);
		KASSERT(softc, ("softc is NULL"));
		/*
		 * If not-present pages are cached, invalidate pages after
		 * creating a domain.
		 */
#if 0
		if (create && ((softc->pci_cap & AMDVI_PCI_CAP_NPCACHE) == 0))
			continue;
#endif
		amdvi_inv_domain(softc, domain_id);
		amdvi_wait(softc);
	}
}

static void *
amdvi_create_domain(vm_paddr_t maxaddr)
{
	struct amdvi_domain *dom;

	dom = malloc(sizeof(struct amdvi_domain), M_AMDVI, M_ZERO | M_WAITOK);
	dom->id = amdvi_domainId();
	//dom->maxaddr = maxaddr;
#ifdef AMDVI_DEBUG_CMD
	printf("Created domain #%d\n", dom->id);
#endif
	/*
	 * The host domain (#0) gets a translation table only when
	 * amdvi_host_ptp is set.
	 */
	if (dom->id || amdvi_host_ptp)
		dom->ptp = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO);

	dom->ptp_level = amdvi_ptp_level;

	amdvi_do_inv_domain(dom->id, true);
	SLIST_INSERT_HEAD(&dom_head, dom, next);

	return (dom);
}

static void
amdvi_free_ptp(uint64_t *ptp, int level)
{
	int i;

	if (level < 1)
		return;

	for (i = 0; i < NPTEPG; i++) {
		if ((ptp[i] & AMDVI_PT_PRESENT) == 0)
			continue;
		/* XXX: Add super-page or PTE mapping > 4KB. */
#ifdef notyet
		/* Super-page mapping. */
		if (AMDVI_PD_SUPER(ptp[i]))
			continue;
#endif

		amdvi_free_ptp((uint64_t *)PHYS_TO_DMAP(ptp[i]
		    & AMDVI_PT_MASK), level - 1);
	}

	free(ptp, M_AMDVI);
}

static void
amdvi_destroy_domain(void *arg)
{
	struct amdvi_domain *domain;

	domain = (struct amdvi_domain *)arg;
	KASSERT(domain, ("domain is NULL"));
#ifdef AMDVI_DEBUG_CMD
	printf("Destroying domain %d\n", domain->id);
#endif
	if (domain->ptp)
		amdvi_free_ptp(domain->ptp, domain->ptp_level);

	amdvi_do_inv_domain(domain->id, false);
	SLIST_REMOVE(&dom_head, domain, amdvi_domain, next);
	free(domain, M_AMDVI);
}

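/*
 * Walk (and optionally build) the I/O page table for one leaf entry.  The
 * layout mirrors x86 long-mode paging: starting at PML4SHIFT (39), each
 * level consumes PT_SHIFT (9) index bits until 'shift' reaches the level
 * that pg_size fits in.  For a 4KB page with a level-4 table, for example,
 * the walk uses indexes gpa[47:39], gpa[38:30] and gpa[29:21], then writes
 * the leaf PTE at index gpa[20:12].
 */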
static uint64_t
amdvi_set_pt(uint64_t *pt, int level, vm_paddr_t gpa,
    vm_paddr_t hpa, uint64_t pg_size, bool create)
{
	uint64_t *page, pa;
	int shift, index;
	const int PT_SHIFT = 9;
	const int PT_INDEX_MASK = (1 << PT_SHIFT) - 1;	/* Based on PT_SHIFT */

	if (!pg_size)
		return (0);

	if (hpa & (pg_size - 1)) {
		printf("HPA is not size aligned.\n");
		return (0);
	}
	if (gpa & (pg_size - 1)) {
		printf("GPA is not size aligned.\n");
		return (0);
	}
	shift = PML4SHIFT;
	while ((shift > PAGE_SHIFT) && (pg_size < (1UL << shift))) {
		index = (gpa >> shift) & PT_INDEX_MASK;

		if ((pt[index] == 0) && create) {
			page = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO);
			pa = vtophys(page);
			pt[index] = pa | AMDVI_PT_PRESENT | AMDVI_PT_RW |
			    ((level - 1) << AMDVI_PD_LEVEL_SHIFT);
		}
#ifdef AMDVI_DEBUG_PTE
		if ((gpa % 0x1000000) == 0)
			printf("[level%d, shift = %d]PTE:0x%lx\n",
			    level, shift, pt[index]);
#endif
#define PTE2PA(x)	((uint64_t)(x) & AMDVI_PT_MASK)
		pa = PTE2PA(pt[index]);
		pt = (uint64_t *)PHYS_TO_DMAP(pa);
		shift -= PT_SHIFT;
		level--;
	}

	/* Leaf entry. */
	index = (gpa >> shift) & PT_INDEX_MASK;

	if (create) {
		pt[index] = hpa | AMDVI_PT_RW | AMDVI_PT_PRESENT;
	} else
		pt[index] = 0;

#ifdef AMDVI_DEBUG_PTE
	if ((gpa % 0x1000000) == 0)
		printf("[Last level%d, shift = %d]PTE:0x%lx\n",
		    level, shift, pt[index]);
#endif
	return (1ULL << shift);
}

static uint64_t
amdvi_update_mapping(struct amdvi_domain *domain, vm_paddr_t gpa,
    vm_paddr_t hpa, uint64_t size, bool create)
{
	uint64_t mapped, *ptp, len;
	int level;

	KASSERT(domain, ("domain is NULL"));
	level = domain->ptp_level;
	KASSERT(level, ("Page table level is 0"));

	ptp = domain->ptp;
	KASSERT(ptp, ("PTP is NULL"));
	mapped = 0;
	while (mapped < size) {
		len = amdvi_set_pt(ptp, level, gpa + mapped, hpa + mapped,
		    PAGE_SIZE, create);
		if (!len) {
			printf("Error: Couldn't map HPA:0x%lx GPA:0x%lx\n",
			    hpa, gpa);
			return (0);
		}
		mapped += len;
	}

	return (mapped);
}

static int
amdvi_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa,
    uint64_t len, uint64_t *res_len)
{
	struct amdvi_domain *domain;

	domain = (struct amdvi_domain *)arg;

	if (domain->id && !domain->ptp) {
		printf("ptp is NULL\n");
		return (EINVAL);
	}

	/*
	 * If the host domain was created w/o a page table, skip the IOMMU
	 * page table set-up.
	 */
	if (domain->ptp)
		*res_len = amdvi_update_mapping(domain, gpa, hpa, len, true);
	else
		*res_len = len;
	return (0);
}

static int
amdvi_remove_mapping(void *arg, vm_paddr_t gpa, uint64_t len, uint64_t *res_len)
{
	struct amdvi_domain *domain;

	domain = (struct amdvi_domain *)arg;
	/*
	 * If the host domain was created w/o a page table, skip the IOMMU
	 * page table set-up.
	 */
	if (domain->ptp)
		*res_len = amdvi_update_mapping(domain, gpa, 0, len, false);
	else
		*res_len = len;
	return (0);
}

static struct amdvi_softc *
amdvi_find_iommu(uint16_t devid)
{
	struct amdvi_softc *softc;
	int i, j;

	for (i = 0; i < ivhd_count; i++) {
		softc = device_get_softc(ivhd_devs[i]);
		for (j = 0; j < softc->dev_cfg_cnt; j++)
			if ((devid >= softc->dev_cfg[j].start_id) &&
			    (devid <= softc->dev_cfg[j].end_id))
				return (softc);
	}

	return (NULL);
}

/*
 * Set up a device table entry.
 * Per IOMMU spec Rev 2.0, section 3.2.2.2, some of the fields must
 * be set together, e.g. the read and write permission bits.
 */
static void
amdvi_set_dte(struct amdvi_domain *domain, struct amdvi_softc *softc,
    uint16_t devid, bool enable)
{
	struct amdvi_dte *temp;

	KASSERT(domain, ("domain is NULL for pci_rid:0x%x\n", devid));
	KASSERT(softc, ("softc is NULL for pci_rid:0x%x\n", devid));

	temp = &amdvi_dte[devid];

#ifdef AMDVI_ATS_ENABLE
	/* If IOMMU and device support IOTLB, enable it. */
	if (amdvi_dev_support_iotlb(softc, devid) && softc->iotlb)
		temp->iotlb_enable = 1;
#endif

	/* Avoid duplicate I/O faults. */
	temp->sup_second_io_fault = 1;
	temp->sup_all_io_fault = amdvi_disable_io_fault;

	temp->dt_valid = 1;
	temp->domain_id = domain->id;

	if (enable) {
		if (domain->ptp) {
			temp->pt_base = vtophys(domain->ptp) >> 12;
			temp->pt_level = amdvi_ptp_level;
		}
		/*
		 * XXX: The page table valid [TV] bit must be set even if
		 * host domain page tables are not enabled.
		 */
		temp->pt_valid = 1;
		temp->read_allow = 1;
		temp->write_allow = 1;
	}
}

static void
amdvi_inv_device(struct amdvi_softc *softc, uint16_t devid)
{
	KASSERT(softc, ("softc is NULL"));

	amdvi_cmd_inv_dte(softc, devid);
#ifdef AMDVI_ATS_ENABLE
	if (amdvi_dev_support_iotlb(softc, devid))
		amdvi_cmd_inv_iotlb(softc, devid);
#endif
	amdvi_wait(softc);
}

static int
amdvi_add_device(void *arg, device_t dev __unused, uint16_t devid)
{
	struct amdvi_domain *domain;
	struct amdvi_softc *softc;

	domain = (struct amdvi_domain *)arg;
	KASSERT(domain != NULL, ("domain is NULL"));
#ifdef AMDVI_DEBUG_CMD
	printf("Assigning device(%d.%d.%d) to domain:%d\n",
	    RID2PCI_STR(devid), domain->id);
#endif
	softc = amdvi_find_iommu(devid);
	if (softc == NULL)
		return (ENXIO);
	amdvi_set_dte(domain, softc, devid, true);
	amdvi_inv_device(softc, devid);
	return (0);
}

static int
amdvi_remove_device(void *arg, device_t dev __unused, uint16_t devid)
{
	struct amdvi_domain *domain;
	struct amdvi_softc *softc;

	domain = (struct amdvi_domain *)arg;
#ifdef AMDVI_DEBUG_CMD
	printf("Remove device(0x%x) from domain:%d\n",
	    devid, domain->id);
#endif
	softc = amdvi_find_iommu(devid);
	if (softc == NULL)
		return (ENXIO);
	amdvi_set_dte(domain, softc, devid, false);
	amdvi_inv_device(softc, devid);
	return (0);
}

static void
amdvi_enable(void)
{
	struct amdvi_ctrl *ctrl;
	struct amdvi_softc *softc;
	uint64_t val;
	int i;

	for (i = 0; i < ivhd_count; i++) {
		softc = device_get_softc(ivhd_devs[i]);
		KASSERT(softc, ("softc is NULL\n"));
		ctrl = softc->ctrl;
		KASSERT(ctrl, ("ctrl is NULL\n"));

		val = (AMDVI_CTRL_EN |
		    AMDVI_CTRL_CMD |
		    AMDVI_CTRL_ELOG |
		    AMDVI_CTRL_ELOGINT |
		    AMDVI_CTRL_INV_TO_1S);

		if (softc->ivhd_flag & IVHD_FLAG_COH)
			val |= AMDVI_CTRL_COH;
		if (softc->ivhd_flag & IVHD_FLAG_HTT)
			val |= AMDVI_CTRL_HTT;
		if (softc->ivhd_flag & IVHD_FLAG_RPPW)
			val |= AMDVI_CTRL_RPPW;
		if (softc->ivhd_flag & IVHD_FLAG_PPW)
			val |= AMDVI_CTRL_PPW;
		if (softc->ivhd_flag & IVHD_FLAG_ISOC)
			val |= AMDVI_CTRL_ISOC;

		ctrl->control = val;
	}
}

static void
amdvi_disable(void)
{
	struct amdvi_ctrl *ctrl;
	struct amdvi_softc *softc;
	int i;

	for (i = 0; i < ivhd_count; i++) {
		softc = device_get_softc(ivhd_devs[i]);
		KASSERT(softc, ("softc is NULL\n"));
		ctrl = softc->ctrl;
		KASSERT(ctrl, ("ctrl is NULL\n"));

		ctrl->control = 0;
	}
}

static int
amdvi_invalidate_tlb(void *arg)
{
	struct amdvi_domain *domain;

	domain = (struct amdvi_domain *)arg;
	KASSERT(domain, ("domain is NULL"));
	amdvi_do_inv_domain(domain->id, false);
	return (0);
}

const struct iommu_ops iommu_ops_amd = {
	.init = amdvi_init,
	.cleanup = amdvi_cleanup,
	.enable = amdvi_enable,
	.disable = amdvi_disable,
	.create_domain = amdvi_create_domain,
	.destroy_domain = amdvi_destroy_domain,
	.create_mapping = amdvi_create_mapping,
	.remove_mapping = amdvi_remove_mapping,
	.add_device = amdvi_add_device,
	.remove_device = amdvi_remove_device,
	.invalidate_tlb = amdvi_invalidate_tlb,
};
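
/*
 * Note: vmm's iommu layer (io/iommu.c) selects the ops table at run time;
 * on AMD hardware it is expected to pick iommu_ops_amd, with the IVHD
 * devices enumerated from the ACPI IVRS table by the companion ivhd driver.
 */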