xref: /freebsd/sys/x86/iommu/amd_cmd.c (revision 0f5116d7efe33c81f0b24b56eec78af37898f500)
1*0f5116d7SKonstantin Belousov /*-
2*0f5116d7SKonstantin Belousov  * SPDX-License-Identifier: BSD-2-Clause
3*0f5116d7SKonstantin Belousov  *
4*0f5116d7SKonstantin Belousov  * Copyright (c) 2024 The FreeBSD Foundation
5*0f5116d7SKonstantin Belousov  *
6*0f5116d7SKonstantin Belousov  * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
7*0f5116d7SKonstantin Belousov  * under sponsorship from the FreeBSD Foundation.
8*0f5116d7SKonstantin Belousov  *
9*0f5116d7SKonstantin Belousov  * Redistribution and use in source and binary forms, with or without
10*0f5116d7SKonstantin Belousov  * modification, are permitted provided that the following conditions
11*0f5116d7SKonstantin Belousov  * are met:
12*0f5116d7SKonstantin Belousov  * 1. Redistributions of source code must retain the above copyright
13*0f5116d7SKonstantin Belousov  *    notice, this list of conditions and the following disclaimer.
14*0f5116d7SKonstantin Belousov  * 2. Redistributions in binary form must reproduce the above copyright
15*0f5116d7SKonstantin Belousov  *    notice, this list of conditions and the following disclaimer in the
16*0f5116d7SKonstantin Belousov  *    documentation and/or other materials provided with the distribution.
17*0f5116d7SKonstantin Belousov  *
18*0f5116d7SKonstantin Belousov  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19*0f5116d7SKonstantin Belousov  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20*0f5116d7SKonstantin Belousov  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21*0f5116d7SKonstantin Belousov  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22*0f5116d7SKonstantin Belousov  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23*0f5116d7SKonstantin Belousov  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24*0f5116d7SKonstantin Belousov  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25*0f5116d7SKonstantin Belousov  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26*0f5116d7SKonstantin Belousov  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27*0f5116d7SKonstantin Belousov  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28*0f5116d7SKonstantin Belousov  * SUCH DAMAGE.
29*0f5116d7SKonstantin Belousov  */
30*0f5116d7SKonstantin Belousov 
31*0f5116d7SKonstantin Belousov #include "opt_acpi.h"
32*0f5116d7SKonstantin Belousov 
33*0f5116d7SKonstantin Belousov #include <sys/param.h>
34*0f5116d7SKonstantin Belousov #include <sys/bus.h>
35*0f5116d7SKonstantin Belousov #include <sys/kernel.h>
36*0f5116d7SKonstantin Belousov #include <sys/malloc.h>
37*0f5116d7SKonstantin Belousov #include <sys/memdesc.h>
38*0f5116d7SKonstantin Belousov #include <sys/module.h>
39*0f5116d7SKonstantin Belousov #include <sys/rman.h>
40*0f5116d7SKonstantin Belousov #include <sys/taskqueue.h>
41*0f5116d7SKonstantin Belousov #include <sys/time.h>
42*0f5116d7SKonstantin Belousov #include <sys/tree.h>
43*0f5116d7SKonstantin Belousov #include <sys/vmem.h>
44*0f5116d7SKonstantin Belousov #include <vm/vm.h>
45*0f5116d7SKonstantin Belousov #include <vm/vm_extern.h>
46*0f5116d7SKonstantin Belousov #include <vm/vm_kern.h>
47*0f5116d7SKonstantin Belousov #include <vm/vm_page.h>
48*0f5116d7SKonstantin Belousov #include <vm/vm_map.h>
49*0f5116d7SKonstantin Belousov #include <contrib/dev/acpica/include/acpi.h>
50*0f5116d7SKonstantin Belousov #include <contrib/dev/acpica/include/accommon.h>
51*0f5116d7SKonstantin Belousov #include <dev/acpica/acpivar.h>
52*0f5116d7SKonstantin Belousov #include <dev/pci/pcireg.h>
53*0f5116d7SKonstantin Belousov #include <machine/bus.h>
54*0f5116d7SKonstantin Belousov #include <machine/cpu.h>
55*0f5116d7SKonstantin Belousov #include <x86/include/busdma_impl.h>
56*0f5116d7SKonstantin Belousov #include <dev/iommu/busdma_iommu.h>
57*0f5116d7SKonstantin Belousov #include <x86/iommu/amd_reg.h>
58*0f5116d7SKonstantin Belousov #include <x86/iommu/x86_iommu.h>
59*0f5116d7SKonstantin Belousov #include <x86/iommu/amd_iommu.h>
60*0f5116d7SKonstantin Belousov 
/*
 * Set the command buffer enable bit in the cached control register
 * value and write it back to the hardware control register.
 */
static void
amdiommu_enable_cmdbuf(struct amdiommu_unit *unit)
{
	AMDIOMMU_ASSERT_LOCKED(unit);

	unit->hw_ctrl |= AMDIOMMU_CTRL_CMDBUF_EN;
	amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
}
69*0f5116d7SKonstantin Belousov 
/*
 * Clear the command buffer enable bit in the cached control register
 * value and write it back to the hardware control register.
 */
static void
amdiommu_disable_cmdbuf(struct amdiommu_unit *unit)
{
	AMDIOMMU_ASSERT_LOCKED(unit);

	unit->hw_ctrl &= ~AMDIOMMU_CTRL_CMDBUF_EN;
	amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
}
78*0f5116d7SKonstantin Belousov 
79*0f5116d7SKonstantin Belousov 
/*
 * Enable the completion-wait interrupt and acknowledge any interrupt
 * that is already pending, so a stale status bit does not suppress
 * future notifications.  NOTE(review): the status write presumably
 * clears the COMWAITINT bit write-1-to-clear style -- confirm against
 * the AMD IOMMU register specification.
 */
static void
amdiommu_enable_qi_intr(struct iommu_unit *iommu)
{
	struct amdiommu_unit *unit;

	unit = IOMMU2AMD(iommu);
	AMDIOMMU_ASSERT_LOCKED(unit);
	unit->hw_ctrl |= AMDIOMMU_CTRL_COMWINT_EN;
	amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
	amdiommu_write8(unit, AMDIOMMU_CMDEV_STATUS,
	    AMDIOMMU_CMDEVS_COMWAITINT);
}
92*0f5116d7SKonstantin Belousov 
/*
 * Disable the completion-wait interrupt by clearing its enable bit in
 * the cached control register value and writing it back.
 */
static void
amdiommu_disable_qi_intr(struct iommu_unit *iommu)
{
	struct amdiommu_unit *unit;

	unit = IOMMU2AMD(iommu);
	AMDIOMMU_ASSERT_LOCKED(unit);
	unit->hw_ctrl &= ~AMDIOMMU_CTRL_COMWINT_EN;
	amdiommu_write8(unit, AMDIOMMU_CTRL, unit->hw_ctrl);
}
103*0f5116d7SKonstantin Belousov 
/*
 * Publish the software tail pointer to the hardware tail register,
 * handing all descriptors emitted so far to the command processor.
 */
static void
amdiommu_cmd_advance_tail(struct iommu_unit *iommu)
{
	struct amdiommu_unit *unit;

	unit = IOMMU2AMD(iommu);
	AMDIOMMU_ASSERT_LOCKED(unit);
	amdiommu_write8(unit, AMDIOMMU_CMDBUF_TAIL, unit->x86c.inv_queue_tail);
}
113*0f5116d7SKonstantin Belousov 
/*
 * Reserve room for descr_count command descriptors in the command
 * ring.  If the cached free-space count is insufficient, re-read the
 * hardware head pointer to refresh it, and if the ring is still too
 * full, busy-wait for the hardware to consume entries.
 */
static void
amdiommu_cmd_ensure(struct iommu_unit *iommu, int descr_count)
{
	struct amdiommu_unit *unit;
	uint64_t head;
	int bytes;

	unit = IOMMU2AMD(iommu);
	AMDIOMMU_ASSERT_LOCKED(unit);
	bytes = descr_count << AMDIOMMU_CMD_SZ_SHIFT;
	for (;;) {
		if (bytes <= unit->x86c.inv_queue_avail)
			break;
		/* refill */
		head = amdiommu_read8(unit, AMDIOMMU_CMDBUF_HEAD);
		head &= AMDIOMMU_CMDPTR_MASK;
		/*
		 * Free space is the head-to-tail gap, less one
		 * descriptor so that a completely full ring is never
		 * confused with an empty one (head == tail).  When the
		 * tail is at or past the head the gap wraps, so add
		 * the ring size back in.
		 */
		unit->x86c.inv_queue_avail = head - unit->x86c.inv_queue_tail -
		    AMDIOMMU_CMD_SZ;
		if (head <= unit->x86c.inv_queue_tail)
			unit->x86c.inv_queue_avail += unit->x86c.inv_queue_size;
		if (bytes <= unit->x86c.inv_queue_avail)
			break;

		/*
		 * No space in the queue, do busy wait.  Hardware must
		 * make a progress.  But first advance the tail to
		 * inform the descriptor streamer about entries we
		 * might have already filled, otherwise they could
		 * clog the whole queue..
		 *
		 * See dmar_qi_invalidate_locked() for a discussion
		 * about data race prevention.
		 */
		amdiommu_cmd_advance_tail(iommu);
		unit->x86c.inv_queue_full++;
		cpu_spinwait();
	}
	unit->x86c.inv_queue_avail -= bytes;
}
153*0f5116d7SKonstantin Belousov 
/*
 * Copy one command descriptor into the ring at the software tail and
 * advance the tail, wrapping modulo the ring size.  The caller must
 * have reserved space with amdiommu_cmd_ensure(); the new tail is not
 * visible to hardware until amdiommu_cmd_advance_tail() is called.
 */
static void
amdiommu_cmd_emit(struct amdiommu_unit *unit, const struct
    amdiommu_cmd_generic *cmd)
{
	AMDIOMMU_ASSERT_LOCKED(unit);

	memcpy(unit->x86c.inv_queue + unit->x86c.inv_queue_tail, cmd,
	    sizeof(*cmd));
	unit->x86c.inv_queue_tail += AMDIOMMU_CMD_SZ;
	KASSERT(unit->x86c.inv_queue_tail <= unit->x86c.inv_queue_size,
	    ("tail overflow 0x%x 0x%jx", unit->x86c.inv_queue_tail,
	    (uintmax_t)unit->x86c.inv_queue_size));
	/* Wrap; the masking relies on the ring size being a power of 2. */
	unit->x86c.inv_queue_tail &= unit->x86c.inv_queue_size - 1;
}
168*0f5116d7SKonstantin Belousov 
/*
 * Emit a COMPLETION_WAIT command.  If memw is set, the command is
 * configured (s bit) to store seq to the unit's wait-descriptor memory
 * word once all prior commands complete; if intr is set, the i bit
 * requests a completion interrupt as well; fence sets the f bit.
 * NOTE(review): exact bit semantics come from the AMD IOMMU spec's
 * COMPLETION_WAIT encoding -- confirm against amd_reg.h / the spec.
 */
static void
amdiommu_cmd_emit_wait_descr(struct iommu_unit *iommu, uint32_t seq,
    bool intr, bool memw, bool fence)
{
	struct amdiommu_unit *unit;
	struct amdiommu_cmd_completion_wait c;

	unit = IOMMU2AMD(iommu);
	AMDIOMMU_ASSERT_LOCKED(unit);

	bzero(&c, sizeof(c));
	c.op = AMDIOMMU_CMD_COMPLETION_WAIT;
	if (memw) {
		uint32_t x;

		c.s = 1;
		/*
		 * Split the physical store address between the two
		 * address fields: address0 holds the low word shifted
		 * right by 3 (the address is 8-byte aligned), address1
		 * holds the high 32 bits.
		 */
		x = unit->x86c.inv_waitd_seq_hw_phys;
		x >>= 3;
		c.address0 = x;
		x = unit->x86c.inv_waitd_seq_hw_phys >> 32;
		c.address1 = x;
		c.data0 = seq;
	}
	if (fence)
		c.f = 1;
	if (intr)
		c.i = 1;
	amdiommu_cmd_emit(unit, (struct amdiommu_cmd_generic *)&c);
}
198*0f5116d7SKonstantin Belousov 
/*
 * Emit INVALIDATE_IOMMU_PAGES commands covering [base, base + size)
 * for the domain, one command per IOMMU page, followed by a wait
 * sequence descriptor (emitted only when emit_wait is true; pseq is
 * filled either way by iommu_qi_emit_wait_seq()).  The tail is not
 * advanced here; the caller pushes the batch to hardware.
 */
static void
amdiommu_qi_invalidate_emit(struct iommu_domain *adomain, iommu_gaddr_t base,
    iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait)
{
	struct amdiommu_domain *domain;
	struct amdiommu_unit *unit;
	struct amdiommu_cmd_invalidate_iommu_pages c;
	u_int isize;

	domain = IODOM2DOM(adomain);
	unit = domain->unit;
	AMDIOMMU_ASSERT_LOCKED(unit);
	bzero(&c, sizeof(c));
	c.op = AMDIOMMU_CMD_INVALIDATE_IOMMU_PAGES;
	c.domainid = domain->domain;
	isize = IOMMU_PAGE_SIZE; /* XXXKIB handle superpages */

	for (; size > 0; base += isize, size -= isize) {
		amdiommu_cmd_ensure(AMD2IOMMU(unit), 1);
		c.s = 0;
		c.pde = 1;
		c.address = base >> IOMMU_PAGE_SHIFT;
		amdiommu_cmd_emit(unit, (struct amdiommu_cmd_generic *)&c);
	}
	iommu_qi_emit_wait_seq(AMD2IOMMU(unit), pseq, emit_wait);
}
225*0f5116d7SKonstantin Belousov 
/*
 * Queue a single INVALIDATE_IOMMU_PAGES command that flushes all
 * translations for the domain.  Does not advance the tail or wait for
 * completion; the caller is responsible for both.
 */
void
amdiommu_qi_invalidate_all_pages_locked_nowait(struct amdiommu_domain *domain)
{
	struct amdiommu_unit *unit;
	struct amdiommu_cmd_invalidate_iommu_pages c;

	unit = domain->unit;
	AMDIOMMU_ASSERT_LOCKED(unit);
	bzero(&c, sizeof(c));
	c.op = AMDIOMMU_CMD_INVALIDATE_IOMMU_PAGES;
	c.domainid = domain->domain;

	/*
	 * The magic specified in the note for INVALIDATE_IOMMU_PAGES
	 * description.
	 */
	c.s = 1;
	c.pde = 1;
	c.address = 0x7ffffffffffff;

	amdiommu_cmd_ensure(AMD2IOMMU(unit), 1);
	amdiommu_cmd_emit(unit, (struct amdiommu_cmd_generic *)&c);
}
249*0f5116d7SKonstantin Belousov 
/*
 * Emit a completion-wait sequence descriptor, push the queue to
 * hardware, and block until the hardware reports the sequence as
 * completed.  Used to make previously queued invalidations synchronous.
 */
void
amdiommu_qi_invalidate_wait_sync(struct iommu_unit *iommu)
{
	struct iommu_qi_genseq gseq;

	amdiommu_cmd_ensure(iommu, 1);
	iommu_qi_emit_wait_seq(iommu, &gseq, true);
	/* Register ourselves as a waiter before making the tail visible. */
	IOMMU2AMD(iommu)->x86c.inv_seq_waiters++;
	amdiommu_cmd_advance_tail(iommu);
	iommu_qi_wait_for_seq(iommu, &gseq, true);
}
261*0f5116d7SKonstantin Belousov 
/*
 * Queue an INVALIDATE_DEVTAB_ENTRY command for the context's device
 * (identified by its RID).  Does not advance the tail or wait.
 */
void
amdiommu_qi_invalidate_ctx_locked_nowait(struct amdiommu_ctx *ctx)
{
	struct amdiommu_cmd_invalidate_devtab_entry c;

	amdiommu_cmd_ensure(AMD2IOMMU(CTX2AMD(ctx)), 1);
	bzero(&c, sizeof(c));
	c.op = AMDIOMMU_CMD_INVALIDATE_DEVTAB_ENTRY;
	c.devid = ctx->context.rid;
	amdiommu_cmd_emit(CTX2AMD(ctx), (struct amdiommu_cmd_generic *)&c);
}
273*0f5116d7SKonstantin Belousov 
274*0f5116d7SKonstantin Belousov 
/*
 * Synchronous variant: queue the device table entry invalidation for
 * the context, then wait for the hardware to complete it.
 */
void
amdiommu_qi_invalidate_ctx_locked(struct amdiommu_ctx *ctx)
{
	amdiommu_qi_invalidate_ctx_locked_nowait(ctx);
	amdiommu_qi_invalidate_wait_sync(AMD2IOMMU(CTX2AMD(ctx)));
}
281*0f5116d7SKonstantin Belousov 
/*
 * Queue an INVALIDATE_INTERRUPT_TABLE command for the given device id.
 * Does not advance the tail or wait for completion.
 */
void
amdiommu_qi_invalidate_ir_locked_nowait(struct amdiommu_unit *unit,
    uint16_t devid)
{
	struct amdiommu_cmd_invalidate_interrupt_table c;

	AMDIOMMU_ASSERT_LOCKED(unit);

	amdiommu_cmd_ensure(AMD2IOMMU(unit), 1);
	bzero(&c, sizeof(c));
	c.op = AMDIOMMU_CMD_INVALIDATE_INTERRUPT_TABLE;
	c.devid = devid;
	amdiommu_cmd_emit(unit, (struct amdiommu_cmd_generic *)&c);
}
296*0f5116d7SKonstantin Belousov 
/*
 * Synchronous variant: queue the interrupt table invalidation for
 * devid, then wait for the hardware to complete it.
 */
void
amdiommu_qi_invalidate_ir_locked(struct amdiommu_unit *unit, uint16_t devid)
{
	amdiommu_qi_invalidate_ir_locked_nowait(unit, devid);
	amdiommu_qi_invalidate_wait_sync(AMD2IOMMU(unit));
}
303*0f5116d7SKonstantin Belousov 
/*
 * Taskqueue handler run after a completion-wait interrupt: drain the
 * deferred TLB flush entries via the shared QI machinery, then wake up
 * any threads sleeping in iommu_qi_wait_for_seq().
 */
static void
amdiommu_qi_task(void *arg, int pending __unused)
{
	struct amdiommu_unit *unit;

	unit = IOMMU2AMD(arg);
	iommu_qi_drain_tlb_flush(AMD2IOMMU(unit));

	AMDIOMMU_LOCK(unit);
	if (unit->x86c.inv_seq_waiters > 0)
		wakeup(&unit->x86c.inv_seq_waiters);
	AMDIOMMU_UNLOCK(unit);
}
317*0f5116d7SKonstantin Belousov 
/*
 * Initialize the command (invalidation) queue for the unit: set the
 * queue size parameters, run the common x86 QI setup (which allocates
 * inv_queue), install the AMD-specific QI method pointers, program the
 * command buffer base register with the queue's physical address and
 * encoded length, and enable the command buffer and its interrupt.
 * Returns 0 (currently cannot fail).
 */
int
amdiommu_init_cmd(struct amdiommu_unit *unit)
{
	uint64_t qi_sz, rv;

	unit->x86c.qi_buf_maxsz = ilog2(AMDIOMMU_CMDBUF_MAX / PAGE_SIZE);
	unit->x86c.qi_cmd_sz = AMDIOMMU_CMD_SZ;
	iommu_qi_common_init(AMD2IOMMU(unit), amdiommu_qi_task);
	get_x86_iommu()->qi_ensure = amdiommu_cmd_ensure;
	get_x86_iommu()->qi_emit_wait_descr = amdiommu_cmd_emit_wait_descr;
	get_x86_iommu()->qi_advance_tail = amdiommu_cmd_advance_tail;
	get_x86_iommu()->qi_invalidate_emit = amdiommu_qi_invalidate_emit;

	/* Physical address of the queue allocated by the common init. */
	rv = pmap_kextract((uintptr_t)unit->x86c.inv_queue);

	/*
	 * See the description of the ComLen encoding for Command
	 * buffer Base Address Register.
	 */
	qi_sz = ilog2(unit->x86c.inv_queue_size / PAGE_SIZE) + 8;
	rv |= qi_sz << AMDIOMMU_CMDBUF_BASE_SZSHIFT;

	AMDIOMMU_LOCK(unit);
	amdiommu_write8(unit, AMDIOMMU_CMDBUF_BASE, rv);
	amdiommu_enable_cmdbuf(unit);
	amdiommu_enable_qi_intr(AMD2IOMMU(unit));
	AMDIOMMU_UNLOCK(unit);

	return (0);
}
348*0f5116d7SKonstantin Belousov 
/*
 * Teardown callback passed to iommu_qi_common_fini(): disable the
 * command buffer and the completion-wait interrupt on the unit.
 */
static void
amdiommu_fini_cmd_helper(struct iommu_unit *iommu)
{
	amdiommu_disable_cmdbuf(IOMMU2AMD(iommu));
	amdiommu_disable_qi_intr(iommu);
}
355*0f5116d7SKonstantin Belousov 
/*
 * Tear down the command queue: the common QI fini invokes the helper
 * above to quiesce the hardware, then releases common QI resources.
 */
void
amdiommu_fini_cmd(struct amdiommu_unit *unit)
{
	iommu_qi_common_fini(AMD2IOMMU(unit), amdiommu_fini_cmd_helper);
}
361