/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/x86_iommu.h>
#include <x86/iommu/intel_dmar.h>

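/*
 * Enable queued invalidation: set the QIE bit in the global command
 * register and busy-wait until the hardware reports QIES in the global
 * status register.  The DMAR_WAIT_UNTIL() loop sets 'error', which is
 * returned to the caller.  dmar_disable_qi() below is the symmetric
 * operation.
 */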
static int
dmar_enable_qi(struct dmar_unit *unit)
{
	int error;

	DMAR_ASSERT_LOCKED(unit);
	unit->hw_gcmd |= DMAR_GCMD_QIE;
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
	DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
	    != 0));
	return (error);
}

static int
dmar_disable_qi(struct dmar_unit *unit)
{
	int error;

	DMAR_ASSERT_LOCKED(unit);
	unit->hw_gcmd &= ~DMAR_GCMD_QIE;
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
	DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES)
	    == 0));
	return (error);
}

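/*
 * Publish the software tail pointer to the hardware.  Descriptors
 * written into the invalidation queue are not visible to the queue
 * streamer until DMAR_IQT_REG is updated.
 */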
static void
dmar_qi_advance_tail(struct iommu_unit *iommu)
{
	struct dmar_unit *unit;

	unit = IOMMU2DMAR(iommu);
	DMAR_ASSERT_LOCKED(unit);
	dmar_write4(unit, DMAR_IQT_REG, unit->x86c.inv_queue_tail);
}

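/*
 * Reserve space for descr_count descriptors in the circular
 * invalidation queue, busy-waiting for the hardware to consume
 * already-submitted descriptors if the queue is currently full.
 */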
static void
dmar_qi_ensure(struct iommu_unit *iommu, int descr_count)
{
	struct dmar_unit *unit;
	uint32_t head;
	int bytes;

	unit = IOMMU2DMAR(iommu);
	DMAR_ASSERT_LOCKED(unit);
	bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT;
	for (;;) {
		if (bytes <= unit->x86c.inv_queue_avail)
			break;
		/* refill */
		head = dmar_read4(unit, DMAR_IQH_REG);
		head &= DMAR_IQH_MASK;
		unit->x86c.inv_queue_avail = head - unit->x86c.inv_queue_tail -
		    DMAR_IQ_DESCR_SZ;
		if (head <= unit->x86c.inv_queue_tail)
			unit->x86c.inv_queue_avail += unit->x86c.inv_queue_size;
		if (bytes <= unit->x86c.inv_queue_avail)
			break;

		/*
		 * No space in the queue, do a busy wait.  The hardware
		 * must make progress.  But first advance the tail to
		 * inform the descriptor streamer about entries we
		 * might have already filled, otherwise they could
		 * clog the whole queue.
		 *
		 * See dmar_qi_invalidate_locked() for a discussion
		 * about data race prevention.
		 */
		dmar_qi_advance_tail(DMAR2IOMMU(unit));
		unit->x86c.inv_queue_full++;
		cpu_spinwait();
	}
	unit->x86c.inv_queue_avail -= bytes;
}

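/*
 * Copy one 128-bit descriptor into the invalidation queue as two
 * 64-bit stores, advancing and wrapping the software tail pointer
 * after each half.  On LP64 the halves are written with
 * atomic_store_64() so each 64-bit word reaches the queue as a single
 * store.
 */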
static void
dmar_qi_emit(struct dmar_unit *unit, uint64_t data1, uint64_t data2)
{

	DMAR_ASSERT_LOCKED(unit);
#ifdef __LP64__
	atomic_store_64((uint64_t *)(unit->x86c.inv_queue +
	    unit->x86c.inv_queue_tail), data1);
#else
	*(volatile uint64_t *)(unit->x86c.inv_queue +
	    unit->x86c.inv_queue_tail) = data1;
#endif
	unit->x86c.inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
	KASSERT(unit->x86c.inv_queue_tail <= unit->x86c.inv_queue_size,
	    ("tail overflow 0x%x 0x%jx", unit->x86c.inv_queue_tail,
	    (uintmax_t)unit->x86c.inv_queue_size));
	unit->x86c.inv_queue_tail &= unit->x86c.inv_queue_size - 1;
#ifdef __LP64__
	atomic_store_64((uint64_t *)(unit->x86c.inv_queue +
	    unit->x86c.inv_queue_tail), data2);
#else
	*(volatile uint64_t *)(unit->x86c.inv_queue +
	    unit->x86c.inv_queue_tail) = data2;
#endif
	unit->x86c.inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
	KASSERT(unit->x86c.inv_queue_tail <= unit->x86c.inv_queue_size,
	    ("tail overflow 0x%x 0x%jx", unit->x86c.inv_queue_tail,
	    (uintmax_t)unit->x86c.inv_queue_size));
	unit->x86c.inv_queue_tail &= unit->x86c.inv_queue_size - 1;
}

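/*
 * Emit an invalidation wait descriptor carrying the sequence number
 * 'seq'.  The flags select an interrupt on completion (IF), a status
 * write of the sequence number to inv_waitd_seq_hw_phys (SW), and
 * fencing of the descriptor (FN).
 */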
static void
dmar_qi_emit_wait_descr(struct iommu_unit *iommu, uint32_t seq, bool intr,
    bool memw, bool fence)
{
	struct dmar_unit *unit;

	unit = IOMMU2DMAR(iommu);
	DMAR_ASSERT_LOCKED(unit);
	dmar_qi_emit(unit, DMAR_IQ_DESCR_WAIT_ID |
	    (intr ? DMAR_IQ_DESCR_WAIT_IF : 0) |
	    (memw ? DMAR_IQ_DESCR_WAIT_SW : 0) |
	    (fence ? DMAR_IQ_DESCR_WAIT_FN : 0) |
	    (memw ? DMAR_IQ_DESCR_WAIT_SD(seq) : 0),
	    memw ? unit->x86c.inv_waitd_seq_hw_phys : 0);
}

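/*
 * Queue IOTLB page-range invalidation descriptors covering
 * [base, base + size) for the domain, splitting the range into
 * naturally aligned chunks as computed by calc_am(), and optionally
 * append a wait descriptor whose sequence is recorded in *pseq.
 */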
static void
dmar_qi_invalidate_emit(struct iommu_domain *idomain, iommu_gaddr_t base,
    iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait)
{
	struct dmar_unit *unit;
	struct dmar_domain *domain;
	iommu_gaddr_t isize;
	int am;

	domain = __containerof(idomain, struct dmar_domain, iodom);
	unit = domain->dmar;
	DMAR_ASSERT_LOCKED(unit);
	for (; size > 0; base += isize, size -= isize) {
		am = calc_am(unit, base, size, &isize);
		dmar_qi_ensure(DMAR2IOMMU(unit), 1);
		dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV |
		    DMAR_IQ_DESCR_IOTLB_PAGE | DMAR_IQ_DESCR_IOTLB_DW |
		    DMAR_IQ_DESCR_IOTLB_DR |
		    DMAR_IQ_DESCR_IOTLB_DID(domain->domain),
		    base | am);
	}
	iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), pseq, emit_wait);
}

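/*
 * Emit a single global invalidation descriptor followed by a wait
 * descriptor, then busy-wait for its completion.  Used by the
 * context-cache, IOTLB, and interrupt-entry-cache global invalidation
 * helpers below.
 */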
static void
dmar_qi_invalidate_glob_impl(struct dmar_unit *unit, uint64_t data1)
{
	struct iommu_qi_genseq gseq;

	DMAR_ASSERT_LOCKED(unit);
	dmar_qi_ensure(DMAR2IOMMU(unit), 2);
	dmar_qi_emit(unit, data1, 0);
	iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), &gseq, true);
	/* See dmar_qi_invalidate_sync(). */
	unit->x86c.inv_seq_waiters++;
	dmar_qi_advance_tail(DMAR2IOMMU(unit));
	iommu_qi_wait_for_seq(DMAR2IOMMU(unit), &gseq, false);
}

void
dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
{
	dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_CTX_INV |
	    DMAR_IQ_DESCR_CTX_GLOB);
}

void
dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
{
	dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_IOTLB_INV |
	    DMAR_IQ_DESCR_IOTLB_GLOB | DMAR_IQ_DESCR_IOTLB_DW |
	    DMAR_IQ_DESCR_IOTLB_DR);
}

void
dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
{
	dmar_qi_invalidate_glob_impl(unit, DMAR_IQ_DESCR_IEC_INV);
}

void
dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
{
	struct iommu_qi_genseq gseq;
	u_int c, l;

	DMAR_ASSERT_LOCKED(unit);
	KASSERT(start < unit->irte_cnt && start < start + cnt &&
	    start + cnt <= unit->irte_cnt,
	    ("inv iec overflow %d %d %d", unit->irte_cnt, start, cnt));
	for (; cnt > 0; cnt -= c, start += c) {
		l = ffs(start | cnt) - 1;
		c = 1 << l;
		dmar_qi_ensure(DMAR2IOMMU(unit), 1);
		dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV |
		    DMAR_IQ_DESCR_IEC_IDX | DMAR_IQ_DESCR_IEC_IIDX(start) |
		    DMAR_IQ_DESCR_IEC_IM(l), 0);
	}
	dmar_qi_ensure(DMAR2IOMMU(unit), 1);
	iommu_qi_emit_wait_seq(DMAR2IOMMU(unit), &gseq, true);

	/*
	 * Since iommu_qi_wait_for_seq() will not sleep, this increment's
	 * placement relative to advancing the tail doesn't matter.
	 */
	unit->x86c.inv_seq_waiters++;

	dmar_qi_advance_tail(DMAR2IOMMU(unit));

	/*
	 * The caller of this function, in particular
	 * dmar_ir_program_irte(), may run in a context where sleeping
	 * is forbidden (in fact, the intr_table_lock mutex may be
	 * held, locked from intr_shuffle_irqs()).  Wait for the
	 * invalidation completion using a busy wait.
	 *
	 * The impact on the interrupt input setup code is small; the
	 * expected overhead is comparable with a chipset register
	 * read.  It is more harmful for parallel DMA operations, since
	 * we own the dmar unit lock until the whole invalidation queue
	 * is processed, which includes requests possibly issued before
	 * our request.
	 */
	iommu_qi_wait_for_seq(DMAR2IOMMU(unit), &gseq, true);
}

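/*
 * Interrupt filter for invalidation completion events.  Defers the
 * actual processing to the unit's taskqueue.
 */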
int
dmar_qi_intr(void *arg)
{
	struct dmar_unit *unit;

	unit = IOMMU2DMAR((struct iommu_unit *)arg);
	KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled",
	    unit->iommu.unit));
	taskqueue_enqueue(unit->x86c.qi_taskqueue, &unit->x86c.qi_task);
	return (FILTER_HANDLED);
}

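/*
 * Taskqueue handler: drain completed invalidation requests, clear the
 * invalidation wait completion status, and wake up threads sleeping in
 * iommu_qi_wait_for_seq().
 */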
static void
dmar_qi_task(void *arg, int pending __unused)
{
	struct dmar_unit *unit;
	uint32_t ics;

	unit = IOMMU2DMAR(arg);
	iommu_qi_drain_tlb_flush(DMAR2IOMMU(unit));

	/*
	 * Request an interrupt on the completion of the next invalidation
	 * wait descriptor with the IF field set.
	 */
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);

		/*
		 * Drain a second time in case the DMAR processes an entry
		 * after the first call and before clearing DMAR_ICS_IWC.
		 * Otherwise, such entries will linger until a later entry
		 * that requests an interrupt is processed.
		 */
		iommu_qi_drain_tlb_flush(DMAR2IOMMU(unit));
	}

	if (unit->x86c.inv_seq_waiters > 0) {
		/*
		 * Acquire the DMAR lock so that wakeup() is called only after
		 * the waiter is sleeping.
		 */
		DMAR_LOCK(unit);
		wakeup(&unit->x86c.inv_seq_waiters);
		DMAR_UNLOCK(unit);
	}
}

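/*
 * Set up queued invalidation for the unit: initialize the common
 * invalidation queue state, register the DMAR-specific QI callbacks,
 * program the invalidation queue address register, and enable QI and
 * its completion interrupt.  QI is skipped when the hardware lacks QI
 * support or advertises caching mode (DMAR_CAP_CM), and it can be
 * turned off with the hw.dmar.qi tunable.
 */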
int
dmar_init_qi(struct dmar_unit *unit)
{
	uint64_t iqa;
	uint32_t ics;
	u_int qi_sz;

	if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
		return (0);
	unit->qi_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
	if (!unit->qi_enabled)
		return (0);

	unit->x86c.qi_buf_maxsz = DMAR_IQA_QS_MAX;
	unit->x86c.qi_cmd_sz = DMAR_IQ_DESCR_SZ;
	iommu_qi_common_init(DMAR2IOMMU(unit), dmar_qi_task);
	get_x86_iommu()->qi_ensure = dmar_qi_ensure;
	get_x86_iommu()->qi_emit_wait_descr = dmar_qi_emit_wait_descr;
	get_x86_iommu()->qi_advance_tail = dmar_qi_advance_tail;
	get_x86_iommu()->qi_invalidate_emit = dmar_qi_invalidate_emit;

	qi_sz = ilog2(unit->x86c.inv_queue_size / PAGE_SIZE);

	DMAR_LOCK(unit);
	dmar_write8(unit, DMAR_IQT_REG, 0);
	iqa = pmap_kextract((uintptr_t)unit->x86c.inv_queue);
	iqa |= qi_sz;
	dmar_write8(unit, DMAR_IQA_REG, iqa);
	dmar_enable_qi(unit);
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
	dmar_enable_qi_intr(DMAR2IOMMU(unit));
	DMAR_UNLOCK(unit);

	return (0);
}

static void
dmar_fini_qi_helper(struct iommu_unit *iommu)
{
	dmar_disable_qi_intr(iommu);
	dmar_disable_qi(IOMMU2DMAR(iommu));
}

void
dmar_fini_qi(struct dmar_unit *unit)
{
	if (!unit->qi_enabled)
		return;
	iommu_qi_common_fini(DMAR2IOMMU(unit), dmar_fini_qi_helper);
	unit->qi_enabled = 0;
}

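/*
 * Unmask (enable) and mask (disable) the invalidation event interrupt
 * by toggling the IM bit in the invalidation event control register.
 */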
void
dmar_enable_qi_intr(struct iommu_unit *iommu)
{
	struct dmar_unit *unit;
	uint32_t iectl;

	unit = IOMMU2DMAR(iommu);
	DMAR_ASSERT_LOCKED(unit);
	KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
	    unit->iommu.unit));
	iectl = dmar_read4(unit, DMAR_IECTL_REG);
	iectl &= ~DMAR_IECTL_IM;
	dmar_write4(unit, DMAR_IECTL_REG, iectl);
}

void
dmar_disable_qi_intr(struct iommu_unit *iommu)
{
	struct dmar_unit *unit;
	uint32_t iectl;

	unit = IOMMU2DMAR(iommu);
	DMAR_ASSERT_LOCKED(unit);
	KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
	    unit->iommu.unit));
	iectl = dmar_read4(unit, DMAR_IECTL_REG);
	dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
}