// SPDX-License-Identifier: GPL-2.0-only
#include <stdint.h>
#include <unistd.h>

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci_ids.h>
#include <linux/sizes.h>

#include <vfio_util.h>

#include "hw.h"
#include "registers.h"

#define IOAT_DMACOUNT_MAX UINT16_MAX

struct ioat_state {
	/* Single descriptor used to issue DMA memcpy operations */
	struct ioat_dma_descriptor desc;

	/* Copy buffers used by ioat_send_msi() to generate an interrupt. */
	u64 send_msi_src;
	u64 send_msi_dst;
};

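/*
 * The driver's per-device state lives at the start of the shared driver
 * region. Since to_iova() is later applied to fields of struct ioat_state,
 * that region is expected to be DMA-mapped and therefore visible to the
 * device.
 */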
static inline struct ioat_state *to_ioat_state(struct vfio_pci_device *device)
{
	return device->driver.region.vaddr;
}

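/*
 * Per-channel registers for channel 0 start IOAT_CHANNEL_MMIO_SIZE bytes
 * into BAR 0, past the device-wide (common) register block.
 */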
static inline void *ioat_channel_registers(struct vfio_pci_device *device)
{
	return device->bars[0].vaddr + IOAT_CHANNEL_MMIO_SIZE;
}

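/*
 * Claim the device only if it is the Intel Skylake-X IOAT DMA engine and it
 * reports a hardware version this driver knows how to program (3.2 or 3.3).
 */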
static int ioat_probe(struct vfio_pci_device *device)
{
	u8 version;
	int r;

	if (!vfio_pci_device_match(device, PCI_VENDOR_ID_INTEL,
				   PCI_DEVICE_ID_INTEL_IOAT_SKX))
		return -EINVAL;

	VFIO_ASSERT_NOT_NULL(device->bars[0].vaddr);

	version = readb(device->bars[0].vaddr + IOAT_VER_OFFSET);
	switch (version) {
	case IOAT_VER_3_2:
	case IOAT_VER_3_3:
		r = 0;
		break;
	default:
		printf("ioat: Unsupported version: 0x%x\n", version);
		r = -EINVAL;
	}
	return r;
}

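/*
 * The low bits of CHANSTS encode the channel state (e.g. DONE, SUSPENDED,
 * HALTED); mask off everything else.
 */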
static u64 ioat_channel_status(void *bar)
{
	return readq(bar + IOAT_CHANSTS_OFFSET) & IOAT_CHANSTS_STATUS;
}

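/*
 * The error status registers are assumed to be write-1-to-clear: read the
 * currently latched errors and write the same value back to acknowledge them.
 */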
static void ioat_clear_errors(struct vfio_pci_device *device)
{
	void *registers = ioat_channel_registers(device);
	u32 errors;

	errors = vfio_pci_config_readl(device, IOAT_PCI_CHANERR_INT_OFFSET);
	vfio_pci_config_writel(device, IOAT_PCI_CHANERR_INT_OFFSET, errors);

	errors = vfio_pci_config_readl(device, IOAT_PCI_DMAUNCERRSTS_OFFSET);
	vfio_pci_config_writel(device, IOAT_PCI_DMAUNCERRSTS_OFFSET, errors);

	errors = readl(registers + IOAT_CHANERR_OFFSET);
	writel(errors, registers + IOAT_CHANERR_OFFSET);
}

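/*
 * Issue a channel reset and poll (up to roughly 5 seconds) for the RESET bit
 * to self-clear. The channel is expected to be HALTED once the reset
 * completes.
 */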
static void ioat_reset(struct vfio_pci_device *device)
{
	void *registers = ioat_channel_registers(device);
	u32 sleep_ms = 1, attempts = 5000 / sleep_ms;
	u8 chancmd;

	ioat_clear_errors(device);

	writeb(IOAT_CHANCMD_RESET, registers + IOAT2_CHANCMD_OFFSET);

	for (;;) {
		chancmd = readb(registers + IOAT2_CHANCMD_OFFSET);
		if (!(chancmd & IOAT_CHANCMD_RESET))
			break;

		VFIO_ASSERT_GT(--attempts, 0);
		usleep(sleep_ms * 1000);
	}

	VFIO_ASSERT_EQ(ioat_channel_status(registers), IOAT_CHANSTS_HALTED);
}

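/*
 * Bring the device into a usable state: enable memory decode and bus
 * mastering, reset the channel, route channel interrupts through MSI-X, and
 * advertise the driver's copy limits. XFERCAP encodes the maximum transfer
 * size as a power of two, e.g. a value of 20 would mean 1 MiB.
 */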
static void ioat_init(struct vfio_pci_device *device)
{
	struct ioat_state *ioat = to_ioat_state(device);
	u8 intrctrl;

	VFIO_ASSERT_GE(device->driver.region.size, sizeof(*ioat));

	vfio_pci_config_writew(device, PCI_COMMAND,
			       PCI_COMMAND_MEMORY |
			       PCI_COMMAND_MASTER |
			       PCI_COMMAND_INTX_DISABLE);

	ioat_reset(device);

	/* Enable the use of MSI-X interrupts for channel interrupts. */
	intrctrl = IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	writeb(intrctrl, device->bars[0].vaddr + IOAT_INTRCTRL_OFFSET);

	vfio_pci_msix_enable(device, 0, device->msix_info.count);

	device->driver.msi = 0;
	device->driver.max_memcpy_size =
		1UL << readb(device->bars[0].vaddr + IOAT_XFERCAP_OFFSET);
	device->driver.max_memcpy_count = IOAT_DMACOUNT_MAX;
}

static void ioat_remove(struct vfio_pci_device *device)
{
	ioat_reset(device);
	vfio_pci_msix_disable(device);
}

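/*
 * Dump the error registers for debugging and reset the channel so it can be
 * reused after a failed copy.
 */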
static void ioat_handle_error(struct vfio_pci_device *device)
{
	void *registers = ioat_channel_registers(device);

	printf("Error detected during memcpy operation!\n"
	       "  CHANERR: 0x%x\n"
	       "  CHANERR_INT: 0x%x\n"
	       "  DMAUNCERRSTS: 0x%x\n",
	       readl(registers + IOAT_CHANERR_OFFSET),
	       vfio_pci_config_readl(device, IOAT_PCI_CHANERR_INT_OFFSET),
	       vfio_pci_config_readl(device, IOAT_PCI_DMAUNCERRSTS_OFFSET));

	ioat_reset(device);
}

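/*
 * Busy-poll the channel status until all outstanding copies complete. On
 * HALTED, report the error and fail. On success the channel is suspended,
 * presumably so the next __ioat_memcpy_start() can safely reprogram the
 * chain address and DMA count.
 */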
static int ioat_memcpy_wait(struct vfio_pci_device *device)
{
	void *registers = ioat_channel_registers(device);
	u64 status;
	int r = 0;

	/* Wait until all operations complete. */
	for (;;) {
		status = ioat_channel_status(registers);
		if (status == IOAT_CHANSTS_DONE)
			break;

		if (status == IOAT_CHANSTS_HALTED) {
			ioat_handle_error(device);
			return -1;
		}
	}

	/* Put the channel into the SUSPENDED state. */
	writeb(IOAT_CHANCMD_SUSPEND, registers + IOAT2_CHANCMD_OFFSET);
	for (;;) {
		status = ioat_channel_status(registers);
		if (status == IOAT_CHANSTS_SUSPENDED)
			break;
	}

	return r;
}

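/*
 * Program a single copy descriptor that links back to itself, point the
 * channel's chain address at it, and write @count to DMACOUNT so the device
 * processes the descriptor @count times. When @interrupt is true the
 * descriptor is flagged to raise an interrupt on completion.
 */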
static void __ioat_memcpy_start(struct vfio_pci_device *device,
				iova_t src, iova_t dst, u64 size,
				u16 count, bool interrupt)
{
	void *registers = ioat_channel_registers(device);
	struct ioat_state *ioat = to_ioat_state(device);
	u64 desc_iova;
	u16 chanctrl;

	desc_iova = to_iova(device, &ioat->desc);
	ioat->desc = (struct ioat_dma_descriptor) {
		.ctl_f.op = IOAT_OP_COPY,
		.ctl_f.int_en = interrupt,
		.src_addr = src,
		.dst_addr = dst,
		.size = size,
		.next = desc_iova,
	};

	/* Tell the device the address of the descriptor. */
	writeq(desc_iova, registers + IOAT2_CHAINADDR_OFFSET);

	/* (Re)Enable the channel interrupt and abort on any errors */
	chanctrl = IOAT_CHANCTRL_INT_REARM | IOAT_CHANCTRL_ANY_ERR_ABORT_EN;
	writew(chanctrl, registers + IOAT_CHANCTRL_OFFSET);

	/* Kick off @count DMA copy operation(s). */
	writew(count, registers + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat_memcpy_start(struct vfio_pci_device *device,
			      iova_t src, iova_t dst, u64 size,
			      u64 count)
{
	__ioat_memcpy_start(device, src, dst, size, count, false);
}

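/*
 * Generate an MSI-X interrupt on the driver's vector (vector 0, per
 * ioat_init()) by issuing a single interrupt-enabled copy between the two
 * scratch buffers in ioat_state.
 */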
static void ioat_send_msi(struct vfio_pci_device *device)
{
	struct ioat_state *ioat = to_ioat_state(device);

	__ioat_memcpy_start(device,
			    to_iova(device, &ioat->send_msi_src),
			    to_iova(device, &ioat->send_msi_dst),
			    sizeof(ioat->send_msi_src), 1, true);

	VFIO_ASSERT_EQ(ioat_memcpy_wait(device), 0);
}

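/* Driver entry points exported to the VFIO selftest library (see vfio_util.h). */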
const struct vfio_pci_driver_ops ioat_ops = {
	.name = "ioat",
	.probe = ioat_probe,
	.init = ioat_init,
	.remove = ioat_remove,
	.memcpy_start = ioat_memcpy_start,
	.memcpy_wait = ioat_memcpy_wait,
	.send_msi = ioat_send_msi,
};