1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <stdint.h>
3 #include <unistd.h>
4
5 #include <linux/errno.h>
6 #include <linux/io.h>
7 #include <linux/pci_ids.h>
8 #include <linux/sizes.h>
9
10 #include <libvfio.h>
11
12 #include "hw.h"
13 #include "registers.h"
14
15 #define IOAT_DMACOUNT_MAX UINT16_MAX
16
/*
 * Per-device driver state. Lives at the start of the device's driver
 * region (device->driver.region), which is IOVA-mapped so the device can
 * DMA to/from these fields (see to_iova() uses below).
 */
struct ioat_state {
	/* Single descriptor used to issue DMA memcpy operations */
	struct ioat_dma_descriptor desc;

	/* Copy buffers used by ioat_send_msi() to generate an interrupt. */
	u64 send_msi_src;
	u64 send_msi_dst;
};
25
to_ioat_state(struct vfio_pci_device * device)26 static inline struct ioat_state *to_ioat_state(struct vfio_pci_device *device)
27 {
28 return device->driver.region.vaddr;
29 }
30
ioat_channel_registers(struct vfio_pci_device * device)31 static inline void *ioat_channel_registers(struct vfio_pci_device *device)
32 {
33 return device->bars[0].vaddr + IOAT_CHANNEL_MMIO_SIZE;
34 }
35
ioat_probe(struct vfio_pci_device * device)36 static int ioat_probe(struct vfio_pci_device *device)
37 {
38 u8 version;
39 int r;
40
41 if (!vfio_pci_device_match(device, PCI_VENDOR_ID_INTEL,
42 PCI_DEVICE_ID_INTEL_IOAT_SKX))
43 return -EINVAL;
44
45 VFIO_ASSERT_NOT_NULL(device->bars[0].vaddr);
46
47 version = readb(device->bars[0].vaddr + IOAT_VER_OFFSET);
48 switch (version) {
49 case IOAT_VER_3_2:
50 case IOAT_VER_3_3:
51 r = 0;
52 break;
53 default:
54 dev_err(device, "ioat: Unsupported version: 0x%x\n", version);
55 r = -EINVAL;
56 }
57 return r;
58 }
59
ioat_channel_status(void * bar)60 static u64 ioat_channel_status(void *bar)
61 {
62 return readq(bar + IOAT_CHANSTS_OFFSET) & IOAT_CHANSTS_STATUS;
63 }
64
ioat_clear_errors(struct vfio_pci_device * device)65 static void ioat_clear_errors(struct vfio_pci_device *device)
66 {
67 void *registers = ioat_channel_registers(device);
68 u32 errors;
69
70 errors = vfio_pci_config_readl(device, IOAT_PCI_CHANERR_INT_OFFSET);
71 vfio_pci_config_writel(device, IOAT_PCI_CHANERR_INT_OFFSET, errors);
72
73 errors = vfio_pci_config_readl(device, IOAT_PCI_DMAUNCERRSTS_OFFSET);
74 vfio_pci_config_writel(device, IOAT_PCI_CHANERR_INT_OFFSET, errors);
75
76 errors = readl(registers + IOAT_CHANERR_OFFSET);
77 writel(errors, registers + IOAT_CHANERR_OFFSET);
78 }
79
ioat_reset(struct vfio_pci_device * device)80 static void ioat_reset(struct vfio_pci_device *device)
81 {
82 void *registers = ioat_channel_registers(device);
83 u32 sleep_ms = 1, attempts = 5000 / sleep_ms;
84 u8 chancmd;
85
86 ioat_clear_errors(device);
87
88 writeb(IOAT_CHANCMD_RESET, registers + IOAT2_CHANCMD_OFFSET);
89
90 for (;;) {
91 chancmd = readb(registers + IOAT2_CHANCMD_OFFSET);
92 if (!(chancmd & IOAT_CHANCMD_RESET))
93 break;
94
95 VFIO_ASSERT_GT(--attempts, 0);
96 usleep(sleep_ms * 1000);
97 }
98
99 VFIO_ASSERT_EQ(ioat_channel_status(registers), IOAT_CHANSTS_HALTED);
100 }
101
/*
 * One-time channel setup: enable PCI memory decoding and bus mastering,
 * reset the channel, route channel interrupts to MSI-X, and publish the
 * device's memcpy limits into the generic driver state.
 */
static void ioat_init(struct vfio_pci_device *device)
{
	struct ioat_state *ioat = to_ioat_state(device);
	u8 intrctrl;

	/* The driver region must be big enough to hold our private state. */
	VFIO_ASSERT_GE(device->driver.region.size, sizeof(*ioat));

	vfio_pci_config_writew(device, PCI_COMMAND,
			       PCI_COMMAND_MEMORY |
			       PCI_COMMAND_MASTER |
			       PCI_COMMAND_INTX_DISABLE);

	ioat_reset(device);

	/* Enable the use of MSI-X interrupts for channel interrupts. */
	intrctrl = IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	writeb(intrctrl, device->bars[0].vaddr + IOAT_INTRCTRL_OFFSET);

	vfio_pci_msix_enable(device, 0, device->msix_info.count);

	/* Interrupts are expected on vector 0 — see ioat_send_msi(). */
	device->driver.msi = 0;
	/* XFERCAP holds log2 of the maximum per-descriptor transfer size. */
	device->driver.max_memcpy_size =
		1UL << readb(device->bars[0].vaddr + IOAT_XFERCAP_OFFSET);
	/* DMACOUNT is a 16-bit register, capping the operation count. */
	device->driver.max_memcpy_count = IOAT_DMACOUNT_MAX;
}
127
/*
 * Tear down the channel: return it to a quiescent (reset) state and
 * disable the MSI-X interrupts enabled by ioat_init().
 */
static void ioat_remove(struct vfio_pci_device *device)
{
	ioat_reset(device);
	vfio_pci_msix_disable(device);
}
133
ioat_handle_error(struct vfio_pci_device * device)134 static void ioat_handle_error(struct vfio_pci_device *device)
135 {
136 void *registers = ioat_channel_registers(device);
137
138 dev_err(device, "Error detected during memcpy operation!\n"
139 " CHANERR: 0x%x\n"
140 " CHANERR_INT: 0x%x\n"
141 " DMAUNCERRSTS: 0x%x\n",
142 readl(registers + IOAT_CHANERR_OFFSET),
143 vfio_pci_config_readl(device, IOAT_PCI_CHANERR_INT_OFFSET),
144 vfio_pci_config_readl(device, IOAT_PCI_DMAUNCERRSTS_OFFSET));
145
146 ioat_reset(device);
147 }
148
ioat_memcpy_wait(struct vfio_pci_device * device)149 static int ioat_memcpy_wait(struct vfio_pci_device *device)
150 {
151 void *registers = ioat_channel_registers(device);
152 u64 status;
153 int r = 0;
154
155 /* Wait until all operations complete. */
156 for (;;) {
157 status = ioat_channel_status(registers);
158 if (status == IOAT_CHANSTS_DONE)
159 break;
160
161 if (status == IOAT_CHANSTS_HALTED) {
162 ioat_handle_error(device);
163 return -1;
164 }
165 }
166
167 /* Put the channel into the SUSPENDED state. */
168 writeb(IOAT_CHANCMD_SUSPEND, registers + IOAT2_CHANCMD_OFFSET);
169 for (;;) {
170 status = ioat_channel_status(registers);
171 if (status == IOAT_CHANSTS_SUSPENDED)
172 break;
173 }
174
175 return r;
176 }
177
/*
 * Program the single shared descriptor as a COPY of @size bytes from
 * @src to @dst and kick off @count executions of it. When @interrupt is
 * true, the descriptor raises a channel interrupt on completion (used by
 * ioat_send_msi()).
 */
static void __ioat_memcpy_start(struct vfio_pci_device *device,
				iova_t src, iova_t dst, u64 size,
				u16 count, bool interrupt)
{
	struct ioat_state *ioat = to_ioat_state(device);
	void *registers = ioat_channel_registers(device);
	u64 desc_iova = to_iova(device, &ioat->desc);

	/*
	 * The descriptor's next pointer refers back to itself, so the
	 * channel can execute it @count times without a longer chain.
	 */
	ioat->desc = (struct ioat_dma_descriptor) {
		.ctl_f.op = IOAT_OP_COPY,
		.ctl_f.int_en = interrupt,
		.src_addr = src,
		.dst_addr = dst,
		.size = size,
		.next = desc_iova,
	};

	/* Tell the device the address of the descriptor. */
	writeq(desc_iova, registers + IOAT2_CHAINADDR_OFFSET);

	/* (Re)Enable the channel interrupt and abort on any errors */
	writew(IOAT_CHANCTRL_INT_REARM | IOAT_CHANCTRL_ANY_ERR_ABORT_EN,
	       registers + IOAT_CHANCTRL_OFFSET);

	/* Writing DMACOUNT kicks off @count DMA copy operation(s). */
	writew(count, registers + IOAT_CHAN_DMACOUNT_OFFSET);
}
207
/*
 * Generic driver entry point: start @count DMA copies of @size bytes
 * from @src to @dst without a completion interrupt.
 *
 * NOTE(review): @count is u64 here but is narrowed to the u16 DMACOUNT
 * width by __ioat_memcpy_start(); callers are expected to respect
 * max_memcpy_count (IOAT_DMACOUNT_MAX) set in ioat_init().
 */
static void ioat_memcpy_start(struct vfio_pci_device *device,
			      iova_t src, iova_t dst, u64 size,
			      u64 count)
{
	__ioat_memcpy_start(device, src, dst, size, count, false);
}
214
ioat_send_msi(struct vfio_pci_device * device)215 static void ioat_send_msi(struct vfio_pci_device *device)
216 {
217 struct ioat_state *ioat = to_ioat_state(device);
218
219 __ioat_memcpy_start(device,
220 to_iova(device, &ioat->send_msi_src),
221 to_iova(device, &ioat->send_msi_dst),
222 sizeof(ioat->send_msi_src), 1, true);
223
224 VFIO_ASSERT_EQ(ioat_memcpy_wait(device), 0);
225 }
226
/* Hooks the IOAT channel driver into the generic vfio_pci driver layer. */
const struct vfio_pci_driver_ops ioat_ops = {
	.name = "ioat",
	.probe = ioat_probe,
	.init = ioat_init,
	.remove = ioat_remove,
	.memcpy_start = ioat_memcpy_start,
	.memcpy_wait = ioat_memcpy_wait,
	.send_msi = ioat_send_msi,
};
236