// SPDX-License-Identifier: GPL-2.0-only
#include <stdint.h>
#include <unistd.h>

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/idxd.h>
#include <linux/io.h>
#include <linux/pci_ids.h>
#include <linux/sizes.h>

#include <vfio_util.h>

#include "registers.h"

/* Vectors 1+ are available for work queue completion interrupts. */
#define MSIX_VECTOR 1

struct dsa_state {
	/* Descriptors for copy and batch operations. */
	struct dsa_hw_desc batch[32];
	struct dsa_hw_desc copy[1024];

	/* Completion records for copy and batch operations. */
	struct dsa_completion_record copy_completion;
	struct dsa_completion_record batch_completion;

	/* Cached device registers (and derived data) for easy access */
	union gen_cap_reg gen_cap;
	union wq_cap_reg wq_cap;
	union group_cap_reg group_cap;
	union engine_cap_reg engine_cap;
	union offsets_reg table_offsets;
	void *wqcfg_table;
	void *grpcfg_table;
	u64 max_batches;
	u64 max_copies_per_batch;

	/* The number of ongoing memcpy operations. */
	u64 memcpy_count;

	/* Buffers used by dsa_send_msi() to generate an interrupt */
	u64 send_msi_src;
	u64 send_msi_dst;
};

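/*
 * struct dsa_state lives at the start of the driver's scratch region
 * (device->driver.region), so descriptors and completion records within it
 * can be handed to the device by IOVA via to_iova().
 */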
static inline struct dsa_state *to_dsa_state(struct vfio_pci_device *device)
{
	return device->driver.region.vaddr;
}

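/*
 * Returns true if the device requires the Request Interrupt Handle command
 * before interrupts can be used. This driver does not support that flow, so
 * dsa_probe() rejects such devices.
 */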
static bool dsa_int_handle_request_required(struct vfio_pci_device *device)
{
	void *bar0 = device->bars[0].vaddr;
	union gen_cap_reg gen_cap;
	u32 cmd_cap;

	gen_cap.bits = readq(bar0 + IDXD_GENCAP_OFFSET);
	if (!gen_cap.cmd_cap)
		return false;

	cmd_cap = readl(bar0 + IDXD_CMDCAP_OFFSET);
	return (cmd_cap >> IDXD_CMD_REQUEST_INT_HANDLE) & 1;
}

static int dsa_probe(struct vfio_pci_device *device)
{
	if (!vfio_pci_device_match(device, PCI_VENDOR_ID_INTEL,
				   PCI_DEVICE_ID_INTEL_DSA_SPR0))
		return -EINVAL;

	if (dsa_int_handle_request_required(device)) {
		printf("Device requires requesting interrupt handles\n");
		return -EINVAL;
	}

	return 0;
}

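/*
 * Check the SWERR register; if the device has reported a software error,
 * dump its fields and fail the test.
 */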
static void dsa_check_sw_err(struct vfio_pci_device *device)
{
	void *reg = device->bars[0].vaddr + IDXD_SWERR_OFFSET;
	union sw_err_reg err = {};
	int i;

	for (i = 0; i < ARRAY_SIZE(err.bits); i++) {
		err.bits[i] = readq(reg + offsetof(union sw_err_reg, bits[i]));

		/* No errors */
		if (i == 0 && !err.valid)
			return;
	}

	fprintf(stderr, "SWERR: 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
		err.bits[0], err.bits[1], err.bits[2], err.bits[3]);

	fprintf(stderr, "  valid: 0x%x\n", err.valid);
	fprintf(stderr, "  overflow: 0x%x\n", err.overflow);
	fprintf(stderr, "  desc_valid: 0x%x\n", err.desc_valid);
	fprintf(stderr, "  wq_idx_valid: 0x%x\n", err.wq_idx_valid);
	fprintf(stderr, "  batch: 0x%x\n", err.batch);
	fprintf(stderr, "  fault_rw: 0x%x\n", err.fault_rw);
	fprintf(stderr, "  priv: 0x%x\n", err.priv);
	fprintf(stderr, "  error: 0x%x\n", err.error);
	fprintf(stderr, "  wq_idx: 0x%x\n", err.wq_idx);
	fprintf(stderr, "  operation: 0x%x\n", err.operation);
	fprintf(stderr, "  pasid: 0x%x\n", err.pasid);
	fprintf(stderr, "  batch_idx: 0x%x\n", err.batch_idx);
	fprintf(stderr, "  invalid_flags: 0x%x\n", err.invalid_flags);
	fprintf(stderr, "  fault_addr: 0x%lx\n", err.fault_addr);

	VFIO_FAIL("Software Error Detected!\n");
}

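/*
 * Issue a command via the CMD register and poll CMDSTS until the device is
 * no longer active (up to ~5 seconds), asserting that no error was reported.
 */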
static void dsa_command(struct vfio_pci_device *device, u32 cmd)
{
	union idxd_command_reg cmd_reg = { .cmd = cmd };
	u32 sleep_ms = 1, attempts = 5000 / sleep_ms;
	void *bar0 = device->bars[0].vaddr;
	u32 status;
	u8 err;

	writel(cmd_reg.bits, bar0 + IDXD_CMD_OFFSET);

	for (;;) {
		dsa_check_sw_err(device);

		status = readl(bar0 + IDXD_CMDSTS_OFFSET);
		if (!(status & IDXD_CMDSTS_ACTIVE))
			break;

		VFIO_ASSERT_GT(--attempts, 0);
		usleep(sleep_ms * 1000);
	}

	err = status & IDXD_CMDSTS_ERR_MASK;
	VFIO_ASSERT_EQ(err, 0, "Error issuing command 0x%x: 0x%x\n", cmd, err);
}

static void dsa_wq_init(struct vfio_pci_device *device)
{
	struct dsa_state *dsa = to_dsa_state(device);
	union wq_cap_reg wq_cap = dsa->wq_cap;
	union wqcfg wqcfg;
	u64 wqcfg_size;
	int i;

	VFIO_ASSERT_GT((u32)wq_cap.num_wqs, 0);

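	/*
	 * Configure work queue 0 as a dedicated (mode = 1), priority-1 queue
	 * spanning the entire WQ size, limited to MEMMOVE and BATCH operations.
	 */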
	wqcfg = (union wqcfg) {
		.wq_size = wq_cap.total_wq_size,
		.mode = 1,
		.priority = 1,
		/*
		 * Disable Address Translation Service (if enabled) so that VFIO
		 * selftests using this driver can generate I/O page faults.
		 */
		.wq_ats_disable = wq_cap.wq_ats_support,
		.max_xfer_shift = dsa->gen_cap.max_xfer_shift,
		.max_batch_shift = dsa->gen_cap.max_batch_shift,
		.op_config[0] = BIT(DSA_OPCODE_MEMMOVE) | BIT(DSA_OPCODE_BATCH),
	};

	wqcfg_size = 1UL << (wq_cap.wqcfg_size + IDXD_WQCFG_MIN);

	for (i = 0; i < wqcfg_size / sizeof(wqcfg.bits[0]); i++)
		writel(wqcfg.bits[i], dsa->wqcfg_table + offsetof(union wqcfg, bits[i]));
}

static void dsa_group_init(struct vfio_pci_device *device)
{
	struct dsa_state *dsa = to_dsa_state(device);
	union group_cap_reg group_cap = dsa->group_cap;
	union engine_cap_reg engine_cap = dsa->engine_cap;

	VFIO_ASSERT_GT((u32)group_cap.num_groups, 0);
	VFIO_ASSERT_GT((u32)engine_cap.num_engines, 0);

	/* Assign work queue 0 and engine 0 to group 0 */
	writeq(1, dsa->grpcfg_table + offsetof(struct grpcfg, wqs[0]));
	writeq(1, dsa->grpcfg_table + offsetof(struct grpcfg, engines));
}

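/*
 * Cache the capability registers and derive the config table pointers and
 * the batch/copy limits used by the rest of the driver.
 */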
static void dsa_register_cache_init(struct vfio_pci_device *device)
{
	struct dsa_state *dsa = to_dsa_state(device);
	void *bar0 = device->bars[0].vaddr;

	dsa->gen_cap.bits = readq(bar0 + IDXD_GENCAP_OFFSET);
	dsa->wq_cap.bits = readq(bar0 + IDXD_WQCAP_OFFSET);
	dsa->group_cap.bits = readq(bar0 + IDXD_GRPCAP_OFFSET);
	dsa->engine_cap.bits = readq(bar0 + IDXD_ENGCAP_OFFSET);

	dsa->table_offsets.bits[0] = readq(bar0 + IDXD_TABLE_OFFSET);
	dsa->table_offsets.bits[1] = readq(bar0 + IDXD_TABLE_OFFSET + 8);

	dsa->wqcfg_table = bar0 + dsa->table_offsets.wqcfg * IDXD_TABLE_MULT;
	dsa->grpcfg_table = bar0 + dsa->table_offsets.grpcfg * IDXD_TABLE_MULT;

	dsa->max_batches = dsa->wq_cap.total_wq_size;
	dsa->max_batches = min(dsa->max_batches, ARRAY_SIZE(dsa->batch));

	dsa->max_copies_per_batch = 1UL << dsa->gen_cap.max_batch_shift;
	dsa->max_copies_per_batch = min(dsa->max_copies_per_batch, ARRAY_SIZE(dsa->copy));
}

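/*
 * Bring the device from reset to a state where work queue 0 accepts MEMMOVE
 * and BATCH descriptors, completions can be signaled via MSI-X, and the
 * driver's memcpy limits are published.
 */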
static void dsa_init(struct vfio_pci_device *device)
{
	struct dsa_state *dsa = to_dsa_state(device);

	VFIO_ASSERT_GE(device->driver.region.size, sizeof(*dsa));

	vfio_pci_config_writew(device, PCI_COMMAND,
			       PCI_COMMAND_MEMORY |
			       PCI_COMMAND_MASTER |
			       PCI_COMMAND_INTX_DISABLE);

	dsa_command(device, IDXD_CMD_RESET_DEVICE);

	dsa_register_cache_init(device);
	dsa_wq_init(device);
	dsa_group_init(device);

	dsa_command(device, IDXD_CMD_ENABLE_DEVICE);
	dsa_command(device, IDXD_CMD_ENABLE_WQ);

	vfio_pci_msix_enable(device, MSIX_VECTOR, 1);

	device->driver.max_memcpy_count =
		dsa->max_batches * dsa->max_copies_per_batch;
	device->driver.max_memcpy_size = 1UL << dsa->gen_cap.max_xfer_shift;
	device->driver.msi = MSIX_VECTOR;
}

static void dsa_remove(struct vfio_pci_device *device)
{
	dsa_command(device, IDXD_CMD_RESET_DEVICE);
	vfio_pci_msix_disable(device);
}

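/*
 * Poll a completion record until the device writes a status byte. Returns 0
 * on DSA_COMP_SUCCESS, -1 on any other status.
 */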
static int dsa_completion_wait(struct vfio_pci_device *device,
			       struct dsa_completion_record *completion)
{
	u8 status;

	for (;;) {
		dsa_check_sw_err(device);

		status = READ_ONCE(completion->status);
		if (status)
			break;

		usleep(1000);
	}

	if (status == DSA_COMP_SUCCESS)
		return 0;

	printf("Error detected during memcpy operation: 0x%x\n", status);
	return -1;
}

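/*
 * Initialize a MEMMOVE (copy) descriptor. The completion record address is
 * always valid; an interrupt on completion is requested only if @interrupt.
 */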
static void dsa_copy_desc_init(struct vfio_pci_device *device,
			       struct dsa_hw_desc *desc,
			       iova_t src, iova_t dst, u64 size,
			       bool interrupt)
{
	struct dsa_state *dsa = to_dsa_state(device);
	u16 flags;

	flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;

	if (interrupt)
		flags |= IDXD_OP_FLAG_RCI;

	*desc = (struct dsa_hw_desc) {
		.opcode = DSA_OPCODE_MEMMOVE,
		.flags = flags,
		.priv = 1,
		.src_addr = src,
		.dst_addr = dst,
		.xfer_size = size,
		.completion_addr = to_iova(device, &dsa->copy_completion),
		.int_handle = interrupt ? MSIX_VECTOR : 0,
	};
}

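/*
 * Initialize a BATCH descriptor that executes the first @count entries of
 * the shared copy descriptor array.
 */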
static void dsa_batch_desc_init(struct vfio_pci_device *device,
				struct dsa_hw_desc *desc,
				u64 count)
{
	struct dsa_state *dsa = to_dsa_state(device);

	*desc = (struct dsa_hw_desc) {
		.opcode = DSA_OPCODE_BATCH,
		.flags = IDXD_OP_FLAG_CRAV,
		.priv = 1,
		.completion_addr = to_iova(device, &dsa->batch_completion),
		.desc_list_addr = to_iova(device, &dsa->copy[0]),
		.desc_count = count,
	};
}

static void dsa_desc_write(struct vfio_pci_device *device, struct dsa_hw_desc *desc)
{
	/* Write the contents (not address) of the 64-byte descriptor to the device. */
	iosubmit_cmds512(device->bars[2].vaddr, desc, 1);
}

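/* Start a single copy with one MEMMOVE descriptor, clearing its completion record first. */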
static void dsa_memcpy_one(struct vfio_pci_device *device,
			   iova_t src, iova_t dst, u64 size, bool interrupt)
{
	struct dsa_state *dsa = to_dsa_state(device);

	memset(&dsa->copy_completion, 0, sizeof(dsa->copy_completion));

	dsa_copy_desc_init(device, &dsa->copy[0], src, dst, size, interrupt);
	dsa_desc_write(device, &dsa->copy[0]);
}

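/*
 * Start @count identical copies by splitting them across one or more BATCH
 * descriptors. Every batch points at the same array of copy descriptors;
 * only the final batch requests a completion record.
 */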
static void dsa_memcpy_batch(struct vfio_pci_device *device,
			     iova_t src, iova_t dst, u64 size, u64 count)
{
	struct dsa_state *dsa = to_dsa_state(device);
	int i;

	memset(&dsa->batch_completion, 0, sizeof(dsa->batch_completion));

	for (i = 0; i < ARRAY_SIZE(dsa->copy); i++) {
		struct dsa_hw_desc *copy_desc = &dsa->copy[i];

		dsa_copy_desc_init(device, copy_desc, src, dst, size, false);

		/* Don't request completions for individual copies. */
		copy_desc->flags &= ~IDXD_OP_FLAG_RCR;
	}

	for (i = 0; i < ARRAY_SIZE(dsa->batch) && count; i++) {
		struct dsa_hw_desc *batch_desc = &dsa->batch[i];
		int nr_copies;

		nr_copies = min(count, dsa->max_copies_per_batch);
		count -= nr_copies;

		/*
		 * Batches must have at least 2 copies, so handle the case where
		 * there is exactly 1 copy left by doing one less copy in this
		 * batch and then 2 in the next.
		 */
		if (count == 1) {
			nr_copies--;
			count++;
		}

		dsa_batch_desc_init(device, batch_desc, nr_copies);

		/* Request a completion for the last batch. */
		if (!count)
			batch_desc->flags |= IDXD_OP_FLAG_RCR;

		dsa_desc_write(device, batch_desc);
	}

	VFIO_ASSERT_EQ(count, 0, "Failed to start %lu copies.\n", count);
}

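/* Start @count copies of @size bytes from @src to @dst without waiting for completion. */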
static void dsa_memcpy_start(struct vfio_pci_device *device,
			     iova_t src, iova_t dst, u64 size, u64 count)
{
	struct dsa_state *dsa = to_dsa_state(device);

	/* DSA devices require at least 2 copies per batch. */
	if (count == 1)
		dsa_memcpy_one(device, src, dst, size, false);
	else
		dsa_memcpy_batch(device, src, dst, size, count);

	dsa->memcpy_count = count;
}

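/*
 * Wait for the copies started by dsa_memcpy_start() to finish: a single copy
 * completes via the copy record, batched copies via the final batch's record.
 */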
static int dsa_memcpy_wait(struct vfio_pci_device *device)
{
	struct dsa_state *dsa = to_dsa_state(device);
	int r;

	if (dsa->memcpy_count == 1)
		r = dsa_completion_wait(device, &dsa->copy_completion);
	else
		r = dsa_completion_wait(device, &dsa->batch_completion);

	dsa->memcpy_count = 0;

	return r;
}

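/*
 * Generate an MSI-X interrupt by performing a small, interrupt-enabled copy
 * between the two scratch buffers reserved for this purpose.
 */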
static void dsa_send_msi(struct vfio_pci_device *device)
{
	struct dsa_state *dsa = to_dsa_state(device);

	dsa_memcpy_one(device,
		       to_iova(device, &dsa->send_msi_src),
		       to_iova(device, &dsa->send_msi_dst),
		       sizeof(dsa->send_msi_src), true);

	VFIO_ASSERT_EQ(dsa_completion_wait(device, &dsa->copy_completion), 0);
}

const struct vfio_pci_driver_ops dsa_ops = {
	.name = "dsa",
	.probe = dsa_probe,
	.init = dsa_init,
	.remove = dsa_remove,
	.memcpy_start = dsa_memcpy_start,
	.memcpy_wait = dsa_memcpy_wait,
	.send_msi = dsa_send_msi,
};