xref: /linux/drivers/pci/endpoint/functions/pci-epf-test.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Test driver to test endpoint functionality
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/msi.h>
15 #include <linux/slab.h>
16 #include <linux/pci_ids.h>
17 #include <linux/random.h>
18 
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
21 #include <linux/pci-ep-msi.h>
22 #include <linux/pci_regs.h>
23 
24 #define IRQ_TYPE_INTX			0
25 #define IRQ_TYPE_MSI			1
26 #define IRQ_TYPE_MSIX			2
27 
28 #define COMMAND_RAISE_INTX_IRQ		BIT(0)
29 #define COMMAND_RAISE_MSI_IRQ		BIT(1)
30 #define COMMAND_RAISE_MSIX_IRQ		BIT(2)
31 #define COMMAND_READ			BIT(3)
32 #define COMMAND_WRITE			BIT(4)
33 #define COMMAND_COPY			BIT(5)
34 #define COMMAND_ENABLE_DOORBELL		BIT(6)
35 #define COMMAND_DISABLE_DOORBELL	BIT(7)
36 #define COMMAND_BAR_SUBRANGE_SETUP	BIT(8)
37 #define COMMAND_BAR_SUBRANGE_CLEAR	BIT(9)
38 
39 #define STATUS_READ_SUCCESS		BIT(0)
40 #define STATUS_READ_FAIL		BIT(1)
41 #define STATUS_WRITE_SUCCESS		BIT(2)
42 #define STATUS_WRITE_FAIL		BIT(3)
43 #define STATUS_COPY_SUCCESS		BIT(4)
44 #define STATUS_COPY_FAIL		BIT(5)
45 #define STATUS_IRQ_RAISED		BIT(6)
46 #define STATUS_SRC_ADDR_INVALID		BIT(7)
47 #define STATUS_DST_ADDR_INVALID		BIT(8)
48 #define STATUS_DOORBELL_SUCCESS		BIT(9)
49 #define STATUS_DOORBELL_ENABLE_SUCCESS	BIT(10)
50 #define STATUS_DOORBELL_ENABLE_FAIL	BIT(11)
51 #define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
52 #define STATUS_DOORBELL_DISABLE_FAIL	BIT(13)
53 #define STATUS_BAR_SUBRANGE_SETUP_SUCCESS	BIT(14)
54 #define STATUS_BAR_SUBRANGE_SETUP_FAIL		BIT(15)
55 #define STATUS_BAR_SUBRANGE_CLEAR_SUCCESS	BIT(16)
56 #define STATUS_BAR_SUBRANGE_CLEAR_FAIL		BIT(17)
57 
58 #define FLAG_USE_DMA			BIT(0)
59 
60 #define TIMER_RESOLUTION		1
61 
62 #define CAP_UNALIGNED_ACCESS		BIT(0)
63 #define CAP_MSI				BIT(1)
64 #define CAP_MSIX			BIT(2)
65 #define CAP_INTX			BIT(3)
66 #define CAP_SUBRANGE_MAPPING		BIT(4)
67 
68 #define PCI_EPF_TEST_BAR_SUBRANGE_NSUB	2
69 
70 static struct workqueue_struct *kpcitest_workqueue;
71 
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* kernel virtual address of each BAR's backing memory */
	struct pci_epf		*epf;
	struct config_group	group;			/* configfs group for user-space configuration */
	enum pci_barno		test_reg_bar;		/* BAR that exposes struct pci_epf_test_reg to the host */
	size_t			msix_table_offset;	/* offset of the MSI-X table within its BAR */
	struct delayed_work	cmd_handler;		/* polls reg->command for host requests */
	struct dma_chan		*dma_chan_tx;		/* EP-to-RC (DMA_MEM_TO_DEV) channel */
	struct dma_chan		*dma_chan_rx;		/* RC-to-EP (DMA_DEV_TO_MEM) channel */
	struct dma_chan		*transfer_chan;		/* channel used by the in-flight transfer */
	dma_cookie_t		transfer_cookie;	/* cookie of the in-flight transfer */
	enum dma_status		transfer_status;	/* final status reported by the DMA callback */
	struct completion	transfer_complete;	/* signalled by pci_epf_test_dma_callback() */
	bool			dma_supported;		/* DMA channels were successfully acquired */
	bool			dma_private;		/* using EPC-private (slave API) DMA channels */
	const struct pci_epc_features *epc_features;
	struct pci_epf_bar	db_bar;			/* BAR re-targeted at the doorbell MSI address */
	size_t			bar_size[PCI_STD_NUM_BARS];	/* user-requested BAR sizes (configfs) */
};
91 
/*
 * Register block exposed to the host through test_reg_bar. All fields are
 * little-endian since the host accesses them directly over PCI.
 */
struct pci_epf_test_reg {
	__le32 magic;
	__le32 command;		/* COMMAND_* bit, written by the host */
	__le32 status;		/* STATUS_* bits, reported back to the host */
	__le64 src_addr;	/* host-side source PCI address */
	__le64 dst_addr;	/* host-side destination PCI address */
	__le32 size;		/* transfer size; BAR number for BAR_SUBRANGE_* commands */
	__le32 checksum;	/* CRC32 of the transferred data */
	__le32 irq_type;	/* IRQ_TYPE_* requested by the host */
	__le32 irq_number;	/* 1-based MSI/MSI-X vector to raise */
	__le32 flags;		/* FLAG_* modifiers (e.g. FLAG_USE_DMA) */
	__le32 caps;		/* CAP_* bits advertised by the endpoint */
	__le32 doorbell_bar;
	__le32 doorbell_offset;
	__le32 doorbell_data;
} __packed;
108 
109 static struct pci_epf_header test_header = {
110 	.vendorid	= PCI_ANY_ID,
111 	.deviceid	= PCI_ANY_ID,
112 	.baseclass_code = PCI_CLASS_OTHERS,
113 	.interrupt_pin	= PCI_INTERRUPT_INTA,
114 };
115 
116 /* default BAR sizes, can be overridden by the user using configfs */
117 static size_t default_bar_size[] = { 131072, 131072, 131072, 131072, 131072, 1048576 };
118 
pci_epf_test_dma_callback(void * param)119 static void pci_epf_test_dma_callback(void *param)
120 {
121 	struct pci_epf_test *epf_test = param;
122 	struct dma_tx_state state;
123 
124 	epf_test->transfer_status =
125 		dmaengine_tx_status(epf_test->transfer_chan,
126 				    epf_test->transfer_cookie, &state);
127 	if (epf_test->transfer_status == DMA_COMPLETE ||
128 	    epf_test->transfer_status == DMA_ERROR)
129 		complete(&epf_test->transfer_complete);
130 }
131 
132 /**
133  * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
134  *				  data between PCIe EP and remote PCIe RC
135  * @epf_test: the EPF test device that performs the data transfer operation
136  * @dma_dst: The destination address of the data transfer. It can be a physical
137  *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
138  * @dma_src: The source address of the data transfer. It can be a physical
139  *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
140  * @len: The size of the data transfer
141  * @dma_remote: remote RC physical address
142  * @dir: DMA transfer direction
143  *
144  * Function that uses dmaengine API to transfer data between PCIe EP and remote
145  * PCIe RC. The source and destination address can be a physical address given
146  * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
147  *
148  * The function returns '0' on success and negative value on failure.
149  */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	/*
	 * Private channels are direction-specific; for the generic memcpy
	 * fallback, dma_chan_tx and dma_chan_rx alias the same channel.
	 */
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		/*
		 * Slave transfer: the remote (RC) address is programmed via
		 * the slave config; only the local address goes into the
		 * descriptor.
		 */
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	/* Completion is signalled from pci_epf_test_dma_callback(). */
	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	/* Always quiesce the channel, on the success path too. */
	dmaengine_terminate_sync(chan);

	return ret;
}
222 
/* Match criteria handed to epf_dma_filter_fn() via dma_request_channel(). */
struct epf_dma_filter {
	struct device *dev;	/* DMA device expected to own the channel */
	u32 dma_mask;		/* BIT(dma_transfer_direction) the channel must support */
};
227 
epf_dma_filter_fn(struct dma_chan * chan,void * node)228 static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
229 {
230 	struct epf_dma_filter *filter = node;
231 	struct dma_slave_caps caps;
232 
233 	memset(&caps, 0, sizeof(caps));
234 	dma_get_slave_caps(chan, &caps);
235 
236 	return chan->device->dev == filter->dev
237 		&& (filter->dma_mask & caps.directions);
238 }
239 
240 /**
241  * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
242  * @epf_test: the EPF test device that performs data transfer operation
243  *
244  * Function to initialize EPF test DMA channel.
245  */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	/* First try direction-specific (private) channels owned by the EPC. */
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	/* Got rx but not tx: release rx before falling back to memcpy. */
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	/* Fall back to a generic memcpy channel shared by both directions. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}
304 
305 /**
306  * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
307  * @epf_test: the EPF test device that performs data transfer operation
308  *
309  * Helper to cleanup EPF test DMA channel.
310  */
pci_epf_test_clean_dma_chan(struct pci_epf_test * epf_test)311 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
312 {
313 	if (!epf_test->dma_supported)
314 		return;
315 
316 	if (epf_test->dma_chan_tx) {
317 		dma_release_channel(epf_test->dma_chan_tx);
318 		if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
319 			epf_test->dma_chan_tx = NULL;
320 			epf_test->dma_chan_rx = NULL;
321 			return;
322 		}
323 		epf_test->dma_chan_tx = NULL;
324 	}
325 
326 	if (epf_test->dma_chan_rx) {
327 		dma_release_channel(epf_test->dma_chan_rx);
328 		epf_test->dma_chan_rx = NULL;
329 	}
330 }
331 
/* Log size, elapsed time and throughput (KB/s) of a completed test operation. */
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* calculate the rate: bytes * NSEC_PER_SEC / (ns * 1000) => KB/s */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %ptSp s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO", &ts, rate);
}
349 
pci_epf_test_copy(struct pci_epf_test * epf_test,struct pci_epf_test_reg * reg)350 static void pci_epf_test_copy(struct pci_epf_test *epf_test,
351 			      struct pci_epf_test_reg *reg)
352 {
353 	int ret = 0;
354 	struct timespec64 start, end;
355 	struct pci_epf *epf = epf_test->epf;
356 	struct pci_epc *epc = epf->epc;
357 	struct device *dev = &epf->dev;
358 	struct pci_epc_map src_map, dst_map;
359 	u64 src_addr = le64_to_cpu(reg->src_addr);
360 	u64 dst_addr = le64_to_cpu(reg->dst_addr);
361 	size_t orig_size, copy_size;
362 	ssize_t map_size = 0;
363 	u32 flags = le32_to_cpu(reg->flags);
364 	u32 status = 0;
365 	void *copy_buf = NULL, *buf;
366 
367 	orig_size = copy_size = le32_to_cpu(reg->size);
368 
369 	if (flags & FLAG_USE_DMA) {
370 		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
371 			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
372 			ret = -EINVAL;
373 			goto set_status;
374 		}
375 	} else {
376 		copy_buf = kzalloc(copy_size, GFP_KERNEL);
377 		if (!copy_buf) {
378 			ret = -ENOMEM;
379 			goto set_status;
380 		}
381 		buf = copy_buf;
382 	}
383 
384 	while (copy_size) {
385 		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
386 				      src_addr, copy_size, &src_map);
387 		if (ret) {
388 			dev_err(dev, "Failed to map source address\n");
389 			status = STATUS_SRC_ADDR_INVALID;
390 			goto free_buf;
391 		}
392 
393 		ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
394 					   dst_addr, copy_size, &dst_map);
395 		if (ret) {
396 			dev_err(dev, "Failed to map destination address\n");
397 			status = STATUS_DST_ADDR_INVALID;
398 			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
399 					  &src_map);
400 			goto free_buf;
401 		}
402 
403 		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);
404 
405 		ktime_get_ts64(&start);
406 		if (flags & FLAG_USE_DMA) {
407 			ret = pci_epf_test_data_transfer(epf_test,
408 					dst_map.phys_addr, src_map.phys_addr,
409 					map_size, 0, DMA_MEM_TO_MEM);
410 			if (ret) {
411 				dev_err(dev, "Data transfer failed\n");
412 				goto unmap;
413 			}
414 		} else {
415 			memcpy_fromio(buf, src_map.virt_addr, map_size);
416 			memcpy_toio(dst_map.virt_addr, buf, map_size);
417 			buf += map_size;
418 		}
419 		ktime_get_ts64(&end);
420 
421 		copy_size -= map_size;
422 		src_addr += map_size;
423 		dst_addr += map_size;
424 
425 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
426 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
427 		map_size = 0;
428 	}
429 
430 	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
431 				flags & FLAG_USE_DMA);
432 
433 unmap:
434 	if (map_size) {
435 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
436 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
437 	}
438 
439 free_buf:
440 	kfree(copy_buf);
441 
442 set_status:
443 	if (!ret)
444 		status |= STATUS_COPY_SUCCESS;
445 	else
446 		status |= STATUS_COPY_FAIL;
447 	reg->status = cpu_to_le32(status);
448 }
449 
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	size_t orig_size, src_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 checksum = le32_to_cpu(reg->checksum);
	u32 status = 0;

	orig_size = src_size = le32_to_cpu(reg->size);

	/* Local buffer receiving the data read from the host. */
	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	/*
	 * The outbound window may be smaller than the transfer: map, copy
	 * and unmap chunk by chunk until all of src_size is consumed.
	 */
	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
					   src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			/* Unmap before bailing out so the buffer is coherent. */
			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		/* Cleared so the unmap label does not unmap twice. */
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/* Verify the received data against the host-provided checksum. */
	crc32 = crc32_le(~0, src_buf, orig_size);
	if (crc32 != checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		status |= STATUS_READ_SUCCESS;
	else
		status |= STATUS_READ_FAIL;
	reg->status = cpu_to_le32(status);
}
547 
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, dst_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;

	orig_size = dst_size = le32_to_cpu(reg->size);

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	/* Random payload; publish its CRC so the host can verify. */
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
	buf = dst_buf;

	/*
	 * The outbound window may be smaller than the transfer: map, copy
	 * and unmap chunk by chunk until all of dst_size is consumed.
	 */
	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
					   dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		/* Cleared so the unmap label does not unmap twice. */
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay an
	 * L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		status |= STATUS_WRITE_SUCCESS;
	else
		status |= STATUS_WRITE_FAIL;
	reg->status = cpu_to_le32(status);
}
649 
/* Raise the host-requested interrupt (INTx, MSI or MSI-X) after a command. */
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = le32_to_cpu(reg->status);
	u32 irq_number = le32_to_cpu(reg->irq_number);
	u32 irq_type = le32_to_cpu(reg->irq_type);
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	status |= STATUS_IRQ_RAISED;
	WRITE_ONCE(reg->status, cpu_to_le32(status));

	switch (irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		/* irq_number is 1-based; validate against allocated vectors. */
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}
698 
pci_epf_test_doorbell_handler(int irq,void * data)699 static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
700 {
701 	struct pci_epf_test *epf_test = data;
702 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
703 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
704 	u32 status = le32_to_cpu(reg->status);
705 
706 	status |= STATUS_DOORBELL_SUCCESS;
707 	reg->status = cpu_to_le32(status);
708 	pci_epf_test_raise_irq(epf_test, reg);
709 
710 	return IRQ_HANDLED;
711 }
712 
/*
 * Undo pci_epf_test_enable_doorbell(): release the doorbell IRQ, advertise
 * NO_BAR to the host and free the doorbell vector.
 */
static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
{
	struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
	struct pci_epf *epf = epf_test->epf;

	free_irq(epf->db_msg[0].virq, epf_test);
	reg->doorbell_bar = cpu_to_le32(NO_BAR);

	pci_epf_free_doorbell(epf);
}
723 
pci_epf_test_enable_doorbell(struct pci_epf_test * epf_test,struct pci_epf_test_reg * reg)724 static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
725 					 struct pci_epf_test_reg *reg)
726 {
727 	u32 status = le32_to_cpu(reg->status);
728 	struct pci_epf *epf = epf_test->epf;
729 	struct pci_epc *epc = epf->epc;
730 	struct msi_msg *msg;
731 	enum pci_barno bar;
732 	size_t offset;
733 	int ret;
734 
735 	ret = pci_epf_alloc_doorbell(epf, 1);
736 	if (ret)
737 		goto set_status_err;
738 
739 	msg = &epf->db_msg[0].msg;
740 	bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
741 	if (bar < BAR_0)
742 		goto err_doorbell_cleanup;
743 
744 	ret = request_threaded_irq(epf->db_msg[0].virq, NULL,
745 				   pci_epf_test_doorbell_handler, IRQF_ONESHOT,
746 				   "pci-ep-test-doorbell", epf_test);
747 	if (ret) {
748 		dev_err(&epf->dev,
749 			"Failed to request doorbell IRQ: %d\n",
750 			epf->db_msg[0].virq);
751 		goto err_doorbell_cleanup;
752 	}
753 
754 	reg->doorbell_data = cpu_to_le32(msg->data);
755 	reg->doorbell_bar = cpu_to_le32(bar);
756 
757 	msg = &epf->db_msg[0].msg;
758 	ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
759 					 &epf_test->db_bar.phys_addr, &offset);
760 
761 	if (ret)
762 		goto err_doorbell_cleanup;
763 
764 	reg->doorbell_offset = cpu_to_le32(offset);
765 
766 	epf_test->db_bar.barno = bar;
767 	epf_test->db_bar.size = epf->bar[bar].size;
768 	epf_test->db_bar.flags = epf->bar[bar].flags;
769 
770 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
771 	if (ret)
772 		goto err_doorbell_cleanup;
773 
774 	status |= STATUS_DOORBELL_ENABLE_SUCCESS;
775 	reg->status = cpu_to_le32(status);
776 	return;
777 
778 err_doorbell_cleanup:
779 	pci_epf_test_doorbell_cleanup(epf_test);
780 set_status_err:
781 	status |= STATUS_DOORBELL_ENABLE_FAIL;
782 	reg->status = cpu_to_le32(status);
783 }
784 
pci_epf_test_disable_doorbell(struct pci_epf_test * epf_test,struct pci_epf_test_reg * reg)785 static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
786 					  struct pci_epf_test_reg *reg)
787 {
788 	enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
789 	u32 status = le32_to_cpu(reg->status);
790 	struct pci_epf *epf = epf_test->epf;
791 	struct pci_epc *epc = epf->epc;
792 	int ret;
793 
794 	if (bar < BAR_0)
795 		goto set_status_err;
796 
797 	pci_epf_test_doorbell_cleanup(epf_test);
798 
799 	/*
800 	 * The doorbell feature temporarily overrides the inbound translation
801 	 * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
802 	 * it calls set_bar() twice without ever calling clear_bar(), as
803 	 * calling clear_bar() would clear the BAR's PCI address assigned by
804 	 * the host. Thus, when disabling the doorbell, restore the inbound
805 	 * translation to point to the memory allocated for the BAR.
806 	 */
807 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
808 	if (ret)
809 		goto set_status_err;
810 
811 	status |= STATUS_DOORBELL_DISABLE_SUCCESS;
812 	reg->status = cpu_to_le32(status);
813 
814 	return;
815 
816 set_status_err:
817 	status |= STATUS_DOORBELL_DISABLE_FAIL;
818 	reg->status = cpu_to_le32(status);
819 }
820 
pci_epf_test_subrange_sig_byte(enum pci_barno barno,unsigned int subno)821 static u8 pci_epf_test_subrange_sig_byte(enum pci_barno barno,
822 					 unsigned int subno)
823 {
824 	return 0x50 + (barno * 8) + subno;
825 }
826 
pci_epf_test_bar_subrange_setup(struct pci_epf_test * epf_test,struct pci_epf_test_reg * reg)827 static void pci_epf_test_bar_subrange_setup(struct pci_epf_test *epf_test,
828 					    struct pci_epf_test_reg *reg)
829 {
830 	struct pci_epf_bar_submap *submap, *old_submap;
831 	struct pci_epf *epf = epf_test->epf;
832 	struct pci_epc *epc = epf->epc;
833 	struct pci_epf_bar *bar;
834 	unsigned int nsub = PCI_EPF_TEST_BAR_SUBRANGE_NSUB, old_nsub;
835 	/* reg->size carries BAR number for BAR_SUBRANGE_* commands. */
836 	enum pci_barno barno = le32_to_cpu(reg->size);
837 	u32 status = le32_to_cpu(reg->status);
838 	unsigned int i, phys_idx;
839 	size_t sub_size;
840 	u8 *addr;
841 	int ret;
842 
843 	if (barno >= PCI_STD_NUM_BARS) {
844 		dev_err(&epf->dev, "Invalid barno: %d\n", barno);
845 		goto err;
846 	}
847 
848 	/* Host side should've avoided test_reg_bar, this is a safeguard. */
849 	if (barno == epf_test->test_reg_bar) {
850 		dev_err(&epf->dev, "test_reg_bar cannot be used for subrange test\n");
851 		goto err;
852 	}
853 
854 	if (!epf_test->epc_features->dynamic_inbound_mapping ||
855 	    !epf_test->epc_features->subrange_mapping) {
856 		dev_err(&epf->dev, "epc driver does not support subrange mapping\n");
857 		goto err;
858 	}
859 
860 	bar = &epf->bar[barno];
861 	if (!bar->size || !bar->addr) {
862 		dev_err(&epf->dev, "bar size/addr (%zu/%p) is invalid\n",
863 			bar->size, bar->addr);
864 		goto err;
865 	}
866 
867 	if (bar->size % nsub) {
868 		dev_err(&epf->dev, "BAR size %zu is not divisible by %u\n",
869 			bar->size, nsub);
870 		goto err;
871 	}
872 
873 	sub_size = bar->size / nsub;
874 
875 	submap = kzalloc_objs(*submap, nsub);
876 	if (!submap)
877 		goto err;
878 
879 	for (i = 0; i < nsub; i++) {
880 		/* Swap the two halves so RC can verify ordering. */
881 		phys_idx = i ^ 1;
882 		submap[i].phys_addr = bar->phys_addr + (phys_idx * sub_size);
883 		submap[i].size = sub_size;
884 	}
885 
886 	old_submap = bar->submap;
887 	old_nsub = bar->num_submap;
888 
889 	bar->submap = submap;
890 	bar->num_submap = nsub;
891 
892 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
893 	if (ret) {
894 		dev_err(&epf->dev, "pci_epc_set_bar() failed: %d\n", ret);
895 		bar->submap = old_submap;
896 		bar->num_submap = old_nsub;
897 		kfree(submap);
898 		goto err;
899 	}
900 	kfree(old_submap);
901 
902 	/*
903 	 * Fill deterministic signatures into the physical regions that
904 	 * each BAR subrange maps to. RC verifies these to ensure the
905 	 * submap order is really applied.
906 	 */
907 	addr = (u8 *)bar->addr;
908 	for (i = 0; i < nsub; i++) {
909 		phys_idx = i ^ 1;
910 		memset(addr + (phys_idx * sub_size),
911 		       pci_epf_test_subrange_sig_byte(barno, i),
912 		       sub_size);
913 	}
914 
915 	status |= STATUS_BAR_SUBRANGE_SETUP_SUCCESS;
916 	reg->status = cpu_to_le32(status);
917 	return;
918 
919 err:
920 	status |= STATUS_BAR_SUBRANGE_SETUP_FAIL;
921 	reg->status = cpu_to_le32(status);
922 }
923 
pci_epf_test_bar_subrange_clear(struct pci_epf_test * epf_test,struct pci_epf_test_reg * reg)924 static void pci_epf_test_bar_subrange_clear(struct pci_epf_test *epf_test,
925 					    struct pci_epf_test_reg *reg)
926 {
927 	struct pci_epf *epf = epf_test->epf;
928 	struct pci_epf_bar_submap *submap;
929 	struct pci_epc *epc = epf->epc;
930 	/* reg->size carries BAR number for BAR_SUBRANGE_* commands. */
931 	enum pci_barno barno = le32_to_cpu(reg->size);
932 	u32 status = le32_to_cpu(reg->status);
933 	struct pci_epf_bar *bar;
934 	unsigned int nsub;
935 	int ret;
936 
937 	if (barno >= PCI_STD_NUM_BARS) {
938 		dev_err(&epf->dev, "Invalid barno: %d\n", barno);
939 		goto err;
940 	}
941 
942 	bar = &epf->bar[barno];
943 	submap = bar->submap;
944 	nsub = bar->num_submap;
945 
946 	if (!submap || !nsub)
947 		goto err;
948 
949 	bar->submap = NULL;
950 	bar->num_submap = 0;
951 
952 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
953 	if (ret) {
954 		bar->submap = submap;
955 		bar->num_submap = nsub;
956 		dev_err(&epf->dev, "pci_epc_set_bar() failed: %d\n", ret);
957 		goto err;
958 	}
959 	kfree(submap);
960 
961 	status |= STATUS_BAR_SUBRANGE_CLEAR_SUCCESS;
962 	reg->status = cpu_to_le32(status);
963 	return;
964 
965 err:
966 	status |= STATUS_BAR_SUBRANGE_CLEAR_FAIL;
967 	reg->status = cpu_to_le32(status);
968 }
969 
/*
 * Delayed-work handler that polls the command register written by the host,
 * dispatches the requested test operation and re-arms itself every 1 ms.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 irq_type = le32_to_cpu(reg->irq_type);

	command = le32_to_cpu(READ_ONCE(reg->command));
	if (!command)
		goto reset_handler;

	/* Consume the command and clear the status for this run. */
	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	/* Each operation ends with an IRQ so the host knows it completed. */
	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_ENABLE_DOORBELL:
		pci_epf_test_enable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_DISABLE_DOORBELL:
		pci_epf_test_disable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_BAR_SUBRANGE_SETUP:
		pci_epf_test_bar_subrange_setup(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_BAR_SUBRANGE_CLEAR:
		pci_epf_test_bar_subrange_clear(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	/* Re-arm the poll. */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
1042 
pci_epf_test_set_bar(struct pci_epf * epf)1043 static int pci_epf_test_set_bar(struct pci_epf *epf)
1044 {
1045 	int bar, ret;
1046 	struct pci_epc *epc = epf->epc;
1047 	struct device *dev = &epf->dev;
1048 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1049 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1050 
1051 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1052 		if (!epf_test->reg[bar])
1053 			continue;
1054 
1055 		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
1056 				      &epf->bar[bar]);
1057 		if (ret) {
1058 			pci_epf_free_space(epf, epf_test->reg[bar], bar,
1059 					   PRIMARY_INTERFACE);
1060 			epf_test->reg[bar] = NULL;
1061 			dev_err(dev, "Failed to set BAR%d\n", bar);
1062 			if (bar == test_reg_bar)
1063 				return ret;
1064 		}
1065 	}
1066 
1067 	return 0;
1068 }
1069 
pci_epf_test_clear_bar(struct pci_epf * epf)1070 static void pci_epf_test_clear_bar(struct pci_epf *epf)
1071 {
1072 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1073 	struct pci_epc *epc = epf->epc;
1074 	int bar;
1075 
1076 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1077 		if (!epf_test->reg[bar])
1078 			continue;
1079 
1080 		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
1081 				  &epf->bar[bar]);
1082 	}
1083 }
1084 
pci_epf_test_set_capabilities(struct pci_epf * epf)1085 static void pci_epf_test_set_capabilities(struct pci_epf *epf)
1086 {
1087 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1088 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1089 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
1090 	struct pci_epc *epc = epf->epc;
1091 	u32 caps = 0;
1092 
1093 	if (epc->ops->align_addr)
1094 		caps |= CAP_UNALIGNED_ACCESS;
1095 
1096 	if (epf_test->epc_features->msi_capable)
1097 		caps |= CAP_MSI;
1098 
1099 	if (epf_test->epc_features->msix_capable)
1100 		caps |= CAP_MSIX;
1101 
1102 	if (epf_test->epc_features->intx_capable)
1103 		caps |= CAP_INTX;
1104 
1105 	if (epf_test->epc_features->dynamic_inbound_mapping &&
1106 	    epf_test->epc_features->subrange_mapping)
1107 		caps |= CAP_SUBRANGE_MAPPING;
1108 
1109 	reg->caps = cpu_to_le32(caps);
1110 }
1111 
/*
 * EPC initialization hook: set up DMA (optional), write the configuration
 * header, advertise capabilities, program the BARs and configure MSI/MSI-X.
 * If the controller cannot report link-up, start the command poller now.
 *
 * Returns 0 on success or a negative error code.
 */
static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	/* DMA is best-effort; transfers fall back to memcpy when absent. */
	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	/*
	 * NOTE(review): the header is only written for vfunc_no <= 1 —
	 * presumably later virtual functions inherit it; confirm against
	 * the EPC core's virtual-function handling.
	 */
	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	pci_epf_test_set_capabilities(epf);

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		/* The MSI-X table lives in the test register BAR. */
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	/* Without a link-up notifier, poll for commands immediately. */
	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}
1168 
/*
 * EPC teardown hook: stop the command poller, release DMA channels and
 * clear the programmed BARs.  The BAR backing memory is kept; it is only
 * freed in .unbind() (pci_epf_test_free_space()).
 */
static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}
1177 
/*
 * Link-up notification: (re)start the command poller now that the host
 * can reach the test registers.
 */
static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}
1187 
/* Link-down notification: stop polling; commands cannot arrive anymore. */
static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}
1196 
/* EPC event callbacks, wired up in pci_epf_test_probe(). */
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};
1203 
pci_epf_test_alloc_space(struct pci_epf * epf)1204 static int pci_epf_test_alloc_space(struct pci_epf *epf)
1205 {
1206 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1207 	struct device *dev = &epf->dev;
1208 	size_t msix_table_size = 0;
1209 	size_t test_reg_bar_size;
1210 	size_t pba_size = 0;
1211 	void *base;
1212 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1213 	enum pci_barno bar;
1214 	const struct pci_epc_features *epc_features = epf_test->epc_features;
1215 	size_t test_reg_size;
1216 
1217 	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
1218 
1219 	if (epc_features->msix_capable) {
1220 		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
1221 		epf_test->msix_table_offset = test_reg_bar_size;
1222 		/* Align to QWORD or 8 Bytes */
1223 		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
1224 	}
1225 	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
1226 
1227 	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
1228 				   epc_features, PRIMARY_INTERFACE);
1229 	if (!base) {
1230 		dev_err(dev, "Failed to allocated register space\n");
1231 		return -ENOMEM;
1232 	}
1233 	epf_test->reg[test_reg_bar] = base;
1234 
1235 	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
1236 		bar = pci_epc_get_next_free_bar(epc_features, bar);
1237 		if (bar == NO_BAR)
1238 			break;
1239 
1240 		if (bar == test_reg_bar)
1241 			continue;
1242 
1243 		if (epc_features->bar[bar].type == BAR_FIXED)
1244 			test_reg_size = epc_features->bar[bar].fixed_size;
1245 		else
1246 			test_reg_size = epf_test->bar_size[bar];
1247 
1248 		base = pci_epf_alloc_space(epf, test_reg_size, bar,
1249 					   epc_features, PRIMARY_INTERFACE);
1250 		if (!base)
1251 			dev_err(dev, "Failed to allocate space for BAR%d\n",
1252 				bar);
1253 		epf_test->reg[bar] = base;
1254 	}
1255 
1256 	return 0;
1257 }
1258 
pci_epf_test_free_space(struct pci_epf * epf)1259 static void pci_epf_test_free_space(struct pci_epf *epf)
1260 {
1261 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1262 	int bar;
1263 
1264 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1265 		if (!epf_test->reg[bar])
1266 			continue;
1267 
1268 		pci_epf_free_space(epf, epf_test->reg[bar], bar,
1269 				   PRIMARY_INTERFACE);
1270 		epf_test->reg[bar] = NULL;
1271 	}
1272 }
1273 
pci_epf_test_bind(struct pci_epf * epf)1274 static int pci_epf_test_bind(struct pci_epf *epf)
1275 {
1276 	int ret;
1277 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1278 	const struct pci_epc_features *epc_features;
1279 	enum pci_barno test_reg_bar = BAR_0;
1280 	struct pci_epc *epc = epf->epc;
1281 
1282 	if (WARN_ON_ONCE(!epc))
1283 		return -EINVAL;
1284 
1285 	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
1286 	if (!epc_features) {
1287 		dev_err(&epf->dev, "epc_features not implemented\n");
1288 		return -EOPNOTSUPP;
1289 	}
1290 
1291 	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
1292 	if (test_reg_bar < 0)
1293 		return -EINVAL;
1294 
1295 	epf_test->test_reg_bar = test_reg_bar;
1296 	epf_test->epc_features = epc_features;
1297 
1298 	ret = pci_epf_test_alloc_space(epf);
1299 	if (ret)
1300 		return ret;
1301 
1302 	return 0;
1303 }
1304 
/*
 * Undo .bind(): stop the command poller and, only if the EPC completed its
 * initialization, release DMA channels and clear the BARs; finally free
 * the BAR backing memory.
 */
static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}
1317 
/*
 * PCI_EPF_TEST_BAR_SIZE_R() - generate the configfs "show" handler that
 * reports the currently configured size for BAR @_id.
 */
#define PCI_EPF_TEST_BAR_SIZE_R(_name, _id)				\
static ssize_t pci_epf_test_##_name##_show(struct config_item *item,	\
					   char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct pci_epf_test *epf_test =					\
		container_of(group, struct pci_epf_test, group);	\
									\
	return sysfs_emit(page, "%zu\n", epf_test->bar_size[_id]);	\
}
1328 
/*
 * PCI_EPF_TEST_BAR_SIZE_W() - generate the configfs "store" handler that
 * sets the size used for BAR @_id when the function is later bound.
 * Only power-of-two values are accepted, and only before binding.
 *
 * Fixes: @val was declared as int, but kstrtouint() takes an
 * unsigned int * (incompatible pointer type); declare it unsigned.
 */
#define PCI_EPF_TEST_BAR_SIZE_W(_name, _id)				\
static ssize_t pci_epf_test_##_name##_store(struct config_item *item,	\
					    const char *page,		\
					    size_t len)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct pci_epf_test *epf_test =					\
		container_of(group, struct pci_epf_test, group);	\
	unsigned int val;						\
	int ret;							\
									\
	/*								\
	 * BAR sizes can only be modified before binding to an EPC,	\
	 * because pci_epf_test_alloc_space() is called in .bind().	\
	 */								\
	if (epf_test->epf->epc)						\
		return -EOPNOTSUPP;					\
									\
	ret = kstrtouint(page, 0, &val);				\
	if (ret)							\
		return ret;						\
									\
	if (!is_power_of_2(val))					\
		return -EINVAL;						\
									\
	epf_test->bar_size[_id] = val;					\
									\
	return len;							\
}
1357 
/* Instantiate read/write handlers for every standard BAR. */
PCI_EPF_TEST_BAR_SIZE_R(bar0_size, BAR_0)
PCI_EPF_TEST_BAR_SIZE_W(bar0_size, BAR_0)
PCI_EPF_TEST_BAR_SIZE_R(bar1_size, BAR_1)
PCI_EPF_TEST_BAR_SIZE_W(bar1_size, BAR_1)
PCI_EPF_TEST_BAR_SIZE_R(bar2_size, BAR_2)
PCI_EPF_TEST_BAR_SIZE_W(bar2_size, BAR_2)
PCI_EPF_TEST_BAR_SIZE_R(bar3_size, BAR_3)
PCI_EPF_TEST_BAR_SIZE_W(bar3_size, BAR_3)
PCI_EPF_TEST_BAR_SIZE_R(bar4_size, BAR_4)
PCI_EPF_TEST_BAR_SIZE_W(bar4_size, BAR_4)
PCI_EPF_TEST_BAR_SIZE_R(bar5_size, BAR_5)
PCI_EPF_TEST_BAR_SIZE_W(bar5_size, BAR_5)

CONFIGFS_ATTR(pci_epf_test_, bar0_size);
CONFIGFS_ATTR(pci_epf_test_, bar1_size);
CONFIGFS_ATTR(pci_epf_test_, bar2_size);
CONFIGFS_ATTR(pci_epf_test_, bar3_size);
CONFIGFS_ATTR(pci_epf_test_, bar4_size);
CONFIGFS_ATTR(pci_epf_test_, bar5_size);

/* Attribute list exposed via the configfs group added in .add_cfs(). */
static struct configfs_attribute *pci_epf_test_attrs[] = {
	&pci_epf_test_attr_bar0_size,
	&pci_epf_test_attr_bar1_size,
	&pci_epf_test_attr_bar2_size,
	&pci_epf_test_attr_bar3_size,
	&pci_epf_test_attr_bar4_size,
	&pci_epf_test_attr_bar5_size,
	NULL,
};

static const struct config_item_type pci_epf_test_group_type = {
	.ct_attrs	= pci_epf_test_attrs,
	.ct_owner	= THIS_MODULE,
};
1392 
pci_epf_test_add_cfs(struct pci_epf * epf,struct config_group * group)1393 static struct config_group *pci_epf_test_add_cfs(struct pci_epf *epf,
1394 						 struct config_group *group)
1395 {
1396 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1397 	struct config_group *epf_group = &epf_test->group;
1398 	struct device *dev = &epf->dev;
1399 
1400 	config_group_init_type_name(epf_group, dev_name(dev),
1401 				    &pci_epf_test_group_type);
1402 
1403 	return epf_group;
1404 }
1405 
/* Matched against the EPF device name chosen via configfs. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
1412 
pci_epf_test_probe(struct pci_epf * epf,const struct pci_epf_device_id * id)1413 static int pci_epf_test_probe(struct pci_epf *epf,
1414 			      const struct pci_epf_device_id *id)
1415 {
1416 	struct pci_epf_test *epf_test;
1417 	struct device *dev = &epf->dev;
1418 	enum pci_barno bar;
1419 
1420 	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
1421 	if (!epf_test)
1422 		return -ENOMEM;
1423 
1424 	epf->header = &test_header;
1425 	epf_test->epf = epf;
1426 	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++)
1427 		epf_test->bar_size[bar] = default_bar_size[bar];
1428 
1429 	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
1430 
1431 	epf->event_ops = &pci_epf_test_event_ops;
1432 
1433 	epf_set_drvdata(epf, epf_test);
1434 	return 0;
1435 }
1436 
/* EPF callbacks: bind lifecycle plus the configfs attribute group. */
static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
	.add_cfs = pci_epf_test_add_cfs,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
1450 
pci_epf_test_init(void)1451 static int __init pci_epf_test_init(void)
1452 {
1453 	int ret;
1454 
1455 	kpcitest_workqueue = alloc_workqueue("kpcitest",
1456 				    WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU, 0);
1457 	if (!kpcitest_workqueue) {
1458 		pr_err("Failed to allocate the kpcitest work queue\n");
1459 		return -ENOMEM;
1460 	}
1461 
1462 	ret = pci_epf_register_driver(&test_driver);
1463 	if (ret) {
1464 		destroy_workqueue(kpcitest_workqueue);
1465 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
1466 		return ret;
1467 	}
1468 
1469 	return 0;
1470 }
1471 module_init(pci_epf_test_init);
1472 
pci_epf_test_exit(void)1473 static void __exit pci_epf_test_exit(void)
1474 {
1475 	if (kpcitest_workqueue)
1476 		destroy_workqueue(kpcitest_workqueue);
1477 	pci_epf_unregister_driver(&test_driver);
1478 }
1479 module_exit(pci_epf_test_exit);
1480 
/* Module metadata. */
MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1484