xref: /linux/drivers/pci/endpoint/functions/pci-epf-test.c (revision 40286d6379aacfcc053253ef78dc78b09addffda)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Test driver to test endpoint functionality
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/msi.h>
15 #include <linux/slab.h>
16 #include <linux/pci_ids.h>
17 #include <linux/random.h>
18 
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
21 #include <linux/pci-ep-msi.h>
22 #include <linux/pci_regs.h>
23 
24 #define IRQ_TYPE_INTX			0
25 #define IRQ_TYPE_MSI			1
26 #define IRQ_TYPE_MSIX			2
27 
28 #define COMMAND_RAISE_INTX_IRQ		BIT(0)
29 #define COMMAND_RAISE_MSI_IRQ		BIT(1)
30 #define COMMAND_RAISE_MSIX_IRQ		BIT(2)
31 #define COMMAND_READ			BIT(3)
32 #define COMMAND_WRITE			BIT(4)
33 #define COMMAND_COPY			BIT(5)
34 #define COMMAND_ENABLE_DOORBELL		BIT(6)
35 #define COMMAND_DISABLE_DOORBELL	BIT(7)
36 #define COMMAND_BAR_SUBRANGE_SETUP	BIT(8)
37 #define COMMAND_BAR_SUBRANGE_CLEAR	BIT(9)
38 
39 #define STATUS_READ_SUCCESS		BIT(0)
40 #define STATUS_READ_FAIL		BIT(1)
41 #define STATUS_WRITE_SUCCESS		BIT(2)
42 #define STATUS_WRITE_FAIL		BIT(3)
43 #define STATUS_COPY_SUCCESS		BIT(4)
44 #define STATUS_COPY_FAIL		BIT(5)
45 #define STATUS_IRQ_RAISED		BIT(6)
46 #define STATUS_SRC_ADDR_INVALID		BIT(7)
47 #define STATUS_DST_ADDR_INVALID		BIT(8)
48 #define STATUS_DOORBELL_SUCCESS		BIT(9)
49 #define STATUS_DOORBELL_ENABLE_SUCCESS	BIT(10)
50 #define STATUS_DOORBELL_ENABLE_FAIL	BIT(11)
51 #define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
52 #define STATUS_DOORBELL_DISABLE_FAIL	BIT(13)
53 #define STATUS_BAR_SUBRANGE_SETUP_SUCCESS	BIT(14)
54 #define STATUS_BAR_SUBRANGE_SETUP_FAIL		BIT(15)
55 #define STATUS_BAR_SUBRANGE_CLEAR_SUCCESS	BIT(16)
56 #define STATUS_BAR_SUBRANGE_CLEAR_FAIL		BIT(17)
57 #define STATUS_NO_RESOURCE		BIT(18)
58 
59 #define FLAG_USE_DMA			BIT(0)
60 
61 #define TIMER_RESOLUTION		1
62 
63 #define CAP_UNALIGNED_ACCESS		BIT(0)
64 #define CAP_MSI				BIT(1)
65 #define CAP_MSIX			BIT(2)
66 #define CAP_INTX			BIT(3)
67 #define CAP_SUBRANGE_MAPPING		BIT(4)
68 #define CAP_DYNAMIC_INBOUND_MAPPING	BIT(5)
69 #define CAP_BAR0_RESERVED		BIT(6)
70 #define CAP_BAR1_RESERVED		BIT(7)
71 #define CAP_BAR2_RESERVED		BIT(8)
72 #define CAP_BAR3_RESERVED		BIT(9)
73 #define CAP_BAR4_RESERVED		BIT(10)
74 #define CAP_BAR5_RESERVED		BIT(11)
75 
76 #define PCI_EPF_TEST_BAR_SUBRANGE_NSUB	2
77 
78 static struct workqueue_struct *kpcitest_workqueue;
79 
/**
 * struct pci_epf_test - per-function state of the endpoint test driver
 * @reg: kernel virtual address of the backing memory for each BAR
 * @epf: the endpoint function this state belongs to
 * @group: configfs group for user-visible attributes
 * @test_reg_bar: BAR that holds the struct pci_epf_test_reg shared with host
 * @msix_table_offset: offset of the MSI-X table inside its BAR
 * @cmd_handler: delayed work polling reg->command for host requests
 * @dma_chan_tx: channel for EP-to-host (DMA_MEM_TO_DEV) transfers
 * @dma_chan_rx: channel for host-to-EP (DMA_DEV_TO_MEM) transfers
 * @transfer_chan: channel used by the currently in-flight transfer
 * @transfer_cookie: cookie of the currently in-flight transfer
 * @transfer_status: final dmaengine status reported by the DMA callback
 * @transfer_complete: completion signalled when a transfer finishes
 * @dma_supported: true if any DMA channel could be acquired
 * @dma_private: true if dedicated slave channels (not generic memcpy) are used
 * @epc_features: capabilities of the underlying endpoint controller
 * @db_bar: BAR descriptor used while the doorbell feature is enabled
 * @bar_size: user-requested size for each BAR
 */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	struct config_group	group;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*transfer_chan;
	dma_cookie_t		transfer_cookie;
	enum dma_status		transfer_status;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
	struct pci_epf_bar	db_bar;
	size_t			bar_size[PCI_STD_NUM_BARS];
};
99 
/*
 * Register block shared with the host through the test BAR. All fields are
 * little-endian because the host reads/writes them directly over PCI.
 * NOTE(review): for the BAR_SUBRANGE_* commands, @size is reused to carry a
 * BAR number rather than a transfer size (see the subrange handlers below).
 */
struct pci_epf_test_reg {
	__le32 magic;
	__le32 command;		/* COMMAND_* bit written by the host */
	__le32 status;		/* STATUS_* bits reported back to the host */
	__le64 src_addr;	/* host-side source PCI address */
	__le64 dst_addr;	/* host-side destination PCI address */
	__le32 size;		/* transfer size (or BAR number, see above) */
	__le32 checksum;	/* CRC32 used by READ/WRITE verification */
	__le32 irq_type;	/* IRQ_TYPE_INTX/MSI/MSIX */
	__le32 irq_number;
	__le32 flags;		/* FLAG_USE_DMA */
	__le32 caps;		/* CAP_* bits advertised by the endpoint */
	__le32 doorbell_bar;
	__le32 doorbell_offset;
	__le32 doorbell_data;
} __packed;
116 
/* Default configuration space header for the test function. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};
123 
/*
 * Default sizes for BAR0..BAR5 in bytes; each entry can be overridden by the
 * user using configfs.
 */
static size_t default_bar_size[] = { 131072, 131072, 131072, 131072, 131072, 1048576 };
126 
127 static void pci_epf_test_dma_callback(void *param)
128 {
129 	struct pci_epf_test *epf_test = param;
130 	struct dma_tx_state state;
131 
132 	epf_test->transfer_status =
133 		dmaengine_tx_status(epf_test->transfer_chan,
134 				    epf_test->transfer_cookie, &state);
135 	if (epf_test->transfer_status == DMA_COMPLETE ||
136 	    epf_test->transfer_status == DMA_ERROR)
137 		complete(&epf_test->transfer_complete);
138 }
139 
/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	/* TX channel moves EP memory to the host; RX moves host data to EP. */
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	/* For slave transfers the local end is whichever side is not remote. */
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		/* Dedicated slave channel: program the remote bus address. */
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		/* Generic channel: plain memory-to-memory descriptor. */
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	/* Arm the completion before submitting so the callback can't race it. */
	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	/* Quiesce the channel on every exit path, including errors. */
	dmaengine_terminate_sync(chan);

	return ret;
}
230 
/* Match criteria passed to dma_request_channel() via epf_dma_filter_fn(). */
struct epf_dma_filter {
	struct device *dev;	/* DMA device that must own the channel */
	u32 dma_mask;		/* required direction bit(s), BIT(dma_transfer_direction) */
};
235 
236 static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
237 {
238 	struct epf_dma_filter *filter = node;
239 	struct dma_slave_caps caps;
240 
241 	memset(&caps, 0, sizeof(caps));
242 	dma_get_slave_caps(chan, &caps);
243 
244 	return chan->device->dev == filter->dev
245 		&& (filter->dma_mask & caps.directions);
246 }
247 
/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 *
 * Tries to acquire a pair of dedicated slave channels (one RX, one TX) owned
 * by the EPC's parent DMA device; if either is unavailable, falls back to a
 * single generic memcpy channel shared for both directions.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	/* Only channels belonging to the EPC's DMA device qualify. */
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	/* TX slave channel missing: drop the RX one and use generic memcpy. */
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	/* One memcpy channel serves both directions. */
	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}
312 
313 /**
314  * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
315  * @epf_test: the EPF test device that performs data transfer operation
316  *
317  * Helper to cleanup EPF test DMA channel.
318  */
319 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
320 {
321 	if (!epf_test->dma_supported)
322 		return;
323 
324 	if (epf_test->dma_chan_tx) {
325 		dma_release_channel(epf_test->dma_chan_tx);
326 		if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
327 			epf_test->dma_chan_tx = NULL;
328 			epf_test->dma_chan_rx = NULL;
329 			return;
330 		}
331 		epf_test->dma_chan_tx = NULL;
332 	}
333 
334 	if (epf_test->dma_chan_rx) {
335 		dma_release_channel(epf_test->dma_chan_rx);
336 		epf_test->dma_chan_rx = NULL;
337 	}
338 }
339 
340 static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
341 				    const char *op, u64 size,
342 				    struct timespec64 *start,
343 				    struct timespec64 *end, bool dma)
344 {
345 	struct timespec64 ts = timespec64_sub(*end, *start);
346 	u64 rate = 0, ns;
347 
348 	/* calculate the rate */
349 	ns = timespec64_to_ns(&ts);
350 	if (ns)
351 		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
352 
353 	dev_info(&epf_test->epf->dev,
354 		 "%s => Size: %llu B, DMA: %s, Time: %ptSp s, Rate: %llu KB/s\n",
355 		 op, size, dma ? "YES" : "NO", &ts, rate);
356 }
357 
/*
 * COMMAND_COPY: copy reg->size bytes from host address reg->src_addr to host
 * address reg->dst_addr, chunked by the window size pci_epc_mem_map() grants
 * per iteration. Uses a memcpy DMA channel when FLAG_USE_DMA is set,
 * otherwise bounces each chunk through a kernel buffer with MMIO copies.
 * The outcome is reported via STATUS_* bits in reg->status.
 */
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, copy_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;
	void *copy_buf = NULL, *buf;

	orig_size = copy_size = le32_to_cpu(reg->size);

	if (flags & FLAG_USE_DMA) {
		/* COPY needs a memory-to-memory capable channel. */
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		/* PIO path: bounce buffer for the full transfer. */
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
					   dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		/* Advance by the smaller of the two granted windows. */
		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		/* map_size == 0 signals "nothing left to unmap" to the error path. */
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		status |= STATUS_COPY_SUCCESS;
	else
		status |= STATUS_COPY_FAIL;
	reg->status = cpu_to_le32(status);
}
457 
/*
 * COMMAND_READ: read reg->size bytes from host address reg->src_addr into a
 * local buffer (chunked by the mapping window), then verify the data against
 * the CRC32 the host stored in reg->checksum. "Read" is from the endpoint's
 * perspective: data flows host -> EP. Outcome is reported in reg->status.
 */
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	size_t orig_size, src_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 checksum = le32_to_cpu(reg->checksum);
	u32 status = 0;

	orig_size = src_size = le32_to_cpu(reg->size);

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
					   src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			/* DMA the chunk straight into the local buffer. */
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			/* Unmap before bailing out so the buffer isn't leaked. */
			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		/* map_size == 0 signals "nothing left to unmap" to the error path. */
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/* Verify integrity of the whole transfer against the host's CRC. */
	crc32 = crc32_le(~0, src_buf, orig_size);
	if (crc32 != checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		status |= STATUS_READ_SUCCESS;
	else
		status |= STATUS_READ_FAIL;
	reg->status = cpu_to_le32(status);
}
555 
/*
 * COMMAND_WRITE: fill a local buffer with random data, publish its CRC32 in
 * reg->checksum, then write reg->size bytes to host address reg->dst_addr
 * (chunked by the mapping window). "Write" is from the endpoint's
 * perspective: data flows EP -> host. Outcome is reported in reg->status.
 */
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, dst_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;

	orig_size = dst_size = le32_to_cpu(reg->size);

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	/* Random payload; the host re-computes this CRC to verify the data. */
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
	buf = dst_buf;

	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
					   dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			/* Unmap before bailing out so the buffer isn't leaked. */
			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		/* map_size == 0 signals "nothing left to unmap" to the error path. */
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay, an
	 * L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		status |= STATUS_WRITE_SUCCESS;
	else
		status |= STATUS_WRITE_FAIL;
	reg->status = cpu_to_le32(status);
}
657 
/*
 * Raise an interrupt of the type the host selected in reg->irq_type
 * (INTx, MSI or MSI-X) with vector reg->irq_number. Sets STATUS_IRQ_RAISED
 * in reg->status before triggering so the host observes it on wakeup.
 */
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = le32_to_cpu(reg->status);
	u32 irq_number = le32_to_cpu(reg->irq_number);
	u32 irq_type = le32_to_cpu(reg->irq_type);
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	status |= STATUS_IRQ_RAISED;
	WRITE_ONCE(reg->status, cpu_to_le32(status));

	switch (irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		/* Reject vectors beyond the currently configured MSI count. */
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, irq_number);
		break;
	case IRQ_TYPE_MSIX:
		/* Same bound check against the configured MSI-X table size. */
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}
706 
707 static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
708 {
709 	struct pci_epf_test *epf_test = data;
710 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
711 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
712 	u32 status = le32_to_cpu(reg->status);
713 
714 	status |= STATUS_DOORBELL_SUCCESS;
715 	reg->status = cpu_to_le32(status);
716 	pci_epf_test_raise_irq(epf_test, reg);
717 
718 	return IRQ_HANDLED;
719 }
720 
721 static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
722 {
723 	struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
724 	struct pci_epf *epf = epf_test->epf;
725 
726 	reg->doorbell_bar = cpu_to_le32(NO_BAR);
727 
728 	pci_epf_free_doorbell(epf);
729 }
730 
731 static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
732 					 struct pci_epf_test_reg *reg)
733 {
734 	u32 status = le32_to_cpu(reg->status);
735 	struct pci_epf *epf = epf_test->epf;
736 	struct pci_epc *epc = epf->epc;
737 	struct msi_msg *msg;
738 	enum pci_barno bar;
739 	size_t offset;
740 	int ret;
741 
742 	ret = pci_epf_alloc_doorbell(epf, 1);
743 	if (ret)
744 		goto set_status_err;
745 
746 	msg = &epf->db_msg[0].msg;
747 	bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
748 	if (bar < BAR_0)
749 		goto err_doorbell_cleanup;
750 
751 	ret = request_threaded_irq(epf->db_msg[0].virq, NULL,
752 				   pci_epf_test_doorbell_handler, IRQF_ONESHOT,
753 				   "pci-ep-test-doorbell", epf_test);
754 	if (ret) {
755 		dev_err(&epf->dev,
756 			"Failed to request doorbell IRQ: %d\n",
757 			epf->db_msg[0].virq);
758 		goto err_doorbell_cleanup;
759 	}
760 
761 	reg->doorbell_data = cpu_to_le32(msg->data);
762 	reg->doorbell_bar = cpu_to_le32(bar);
763 
764 	msg = &epf->db_msg[0].msg;
765 	ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
766 					 &epf_test->db_bar.phys_addr, &offset);
767 
768 	if (ret)
769 		goto err_free_irq;
770 
771 	reg->doorbell_offset = cpu_to_le32(offset);
772 
773 	epf_test->db_bar.barno = bar;
774 	epf_test->db_bar.size = epf->bar[bar].size;
775 	epf_test->db_bar.flags = epf->bar[bar].flags;
776 
777 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
778 	if (ret)
779 		goto err_free_irq;
780 
781 	status |= STATUS_DOORBELL_ENABLE_SUCCESS;
782 	reg->status = cpu_to_le32(status);
783 	return;
784 
785 err_free_irq:
786 	free_irq(epf->db_msg[0].virq, epf_test);
787 err_doorbell_cleanup:
788 	pci_epf_test_doorbell_cleanup(epf_test);
789 set_status_err:
790 	status |= STATUS_DOORBELL_ENABLE_FAIL;
791 	reg->status = cpu_to_le32(status);
792 }
793 
/*
 * COMMAND_DISABLE_DOORBELL: undo pci_epf_test_enable_doorbell() — free the
 * doorbell IRQ and MSI, and restore the BAR's original inbound translation.
 * Outcome is reported via STATUS_DOORBELL_DISABLE_* in reg->status.
 */
static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
					  struct pci_epf_test_reg *reg)
{
	enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	int ret;

	/* NO_BAR (or any negative value) means the doorbell was never enabled. */
	if (bar < BAR_0)
		goto set_status_err;

	free_irq(epf->db_msg[0].virq, epf_test);
	pci_epf_test_doorbell_cleanup(epf_test);

	/*
	 * The doorbell feature temporarily overrides the inbound translation
	 * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
	 * it calls set_bar() twice without ever calling clear_bar(), as
	 * calling clear_bar() would clear the BAR's PCI address assigned by
	 * the host. Thus, when disabling the doorbell, restore the inbound
	 * translation to point to the memory allocated for the BAR.
	 */
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
	if (ret)
		goto set_status_err;

	status |= STATUS_DOORBELL_DISABLE_SUCCESS;
	reg->status = cpu_to_le32(status);

	return;

set_status_err:
	status |= STATUS_DOORBELL_DISABLE_FAIL;
	reg->status = cpu_to_le32(status);
}
830 
831 static u8 pci_epf_test_subrange_sig_byte(enum pci_barno barno,
832 					 unsigned int subno)
833 {
834 	return 0x50 + (barno * 8) + subno;
835 }
836 
837 static void pci_epf_test_bar_subrange_setup(struct pci_epf_test *epf_test,
838 					    struct pci_epf_test_reg *reg)
839 {
840 	struct pci_epf_bar_submap *submap, *old_submap;
841 	struct pci_epf *epf = epf_test->epf;
842 	struct pci_epc *epc = epf->epc;
843 	struct pci_epf_bar *bar;
844 	unsigned int nsub = PCI_EPF_TEST_BAR_SUBRANGE_NSUB, old_nsub;
845 	/* reg->size carries BAR number for BAR_SUBRANGE_* commands. */
846 	enum pci_barno barno = le32_to_cpu(reg->size);
847 	u32 status = le32_to_cpu(reg->status);
848 	unsigned int i, phys_idx;
849 	size_t sub_size;
850 	u8 *addr;
851 	int ret;
852 
853 	if (barno >= PCI_STD_NUM_BARS) {
854 		dev_err(&epf->dev, "Invalid barno: %d\n", barno);
855 		goto err;
856 	}
857 
858 	/* Host side should've avoided test_reg_bar, this is a safeguard. */
859 	if (barno == epf_test->test_reg_bar) {
860 		dev_err(&epf->dev, "test_reg_bar cannot be used for subrange test\n");
861 		goto err;
862 	}
863 
864 	if (!epf_test->epc_features->dynamic_inbound_mapping ||
865 	    !epf_test->epc_features->subrange_mapping) {
866 		dev_err(&epf->dev, "epc driver does not support subrange mapping\n");
867 		goto err;
868 	}
869 
870 	bar = &epf->bar[barno];
871 	if (!bar->size || !bar->addr) {
872 		dev_err(&epf->dev, "bar size/addr (%zu/%p) is invalid\n",
873 			bar->size, bar->addr);
874 		goto err;
875 	}
876 
877 	if (bar->size % nsub) {
878 		dev_err(&epf->dev, "BAR size %zu is not divisible by %u\n",
879 			bar->size, nsub);
880 		goto err;
881 	}
882 
883 	sub_size = bar->size / nsub;
884 
885 	submap = kzalloc_objs(*submap, nsub);
886 	if (!submap)
887 		goto err;
888 
889 	for (i = 0; i < nsub; i++) {
890 		/* Swap the two halves so RC can verify ordering. */
891 		phys_idx = i ^ 1;
892 		submap[i].phys_addr = bar->phys_addr + (phys_idx * sub_size);
893 		submap[i].size = sub_size;
894 	}
895 
896 	old_submap = bar->submap;
897 	old_nsub = bar->num_submap;
898 
899 	bar->submap = submap;
900 	bar->num_submap = nsub;
901 
902 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
903 	if (ret) {
904 		dev_err(&epf->dev, "pci_epc_set_bar() failed: %d\n", ret);
905 		if (ret == -ENOSPC)
906 			status |= STATUS_NO_RESOURCE;
907 		bar->submap = old_submap;
908 		bar->num_submap = old_nsub;
909 		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
910 		if (ret)
911 			dev_warn(&epf->dev, "Failed to restore the original BAR mapping: %d\n",
912 				 ret);
913 
914 		kfree(submap);
915 		goto err;
916 	}
917 	kfree(old_submap);
918 
919 	/*
920 	 * Fill deterministic signatures into the physical regions that
921 	 * each BAR subrange maps to. RC verifies these to ensure the
922 	 * submap order is really applied.
923 	 */
924 	addr = (u8 *)bar->addr;
925 	for (i = 0; i < nsub; i++) {
926 		phys_idx = i ^ 1;
927 		memset(addr + (phys_idx * sub_size),
928 		       pci_epf_test_subrange_sig_byte(barno, i),
929 		       sub_size);
930 	}
931 
932 	status |= STATUS_BAR_SUBRANGE_SETUP_SUCCESS;
933 	reg->status = cpu_to_le32(status);
934 	return;
935 
936 err:
937 	status |= STATUS_BAR_SUBRANGE_SETUP_FAIL;
938 	reg->status = cpu_to_le32(status);
939 }
940 
/*
 * COMMAND_BAR_SUBRANGE_CLEAR: remove the submap installed by
 * pci_epf_test_bar_subrange_setup() from the BAR named by reg->size and
 * re-apply the BAR with a flat mapping. On set_bar() failure the submap is
 * left in place. Outcome is reported via STATUS_BAR_SUBRANGE_CLEAR_*.
 */
static void pci_epf_test_bar_subrange_clear(struct pci_epf_test *epf_test,
					    struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct pci_epf_bar_submap *submap;
	struct pci_epc *epc = epf->epc;
	/* reg->size carries BAR number for BAR_SUBRANGE_* commands. */
	enum pci_barno barno = le32_to_cpu(reg->size);
	u32 status = le32_to_cpu(reg->status);
	struct pci_epf_bar *bar;
	unsigned int nsub;
	int ret;

	if (barno >= PCI_STD_NUM_BARS) {
		dev_err(&epf->dev, "Invalid barno: %d\n", barno);
		goto err;
	}

	bar = &epf->bar[barno];
	submap = bar->submap;
	nsub = bar->num_submap;

	/* Nothing to clear if no subrange mapping is installed. */
	if (!submap || !nsub)
		goto err;

	bar->submap = NULL;
	bar->num_submap = 0;

	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
	if (ret) {
		/* Keep the old submap so the BAR state stays consistent. */
		bar->submap = submap;
		bar->num_submap = nsub;
		dev_err(&epf->dev, "pci_epc_set_bar() failed: %d\n", ret);
		goto err;
	}
	kfree(submap);

	status |= STATUS_BAR_SUBRANGE_CLEAR_SUCCESS;
	reg->status = cpu_to_le32(status);
	return;

err:
	status |= STATUS_BAR_SUBRANGE_CLEAR_FAIL;
	reg->status = cpu_to_le32(status);
}
986 
/*
 * Polling work item: read the command the host wrote into the test register
 * BAR, dispatch it to the matching handler, then re-queue itself. The command
 * and status fields are cleared before dispatch so the host can detect
 * completion via the status bits set by the handler.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 irq_type = le32_to_cpu(reg->irq_type);

	command = le32_to_cpu(READ_ONCE(reg->command));
	if (!command)
		goto reset_handler;

	/* Consume the command and reset status before handling it. */
	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_ENABLE_DOORBELL:
		pci_epf_test_enable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_DISABLE_DOORBELL:
		pci_epf_test_disable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_BAR_SUBRANGE_SETUP:
		pci_epf_test_bar_subrange_setup(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_BAR_SUBRANGE_CLEAR:
		pci_epf_test_bar_subrange_clear(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	/* Poll again shortly for the next host command. */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
1059 
1060 static int pci_epf_test_set_bar(struct pci_epf *epf)
1061 {
1062 	int bar, ret;
1063 	struct pci_epc *epc = epf->epc;
1064 	struct device *dev = &epf->dev;
1065 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1066 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1067 
1068 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1069 		if (!epf_test->reg[bar])
1070 			continue;
1071 
1072 		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
1073 				      &epf->bar[bar]);
1074 		if (ret) {
1075 			pci_epf_free_space(epf, epf_test->reg[bar], bar,
1076 					   PRIMARY_INTERFACE);
1077 			epf_test->reg[bar] = NULL;
1078 			dev_err(dev, "Failed to set BAR%d\n", bar);
1079 			if (bar == test_reg_bar)
1080 				return ret;
1081 		}
1082 	}
1083 
1084 	return 0;
1085 }
1086 
1087 static void pci_epf_test_clear_bar(struct pci_epf *epf)
1088 {
1089 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1090 	struct pci_epc *epc = epf->epc;
1091 	int bar;
1092 
1093 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1094 		if (!epf_test->reg[bar])
1095 			continue;
1096 
1097 		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
1098 				  &epf->bar[bar]);
1099 	}
1100 }
1101 
1102 static void pci_epf_test_set_capabilities(struct pci_epf *epf)
1103 {
1104 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1105 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1106 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
1107 	struct pci_epc *epc = epf->epc;
1108 	u32 caps = 0;
1109 
1110 	if (epc->ops->align_addr)
1111 		caps |= CAP_UNALIGNED_ACCESS;
1112 
1113 	if (epf_test->epc_features->msi_capable)
1114 		caps |= CAP_MSI;
1115 
1116 	if (epf_test->epc_features->msix_capable)
1117 		caps |= CAP_MSIX;
1118 
1119 	if (epf_test->epc_features->intx_capable)
1120 		caps |= CAP_INTX;
1121 
1122 	if (epf_test->epc_features->dynamic_inbound_mapping)
1123 		caps |= CAP_DYNAMIC_INBOUND_MAPPING;
1124 
1125 	if (epf_test->epc_features->dynamic_inbound_mapping &&
1126 	    epf_test->epc_features->subrange_mapping)
1127 		caps |= CAP_SUBRANGE_MAPPING;
1128 
1129 	if (epf_test->epc_features->bar[BAR_0].type == BAR_RESERVED)
1130 		caps |= CAP_BAR0_RESERVED;
1131 
1132 	if (epf_test->epc_features->bar[BAR_1].type == BAR_RESERVED)
1133 		caps |= CAP_BAR1_RESERVED;
1134 
1135 	if (epf_test->epc_features->bar[BAR_2].type == BAR_RESERVED)
1136 		caps |= CAP_BAR2_RESERVED;
1137 
1138 	if (epf_test->epc_features->bar[BAR_3].type == BAR_RESERVED)
1139 		caps |= CAP_BAR3_RESERVED;
1140 
1141 	if (epf_test->epc_features->bar[BAR_4].type == BAR_RESERVED)
1142 		caps |= CAP_BAR4_RESERVED;
1143 
1144 	if (epf_test->epc_features->bar[BAR_5].type == BAR_RESERVED)
1145 		caps |= CAP_BAR5_RESERVED;
1146 
1147 	reg->caps = cpu_to_le32(caps);
1148 }
1149 
1150 static int pci_epf_test_epc_init(struct pci_epf *epf)
1151 {
1152 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1153 	struct pci_epf_header *header = epf->header;
1154 	const struct pci_epc_features *epc_features = epf_test->epc_features;
1155 	struct pci_epc *epc = epf->epc;
1156 	struct device *dev = &epf->dev;
1157 	bool linkup_notifier = false;
1158 	int ret;
1159 
1160 	epf_test->dma_supported = true;
1161 
1162 	ret = pci_epf_test_init_dma_chan(epf_test);
1163 	if (ret)
1164 		epf_test->dma_supported = false;
1165 
1166 	if (epf->vfunc_no <= 1) {
1167 		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
1168 		if (ret) {
1169 			dev_err(dev, "Configuration header write failed\n");
1170 			return ret;
1171 		}
1172 	}
1173 
1174 	pci_epf_test_set_capabilities(epf);
1175 
1176 	ret = pci_epf_test_set_bar(epf);
1177 	if (ret)
1178 		return ret;
1179 
1180 	if (epc_features->msi_capable) {
1181 		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
1182 				      epf->msi_interrupts);
1183 		if (ret) {
1184 			dev_err(dev, "MSI configuration failed\n");
1185 			return ret;
1186 		}
1187 	}
1188 
1189 	if (epc_features->msix_capable) {
1190 		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
1191 				       epf->msix_interrupts,
1192 				       epf_test->test_reg_bar,
1193 				       epf_test->msix_table_offset);
1194 		if (ret) {
1195 			dev_err(dev, "MSI-X configuration failed\n");
1196 			return ret;
1197 		}
1198 	}
1199 
1200 	linkup_notifier = epc_features->linkup_notifier;
1201 	if (!linkup_notifier)
1202 		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
1203 
1204 	return 0;
1205 }
1206 
/*
 * pci_epf_test_epc_deinit() - tear down everything pci_epf_test_epc_init() set up
 * @epf: the endpoint function being deinitialized
 *
 * Stops the self-requeueing command poller first so nothing touches the
 * DMA channel or BARs while they are being released.
 */
static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}
1215 
/*
 * pci_epf_test_link_up() - link-up event callback
 * @epf: the endpoint function whose link came up
 *
 * Starts (or re-arms) the command poller; on controllers with a link-up
 * notifier this is where polling begins.
 *
 * Return: always 0.
 */
static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}
1225 
/*
 * pci_epf_test_link_down() - link-down event callback
 * @epf: the endpoint function whose link went down
 *
 * Synchronously stops the command poller; it will be restarted by the
 * next link-up event.
 *
 * Return: always 0.
 */
static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}
1234 
/* Controller event callbacks: init/deinit and link state transitions. */
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};
1241 
1242 static int pci_epf_test_alloc_space(struct pci_epf *epf)
1243 {
1244 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1245 	struct device *dev = &epf->dev;
1246 	size_t msix_table_size = 0;
1247 	size_t test_reg_bar_size;
1248 	size_t pba_size = 0;
1249 	void *base;
1250 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1251 	enum pci_barno bar;
1252 	const struct pci_epc_features *epc_features = epf_test->epc_features;
1253 	size_t test_reg_size;
1254 
1255 	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
1256 
1257 	if (epc_features->msix_capable) {
1258 		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
1259 		epf_test->msix_table_offset = test_reg_bar_size;
1260 		/* Align to QWORD or 8 Bytes */
1261 		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
1262 	}
1263 	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
1264 
1265 	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
1266 				   epc_features, PRIMARY_INTERFACE);
1267 	if (!base) {
1268 		dev_err(dev, "Failed to allocated register space\n");
1269 		return -ENOMEM;
1270 	}
1271 	epf_test->reg[test_reg_bar] = base;
1272 
1273 	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
1274 		bar = pci_epc_get_next_free_bar(epc_features, bar);
1275 		if (bar == NO_BAR)
1276 			break;
1277 
1278 		if (bar == test_reg_bar)
1279 			continue;
1280 
1281 		if (epc_features->bar[bar].type == BAR_FIXED)
1282 			test_reg_size = epc_features->bar[bar].fixed_size;
1283 		else
1284 			test_reg_size = epf_test->bar_size[bar];
1285 
1286 		base = pci_epf_alloc_space(epf, test_reg_size, bar,
1287 					   epc_features, PRIMARY_INTERFACE);
1288 		if (!base)
1289 			dev_err(dev, "Failed to allocate space for BAR%d\n",
1290 				bar);
1291 		epf_test->reg[bar] = base;
1292 	}
1293 
1294 	return 0;
1295 }
1296 
1297 static void pci_epf_test_free_space(struct pci_epf *epf)
1298 {
1299 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1300 	int bar;
1301 
1302 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1303 		if (!epf_test->reg[bar])
1304 			continue;
1305 
1306 		pci_epf_free_space(epf, epf_test->reg[bar], bar,
1307 				   PRIMARY_INTERFACE);
1308 		epf_test->reg[bar] = NULL;
1309 	}
1310 }
1311 
1312 static int pci_epf_test_bind(struct pci_epf *epf)
1313 {
1314 	int ret;
1315 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1316 	const struct pci_epc_features *epc_features;
1317 	enum pci_barno test_reg_bar = BAR_0;
1318 	struct pci_epc *epc = epf->epc;
1319 
1320 	if (WARN_ON_ONCE(!epc))
1321 		return -EINVAL;
1322 
1323 	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
1324 	if (!epc_features) {
1325 		dev_err(&epf->dev, "epc_features not implemented\n");
1326 		return -EOPNOTSUPP;
1327 	}
1328 
1329 	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
1330 	if (test_reg_bar < 0)
1331 		return -EINVAL;
1332 
1333 	epf_test->test_reg_bar = test_reg_bar;
1334 	epf_test->epc_features = epc_features;
1335 
1336 	ret = pci_epf_test_alloc_space(epf);
1337 	if (ret)
1338 		return ret;
1339 
1340 	return 0;
1341 }
1342 
/*
 * pci_epf_test_unbind() - unbind callback: stop work and release resources
 * @epf: the endpoint function being unbound
 *
 * DMA channels and BARs only exist if the controller finished its init,
 * hence the init_complete check; the BAR backing memory was allocated at
 * bind time and is always freed.
 */
static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}
1355 
/*
 * Generate the configfs "show" handler for one BAR's size attribute.
 * Prints the currently configured size (in bytes) for BAR @_id.
 */
#define PCI_EPF_TEST_BAR_SIZE_R(_name, _id)				\
static ssize_t pci_epf_test_##_name##_show(struct config_item *item,	\
					   char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct pci_epf_test *epf_test =					\
		container_of(group, struct pci_epf_test, group);	\
									\
	return sysfs_emit(page, "%zu\n", epf_test->bar_size[_id]);	\
}
1366 
/*
 * Generate the configfs "store" handler for one BAR's size attribute.
 * Accepts a power-of-two size for BAR @_id; rejected once the function is
 * bound to an EPC, since the BAR space has already been allocated by then.
 *
 * Note: kstrtouint() takes an unsigned int *, so val must be unsigned int
 * (an int * here is an incompatible pointer type).
 */
#define PCI_EPF_TEST_BAR_SIZE_W(_name, _id)				\
static ssize_t pci_epf_test_##_name##_store(struct config_item *item,	\
					    const char *page,		\
					    size_t len)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct pci_epf_test *epf_test =					\
		container_of(group, struct pci_epf_test, group);	\
	unsigned int val;						\
	int ret;							\
									\
	/*								\
	 * BAR sizes can only be modified before binding to an EPC,	\
	 * because pci_epf_test_alloc_space() is called in .bind().	\
	 */								\
	if (epf_test->epf->epc)						\
		return -EOPNOTSUPP;					\
									\
	ret = kstrtouint(page, 0, &val);				\
	if (ret)							\
		return ret;						\
									\
	if (!is_power_of_2(val))					\
		return -EINVAL;						\
									\
	epf_test->bar_size[_id] = val;					\
									\
	return len;							\
}
1395 
/* Instantiate show/store handlers and configfs attributes for BARs 0-5. */
PCI_EPF_TEST_BAR_SIZE_R(bar0_size, BAR_0)
PCI_EPF_TEST_BAR_SIZE_W(bar0_size, BAR_0)
PCI_EPF_TEST_BAR_SIZE_R(bar1_size, BAR_1)
PCI_EPF_TEST_BAR_SIZE_W(bar1_size, BAR_1)
PCI_EPF_TEST_BAR_SIZE_R(bar2_size, BAR_2)
PCI_EPF_TEST_BAR_SIZE_W(bar2_size, BAR_2)
PCI_EPF_TEST_BAR_SIZE_R(bar3_size, BAR_3)
PCI_EPF_TEST_BAR_SIZE_W(bar3_size, BAR_3)
PCI_EPF_TEST_BAR_SIZE_R(bar4_size, BAR_4)
PCI_EPF_TEST_BAR_SIZE_W(bar4_size, BAR_4)
PCI_EPF_TEST_BAR_SIZE_R(bar5_size, BAR_5)
PCI_EPF_TEST_BAR_SIZE_W(bar5_size, BAR_5)

CONFIGFS_ATTR(pci_epf_test_, bar0_size);
CONFIGFS_ATTR(pci_epf_test_, bar1_size);
CONFIGFS_ATTR(pci_epf_test_, bar2_size);
CONFIGFS_ATTR(pci_epf_test_, bar3_size);
CONFIGFS_ATTR(pci_epf_test_, bar4_size);
CONFIGFS_ATTR(pci_epf_test_, bar5_size);
1415 
/* Attribute list and item type backing the per-function configfs group. */
static struct configfs_attribute *pci_epf_test_attrs[] = {
	&pci_epf_test_attr_bar0_size,
	&pci_epf_test_attr_bar1_size,
	&pci_epf_test_attr_bar2_size,
	&pci_epf_test_attr_bar3_size,
	&pci_epf_test_attr_bar4_size,
	&pci_epf_test_attr_bar5_size,
	NULL,
};

static const struct config_item_type pci_epf_test_group_type = {
	.ct_attrs	= pci_epf_test_attrs,
	.ct_owner	= THIS_MODULE,
};
1430 
1431 static struct config_group *pci_epf_test_add_cfs(struct pci_epf *epf,
1432 						 struct config_group *group)
1433 {
1434 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1435 	struct config_group *epf_group = &epf_test->group;
1436 	struct device *dev = &epf->dev;
1437 
1438 	config_group_init_type_name(epf_group, dev_name(dev),
1439 				    &pci_epf_test_group_type);
1440 
1441 	return epf_group;
1442 }
1443 
/* Device ID table: matched by function name written to configfs. */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
1450 
1451 static int pci_epf_test_probe(struct pci_epf *epf,
1452 			      const struct pci_epf_device_id *id)
1453 {
1454 	struct pci_epf_test *epf_test;
1455 	struct device *dev = &epf->dev;
1456 	enum pci_barno bar;
1457 
1458 	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
1459 	if (!epf_test)
1460 		return -ENOMEM;
1461 
1462 	epf->header = &test_header;
1463 	epf_test->epf = epf;
1464 	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++)
1465 		epf_test->bar_size[bar] = default_bar_size[bar];
1466 
1467 	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
1468 
1469 	epf->event_ops = &pci_epf_test_event_ops;
1470 
1471 	epf_set_drvdata(epf, epf_test);
1472 	return 0;
1473 }
1474 
/* Function operations invoked by the EPF core on bind/unbind/configfs. */
static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
	.add_cfs = pci_epf_test_add_cfs,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
1488 
1489 static int __init pci_epf_test_init(void)
1490 {
1491 	int ret;
1492 
1493 	kpcitest_workqueue = alloc_workqueue("kpcitest",
1494 				    WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU, 0);
1495 	if (!kpcitest_workqueue) {
1496 		pr_err("Failed to allocate the kpcitest work queue\n");
1497 		return -ENOMEM;
1498 	}
1499 
1500 	ret = pci_epf_register_driver(&test_driver);
1501 	if (ret) {
1502 		destroy_workqueue(kpcitest_workqueue);
1503 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
1504 		return ret;
1505 	}
1506 
1507 	return 0;
1508 }
1509 module_init(pci_epf_test_init);
1510 
1511 static void __exit pci_epf_test_exit(void)
1512 {
1513 	if (kpcitest_workqueue)
1514 		destroy_workqueue(kpcitest_workqueue);
1515 	pci_epf_unregister_driver(&test_driver);
1516 }
1517 module_exit(pci_epf_test_exit);
1518 
/* Module metadata. */
MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1522