xref: /linux/drivers/pci/endpoint/functions/pci-epf-test.c (revision 21647677ba9af2cb6bc460e17d9f29a7132c40c3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Test driver to test endpoint functionality
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/msi.h>
15 #include <linux/slab.h>
16 #include <linux/pci_ids.h>
17 #include <linux/random.h>
18 
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
21 #include <linux/pci-ep-msi.h>
22 #include <linux/pci_regs.h>
23 
/* IRQ type selectors the host writes to the irq_type test register. */
#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

/* Commands the host writes to the command test register (one bit each). */
#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)
#define COMMAND_ENABLE_DOORBELL		BIT(6)
#define COMMAND_DISABLE_DOORBELL	BIT(7)
#define COMMAND_BAR_SUBRANGE_SETUP	BIT(8)
#define COMMAND_BAR_SUBRANGE_CLEAR	BIT(9)

/* Result bits this driver sets in the status test register. */
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)
#define STATUS_DOORBELL_SUCCESS		BIT(9)
#define STATUS_DOORBELL_ENABLE_SUCCESS	BIT(10)
#define STATUS_DOORBELL_ENABLE_FAIL	BIT(11)
#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
#define STATUS_DOORBELL_DISABLE_FAIL	BIT(13)
#define STATUS_BAR_SUBRANGE_SETUP_SUCCESS	BIT(14)
#define STATUS_BAR_SUBRANGE_SETUP_FAIL		BIT(15)
#define STATUS_BAR_SUBRANGE_CLEAR_SUCCESS	BIT(16)
#define STATUS_BAR_SUBRANGE_CLEAR_FAIL		BIT(17)

/* Flags register: select DMA instead of MMIO for READ/WRITE/COPY. */
#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

/* Capability bits advertised to the host in the caps test register. */
#define CAP_UNALIGNED_ACCESS		BIT(0)
#define CAP_MSI				BIT(1)
#define CAP_MSIX			BIT(2)
#define CAP_INTX			BIT(3)
#define CAP_SUBRANGE_MAPPING		BIT(4)

/* Number of subranges a BAR is split into for the subrange mapping test. */
#define PCI_EPF_TEST_BAR_SUBRANGE_NSUB	2

/* Dedicated workqueue on which every function's cmd_handler work runs. */
static struct workqueue_struct *kpcitest_workqueue;
71 
/* Per-function run-time state of one endpoint test function. */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* CPU mapping of each BAR's backing memory */
	struct pci_epf		*epf;			/* parent EPF device */
	struct config_group	group;			/* configfs group for this function */
	enum pci_barno		test_reg_bar;		/* BAR holding struct pci_epf_test_reg */
	size_t			msix_table_offset;	/* MSI-X table offset within its BAR */
	struct delayed_work	cmd_handler;		/* polls reg->command on kpcitest_workqueue */
	struct dma_chan		*dma_chan_tx;		/* MEM_TO_DEV channel (or shared memcpy chan) */
	struct dma_chan		*dma_chan_rx;		/* DEV_TO_MEM channel (or shared memcpy chan) */
	struct dma_chan		*transfer_chan;		/* channel of the in-flight transfer */
	dma_cookie_t		transfer_cookie;	/* cookie of the in-flight transfer */
	enum dma_status		transfer_status;	/* result reported by the DMA callback */
	struct completion	transfer_complete;	/* signalled by the DMA callback */
	bool			dma_supported;		/* any DMA channel successfully acquired */
	bool			dma_private;		/* using dedicated slave channels, not memcpy */
	const struct pci_epc_features *epc_features;	/* capabilities of the owning EPC */
	struct pci_epf_bar	db_bar;			/* BAR override used by the doorbell test */
	size_t			bar_size[PCI_STD_NUM_BARS];	/* configured BAR sizes */
};
91 
/*
 * Register layout exposed in test_reg_bar and shared with the host-side
 * test driver. All fields are little-endian as seen by the host.
 */
struct pci_epf_test_reg {
	__le32 magic;
	__le32 command;		/* COMMAND_* bit written by the host */
	__le32 status;		/* STATUS_* bits set by this driver */
	__le64 src_addr;	/* host source address for READ/COPY */
	__le64 dst_addr;	/* host destination address for WRITE/COPY */
	__le32 size;		/* transfer size; BAR number for BAR_SUBRANGE_* commands */
	__le32 checksum;	/* CRC32 (crc32_le) of the transferred data */
	__le32 irq_type;	/* IRQ_TYPE_* selector */
	__le32 irq_number;	/* 1-based MSI/MSI-X vector to raise */
	__le32 flags;		/* FLAG_USE_DMA */
	__le32 caps;		/* CAP_* bits advertised to the host */
	__le32 doorbell_bar;	/* BAR carrying the doorbell; NO_BAR when disabled */
	__le32 doorbell_offset;	/* doorbell register offset within that BAR */
	__le32 doorbell_data;	/* value the host writes to ring the doorbell */
} __packed;
108 
/* Default PCI configuration space header for the test function. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* default BAR sizes, can be overridden by the user using configfs */
static size_t default_bar_size[] = { 131072, 131072, 131072, 131072, 131072, 1048576 };
119 static void pci_epf_test_dma_callback(void *param)
120 {
121 	struct pci_epf_test *epf_test = param;
122 	struct dma_tx_state state;
123 
124 	epf_test->transfer_status =
125 		dmaengine_tx_status(epf_test->transfer_chan,
126 				    epf_test->transfer_cookie, &state);
127 	if (epf_test->transfer_status == DMA_COMPLETE ||
128 	    epf_test->transfer_status == DMA_ERROR)
129 		complete(&epf_test->transfer_complete);
130 }
131 
/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	/* TX channel moves memory towards the device, RX the other way. */
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	/* For slave transfers only the local end goes into the prep call. */
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		/*
		 * Dedicated slave channel: program the remote RC address as
		 * the fixed device-side address, then prep a slave transfer
		 * of the local buffer.
		 */
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		/* Generic memcpy channel: both addresses go to the prep call. */
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	/*
	 * Completion is signalled from pci_epf_test_dma_callback() once the
	 * descriptor reaches DMA_COMPLETE or DMA_ERROR.
	 */
	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	/* Quiesce the channel on every exit path, including success. */
	dmaengine_terminate_sync(chan);

	return ret;
}
222 
/* Match criteria handed to epf_dma_filter_fn() via dma_request_channel(). */
struct epf_dma_filter {
	struct device *dev;	/* DMA device that must own the channel */
	u32 dma_mask;		/* required BIT(dma_transfer_direction) */
};
227 
228 static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
229 {
230 	struct epf_dma_filter *filter = node;
231 	struct dma_slave_caps caps;
232 
233 	memset(&caps, 0, sizeof(caps));
234 	dma_get_slave_caps(chan, &caps);
235 
236 	return chan->device->dev == filter->dev
237 		&& (filter->dma_mask & caps.directions);
238 }
239 
/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 *
 * First try to acquire a pair of dedicated slave (private) channels from
 * the EPC's own DMA device, one per direction. If either request fails,
 * fall back to a single generic memcpy channel shared by both directions.
 *
 * Return: 0 on success, a negative errno if no usable channel was found.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	/* Only accept channels provided by the EPC's parent DMA device. */
	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	/* Same filter device, opposite direction, for the TX channel. */
	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	/* Release the RX channel acquired above before falling back. */
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	/* One generic memcpy channel serves both directions. */
	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}
304 
305 /**
306  * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
307  * @epf_test: the EPF test device that performs data transfer operation
308  *
309  * Helper to cleanup EPF test DMA channel.
310  */
311 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
312 {
313 	if (!epf_test->dma_supported)
314 		return;
315 
316 	if (epf_test->dma_chan_tx) {
317 		dma_release_channel(epf_test->dma_chan_tx);
318 		if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
319 			epf_test->dma_chan_tx = NULL;
320 			epf_test->dma_chan_rx = NULL;
321 			return;
322 		}
323 		epf_test->dma_chan_tx = NULL;
324 	}
325 
326 	if (epf_test->dma_chan_rx) {
327 		dma_release_channel(epf_test->dma_chan_rx);
328 		epf_test->dma_chan_rx = NULL;
329 	}
330 }
331 
332 static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
333 				    const char *op, u64 size,
334 				    struct timespec64 *start,
335 				    struct timespec64 *end, bool dma)
336 {
337 	struct timespec64 ts = timespec64_sub(*end, *start);
338 	u64 rate = 0, ns;
339 
340 	/* calculate the rate */
341 	ns = timespec64_to_ns(&ts);
342 	if (ns)
343 		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
344 
345 	dev_info(&epf_test->epf->dev,
346 		 "%s => Size: %llu B, DMA: %s, Time: %ptSp s, Rate: %llu KB/s\n",
347 		 op, size, dma ? "YES" : "NO", &ts, rate);
348 }
349 
/*
 * COMMAND_COPY: copy reg->size bytes from host address reg->src_addr to
 * reg->dst_addr, chunked by the window sizes pci_epc_mem_map() provides,
 * either through the DMA engine or via a bounce buffer and MMIO.
 *
 * NOTE(review): if reg->size is 0 the loop never runs and the rate is
 * printed from uninitialized start/end — confirm the host never sends 0.
 */
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, copy_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;
	void *copy_buf = NULL, *buf;

	orig_size = copy_size = le32_to_cpu(reg->size);

	if (flags & FLAG_USE_DMA) {
		/* MEM_TO_MEM below needs a memcpy-capable controller. */
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		/* MMIO path bounces each chunk through a kernel buffer. */
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
					   dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		/* Only the overlap of the two windows can be copied at once. */
		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		/* Zero marks "nothing currently mapped" for the unmap label. */
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		status |= STATUS_COPY_SUCCESS;
	else
		status |= STATUS_COPY_FAIL;
	reg->status = cpu_to_le32(status);
}
449 
/*
 * COMMAND_READ: read reg->size bytes from host address reg->src_addr into
 * a local buffer (via DMA or MMIO, chunked by pci_epc_mem_map() window
 * sizes) and verify the CRC32 the host placed in reg->checksum.
 *
 * NOTE(review): if reg->size is 0 the loop never runs and the rate is
 * printed from uninitialized start/end — confirm the host never sends 0.
 */
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	size_t orig_size, src_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 checksum = le32_to_cpu(reg->checksum);
	u32 status = 0;

	orig_size = src_size = le32_to_cpu(reg->size);

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
					   src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			/* DMA writes straight into the kernel buffer. */
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			/* Unmap before bailing so the buffer is coherent. */
			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		/* Zero marks "nothing currently mapped" for the unmap label. */
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/* Verify the data against the host-provided checksum. */
	crc32 = crc32_le(~0, src_buf, orig_size);
	if (crc32 != checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		status |= STATUS_READ_SUCCESS;
	else
		status |= STATUS_READ_FAIL;
	reg->status = cpu_to_le32(status);
}
547 
/*
 * COMMAND_WRITE: fill a local buffer with random bytes, publish its CRC32
 * in reg->checksum, then write reg->size bytes to host address
 * reg->dst_addr (via DMA or MMIO, chunked by pci_epc_mem_map() windows).
 *
 * NOTE(review): if reg->size is 0 the loop never runs and the rate is
 * printed from uninitialized start/end — confirm the host never sends 0.
 */
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, dst_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;

	orig_size = dst_size = le32_to_cpu(reg->size);

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	/* Publish the checksum so the host can verify what it receives. */
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
	buf = dst_buf;

	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
					   dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			/* DMA reads straight out of the kernel buffer. */
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		/* Zero marks "nothing currently mapped" for the unmap label. */
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/*
	 * wait 1ms inorder for the write to complete. Without this delay L3
	 * error in observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		status |= STATUS_WRITE_SUCCESS;
	else
		status |= STATUS_WRITE_FAIL;
	reg->status = cpu_to_le32(status);
}
649 
650 static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
651 				   struct pci_epf_test_reg *reg)
652 {
653 	struct pci_epf *epf = epf_test->epf;
654 	struct device *dev = &epf->dev;
655 	struct pci_epc *epc = epf->epc;
656 	u32 status = le32_to_cpu(reg->status);
657 	u32 irq_number = le32_to_cpu(reg->irq_number);
658 	u32 irq_type = le32_to_cpu(reg->irq_type);
659 	int count;
660 
661 	/*
662 	 * Set the status before raising the IRQ to ensure that the host sees
663 	 * the updated value when it gets the IRQ.
664 	 */
665 	status |= STATUS_IRQ_RAISED;
666 	WRITE_ONCE(reg->status, cpu_to_le32(status));
667 
668 	switch (irq_type) {
669 	case IRQ_TYPE_INTX:
670 		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
671 				  PCI_IRQ_INTX, 0);
672 		break;
673 	case IRQ_TYPE_MSI:
674 		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
675 		if (irq_number > count || count <= 0) {
676 			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
677 				irq_number, count);
678 			return;
679 		}
680 		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
681 				  PCI_IRQ_MSI, irq_number);
682 		break;
683 	case IRQ_TYPE_MSIX:
684 		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
685 		if (irq_number > count || count <= 0) {
686 			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
687 				irq_number, count);
688 			return;
689 		}
690 		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
691 				  PCI_IRQ_MSIX, irq_number);
692 		break;
693 	default:
694 		dev_err(dev, "Failed to raise IRQ, unknown type\n");
695 		break;
696 	}
697 }
698 
699 static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
700 {
701 	struct pci_epf_test *epf_test = data;
702 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
703 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
704 	u32 status = le32_to_cpu(reg->status);
705 
706 	status |= STATUS_DOORBELL_SUCCESS;
707 	reg->status = cpu_to_le32(status);
708 	pci_epf_test_raise_irq(epf_test, reg);
709 
710 	return IRQ_HANDLED;
711 }
712 
713 static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
714 {
715 	struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
716 	struct pci_epf *epf = epf_test->epf;
717 
718 	free_irq(epf->db_msg[0].virq, epf_test);
719 	reg->doorbell_bar = cpu_to_le32(NO_BAR);
720 
721 	pci_epf_free_doorbell(epf);
722 }
723 
724 static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
725 					 struct pci_epf_test_reg *reg)
726 {
727 	u32 status = le32_to_cpu(reg->status);
728 	struct pci_epf *epf = epf_test->epf;
729 	struct pci_epc *epc = epf->epc;
730 	struct msi_msg *msg;
731 	enum pci_barno bar;
732 	size_t offset;
733 	int ret;
734 
735 	ret = pci_epf_alloc_doorbell(epf, 1);
736 	if (ret)
737 		goto set_status_err;
738 
739 	msg = &epf->db_msg[0].msg;
740 	bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
741 	if (bar < BAR_0)
742 		goto err_doorbell_cleanup;
743 
744 	ret = request_threaded_irq(epf->db_msg[0].virq, NULL,
745 				   pci_epf_test_doorbell_handler, IRQF_ONESHOT,
746 				   "pci-ep-test-doorbell", epf_test);
747 	if (ret) {
748 		dev_err(&epf->dev,
749 			"Failed to request doorbell IRQ: %d\n",
750 			epf->db_msg[0].virq);
751 		goto err_doorbell_cleanup;
752 	}
753 
754 	reg->doorbell_data = cpu_to_le32(msg->data);
755 	reg->doorbell_bar = cpu_to_le32(bar);
756 
757 	msg = &epf->db_msg[0].msg;
758 	ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
759 					 &epf_test->db_bar.phys_addr, &offset);
760 
761 	if (ret)
762 		goto err_doorbell_cleanup;
763 
764 	reg->doorbell_offset = cpu_to_le32(offset);
765 
766 	epf_test->db_bar.barno = bar;
767 	epf_test->db_bar.size = epf->bar[bar].size;
768 	epf_test->db_bar.flags = epf->bar[bar].flags;
769 
770 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
771 	if (ret)
772 		goto err_doorbell_cleanup;
773 
774 	status |= STATUS_DOORBELL_ENABLE_SUCCESS;
775 	reg->status = cpu_to_le32(status);
776 	return;
777 
778 err_doorbell_cleanup:
779 	pci_epf_test_doorbell_cleanup(epf_test);
780 set_status_err:
781 	status |= STATUS_DOORBELL_ENABLE_FAIL;
782 	reg->status = cpu_to_le32(status);
783 }
784 
785 static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
786 					  struct pci_epf_test_reg *reg)
787 {
788 	enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
789 	u32 status = le32_to_cpu(reg->status);
790 	struct pci_epf *epf = epf_test->epf;
791 	struct pci_epc *epc = epf->epc;
792 	int ret;
793 
794 	if (bar < BAR_0)
795 		goto set_status_err;
796 
797 	pci_epf_test_doorbell_cleanup(epf_test);
798 
799 	/*
800 	 * The doorbell feature temporarily overrides the inbound translation
801 	 * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
802 	 * it calls set_bar() twice without ever calling clear_bar(), as
803 	 * calling clear_bar() would clear the BAR's PCI address assigned by
804 	 * the host. Thus, when disabling the doorbell, restore the inbound
805 	 * translation to point to the memory allocated for the BAR.
806 	 */
807 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
808 	if (ret)
809 		goto set_status_err;
810 
811 	status |= STATUS_DOORBELL_DISABLE_SUCCESS;
812 	reg->status = cpu_to_le32(status);
813 
814 	return;
815 
816 set_status_err:
817 	status |= STATUS_DOORBELL_DISABLE_FAIL;
818 	reg->status = cpu_to_le32(status);
819 }
820 
821 static u8 pci_epf_test_subrange_sig_byte(enum pci_barno barno,
822 					 unsigned int subno)
823 {
824 	return 0x50 + (barno * 8) + subno;
825 }
826 
827 static void pci_epf_test_bar_subrange_setup(struct pci_epf_test *epf_test,
828 					    struct pci_epf_test_reg *reg)
829 {
830 	struct pci_epf_bar_submap *submap, *old_submap;
831 	struct pci_epf *epf = epf_test->epf;
832 	struct pci_epc *epc = epf->epc;
833 	struct pci_epf_bar *bar;
834 	unsigned int nsub = PCI_EPF_TEST_BAR_SUBRANGE_NSUB, old_nsub;
835 	/* reg->size carries BAR number for BAR_SUBRANGE_* commands. */
836 	enum pci_barno barno = le32_to_cpu(reg->size);
837 	u32 status = le32_to_cpu(reg->status);
838 	unsigned int i, phys_idx;
839 	size_t sub_size;
840 	u8 *addr;
841 	int ret;
842 
843 	if (barno >= PCI_STD_NUM_BARS) {
844 		dev_err(&epf->dev, "Invalid barno: %d\n", barno);
845 		goto err;
846 	}
847 
848 	/* Host side should've avoided test_reg_bar, this is a safeguard. */
849 	if (barno == epf_test->test_reg_bar) {
850 		dev_err(&epf->dev, "test_reg_bar cannot be used for subrange test\n");
851 		goto err;
852 	}
853 
854 	if (!epf_test->epc_features->dynamic_inbound_mapping ||
855 	    !epf_test->epc_features->subrange_mapping) {
856 		dev_err(&epf->dev, "epc driver does not support subrange mapping\n");
857 		goto err;
858 	}
859 
860 	bar = &epf->bar[barno];
861 	if (!bar->size || !bar->addr) {
862 		dev_err(&epf->dev, "bar size/addr (%zu/%p) is invalid\n",
863 			bar->size, bar->addr);
864 		goto err;
865 	}
866 
867 	if (bar->size % nsub) {
868 		dev_err(&epf->dev, "BAR size %zu is not divisible by %u\n",
869 			bar->size, nsub);
870 		goto err;
871 	}
872 
873 	sub_size = bar->size / nsub;
874 
875 	submap = kzalloc_objs(*submap, nsub);
876 	if (!submap)
877 		goto err;
878 
879 	for (i = 0; i < nsub; i++) {
880 		/* Swap the two halves so RC can verify ordering. */
881 		phys_idx = i ^ 1;
882 		submap[i].phys_addr = bar->phys_addr + (phys_idx * sub_size);
883 		submap[i].size = sub_size;
884 	}
885 
886 	old_submap = bar->submap;
887 	old_nsub = bar->num_submap;
888 
889 	bar->submap = submap;
890 	bar->num_submap = nsub;
891 
892 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
893 	if (ret) {
894 		dev_err(&epf->dev, "pci_epc_set_bar() failed: %d\n", ret);
895 		bar->submap = old_submap;
896 		bar->num_submap = old_nsub;
897 		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
898 		if (ret)
899 			dev_warn(&epf->dev, "Failed to restore the original BAR mapping: %d\n",
900 				 ret);
901 
902 		kfree(submap);
903 		goto err;
904 	}
905 	kfree(old_submap);
906 
907 	/*
908 	 * Fill deterministic signatures into the physical regions that
909 	 * each BAR subrange maps to. RC verifies these to ensure the
910 	 * submap order is really applied.
911 	 */
912 	addr = (u8 *)bar->addr;
913 	for (i = 0; i < nsub; i++) {
914 		phys_idx = i ^ 1;
915 		memset(addr + (phys_idx * sub_size),
916 		       pci_epf_test_subrange_sig_byte(barno, i),
917 		       sub_size);
918 	}
919 
920 	status |= STATUS_BAR_SUBRANGE_SETUP_SUCCESS;
921 	reg->status = cpu_to_le32(status);
922 	return;
923 
924 err:
925 	status |= STATUS_BAR_SUBRANGE_SETUP_FAIL;
926 	reg->status = cpu_to_le32(status);
927 }
928 
929 static void pci_epf_test_bar_subrange_clear(struct pci_epf_test *epf_test,
930 					    struct pci_epf_test_reg *reg)
931 {
932 	struct pci_epf *epf = epf_test->epf;
933 	struct pci_epf_bar_submap *submap;
934 	struct pci_epc *epc = epf->epc;
935 	/* reg->size carries BAR number for BAR_SUBRANGE_* commands. */
936 	enum pci_barno barno = le32_to_cpu(reg->size);
937 	u32 status = le32_to_cpu(reg->status);
938 	struct pci_epf_bar *bar;
939 	unsigned int nsub;
940 	int ret;
941 
942 	if (barno >= PCI_STD_NUM_BARS) {
943 		dev_err(&epf->dev, "Invalid barno: %d\n", barno);
944 		goto err;
945 	}
946 
947 	bar = &epf->bar[barno];
948 	submap = bar->submap;
949 	nsub = bar->num_submap;
950 
951 	if (!submap || !nsub)
952 		goto err;
953 
954 	bar->submap = NULL;
955 	bar->num_submap = 0;
956 
957 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar);
958 	if (ret) {
959 		bar->submap = submap;
960 		bar->num_submap = nsub;
961 		dev_err(&epf->dev, "pci_epc_set_bar() failed: %d\n", ret);
962 		goto err;
963 	}
964 	kfree(submap);
965 
966 	status |= STATUS_BAR_SUBRANGE_CLEAR_SUCCESS;
967 	reg->status = cpu_to_le32(status);
968 	return;
969 
970 err:
971 	status |= STATUS_BAR_SUBRANGE_CLEAR_FAIL;
972 	reg->status = cpu_to_le32(status);
973 }
974 
/*
 * Delayed work handler that polls the command register written by the
 * host and dispatches the requested test operation. It re-queues itself
 * every millisecond on kpcitest_workqueue.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 irq_type = le32_to_cpu(reg->irq_type);

	/* The host writes a COMMAND_* bit; zero means nothing to do. */
	command = le32_to_cpu(READ_ONCE(reg->command));
	if (!command)
		goto reset_handler;

	/* Acknowledge the command and clear stale status before handling. */
	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	/* Every recognized command signals its completion with an IRQ. */
	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_ENABLE_DOORBELL:
		pci_epf_test_enable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_DISABLE_DOORBELL:
		pci_epf_test_disable_doorbell(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_BAR_SUBRANGE_SETUP:
		pci_epf_test_bar_subrange_setup(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_BAR_SUBRANGE_CLEAR:
		pci_epf_test_bar_subrange_clear(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	/* Poll the command register again in 1 ms. */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
1047 
1048 static int pci_epf_test_set_bar(struct pci_epf *epf)
1049 {
1050 	int bar, ret;
1051 	struct pci_epc *epc = epf->epc;
1052 	struct device *dev = &epf->dev;
1053 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1054 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1055 
1056 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1057 		if (!epf_test->reg[bar])
1058 			continue;
1059 
1060 		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
1061 				      &epf->bar[bar]);
1062 		if (ret) {
1063 			pci_epf_free_space(epf, epf_test->reg[bar], bar,
1064 					   PRIMARY_INTERFACE);
1065 			epf_test->reg[bar] = NULL;
1066 			dev_err(dev, "Failed to set BAR%d\n", bar);
1067 			if (bar == test_reg_bar)
1068 				return ret;
1069 		}
1070 	}
1071 
1072 	return 0;
1073 }
1074 
1075 static void pci_epf_test_clear_bar(struct pci_epf *epf)
1076 {
1077 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1078 	struct pci_epc *epc = epf->epc;
1079 	int bar;
1080 
1081 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1082 		if (!epf_test->reg[bar])
1083 			continue;
1084 
1085 		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
1086 				  &epf->bar[bar]);
1087 	}
1088 }
1089 
1090 static void pci_epf_test_set_capabilities(struct pci_epf *epf)
1091 {
1092 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1093 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1094 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
1095 	struct pci_epc *epc = epf->epc;
1096 	u32 caps = 0;
1097 
1098 	if (epc->ops->align_addr)
1099 		caps |= CAP_UNALIGNED_ACCESS;
1100 
1101 	if (epf_test->epc_features->msi_capable)
1102 		caps |= CAP_MSI;
1103 
1104 	if (epf_test->epc_features->msix_capable)
1105 		caps |= CAP_MSIX;
1106 
1107 	if (epf_test->epc_features->intx_capable)
1108 		caps |= CAP_INTX;
1109 
1110 	if (epf_test->epc_features->dynamic_inbound_mapping &&
1111 	    epf_test->epc_features->subrange_mapping)
1112 		caps |= CAP_SUBRANGE_MAPPING;
1113 
1114 	reg->caps = cpu_to_le32(caps);
1115 }
1116 
/*
 * EPC-init event handler: probe DMA support, write the configuration
 * space header, advertise capabilities, program the BARs and configure
 * MSI/MSI-X. Controllers without a link-up notifier start polling for
 * host commands right away.
 */
static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	/* DMA is optional: fall back to CPU transfers if channel init fails. */
	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	/*
	 * Header is only written for vfunc_no <= 1; presumably higher
	 * virtual functions share it — NOTE(review): confirm against the
	 * EPC core's virtual-function handling.
	 */
	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	pci_epf_test_set_capabilities(epf);

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		/* MSI-X table lives inside the test register BAR. */
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	/* No link-up events available: begin polling for commands now. */
	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}
1173 
/*
 * EPC-deinit event handler: stop the command poller, release the DMA
 * channels and clear all programmed BARs. The BAR backing memory is
 * kept; it is released later by pci_epf_test_free_space() in unbind.
 */
static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}
1182 
/* Link-up event handler: start (delayed) polling for host commands. */
static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}
1192 
/* Link-down event handler: stop command polling while the link is down. */
static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}
1201 
/* EPC event callbacks wired into the EPF core via epf->event_ops. */
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};
1208 
1209 static int pci_epf_test_alloc_space(struct pci_epf *epf)
1210 {
1211 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1212 	struct device *dev = &epf->dev;
1213 	size_t msix_table_size = 0;
1214 	size_t test_reg_bar_size;
1215 	size_t pba_size = 0;
1216 	void *base;
1217 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1218 	enum pci_barno bar;
1219 	const struct pci_epc_features *epc_features = epf_test->epc_features;
1220 	size_t test_reg_size;
1221 
1222 	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
1223 
1224 	if (epc_features->msix_capable) {
1225 		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
1226 		epf_test->msix_table_offset = test_reg_bar_size;
1227 		/* Align to QWORD or 8 Bytes */
1228 		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
1229 	}
1230 	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
1231 
1232 	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
1233 				   epc_features, PRIMARY_INTERFACE);
1234 	if (!base) {
1235 		dev_err(dev, "Failed to allocated register space\n");
1236 		return -ENOMEM;
1237 	}
1238 	epf_test->reg[test_reg_bar] = base;
1239 
1240 	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
1241 		bar = pci_epc_get_next_free_bar(epc_features, bar);
1242 		if (bar == NO_BAR)
1243 			break;
1244 
1245 		if (bar == test_reg_bar)
1246 			continue;
1247 
1248 		if (epc_features->bar[bar].type == BAR_FIXED)
1249 			test_reg_size = epc_features->bar[bar].fixed_size;
1250 		else
1251 			test_reg_size = epf_test->bar_size[bar];
1252 
1253 		base = pci_epf_alloc_space(epf, test_reg_size, bar,
1254 					   epc_features, PRIMARY_INTERFACE);
1255 		if (!base)
1256 			dev_err(dev, "Failed to allocate space for BAR%d\n",
1257 				bar);
1258 		epf_test->reg[bar] = base;
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 static void pci_epf_test_free_space(struct pci_epf *epf)
1265 {
1266 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1267 	int bar;
1268 
1269 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1270 		if (!epf_test->reg[bar])
1271 			continue;
1272 
1273 		pci_epf_free_space(epf, epf_test->reg[bar], bar,
1274 				   PRIMARY_INTERFACE);
1275 		epf_test->reg[bar] = NULL;
1276 	}
1277 }
1278 
1279 static int pci_epf_test_bind(struct pci_epf *epf)
1280 {
1281 	int ret;
1282 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1283 	const struct pci_epc_features *epc_features;
1284 	enum pci_barno test_reg_bar = BAR_0;
1285 	struct pci_epc *epc = epf->epc;
1286 
1287 	if (WARN_ON_ONCE(!epc))
1288 		return -EINVAL;
1289 
1290 	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
1291 	if (!epc_features) {
1292 		dev_err(&epf->dev, "epc_features not implemented\n");
1293 		return -EOPNOTSUPP;
1294 	}
1295 
1296 	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
1297 	if (test_reg_bar < 0)
1298 		return -EINVAL;
1299 
1300 	epf_test->test_reg_bar = test_reg_bar;
1301 	epf_test->epc_features = epc_features;
1302 
1303 	ret = pci_epf_test_alloc_space(epf);
1304 	if (ret)
1305 		return ret;
1306 
1307 	return 0;
1308 }
1309 
/*
 * Undo pci_epf_test_bind(): stop command polling and, if the controller
 * completed its init, release the DMA channels and clear the programmed
 * BARs before freeing the BAR backing memory.
 */
static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}
1322 
/*
 * PCI_EPF_TEST_BAR_SIZE_R() - generate a configfs "show" handler that
 * reports the configured size (in bytes) of BAR @_id.
 */
#define PCI_EPF_TEST_BAR_SIZE_R(_name, _id)				\
static ssize_t pci_epf_test_##_name##_show(struct config_item *item,	\
					   char *page)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct pci_epf_test *epf_test =					\
		container_of(group, struct pci_epf_test, group);	\
									\
	return sysfs_emit(page, "%zu\n", epf_test->bar_size[_id]);	\
}
1333 
/*
 * PCI_EPF_TEST_BAR_SIZE_W() - generate a configfs "store" handler that
 * sets the size of BAR @_id. The size must be a power of two and can
 * only be changed before the function is bound to a controller.
 *
 * Fix: @val was declared int but passed to kstrtouint(), which takes
 * an unsigned int pointer — an incompatible pointer type.
 */
#define PCI_EPF_TEST_BAR_SIZE_W(_name, _id)				\
static ssize_t pci_epf_test_##_name##_store(struct config_item *item,	\
					    const char *page,		\
					    size_t len)			\
{									\
	struct config_group *group = to_config_group(item);		\
	struct pci_epf_test *epf_test =					\
		container_of(group, struct pci_epf_test, group);	\
	unsigned int val;						\
	int ret;							\
									\
	/*								\
	 * BAR sizes can only be modified before binding to an EPC,	\
	 * because pci_epf_test_alloc_space() is called in .bind().	\
	 */								\
	if (epf_test->epf->epc)						\
		return -EOPNOTSUPP;					\
									\
	ret = kstrtouint(page, 0, &val);				\
	if (ret)							\
		return ret;						\
									\
	if (!is_power_of_2(val))					\
		return -EINVAL;						\
									\
	epf_test->bar_size[_id] = val;					\
									\
	return len;							\
}
1362 
/* Instantiate show/store handlers and configfs attributes for BAR0-BAR5. */
PCI_EPF_TEST_BAR_SIZE_R(bar0_size, BAR_0)
PCI_EPF_TEST_BAR_SIZE_W(bar0_size, BAR_0)
PCI_EPF_TEST_BAR_SIZE_R(bar1_size, BAR_1)
PCI_EPF_TEST_BAR_SIZE_W(bar1_size, BAR_1)
PCI_EPF_TEST_BAR_SIZE_R(bar2_size, BAR_2)
PCI_EPF_TEST_BAR_SIZE_W(bar2_size, BAR_2)
PCI_EPF_TEST_BAR_SIZE_R(bar3_size, BAR_3)
PCI_EPF_TEST_BAR_SIZE_W(bar3_size, BAR_3)
PCI_EPF_TEST_BAR_SIZE_R(bar4_size, BAR_4)
PCI_EPF_TEST_BAR_SIZE_W(bar4_size, BAR_4)
PCI_EPF_TEST_BAR_SIZE_R(bar5_size, BAR_5)
PCI_EPF_TEST_BAR_SIZE_W(bar5_size, BAR_5)

CONFIGFS_ATTR(pci_epf_test_, bar0_size);
CONFIGFS_ATTR(pci_epf_test_, bar1_size);
CONFIGFS_ATTR(pci_epf_test_, bar2_size);
CONFIGFS_ATTR(pci_epf_test_, bar3_size);
CONFIGFS_ATTR(pci_epf_test_, bar4_size);
CONFIGFS_ATTR(pci_epf_test_, bar5_size);
1382 
/* NULL-terminated attribute list exposed through the configfs group. */
static struct configfs_attribute *pci_epf_test_attrs[] = {
	&pci_epf_test_attr_bar0_size,
	&pci_epf_test_attr_bar1_size,
	&pci_epf_test_attr_bar2_size,
	&pci_epf_test_attr_bar3_size,
	&pci_epf_test_attr_bar4_size,
	&pci_epf_test_attr_bar5_size,
	NULL,
};
1392 
/* configfs item type carrying the per-function BAR size attributes. */
static const struct config_item_type pci_epf_test_group_type = {
	.ct_attrs	= pci_epf_test_attrs,
	.ct_owner	= THIS_MODULE,
};
1397 
1398 static struct config_group *pci_epf_test_add_cfs(struct pci_epf *epf,
1399 						 struct config_group *group)
1400 {
1401 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1402 	struct config_group *epf_group = &epf_test->group;
1403 	struct device *dev = &epf->dev;
1404 
1405 	config_group_init_type_name(epf_group, dev_name(dev),
1406 				    &pci_epf_test_group_type);
1407 
1408 	return epf_group;
1409 }
1410 
/* Device ID table; functions are matched by the name "pci_epf_test". */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
1417 
1418 static int pci_epf_test_probe(struct pci_epf *epf,
1419 			      const struct pci_epf_device_id *id)
1420 {
1421 	struct pci_epf_test *epf_test;
1422 	struct device *dev = &epf->dev;
1423 	enum pci_barno bar;
1424 
1425 	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
1426 	if (!epf_test)
1427 		return -ENOMEM;
1428 
1429 	epf->header = &test_header;
1430 	epf_test->epf = epf;
1431 	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++)
1432 		epf_test->bar_size[bar] = default_bar_size[bar];
1433 
1434 	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
1435 
1436 	epf->event_ops = &pci_epf_test_event_ops;
1437 
1438 	epf_set_drvdata(epf, epf_test);
1439 	return 0;
1440 }
1441 
/* EPF method table for this function driver. */
static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
	.add_cfs = pci_epf_test_add_cfs,
};
1447 
/* EPF driver registration record for the "pci_epf_test" function. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
1455 
1456 static int __init pci_epf_test_init(void)
1457 {
1458 	int ret;
1459 
1460 	kpcitest_workqueue = alloc_workqueue("kpcitest",
1461 				    WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU, 0);
1462 	if (!kpcitest_workqueue) {
1463 		pr_err("Failed to allocate the kpcitest work queue\n");
1464 		return -ENOMEM;
1465 	}
1466 
1467 	ret = pci_epf_register_driver(&test_driver);
1468 	if (ret) {
1469 		destroy_workqueue(kpcitest_workqueue);
1470 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
1471 		return ret;
1472 	}
1473 
1474 	return 0;
1475 }
1476 module_init(pci_epf_test_init);
1477 
/*
 * Module unload: drain and destroy the workqueue (if init created one),
 * then unregister the function driver.
 */
static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);
1485 
/* Module metadata. */
MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1489