xref: /linux/drivers/pci/endpoint/functions/pci-epf-test.c (revision 2f2c7254931f41b5736e3ba12aaa9ac1bbeeeb92)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Test driver to test endpoint functionality
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/delay.h>
11 #include <linux/dmaengine.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/msi.h>
15 #include <linux/slab.h>
16 #include <linux/pci_ids.h>
17 #include <linux/random.h>
18 
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
21 #include <linux/pci-ep-msi.h>
22 #include <linux/pci_regs.h>
23 
24 #define IRQ_TYPE_INTX			0
25 #define IRQ_TYPE_MSI			1
26 #define IRQ_TYPE_MSIX			2
27 
28 #define COMMAND_RAISE_INTX_IRQ		BIT(0)
29 #define COMMAND_RAISE_MSI_IRQ		BIT(1)
30 #define COMMAND_RAISE_MSIX_IRQ		BIT(2)
31 #define COMMAND_READ			BIT(3)
32 #define COMMAND_WRITE			BIT(4)
33 #define COMMAND_COPY			BIT(5)
34 #define COMMAND_ENABLE_DOORBELL		BIT(6)
35 #define COMMAND_DISABLE_DOORBELL	BIT(7)
36 
37 #define STATUS_READ_SUCCESS		BIT(0)
38 #define STATUS_READ_FAIL		BIT(1)
39 #define STATUS_WRITE_SUCCESS		BIT(2)
40 #define STATUS_WRITE_FAIL		BIT(3)
41 #define STATUS_COPY_SUCCESS		BIT(4)
42 #define STATUS_COPY_FAIL		BIT(5)
43 #define STATUS_IRQ_RAISED		BIT(6)
44 #define STATUS_SRC_ADDR_INVALID		BIT(7)
45 #define STATUS_DST_ADDR_INVALID		BIT(8)
46 #define STATUS_DOORBELL_SUCCESS		BIT(9)
47 #define STATUS_DOORBELL_ENABLE_SUCCESS	BIT(10)
48 #define STATUS_DOORBELL_ENABLE_FAIL	BIT(11)
49 #define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
50 #define STATUS_DOORBELL_DISABLE_FAIL	BIT(13)
51 
52 #define FLAG_USE_DMA			BIT(0)
53 
54 #define TIMER_RESOLUTION		1
55 
56 #define CAP_UNALIGNED_ACCESS		BIT(0)
57 #define CAP_MSI				BIT(1)
58 #define CAP_MSIX			BIT(2)
59 #define CAP_INTX			BIT(3)
60 
61 static struct workqueue_struct *kpcitest_workqueue;
62 
63 struct pci_epf_test {
64 	void			*reg[PCI_STD_NUM_BARS];
65 	struct pci_epf		*epf;
66 	enum pci_barno		test_reg_bar;
67 	size_t			msix_table_offset;
68 	struct delayed_work	cmd_handler;
69 	struct dma_chan		*dma_chan_tx;
70 	struct dma_chan		*dma_chan_rx;
71 	struct dma_chan		*transfer_chan;
72 	dma_cookie_t		transfer_cookie;
73 	enum dma_status		transfer_status;
74 	struct completion	transfer_complete;
75 	bool			dma_supported;
76 	bool			dma_private;
77 	const struct pci_epc_features *epc_features;
78 	struct pci_epf_bar	db_bar;
79 };
80 
81 struct pci_epf_test_reg {
82 	__le32 magic;
83 	__le32 command;
84 	__le32 status;
85 	__le64 src_addr;
86 	__le64 dst_addr;
87 	__le32 size;
88 	__le32 checksum;
89 	__le32 irq_type;
90 	__le32 irq_number;
91 	__le32 flags;
92 	__le32 caps;
93 	__le32 doorbell_bar;
94 	__le32 doorbell_offset;
95 	__le32 doorbell_data;
96 } __packed;
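
/*
 * Host/endpoint handshake, as exercised by the host-side pci_endpoint_test
 * driver: the host fills in src_addr/dst_addr/size/irq_type/flags through
 * the BAR exposing this block, writes a single COMMAND_* bit to 'command',
 * and waits for an interrupt. pci_epf_test_cmd_handler() below clears
 * 'command', performs the operation, ORs the matching STATUS_* bits into
 * 'status', and raises the requested IRQ. All fields are little-endian.
 */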
97 
98 static struct pci_epf_header test_header = {
99 	.vendorid	= PCI_ANY_ID,
100 	.deviceid	= PCI_ANY_ID,
101 	.baseclass_code = PCI_CLASS_OTHERS,
102 	.interrupt_pin	= PCI_INTERRUPT_INTA,
103 };
104 
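/*
 * Default size, in bytes, of each of BAR0..BAR5 when the endpoint
 * controller does not mandate a fixed size; see pci_epf_test_alloc_space().
 */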
105 static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
106 
107 static void pci_epf_test_dma_callback(void *param)
108 {
109 	struct pci_epf_test *epf_test = param;
110 	struct dma_tx_state state;
111 
112 	epf_test->transfer_status =
113 		dmaengine_tx_status(epf_test->transfer_chan,
114 				    epf_test->transfer_cookie, &state);
115 	if (epf_test->transfer_status == DMA_COMPLETE ||
116 	    epf_test->transfer_status == DMA_ERROR)
117 		complete(&epf_test->transfer_complete);
118 }
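
/*
 * Note that the completion is signalled only for terminal states: an
 * in-flight or paused status from dmaengine_tx_status() keeps the waiter
 * in pci_epf_test_data_transfer() blocked until the transfer really ends.
 */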
119 
120 /**
121  * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
122  *				  data between PCIe EP and remote PCIe RC
123  * @epf_test: the EPF test device that performs the data transfer operation
124  * @dma_dst: The destination address of the data transfer. It can be a physical
125  *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
126  * @dma_src: The source address of the data transfer. It can be a physical
127  *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
128  * @len: The size of the data transfer
129  * @dma_remote: remote RC physical address
130  * @dir: DMA transfer direction
131  *
132  * Function that uses dmaengine API to transfer data between PCIe EP and remote
133  * PCIe RC. The source and destination addresses can be physical addresses
134  * given by pci_epc_mem_alloc_addr or ones obtained using the DMA mapping APIs.
135  *
136  * The function returns '0' on success and a negative value on failure.
137  */
138 static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
139 				      dma_addr_t dma_dst, dma_addr_t dma_src,
140 				      size_t len, dma_addr_t dma_remote,
141 				      enum dma_transfer_direction dir)
142 {
143 	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
144 				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
145 	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
146 	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
147 	struct pci_epf *epf = epf_test->epf;
148 	struct dma_async_tx_descriptor *tx;
149 	struct dma_slave_config sconf = {};
150 	struct device *dev = &epf->dev;
151 	int ret;
152 
153 	if (IS_ERR_OR_NULL(chan)) {
154 		dev_err(dev, "Invalid DMA memcpy channel\n");
155 		return -EINVAL;
156 	}
157 
158 	if (epf_test->dma_private) {
159 		sconf.direction = dir;
160 		if (dir == DMA_MEM_TO_DEV)
161 			sconf.dst_addr = dma_remote;
162 		else
163 			sconf.src_addr = dma_remote;
164 
165 		if (dmaengine_slave_config(chan, &sconf)) {
166 			dev_err(dev, "DMA slave config fail\n");
167 			return -EIO;
168 		}
169 		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
170 						 flags);
171 	} else {
172 		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
173 					       flags);
174 	}
175 
176 	if (!tx) {
177 		dev_err(dev, "Failed to prepare DMA memcpy\n");
178 		return -EIO;
179 	}
180 
181 	reinit_completion(&epf_test->transfer_complete);
182 	epf_test->transfer_chan = chan;
183 	tx->callback = pci_epf_test_dma_callback;
184 	tx->callback_param = epf_test;
185 	epf_test->transfer_cookie = dmaengine_submit(tx);
186 
187 	ret = dma_submit_error(epf_test->transfer_cookie);
188 	if (ret) {
189 		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
190 		goto terminate;
191 	}
192 
193 	dma_async_issue_pending(chan);
194 	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
195 	if (ret < 0) {
196 		dev_err(dev, "DMA wait_for_completion interrupted\n");
197 		goto terminate;
198 	}
199 
200 	if (epf_test->transfer_status == DMA_ERROR) {
201 		dev_err(dev, "DMA transfer failed\n");
202 		ret = -EIO;
203 	}
204 
205 terminate:
206 	dmaengine_terminate_sync(chan);
207 
208 	return ret;
209 }
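
/*
 * A minimal usage sketch for the memcpy (non-private) case, assuming 'src'
 * and 'dst' are bus addresses obtained from pci_epc_mem_map() as in
 * pci_epf_test_copy() below; 'dma_remote' is only consulted on the private
 * slave-channel path, so 0 is passed here:
 *
 *	ret = pci_epf_test_data_transfer(epf_test, dst, src, len,
 *					 0, DMA_MEM_TO_MEM);
 *	if (ret)
 *		dev_err(dev, "Data transfer failed\n");
 */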
210 
211 struct epf_dma_filter {
212 	struct device *dev;
213 	u32 dma_mask;
214 };
215 
216 static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
217 {
218 	struct epf_dma_filter *filter = node;
219 	struct dma_slave_caps caps;
220 
221 	memset(&caps, 0, sizeof(caps));
222 	dma_get_slave_caps(chan, &caps);
223 
224 	return chan->device->dev == filter->dev
225 		&& (filter->dma_mask & caps.directions);
226 }
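
/*
 * epf_dma_filter_fn() is used by pci_epf_test_init_dma_chan() below: it
 * accepts only channels that belong to the EPC's parent device and that
 * support the requested transfer direction, so the driver can pick up DMA
 * channels embedded in the endpoint controller itself ("private" DMA)
 * before falling back to a generic system memcpy channel.
 */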
227 
228 /**
229  * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
230  * @epf_test: the EPF test device that performs data transfer operation
231  *
232  * Function to initialize EPF test DMA channel.
233  */
234 static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
235 {
236 	struct pci_epf *epf = epf_test->epf;
237 	struct device *dev = &epf->dev;
238 	struct epf_dma_filter filter;
239 	struct dma_chan *dma_chan;
240 	dma_cap_mask_t mask;
241 	int ret;
242 
243 	filter.dev = epf->epc->dev.parent;
244 	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
245 
246 	dma_cap_zero(mask);
247 	dma_cap_set(DMA_SLAVE, mask);
248 	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
249 	if (!dma_chan) {
250 		dev_info(dev, "Failed to get private DMA rx channel. Falling back to a generic one\n");
251 		goto fail_back_tx;
252 	}
253 
254 	epf_test->dma_chan_rx = dma_chan;
255 
256 	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
257 	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
258 
259 	if (!dma_chan) {
260 		dev_info(dev, "Failed to get private DMA tx channel. Falling back to a generic one\n");
261 		goto fail_back_rx;
262 	}
263 
264 	epf_test->dma_chan_tx = dma_chan;
265 	epf_test->dma_private = true;
266 
267 	init_completion(&epf_test->transfer_complete);
268 
269 	return 0;
270 
271 fail_back_rx:
272 	dma_release_channel(epf_test->dma_chan_rx);
273 	epf_test->dma_chan_rx = NULL;
274 
275 fail_back_tx:
276 	dma_cap_zero(mask);
277 	dma_cap_set(DMA_MEMCPY, mask);
278 
279 	dma_chan = dma_request_chan_by_mask(&mask);
280 	if (IS_ERR(dma_chan)) {
281 		ret = PTR_ERR(dma_chan);
282 		if (ret != -EPROBE_DEFER)
283 			dev_err(dev, "Failed to get DMA channel\n");
284 		return ret;
285 	}
286 	init_completion(&epf_test->transfer_complete);
287 
288 	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;
289 
290 	return 0;
291 }
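
/*
 * On the fallback path both directions share a single generic MEMCPY
 * channel (dma_chan_tx == dma_chan_rx) and dma_private stays false, which
 * is what pci_epf_test_data_transfer() keys on when choosing between
 * dmaengine_prep_slave_single() and dmaengine_prep_dma_memcpy().
 */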
292 
293 /**
294  * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
295  * @epf_test: the EPF test device that performs data transfer operation
296  *
297  * Helper to cleanup EPF test DMA channel.
298  */
299 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
300 {
301 	if (!epf_test->dma_supported)
302 		return;
303 
304 	if (epf_test->dma_chan_tx) {
305 		dma_release_channel(epf_test->dma_chan_tx);
306 		if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
307 			epf_test->dma_chan_tx = NULL;
308 			epf_test->dma_chan_rx = NULL;
309 			return;
310 		}
311 		epf_test->dma_chan_tx = NULL;
312 	}
313 
314 	if (epf_test->dma_chan_rx) {
315 		dma_release_channel(epf_test->dma_chan_rx);
316 		epf_test->dma_chan_rx = NULL;
317 	}
318 }
319 
320 static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
321 				    const char *op, u64 size,
322 				    struct timespec64 *start,
323 				    struct timespec64 *end, bool dma)
324 {
325 	struct timespec64 ts = timespec64_sub(*end, *start);
326 	u64 rate = 0, ns;
327 
328 	/* calculate the rate */
329 	ns = timespec64_to_ns(&ts);
330 	if (ns)
331 		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
332 
333 	dev_info(&epf_test->epf->dev,
334 		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
335 		 op, size, dma ? "YES" : "NO",
336 		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
337 }
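
/*
 * Rate arithmetic: with 'size' in bytes and 'ns' the elapsed time in
 * nanoseconds,
 *
 *	rate = size * NSEC_PER_SEC / (ns * 1000)	[KB/s, 1 KB = 1000 B]
 *
 * e.g. 1 MiB moved in 2 ms gives 1048576 * 10^9 / (2 * 10^6 * 1000)
 * = 524288 KB/s.
 */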
338 
339 static void pci_epf_test_copy(struct pci_epf_test *epf_test,
340 			      struct pci_epf_test_reg *reg)
341 {
342 	int ret = 0;
343 	struct timespec64 start, end;
344 	struct pci_epf *epf = epf_test->epf;
345 	struct pci_epc *epc = epf->epc;
346 	struct device *dev = &epf->dev;
347 	struct pci_epc_map src_map, dst_map;
348 	u64 src_addr = le64_to_cpu(reg->src_addr);
349 	u64 dst_addr = le64_to_cpu(reg->dst_addr);
350 	size_t orig_size, copy_size;
351 	ssize_t map_size = 0;
352 	u32 flags = le32_to_cpu(reg->flags);
353 	u32 status = 0;
354 	void *copy_buf = NULL, *buf;
355 
356 	orig_size = copy_size = le32_to_cpu(reg->size);
357 
358 	if (flags & FLAG_USE_DMA) {
359 		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
360 			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
361 			ret = -EINVAL;
362 			goto set_status;
363 		}
364 	} else {
365 		copy_buf = kzalloc(copy_size, GFP_KERNEL);
366 		if (!copy_buf) {
367 			ret = -ENOMEM;
368 			goto set_status;
369 		}
370 		buf = copy_buf;
371 	}
372 
373 	while (copy_size) {
374 		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
375 				      src_addr, copy_size, &src_map);
376 		if (ret) {
377 			dev_err(dev, "Failed to map source address\n");
378 			status = STATUS_SRC_ADDR_INVALID;
379 			goto free_buf;
380 		}
381 
382 		ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
383 					   dst_addr, copy_size, &dst_map);
384 		if (ret) {
385 			dev_err(dev, "Failed to map destination address\n");
386 			status = STATUS_DST_ADDR_INVALID;
387 			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
388 					  &src_map);
389 			goto free_buf;
390 		}
391 
392 		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);
393 
394 		ktime_get_ts64(&start);
395 		if (flags & FLAG_USE_DMA) {
396 			ret = pci_epf_test_data_transfer(epf_test,
397 					dst_map.phys_addr, src_map.phys_addr,
398 					map_size, 0, DMA_MEM_TO_MEM);
399 			if (ret) {
400 				dev_err(dev, "Data transfer failed\n");
401 				goto unmap;
402 			}
403 		} else {
404 			memcpy_fromio(buf, src_map.virt_addr, map_size);
405 			memcpy_toio(dst_map.virt_addr, buf, map_size);
406 			buf += map_size;
407 		}
408 		ktime_get_ts64(&end);
409 
410 		copy_size -= map_size;
411 		src_addr += map_size;
412 		dst_addr += map_size;
413 
414 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
415 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
416 		map_size = 0;
417 	}
418 
419 	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
420 				flags & FLAG_USE_DMA);
421 
422 unmap:
423 	if (map_size) {
424 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
425 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
426 	}
427 
428 free_buf:
429 	kfree(copy_buf);
430 
431 set_status:
432 	if (!ret)
433 		status |= STATUS_COPY_SUCCESS;
434 	else
435 		status |= STATUS_COPY_FAIL;
436 	reg->status = cpu_to_le32(status);
437 }
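
/*
 * Note on the loop structure above (shared by the read and write handlers
 * below): pci_epc_mem_map() may map less than the requested size when the
 * controller has alignment or window-size constraints, so each handler
 * walks the transfer in map-sized chunks, advancing the PCI address and
 * unmapping as it goes. 'map_size' is reset to 0 after each unmap so the
 * error path can tell whether a mapping is still outstanding.
 */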
438 
439 static void pci_epf_test_read(struct pci_epf_test *epf_test,
440 			      struct pci_epf_test_reg *reg)
441 {
442 	int ret = 0;
443 	void *src_buf, *buf;
444 	u32 crc32;
445 	struct pci_epc_map map;
446 	phys_addr_t dst_phys_addr;
447 	struct timespec64 start, end;
448 	struct pci_epf *epf = epf_test->epf;
449 	struct pci_epc *epc = epf->epc;
450 	struct device *dev = &epf->dev;
451 	struct device *dma_dev = epf->epc->dev.parent;
452 	u64 src_addr = le64_to_cpu(reg->src_addr);
453 	size_t orig_size, src_size;
454 	ssize_t map_size = 0;
455 	u32 flags = le32_to_cpu(reg->flags);
456 	u32 checksum = le32_to_cpu(reg->checksum);
457 	u32 status = 0;
458 
459 	orig_size = src_size = le32_to_cpu(reg->size);
460 
461 	src_buf = kzalloc(src_size, GFP_KERNEL);
462 	if (!src_buf) {
463 		ret = -ENOMEM;
464 		goto set_status;
465 	}
466 	buf = src_buf;
467 
468 	while (src_size) {
469 		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
470 					   src_addr, src_size, &map);
471 		if (ret) {
472 			dev_err(dev, "Failed to map address\n");
473 			status = STATUS_SRC_ADDR_INVALID;
474 			goto free_buf;
475 		}
476 
477 		map_size = map.pci_size;
478 		if (flags & FLAG_USE_DMA) {
479 			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
480 						       DMA_FROM_DEVICE);
481 			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
482 				dev_err(dev,
483 					"Failed to map destination buffer addr\n");
484 				ret = -ENOMEM;
485 				goto unmap;
486 			}
487 
488 			ktime_get_ts64(&start);
489 			ret = pci_epf_test_data_transfer(epf_test,
490 					dst_phys_addr, map.phys_addr,
491 					map_size, src_addr, DMA_DEV_TO_MEM);
492 			if (ret)
493 				dev_err(dev, "Data transfer failed\n");
494 			ktime_get_ts64(&end);
495 
496 			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
497 					 DMA_FROM_DEVICE);
498 
499 			if (ret)
500 				goto unmap;
501 		} else {
502 			ktime_get_ts64(&start);
503 			memcpy_fromio(buf, map.virt_addr, map_size);
504 			ktime_get_ts64(&end);
505 		}
506 
507 		src_size -= map_size;
508 		src_addr += map_size;
509 		buf += map_size;
510 
511 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
512 		map_size = 0;
513 	}
514 
515 	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
516 				flags & FLAG_USE_DMA);
517 
518 	crc32 = crc32_le(~0, src_buf, orig_size);
519 	if (crc32 != checksum)
520 		ret = -EIO;
521 
522 unmap:
523 	if (map_size)
524 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
525 
526 free_buf:
527 	kfree(src_buf);
528 
529 set_status:
530 	if (!ret)
531 		status |= STATUS_READ_SUCCESS;
532 	else
533 		status |= STATUS_READ_FAIL;
534 	reg->status = cpu_to_le32(status);
535 }
536 
537 static void pci_epf_test_write(struct pci_epf_test *epf_test,
538 			       struct pci_epf_test_reg *reg)
539 {
540 	int ret = 0;
541 	void *dst_buf, *buf;
542 	struct pci_epc_map map;
543 	phys_addr_t src_phys_addr;
544 	struct timespec64 start, end;
545 	struct pci_epf *epf = epf_test->epf;
546 	struct pci_epc *epc = epf->epc;
547 	struct device *dev = &epf->dev;
548 	struct device *dma_dev = epf->epc->dev.parent;
549 	u64 dst_addr = le64_to_cpu(reg->dst_addr);
550 	size_t orig_size, dst_size;
551 	ssize_t map_size = 0;
552 	u32 flags = le32_to_cpu(reg->flags);
553 	u32 status = 0;
554 
555 	orig_size = dst_size = le32_to_cpu(reg->size);
556 
557 	dst_buf = kzalloc(dst_size, GFP_KERNEL);
558 	if (!dst_buf) {
559 		ret = -ENOMEM;
560 		goto set_status;
561 	}
562 	get_random_bytes(dst_buf, dst_size);
563 	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
564 	buf = dst_buf;
565 
566 	while (dst_size) {
567 		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
568 					   dst_addr, dst_size, &map);
569 		if (ret) {
570 			dev_err(dev, "Failed to map address\n");
571 			status = STATUS_DST_ADDR_INVALID;
572 			goto free_buf;
573 		}
574 
575 		map_size = map.pci_size;
576 		if (flags & FLAG_USE_DMA) {
577 			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
578 						       DMA_TO_DEVICE);
579 			if (dma_mapping_error(dma_dev, src_phys_addr)) {
580 				dev_err(dev,
581 					"Failed to map source buffer addr\n");
582 				ret = -ENOMEM;
583 				goto unmap;
584 			}
585 
586 			ktime_get_ts64(&start);
587 
588 			ret = pci_epf_test_data_transfer(epf_test,
589 						map.phys_addr, src_phys_addr,
590 						map_size, dst_addr,
591 						DMA_MEM_TO_DEV);
592 			if (ret)
593 				dev_err(dev, "Data transfer failed\n");
594 			ktime_get_ts64(&end);
595 
596 			dma_unmap_single(dma_dev, src_phys_addr, map_size,
597 					 DMA_TO_DEVICE);
598 
599 			if (ret)
600 				goto unmap;
601 		} else {
602 			ktime_get_ts64(&start);
603 			memcpy_toio(map.virt_addr, buf, map_size);
604 			ktime_get_ts64(&end);
605 		}
606 
607 		dst_size -= map_size;
608 		dst_addr += map_size;
609 		buf += map_size;
610 
611 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
612 		map_size = 0;
613 	}
614 
615 	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
616 				flags & FLAG_USE_DMA);
617 
618 	/*
619 	 * Wait 1ms in order for the write to complete. Without this delay, an
620 	 * L3 error is observed in the host system.
621 	 */
622 	usleep_range(1000, 2000);
623 
624 unmap:
625 	if (map_size)
626 		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
627 
628 free_buf:
629 	kfree(dst_buf);
630 
631 set_status:
632 	if (!ret)
633 		status |= STATUS_WRITE_SUCCESS;
634 	else
635 		status |= STATUS_WRITE_FAIL;
636 	reg->status = cpu_to_le32(status);
637 }
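
/*
 * Direction naming follows the endpoint's perspective: COMMAND_READ makes
 * the endpoint read a host buffer and verify the CRC32 the host stored in
 * 'checksum', while COMMAND_WRITE makes the endpoint fill a host buffer
 * with random data and publish its CRC32 in 'checksum' for the host to
 * verify.
 */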
638 
639 static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
640 				   struct pci_epf_test_reg *reg)
641 {
642 	struct pci_epf *epf = epf_test->epf;
643 	struct device *dev = &epf->dev;
644 	struct pci_epc *epc = epf->epc;
645 	u32 status = le32_to_cpu(reg->status);
646 	u32 irq_number = le32_to_cpu(reg->irq_number);
647 	u32 irq_type = le32_to_cpu(reg->irq_type);
648 	int count;
649 
650 	/*
651 	 * Set the status before raising the IRQ to ensure that the host sees
652 	 * the updated value when it gets the IRQ.
653 	 */
654 	status |= STATUS_IRQ_RAISED;
655 	WRITE_ONCE(reg->status, cpu_to_le32(status));
656 
657 	switch (irq_type) {
658 	case IRQ_TYPE_INTX:
659 		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
660 				  PCI_IRQ_INTX, 0);
661 		break;
662 	case IRQ_TYPE_MSI:
663 		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
664 		if (irq_number > count || count <= 0) {
665 			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
666 				irq_number, count);
667 			return;
668 		}
669 		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
670 				  PCI_IRQ_MSI, irq_number);
671 		break;
672 	case IRQ_TYPE_MSIX:
673 		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
674 		if (irq_number > count || count <= 0) {
675 			dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
676 				irq_number, count);
677 			return;
678 		}
679 		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
680 				  PCI_IRQ_MSIX, irq_number);
681 		break;
682 	default:
683 		dev_err(dev, "Failed to raise IRQ, unknown type\n");
684 		break;
685 	}
686 }
687 
688 static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
689 {
690 	struct pci_epf_test *epf_test = data;
691 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
692 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
693 	u32 status = le32_to_cpu(reg->status);
694 
695 	status |= STATUS_DOORBELL_SUCCESS;
696 	reg->status = cpu_to_le32(status);
697 	pci_epf_test_raise_irq(epf_test, reg);
698 
699 	return IRQ_HANDLED;
700 }
701 
702 static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
703 {
704 	struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
705 	struct pci_epf *epf = epf_test->epf;
706 
707 	free_irq(epf->db_msg[0].virq, epf_test);
708 	reg->doorbell_bar = cpu_to_le32(NO_BAR);
709 
710 	pci_epf_free_doorbell(epf);
711 }
712 
713 static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
714 					 struct pci_epf_test_reg *reg)
715 {
716 	u32 status = le32_to_cpu(reg->status);
717 	struct pci_epf *epf = epf_test->epf;
718 	struct pci_epc *epc = epf->epc;
719 	struct msi_msg *msg;
720 	enum pci_barno bar;
721 	size_t offset;
722 	int ret;
723 
724 	ret = pci_epf_alloc_doorbell(epf, 1);
725 	if (ret)
726 		goto set_status_err;
727 
728 	msg = &epf->db_msg[0].msg;
729 	bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
730 	if (bar < BAR_0)
731 		goto err_doorbell_cleanup;
732 
733 	ret = request_irq(epf->db_msg[0].virq, pci_epf_test_doorbell_handler, 0,
734 			  "pci-ep-test-doorbell", epf_test);
735 	if (ret) {
736 		dev_err(&epf->dev,
737 			"Failed to request doorbell IRQ: %d\n",
738 			epf->db_msg[0].virq);
739 		goto err_doorbell_cleanup;
740 	}
741 
742 	reg->doorbell_data = cpu_to_le32(msg->data);
743 	reg->doorbell_bar = cpu_to_le32(bar);
744 
745 	msg = &epf->db_msg[0].msg;
746 	ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
747 					 &epf_test->db_bar.phys_addr, &offset);
748 
749 	if (ret)
750 		goto err_doorbell_cleanup;
751 
752 	reg->doorbell_offset = cpu_to_le32(offset);
753 
754 	epf_test->db_bar.barno = bar;
755 	epf_test->db_bar.size = epf->bar[bar].size;
756 	epf_test->db_bar.flags = epf->bar[bar].flags;
757 
758 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
759 	if (ret)
760 		goto err_doorbell_cleanup;
761 
762 	status |= STATUS_DOORBELL_ENABLE_SUCCESS;
763 	reg->status = cpu_to_le32(status);
764 	return;
765 
766 err_doorbell_cleanup:
767 	pci_epf_test_doorbell_cleanup(epf_test);
768 set_status_err:
769 	status |= STATUS_DOORBELL_ENABLE_FAIL;
770 	reg->status = cpu_to_le32(status);
771 }
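
/*
 * The doorbell works by pointing a spare BAR's inbound translation at the
 * MSI address allocated for the endpoint by pci_epf_alloc_doorbell(): a
 * host write of 'doorbell_data' at 'doorbell_offset' inside 'doorbell_bar'
 * lands on the MSI target and fires pci_epf_test_doorbell_handler(), which
 * sets STATUS_DOORBELL_SUCCESS and raises an IRQ back to the host.
 */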
772 
773 static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
774 					  struct pci_epf_test_reg *reg)
775 {
776 	enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
777 	u32 status = le32_to_cpu(reg->status);
778 	struct pci_epf *epf = epf_test->epf;
779 	struct pci_epc *epc = epf->epc;
780 	int ret;
781 
782 	if (bar < BAR_0)
783 		goto set_status_err;
784 
785 	pci_epf_test_doorbell_cleanup(epf_test);
786 
787 	/*
788 	 * The doorbell feature temporarily overrides the inbound translation
789 	 * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
790 	 * it calls set_bar() twice without ever calling clear_bar(), as
791 	 * calling clear_bar() would clear the BAR's PCI address assigned by
792 	 * the host. Thus, when disabling the doorbell, restore the inbound
793 	 * translation to point to the memory allocated for the BAR.
794 	 */
795 	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
796 	if (ret)
797 		goto set_status_err;
798 
799 	status |= STATUS_DOORBELL_DISABLE_SUCCESS;
800 	reg->status = cpu_to_le32(status);
801 
802 	return;
803 
804 set_status_err:
805 	status |= STATUS_DOORBELL_DISABLE_FAIL;
806 	reg->status = cpu_to_le32(status);
807 }
808 
809 static void pci_epf_test_cmd_handler(struct work_struct *work)
810 {
811 	u32 command;
812 	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
813 						     cmd_handler.work);
814 	struct pci_epf *epf = epf_test->epf;
815 	struct device *dev = &epf->dev;
816 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
817 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
818 	u32 irq_type = le32_to_cpu(reg->irq_type);
819 
820 	command = le32_to_cpu(READ_ONCE(reg->command));
821 	if (!command)
822 		goto reset_handler;
823 
824 	WRITE_ONCE(reg->command, 0);
825 	WRITE_ONCE(reg->status, 0);
826 
827 	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
828 	    !epf_test->dma_supported) {
829 		dev_err(dev, "Cannot transfer data using DMA\n");
830 		goto reset_handler;
831 	}
832 
833 	if (irq_type > IRQ_TYPE_MSIX) {
834 		dev_err(dev, "Failed to detect IRQ type\n");
835 		goto reset_handler;
836 	}
837 
838 	switch (command) {
839 	case COMMAND_RAISE_INTX_IRQ:
840 	case COMMAND_RAISE_MSI_IRQ:
841 	case COMMAND_RAISE_MSIX_IRQ:
842 		pci_epf_test_raise_irq(epf_test, reg);
843 		break;
844 	case COMMAND_WRITE:
845 		pci_epf_test_write(epf_test, reg);
846 		pci_epf_test_raise_irq(epf_test, reg);
847 		break;
848 	case COMMAND_READ:
849 		pci_epf_test_read(epf_test, reg);
850 		pci_epf_test_raise_irq(epf_test, reg);
851 		break;
852 	case COMMAND_COPY:
853 		pci_epf_test_copy(epf_test, reg);
854 		pci_epf_test_raise_irq(epf_test, reg);
855 		break;
856 	case COMMAND_ENABLE_DOORBELL:
857 		pci_epf_test_enable_doorbell(epf_test, reg);
858 		pci_epf_test_raise_irq(epf_test, reg);
859 		break;
860 	case COMMAND_DISABLE_DOORBELL:
861 		pci_epf_test_disable_doorbell(epf_test, reg);
862 		pci_epf_test_raise_irq(epf_test, reg);
863 		break;
864 	default:
865 		dev_err(dev, "Invalid command 0x%x\n", command);
866 		break;
867 	}
868 
869 reset_handler:
870 	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
871 			   msecs_to_jiffies(1));
872 }
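
/*
 * Command dispatch is polled: a host write to BAR memory does not by
 * itself notify the endpoint, so the work item re-queues itself every
 * millisecond. The doorbell commands above exercise an interrupt-driven
 * alternative to this polling.
 */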
873 
874 static int pci_epf_test_set_bar(struct pci_epf *epf)
875 {
876 	int bar, ret;
877 	struct pci_epc *epc = epf->epc;
878 	struct device *dev = &epf->dev;
879 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
880 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
881 
882 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
883 		if (!epf_test->reg[bar])
884 			continue;
885 
886 		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
887 				      &epf->bar[bar]);
888 		if (ret) {
889 			pci_epf_free_space(epf, epf_test->reg[bar], bar,
890 					   PRIMARY_INTERFACE);
891 			epf_test->reg[bar] = NULL;
892 			dev_err(dev, "Failed to set BAR%d\n", bar);
893 			if (bar == test_reg_bar)
894 				return ret;
895 		}
896 	}
897 
898 	return 0;
899 }
900 
901 static void pci_epf_test_clear_bar(struct pci_epf *epf)
902 {
903 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
904 	struct pci_epc *epc = epf->epc;
905 	int bar;
906 
907 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
908 		if (!epf_test->reg[bar])
909 			continue;
910 
911 		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
912 				  &epf->bar[bar]);
913 	}
914 }
915 
916 static void pci_epf_test_set_capabilities(struct pci_epf *epf)
917 {
918 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
919 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
920 	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
921 	struct pci_epc *epc = epf->epc;
922 	u32 caps = 0;
923 
924 	if (epc->ops->align_addr)
925 		caps |= CAP_UNALIGNED_ACCESS;
926 
927 	if (epf_test->epc_features->msi_capable)
928 		caps |= CAP_MSI;
929 
930 	if (epf_test->epc_features->msix_capable)
931 		caps |= CAP_MSIX;
932 
933 	if (epf_test->epc_features->intx_capable)
934 		caps |= CAP_INTX;
935 
936 	reg->caps = cpu_to_le32(caps);
937 }
938 
939 static int pci_epf_test_epc_init(struct pci_epf *epf)
940 {
941 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
942 	struct pci_epf_header *header = epf->header;
943 	const struct pci_epc_features *epc_features = epf_test->epc_features;
944 	struct pci_epc *epc = epf->epc;
945 	struct device *dev = &epf->dev;
946 	bool linkup_notifier = false;
947 	int ret;
948 
949 	epf_test->dma_supported = true;
950 
951 	ret = pci_epf_test_init_dma_chan(epf_test);
952 	if (ret)
953 		epf_test->dma_supported = false;
954 
955 	if (epf->vfunc_no <= 1) {
956 		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
957 		if (ret) {
958 			dev_err(dev, "Configuration header write failed\n");
959 			return ret;
960 		}
961 	}
962 
963 	pci_epf_test_set_capabilities(epf);
964 
965 	ret = pci_epf_test_set_bar(epf);
966 	if (ret)
967 		return ret;
968 
969 	if (epc_features->msi_capable) {
970 		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
971 				      epf->msi_interrupts);
972 		if (ret) {
973 			dev_err(dev, "MSI configuration failed\n");
974 			return ret;
975 		}
976 	}
977 
978 	if (epc_features->msix_capable) {
979 		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
980 				       epf->msix_interrupts,
981 				       epf_test->test_reg_bar,
982 				       epf_test->msix_table_offset);
983 		if (ret) {
984 			dev_err(dev, "MSI-X configuration failed\n");
985 			return ret;
986 		}
987 	}
988 
989 	linkup_notifier = epc_features->linkup_notifier;
990 	if (!linkup_notifier)
991 		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
992 
993 	return 0;
994 }
995 
996 static void pci_epf_test_epc_deinit(struct pci_epf *epf)
997 {
998 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
999 
1000 	cancel_delayed_work_sync(&epf_test->cmd_handler);
1001 	pci_epf_test_clean_dma_chan(epf_test);
1002 	pci_epf_test_clear_bar(epf);
1003 }
1004 
1005 static int pci_epf_test_link_up(struct pci_epf *epf)
1006 {
1007 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1008 
1009 	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
1010 			   msecs_to_jiffies(1));
1011 
1012 	return 0;
1013 }
1014 
1015 static int pci_epf_test_link_down(struct pci_epf *epf)
1016 {
1017 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1018 
1019 	cancel_delayed_work_sync(&epf_test->cmd_handler);
1020 
1021 	return 0;
1022 }
1023 
1024 static const struct pci_epc_event_ops pci_epf_test_event_ops = {
1025 	.epc_init = pci_epf_test_epc_init,
1026 	.epc_deinit = pci_epf_test_epc_deinit,
1027 	.link_up = pci_epf_test_link_up,
1028 	.link_down = pci_epf_test_link_down,
1029 };
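
/*
 * Lifecycle summary: bind()/unbind() allocate and free the BAR backing
 * memory, the epc_init()/epc_deinit() events program the controller and
 * start or stop the command handler, and link_up()/link_down() merely gate
 * the polling work so a link bounce does not leave stale work running.
 */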
1030 
1031 static int pci_epf_test_alloc_space(struct pci_epf *epf)
1032 {
1033 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1034 	struct device *dev = &epf->dev;
1035 	size_t msix_table_size = 0;
1036 	size_t test_reg_bar_size;
1037 	size_t pba_size = 0;
1038 	void *base;
1039 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
1040 	enum pci_barno bar;
1041 	const struct pci_epc_features *epc_features = epf_test->epc_features;
1042 	size_t test_reg_size;
1043 
1044 	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
1045 
1046 	if (epc_features->msix_capable) {
1047 		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
1048 		epf_test->msix_table_offset = test_reg_bar_size;
1049 		/* Align to QWORD or 8 Bytes */
1050 		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
1051 	}
1052 	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
1053 
1054 	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
1055 				   epc_features, PRIMARY_INTERFACE);
1056 	if (!base) {
1057 		dev_err(dev, "Failed to allocate register space\n");
1058 		return -ENOMEM;
1059 	}
1060 	epf_test->reg[test_reg_bar] = base;
1061 
1062 	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
1063 		bar = pci_epc_get_next_free_bar(epc_features, bar);
1064 		if (bar == NO_BAR)
1065 			break;
1066 
1067 		if (bar == test_reg_bar)
1068 			continue;
1069 
1070 		if (epc_features->bar[bar].type == BAR_FIXED)
1071 			test_reg_size = epc_features->bar[bar].fixed_size;
1072 		else
1073 			test_reg_size = bar_size[bar];
1074 
1075 		base = pci_epf_alloc_space(epf, test_reg_size, bar,
1076 					   epc_features, PRIMARY_INTERFACE);
1077 		if (!base)
1078 			dev_err(dev, "Failed to allocate space for BAR%d\n",
1079 				bar);
1080 		epf_test->reg[bar] = base;
1081 	}
1082 
1083 	return 0;
1084 }
1085 
1086 static void pci_epf_test_free_space(struct pci_epf *epf)
1087 {
1088 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1089 	int bar;
1090 
1091 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
1092 		if (!epf_test->reg[bar])
1093 			continue;
1094 
1095 		pci_epf_free_space(epf, epf_test->reg[bar], bar,
1096 				   PRIMARY_INTERFACE);
1097 		epf_test->reg[bar] = NULL;
1098 	}
1099 }
1100 
1101 static int pci_epf_test_bind(struct pci_epf *epf)
1102 {
1103 	int ret;
1104 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1105 	const struct pci_epc_features *epc_features;
1106 	enum pci_barno test_reg_bar = BAR_0;
1107 	struct pci_epc *epc = epf->epc;
1108 
1109 	if (WARN_ON_ONCE(!epc))
1110 		return -EINVAL;
1111 
1112 	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
1113 	if (!epc_features) {
1114 		dev_err(&epf->dev, "epc_features not implemented\n");
1115 		return -EOPNOTSUPP;
1116 	}
1117 
1118 	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
1119 	if (test_reg_bar < 0)
1120 		return -EINVAL;
1121 
1122 	epf_test->test_reg_bar = test_reg_bar;
1123 	epf_test->epc_features = epc_features;
1124 
1125 	ret = pci_epf_test_alloc_space(epf);
1126 	if (ret)
1127 		return ret;
1128 
1129 	return 0;
1130 }
1131 
1132 static void pci_epf_test_unbind(struct pci_epf *epf)
1133 {
1134 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
1135 	struct pci_epc *epc = epf->epc;
1136 
1137 	cancel_delayed_work_sync(&epf_test->cmd_handler);
1138 	if (epc->init_complete) {
1139 		pci_epf_test_clean_dma_chan(epf_test);
1140 		pci_epf_test_clear_bar(epf);
1141 	}
1142 	pci_epf_test_free_space(epf);
1143 }
1144 
1145 static const struct pci_epf_device_id pci_epf_test_ids[] = {
1146 	{
1147 		.name = "pci_epf_test",
1148 	},
1149 	{},
1150 };
1151 
1152 static int pci_epf_test_probe(struct pci_epf *epf,
1153 			      const struct pci_epf_device_id *id)
1154 {
1155 	struct pci_epf_test *epf_test;
1156 	struct device *dev = &epf->dev;
1157 
1158 	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
1159 	if (!epf_test)
1160 		return -ENOMEM;
1161 
1162 	epf->header = &test_header;
1163 	epf_test->epf = epf;
1164 
1165 	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
1166 
1167 	epf->event_ops = &pci_epf_test_event_ops;
1168 
1169 	epf_set_drvdata(epf, epf_test);
1170 	return 0;
1171 }
1172 
1173 static const struct pci_epf_ops ops = {
1174 	.unbind	= pci_epf_test_unbind,
1175 	.bind	= pci_epf_test_bind,
1176 };
1177 
1178 static struct pci_epf_driver test_driver = {
1179 	.driver.name	= "pci_epf_test",
1180 	.probe		= pci_epf_test_probe,
1181 	.id_table	= pci_epf_test_ids,
1182 	.ops		= &ops,
1183 	.owner		= THIS_MODULE,
1184 };
1185 
1186 static int __init pci_epf_test_init(void)
1187 {
1188 	int ret;
1189 
1190 	kpcitest_workqueue = alloc_workqueue("kpcitest",
1191 					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1192 	if (!kpcitest_workqueue) {
1193 		pr_err("Failed to allocate the kpcitest work queue\n");
1194 		return -ENOMEM;
1195 	}
1196 
1197 	ret = pci_epf_register_driver(&test_driver);
1198 	if (ret) {
1199 		destroy_workqueue(kpcitest_workqueue);
1200 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
1201 		return ret;
1202 	}
1203 
1204 	return 0;
1205 }
1206 module_init(pci_epf_test_init);
1207 
1208 static void __exit pci_epf_test_exit(void)
1209 {
1210 	if (kpcitest_workqueue)
1211 		destroy_workqueue(kpcitest_workqueue);
1212 	pci_epf_unregister_driver(&test_driver);
1213 }
1214 module_exit(pci_epf_test_exit);
1215 
1216 MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
1217 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
1218 MODULE_LICENSE("GPL v2");
1219