xref: /linux/mm/dmapool_test.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
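/*
 * Simple timing test for the dma_pool allocator: for each entry in the
 * pool_parms[] table below, the module creates a pool on a dummy device,
 * runs repeated alloc/free passes over it, and prints the elapsed time.
 * Built as the dmapool_test module (CONFIG_DMAPOOL_TEST is assumed to be
 * the enabling option).
 */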
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>

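/* Each pool configuration is timed over NR_TESTS alloc/free passes. */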
#define NR_TESTS (100)

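/* One outstanding allocation: the kernel virtual address and its DMA handle. */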
struct dma_pool_pair {
	dma_addr_t dma;
	void *v;
};

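/* Parameters handed to dma_pool_create() for one test pool. */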
struct dmapool_parms {
	size_t size;
	size_t align;
	size_t boundary;
};

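/*
 * Pool configurations exercised by the test: power-of-two block sizes with
 * matching alignment, plus one odd-sized block constrained by a 4 KiB
 * allocation boundary.
 */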
static const struct dmapool_parms pool_parms[] = {
	{ .size = 16, .align = 16, .boundary = 0 },
	{ .size = 64, .align = 64, .boundary = 0 },
	{ .size = 256, .align = 256, .boundary = 0 },
	{ .size = 1024, .align = 1024, .boundary = 0 },
	{ .size = 4096, .align = 4096, .boundary = 0 },
	{ .size = 68, .align = 32, .boundary = 4096 },
};

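/*
 * The pool under test and the dummy device that owns it; dma_mask backs
 * test_dev.dma_mask so a 64-bit mask can be set on the bare device.
 */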
static struct dma_pool *pool;
static struct device test_dev;
static u64 dma_mask;

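/*
 * Number of blocks per pass: scales inversely with the block size and is
 * clamped to the range [1024, 8192].
 */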
static inline int nr_blocks(int size)
{
	return clamp_t(int, (PAGE_SIZE / size) * 512, 1024, 8192);
}

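/*
 * Allocate 'blocks' blocks from the pool, then free them all.  On an
 * allocation failure, free whatever was already allocated and return -ENOMEM.
 */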
static int dmapool_test_alloc(struct dma_pool_pair *p, int blocks)
{
	int i;

	for (i = 0; i < blocks; i++) {
		p[i].v = dma_pool_alloc(pool, GFP_KERNEL,
					&p[i].dma);
		if (!p[i].v)
			goto pool_fail;
	}

	for (i = 0; i < blocks; i++)
		dma_pool_free(pool, p[i].v, p[i].dma);

	return 0;

pool_fail:
	for (--i; i >= 0; i--)
		dma_pool_free(pool, p[i].v, p[i].dma);
	return -ENOMEM;
}

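/*
 * Create a pool for the given parameters, time NR_TESTS rounds of
 * dmapool_test_alloc() and report the elapsed time in microseconds, then
 * destroy the pool.
 */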
static int dmapool_test_block(const struct dmapool_parms *parms)
{
	int blocks = nr_blocks(parms->size);
	ktime_t start_time, end_time;
	struct dma_pool_pair *p;
	int i, ret;

	p = kcalloc(blocks, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pool = dma_pool_create("test pool", &test_dev, parms->size,
			       parms->align, parms->boundary);
	if (!pool) {
		ret = -ENOMEM;
		goto free_pairs;
	}

	start_time = ktime_get();
	for (i = 0; i < NR_TESTS; i++) {
		ret = dmapool_test_alloc(p, blocks);
		if (ret)
			goto free_pool;
		if (need_resched())
			cond_resched();
	}
	end_time = ktime_get();

	printk("dmapool test: size:%-4zu align:%-4zu blocks:%-4d time:%llu\n",
		parms->size, parms->align, blocks,
		ktime_us_delta(end_time, start_time));

free_pool:
	dma_pool_destroy(pool);
free_pairs:
	kfree(p);
	return ret;
}

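/*
 * No-op release callback for the statically allocated test device; the
 * driver core expects every registered device to provide one.
 */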
static void dmapool_test_release(struct device *dev)
{
}

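/*
 * Module init: register the dummy "dmapool-test" device, give it a 64-bit
 * streaming and coherent DMA mask, and run the timing test for each entry
 * in pool_parms[].  The device is torn down again before returning.
 */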
static int dmapool_checks(void)
{
	int i, ret;

	ret = dev_set_name(&test_dev, "dmapool-test");
	if (ret)
		return ret;

	ret = device_register(&test_dev);
	if (ret) {
		printk("%s: register failed:%d\n", __func__, ret);
		goto put_device;
	}

	test_dev.release = dmapool_test_release;
	set_dma_ops(&test_dev, NULL);
	test_dev.dma_mask = &dma_mask;
	ret = dma_set_mask_and_coherent(&test_dev, DMA_BIT_MASK(64));
	if (ret) {
		printk("%s: mask failed:%d\n", __func__, ret);
		goto del_device;
	}

	for (i = 0; i < ARRAY_SIZE(pool_parms); i++) {
		ret = dmapool_test_block(&pool_parms[i]);
		if (ret)
			break;
	}

del_device:
	device_del(&test_dev);
put_device:
	put_device(&test_dev);
	return ret;
}

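/*
 * Empty module exit kept so the module can be unloaded; dmapool_checks()
 * tears everything down before it returns, so nothing is left to release.
 */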
static void dmapool_exit(void)
{
}

module_init(dmapool_checks);
module_exit(dmapool_exit);
MODULE_LICENSE("GPL");