xref: /linux/drivers/soc/fsl/qe/qe_common.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common CPM code
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
 *
 * Some parts derived from commproc.c/cpm2_common.c, which is:
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
 * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
 * 2006 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 */
#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <soc/fsl/qe/qe.h>

static struct gen_pool *muram_pool;
static DEFINE_SPINLOCK(cpm_muram_lock);
static void __iomem *muram_vbase;
static phys_addr_t muram_pbase;

struct muram_block {
	struct list_head head;
	s32 start;
	int size;
};

static LIST_HEAD(muram_block_list);

/* max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
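
/*
 * gen_pool_alloc() returns 0 to signal failure, so every address handed to
 * the pool is biased by a non-zero offset; this keeps a legitimate
 * allocation at muram offset 0 distinguishable from an allocation failure.
 * The bias is subtracted again before an offset is returned to callers.
 */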
#define GENPOOL_OFFSET		(4096 * 8)

int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	__be32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

	if (muram_pbase)
		return 0;

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
	if (!np) {
		/* try legacy bindings */
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			pr_err("Cannot find CPM muram data node\n");
			ret = -ENODEV;
			goto out_muram;
		}
	}

	muram_pool = gen_pool_create(0, -1);
	if (!muram_pool) {
		pr_err("Cannot allocate memory pool for CPM/QE muram\n");
		ret = -ENOMEM;
		goto out_muram;
	}
	muram_pbase = of_translate_address(np, zero);
	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
		pr_err("Cannot translate zero through CPM muram node\n");
		ret = -ENODEV;
		goto out_pool;
	}

	while (of_address_to_resource(np, i++, &r) == 0) {
		if (r.end > max)
			max = r.end;
		ret = gen_pool_add(muram_pool, r.start - muram_pbase +
				   GENPOOL_OFFSET, resource_size(&r), -1);
		if (ret) {
			pr_err("QE: couldn't add muram to pool!\n");
			goto out_pool;
		}
	}

	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
	if (!muram_vbase) {
		pr_err("Cannot map QE muram\n");
		ret = -ENOMEM;
		goto out_pool;
	}
	goto out_muram;
out_pool:
	gen_pool_destroy(muram_pool);
out_muram:
	of_node_put(np);
	return ret;
}

/*
 * cpm_muram_alloc_common - cpm_muram_alloc common code
 * @size: number of bytes to allocate
 * @algo: genalloc allocation algorithm to use
 * @data: data for genalloc's algorithm
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure.
 */
static s32 cpm_muram_alloc_common(unsigned long size,
				  genpool_algo_t algo, void *data)
{
	struct muram_block *entry;
	s32 start;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;
	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
	if (!start) {
		kfree(entry);
		return -ENOMEM;
	}
	start = start - GENPOOL_OFFSET;
	memset_io(cpm_muram_addr(start), 0, size);
	entry->start = start;
	entry->size = size;
	list_add(&entry->head, &muram_block_list);

	return start;
}

/**
 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
s32 cpm_muram_alloc(unsigned long size, unsigned long align)
{
	s32 start;
	unsigned long flags;
	struct genpool_data_align muram_pool_data;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data.align = align;
	start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
				       &muram_pool_data);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc);
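
/*
 * Usage sketch (illustrative only, not used by this file): a typical caller
 * allocates a region, converts the returned offset to a CPU pointer with
 * cpm_muram_addr(), and frees it again when done. The size, alignment and
 * data below are arbitrary example values.
 */
static int __maybe_unused example_muram_alloc_usage(void)
{
	u8 init[8] = { 0 };
	void __iomem *va;
	s32 offset;

	/* Grab 64 bytes of muram, 32-byte aligned; a muram offset comes back. */
	offset = cpm_muram_alloc(64, 32);
	if (offset < 0)
		return offset;

	/* The offset is relative to the muram base; map it to a CPU pointer. */
	va = cpm_muram_addr(offset);
	memcpy_toio(va, init, sizeof(init));

	/* Release the region once the hardware no longer references it. */
	cpm_muram_free(offset);
	return 0;
}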

/**
 * cpm_muram_free - free a chunk of multi-user ram
 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
 */
void cpm_muram_free(s32 offset)
{
	unsigned long flags;
	int size;
	struct muram_block *tmp;

	if (offset < 0)
		return;

	size = 0;
	spin_lock_irqsave(&cpm_muram_lock, flags);
	list_for_each_entry(tmp, &muram_block_list, head) {
		if (tmp->start == offset) {
			size = tmp->size;
			list_del(&tmp->head);
			kfree(tmp);
			break;
		}
	}
	gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
}
EXPORT_SYMBOL(cpm_muram_free);

static void devm_cpm_muram_release(struct device *dev, void *res)
{
	s32 *info = res;

	cpm_muram_free(*info);
}

/**
 * devm_cpm_muram_alloc - Resource-managed cpm_muram_alloc
 * @dev: Device to allocate memory for
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure as cpm_muram_alloc() does.
 * Use cpm_muram_addr() to get the virtual address of the area.
 *
 * Unlike cpm_muram_alloc(), the memory allocated by this resource-managed
 * version is automatically freed on driver detach, so cpm_muram_free()
 * must not be called to release the allocated memory.
 */
s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
			 unsigned long align)
{
	s32 info;
	s32 *dr;

	dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	info = cpm_muram_alloc(size, align);
	if (info >= 0) {
		*dr = info;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return info;
}
EXPORT_SYMBOL(devm_cpm_muram_alloc);
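
/*
 * Usage sketch (illustrative only, not used by this file): with the
 * resource-managed variant the allocation is tied to @dev and released
 * automatically on driver detach, so neither the error path nor the remove
 * path needs an explicit cpm_muram_free(). Sizes are example values.
 */
static int __maybe_unused example_devm_muram_alloc_usage(struct device *dev)
{
	s32 offset;

	/* 128 bytes of muram, 64-byte aligned, owned by @dev */
	offset = devm_cpm_muram_alloc(dev, 128, 64);
	if (offset < 0) {
		dev_err(dev, "muram allocation failed: %d\n", offset);
		return offset;
	}

	/* Use cpm_muram_addr(offset) as usual; no matching free is needed. */
	return 0;
}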

/**
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: offset of allocation start address
 * @size: number of bytes to allocate
 *
 * This function returns @offset if the area was available, a negative
 * errno otherwise.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
	s32 start;
	unsigned long flags;
	struct genpool_data_fixed muram_pool_data_fixed;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
	start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
				       &muram_pool_data_fixed);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
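
/*
 * Usage sketch (illustrative only, not used by this file): the fixed variant
 * is for structures whose muram location is dictated by the hardware, such
 * as a parameter RAM page. The offset and size below are made-up values.
 */
static int __maybe_unused example_muram_alloc_fixed_usage(void)
{
	s32 ret;

	/* Reserve 256 bytes starting exactly at muram offset 0x8800. */
	ret = cpm_muram_alloc_fixed(0x8800, 256);
	if (ret < 0)
		return ret;

	/* On success the return value equals the requested offset. */
	return 0;
}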

/**
 * devm_cpm_muram_alloc_fixed - Resource-managed cpm_muram_alloc_fixed
 * @dev: Device to allocate memory for
 * @offset: offset of allocation start address
 * @size: number of bytes to allocate
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure as cpm_muram_alloc_fixed() does.
 * Use cpm_muram_addr() to get the virtual address of the area.
 *
 * Unlike cpm_muram_alloc_fixed(), the memory allocated by this
 * resource-managed version is automatically freed on driver detach, so
 * cpm_muram_free() must not be called to release the allocated memory.
 */
s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset,
			       unsigned long size)
{
	s32 info;
	s32 *dr;

	dr = devres_alloc(devm_cpm_muram_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	info = cpm_muram_alloc_fixed(offset, size);
	if (info >= 0) {
		*dr = info;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return info;
}
EXPORT_SYMBOL(devm_cpm_muram_alloc_fixed);

/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
	return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);
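
/**
 * cpm_muram_offset - turn a muram virtual address back into a muram offset
 * @addr: virtual address obtained from cpm_muram_addr()
 */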
unsigned long cpm_muram_offset(const void __iomem *addr)
{
	return addr - muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);

/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @addr: virtual address from cpm_muram_addr() to convert
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
	return muram_pbase + (addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
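
/*
 * Usage sketch (illustrative only, not used by this file): a controller
 * usually needs the bus address of a muram region rather than the CPU
 * pointer, e.g. to program a buffer descriptor.
 */
static dma_addr_t __maybe_unused example_muram_dma_usage(s32 offset)
{
	void __iomem *va = cpm_muram_addr(offset);

	/* Bus address to hand to the device for the same muram region. */
	return cpm_muram_dma(va);
}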

/**
 * cpm_muram_free_addr - free a chunk of multi-user ram by virtual address
 * @addr: virtual address, as returned by cpm_muram_addr(), of the chunk to free
 *
 * As cpm_muram_free(), but takes the virtual address rather than the
 * muram offset.
 */
void cpm_muram_free_addr(const void __iomem *addr)
{
	if (!addr)
		return;
	cpm_muram_free(cpm_muram_offset(addr));
}
EXPORT_SYMBOL(cpm_muram_free_addr);