// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common CPM code
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
 *
 * Some parts derived from commproc.c/cpm2_common.c, which is:
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
 * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
 * 2006 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 */
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <soc/fsl/qe/qe.h>

static struct gen_pool *muram_pool;
static spinlock_t cpm_muram_lock;
static void __iomem *muram_vbase;
static phys_addr_t muram_pbase;

struct muram_block {
	struct list_head head;
	s32 start;
	int size;
};

static LIST_HEAD(muram_block_list);

/* max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
/*
 * Bias added to every address handed to genalloc so that a block starting
 * at muram offset 0 still comes back as a non-zero pool address
 * (gen_pool_alloc_algo() uses 0 to signal failure).
 */
#define GENPOOL_OFFSET		(4096 * 8)

int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	__be32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

	if (muram_pbase)
		return 0;

	spin_lock_init(&cpm_muram_lock);
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
	if (!np) {
		/* try legacy bindings */
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			pr_err("Cannot find CPM muram data node\n");
			ret = -ENODEV;
			goto out_muram;
		}
	}

	muram_pool = gen_pool_create(0, -1);
	if (!muram_pool) {
		pr_err("Cannot allocate memory pool for CPM/QE muram\n");
		ret = -ENOMEM;
		goto out_muram;
	}
	muram_pbase = of_translate_address(np, zero);
	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
		pr_err("Cannot translate zero through CPM muram node\n");
		ret = -ENODEV;
		goto out_pool;
	}

	while (of_address_to_resource(np, i++, &r) == 0) {
		if (r.end > max)
			max = r.end;
		ret = gen_pool_add(muram_pool, r.start - muram_pbase +
				   GENPOOL_OFFSET, resource_size(&r), -1);
		if (ret) {
			pr_err("QE: couldn't add muram to pool!\n");
			goto out_pool;
		}
	}

	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
	if (!muram_vbase) {
		pr_err("Cannot map QE muram\n");
		ret = -ENOMEM;
		goto out_pool;
	}
	goto out_muram;
out_pool:
	gen_pool_destroy(muram_pool);
out_muram:
	of_node_put(np);
	return ret;
}
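
/*
 * Hedged, illustrative sketch of a typical caller, not an in-tree user:
 * initialize the muram pool once, then carve out a small, aligned block
 * (e.g. for a buffer descriptor ring).  The function name and the 64/32
 * size and alignment below are made up for illustration only.
 */
static int __maybe_unused cpm_muram_usage_sketch(void)
{
	void __iomem *bd_base;
	s32 offset;
	int ret;

	ret = cpm_muram_init();
	if (ret)
		return ret;

	/* 64 bytes, 32-byte aligned; returns a muram offset or -errno */
	offset = cpm_muram_alloc(64, 32);
	if (offset < 0)
		return offset;

	/* CPU-visible mapping of the block; the allocator already zeroed it */
	bd_base = cpm_muram_addr(offset);
	/* muram is device memory: access it only through I/O accessors */
	memset_io(bd_base, 0, 64);

	cpm_muram_free(offset);
	return 0;
}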

/*
 * cpm_muram_alloc_common - cpm_muram_alloc common code
 * @size: number of bytes to allocate
 * @algo: algorithm for alloc.
 * @data: data for genalloc's algorithm.
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure.
 */
static s32 cpm_muram_alloc_common(unsigned long size,
				  genpool_algo_t algo, void *data)
{
	struct muram_block *entry;
	s32 start;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;
	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
	if (!start) {
		/* gen_pool_alloc_algo() returns 0 on failure */
		kfree(entry);
		return -ENOMEM;
	}
	/* convert the biased pool address back into a muram offset */
	start = start - GENPOOL_OFFSET;
	memset_io(cpm_muram_addr(start), 0, size);
	entry->start = start;
	entry->size = size;
	list_add(&entry->head, &muram_block_list);

	return start;
}

/*
 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
s32 cpm_muram_alloc(unsigned long size, unsigned long align)
{
	s32 start;
	unsigned long flags;
	struct genpool_data_align muram_pool_data;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data.align = align;
	start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
				       &muram_pool_data);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc);

/**
 * cpm_muram_free - free a chunk of multi-user ram
 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
 */
void cpm_muram_free(s32 offset)
{
	unsigned long flags;
	int size;
	struct muram_block *tmp;

	if (offset < 0)
		return;

	size = 0;
	spin_lock_irqsave(&cpm_muram_lock, flags);
	list_for_each_entry(tmp, &muram_block_list, head) {
		if (tmp->start == offset) {
			size = tmp->size;
			list_del(&tmp->head);
			kfree(tmp);
			break;
		}
	}
	gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
}
EXPORT_SYMBOL(cpm_muram_free);
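
/*
 * Hedged, illustrative sketch, not an in-tree user: reserving a region at a
 * fixed, hardware-mandated muram offset with cpm_muram_alloc_fixed(), as a
 * driver would do for a parameter RAM page.  The 0x80 offset and 64-byte
 * size are made up for illustration only.
 */
static int __maybe_unused cpm_muram_fixed_sketch(void)
{
	s32 offset;

	/* succeeds only if [0x80, 0x80 + 64) is still free in the pool */
	offset = cpm_muram_alloc_fixed(0x80, 64);
	if (offset < 0)
		return offset;

	/* ... program the parameter RAM through cpm_muram_addr(offset) ... */

	cpm_muram_free(offset);
	return 0;
}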

/*
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: offset of allocation start address
 * @size: number of bytes to allocate
 *
 * This function returns @offset if the area was available, a negative
 * errno otherwise.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
	s32 start;
	unsigned long flags;
	struct genpool_data_fixed muram_pool_data_fixed;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
	start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
				       &muram_pool_data_fixed);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);

/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
	return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);

/**
 * cpm_muram_offset - turn a muram virtual address into a muram offset
 * @addr: virtual address from cpm_muram_addr() to convert
 */
unsigned long cpm_muram_offset(const void __iomem *addr)
{
	return addr - muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);

/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @addr: virtual address from cpm_muram_addr() to convert
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
	return muram_pbase + (addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);

/*
 * As cpm_muram_free(), but takes the virtual address rather than the
 * muram offset.
 */
void cpm_muram_free_addr(const void __iomem *addr)
{
	if (!addr)
		return;
	cpm_muram_free(cpm_muram_offset(addr));
}
EXPORT_SYMBOL(cpm_muram_free_addr);
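
/*
 * Hedged, illustrative sketch, not an in-tree user: round-tripping between a
 * muram offset, its CPU-visible virtual address and its DMA/bus address.
 * The 32-byte allocation is made up for illustration only.
 */
static int __maybe_unused cpm_muram_conversion_sketch(void)
{
	void __iomem *vaddr;
	dma_addr_t busaddr;
	s32 offset;

	offset = cpm_muram_alloc(32, 8);
	if (offset < 0)
		return offset;

	vaddr = cpm_muram_addr(offset);		/* offset -> virtual address */
	busaddr = cpm_muram_dma(vaddr);		/* virtual -> DMA/bus address */

	/* cpm_muram_offset() is the inverse of cpm_muram_addr() */
	if (cpm_muram_offset(vaddr) != (unsigned long)offset) {
		cpm_muram_free(offset);
		return -EINVAL;
	}

	/* busaddr is what a controller register would be programmed with */
	(void)busaddr;

	/* freeing by virtual address is equivalent to cpm_muram_free(offset) */
	cpm_muram_free_addr(vaddr);
	return 0;
}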