// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
{
        gfp_t gfp_flags;

        gfp_flags = GFP_KERNEL
                | __GFP_COMP    /* compound page lets parts be mapped */
                | __GFP_NORETRY /* don't trigger OOM-killer */
                | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
        dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
                                        gfp_flags);
#ifdef CONFIG_X86
        if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
                set_memory_wc((unsigned long)dmab->area,
                              PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
                set_memory_wb((unsigned long)dmab->area,
                              PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
        dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

#ifdef CONFIG_GENERIC_ALLOCATOR
/**
 * snd_malloc_dev_iram - allocate memory from on-chip internal ram
 * @dmab: buffer allocation record to store the allocated data
 * @size: number of bytes to allocate from the iram
 *
 * This function requires an "iram" phandle provided via the device's of_node.
 */
static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
{
        struct device *dev = dmab->dev.dev;
        struct gen_pool *pool = NULL;

        dmab->area = NULL;
        dmab->addr = 0;

        if (dev->of_node)
                pool = of_gen_pool_get(dev->of_node, "iram", 0);

        if (!pool)
                return;

        /* Assign the pool into private_data field */
        dmab->private_data = pool;

        dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
                                              PAGE_SIZE);
}

/**
 * snd_free_dev_iram - free memory allocated from on-chip internal ram
 * @dmab: buffer allocation record holding the allocated data
 */
static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
{
        struct gen_pool *pool = dmab->private_data;

        if (pool && dmab->area)
                gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */

/*
 *
 *  ALSA generic memory management
 *
 */

static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
                                          gfp_t default_gfp)
{
        if (!dev)
                return default_gfp;
        else
                return (__force gfp_t)(unsigned long)dev;
}
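/*
 * Illustrative sketch, not wired into anything: for SNDRV_DMA_TYPE_CONTINUOUS
 * and SNDRV_DMA_TYPE_VMALLOC the "device" pointer passed by the caller is not
 * a real struct device but encodes the GFP flags that snd_mem_get_gfp_flags()
 * above decodes.  The sketch assumes the snd_dma_continuous_data() helper
 * from <sound/memalloc.h>, which performs the matching cast on the caller
 * side; the function name below is made up for illustration only.
 */
static int __maybe_unused snd_mem_example_continuous(struct snd_dma_buffer *dmab,
                                                     size_t size)
{
        /* encode GFP_KERNEL into the fake device pointer */
        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
                                   snd_dma_continuous_data(GFP_KERNEL),
                                   size, dmab);
}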
/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                        struct snd_dma_buffer *dmab)
{
        gfp_t gfp;

        if (WARN_ON(!size))
                return -ENXIO;
        if (WARN_ON(!dmab))
                return -ENXIO;

        size = PAGE_ALIGN(size);
        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->bytes = 0;
        dmab->area = NULL;
        dmab->addr = 0;
        dmab->private_data = NULL;
        switch (type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
                dmab->area = alloc_pages_exact(size, gfp);
                break;
        case SNDRV_DMA_TYPE_VMALLOC:
                gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
                dmab->area = __vmalloc(size, gfp);
                break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
        case SNDRV_DMA_TYPE_DEV_IRAM:
                snd_malloc_dev_iram(dmab, size);
                if (dmab->area)
                        break;
                /* Internal memory might have limited size and not enough
                 * space, so if the allocation fails, fall back to normal
                 * system memory.
                 */
                dmab->dev.type = SNDRV_DMA_TYPE_DEV;
                fallthrough;
#endif /* CONFIG_GENERIC_ALLOCATOR */
        case SNDRV_DMA_TYPE_DEV:
        case SNDRV_DMA_TYPE_DEV_UC:
                snd_malloc_dev_pages(dmab, size);
                break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
        case SNDRV_DMA_TYPE_DEV_SG:
        case SNDRV_DMA_TYPE_DEV_UC_SG:
                snd_malloc_sgbuf_pages(device, size, dmab, NULL);
                break;
#endif
        default:
                pr_err("snd-malloc: invalid device type %d\n", type);
                return -ENXIO;
        }
        if (!dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                if (err != -ENOMEM)
                        return err;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
                size >>= 1;
                size = PAGE_SIZE << get_order(size);
        }
        if (!dmab->area)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
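/*
 * Illustrative sketch, not part of the exported API: a typical driver asks
 * for a device-visible buffer with the fallback helper and reads the size it
 * actually got back from dmab->bytes.  The function name and the 512 KiB
 * request are made up for illustration; "dev" stands for whatever struct
 * device the driver owns.
 */
static int __maybe_unused snd_mem_example_fallback(struct device *dev,
                                                   struct snd_dma_buffer *dmab)
{
        int err;

        /* ask for 512 KiB; may be shrunk down as far as PAGE_SIZE */
        err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
                                           512 * 1024, dmab);
        if (err < 0)
                return err;

        pr_debug("snd-malloc: got %zu bytes at DMA address %pad\n",
                 dmab->bytes, &dmab->addr);
        return 0;
}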
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        switch (dmab->dev.type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                free_pages_exact(dmab->area, dmab->bytes);
                break;
        case SNDRV_DMA_TYPE_VMALLOC:
                vfree(dmab->area);
                break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
        case SNDRV_DMA_TYPE_DEV_IRAM:
                snd_free_dev_iram(dmab);
                break;
#endif /* CONFIG_GENERIC_ALLOCATOR */
        case SNDRV_DMA_TYPE_DEV:
        case SNDRV_DMA_TYPE_DEV_UC:
                snd_free_dev_pages(dmab);
                break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
        case SNDRV_DMA_TYPE_DEV_SG:
        case SNDRV_DMA_TYPE_DEV_UC_SG:
                snd_free_sgbuf_pages(dmab);
                break;
#endif
        default:
                pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
        }
}
EXPORT_SYMBOL(snd_dma_free_pages);
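/*
 * Illustrative sketch, hypothetical helper: the same snd_dma_buffer record
 * that snd_dma_alloc_pages() filled in must be handed back unchanged, since
 * dmab->dev.type selects the matching release path in the switch above.
 * Checking dmab->area first keeps the teardown safe on error paths where the
 * allocation never succeeded.
 */
static void __maybe_unused snd_mem_example_release(struct snd_dma_buffer *dmab)
{
        if (dmab->area)
                snd_dma_free_pages(dmab);
}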