1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Dynamic DMA mapping support.
4 *
5 * This implementation is a fallback for platforms that do not support
6 * I/O TLBs (aka DMA address translation hardware).
7 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
8 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
11 *
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
13 * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
14 * unnecessary i-cache flushing.
15 * 04/07/.. ak Better overflow handling. Assorted fixes.
16 * 05/09/10 linville Add support for syncing ranges, support syncing for
17 * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
18 * 08/12/11 beckyb Add highmem support
19 */
20
21 #define pr_fmt(fmt) "software IO TLB: " fmt
22
23 #include <linux/cache.h>
24 #include <linux/cc_platform.h>
25 #include <linux/ctype.h>
26 #include <linux/debugfs.h>
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
29 #include <linux/export.h>
30 #include <linux/gfp.h>
31 #include <linux/highmem.h>
32 #include <linux/io.h>
33 #include <linux/kmsan-checks.h>
34 #include <linux/iommu-helper.h>
35 #include <linux/init.h>
36 #include <linux/memblock.h>
37 #include <linux/mm.h>
38 #include <linux/pfn.h>
39 #include <linux/rculist.h>
40 #include <linux/scatterlist.h>
41 #include <linux/set_memory.h>
42 #include <linux/spinlock.h>
43 #include <linux/string.h>
44 #include <linux/swiotlb.h>
45 #include <linux/types.h>
46 #ifdef CONFIG_DMA_RESTRICTED_POOL
47 #include <linux/of.h>
48 #include <linux/of_fdt.h>
49 #include <linux/of_reserved_mem.h>
50 #include <linux/slab.h>
51 #endif
52
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/swiotlb.h>
55
56 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
57
58 /*
59 * Minimum IO TLB size to bother booting with. Systems with mainly
60  * 64-bit capable cards will only lightly use the swiotlb. If we can't
61 * allocate a contiguous 1MB, we're probably in trouble anyway.
62 */
63 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
64
65 /**
66 * struct io_tlb_slot - IO TLB slot descriptor
67 * @orig_addr: The original address corresponding to a mapped entry.
68 * @alloc_size: Size of the allocated buffer.
69 * @list: The free list describing the number of free entries available
70 * from each index.
71 * @pad_slots: Number of preceding padding slots. Valid only in the first
72 * allocated non-padding slot.
73 */
74 struct io_tlb_slot {
75 phys_addr_t orig_addr;
76 size_t alloc_size;
77 unsigned short list;
78 unsigned short pad_slots;
79 };
80
81 static bool swiotlb_force_bounce;
82 static bool swiotlb_force_disable;
83
84 #ifdef CONFIG_SWIOTLB_DYNAMIC
85
86 static void swiotlb_dyn_alloc(struct work_struct *work);
87
88 static struct io_tlb_mem io_tlb_default_mem = {
89 .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
90 .pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
91 .dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
92 swiotlb_dyn_alloc),
93 };
94
95 #else /* !CONFIG_SWIOTLB_DYNAMIC */
96
97 static struct io_tlb_mem io_tlb_default_mem;
98
99 #endif /* CONFIG_SWIOTLB_DYNAMIC */
100
101 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
102 static unsigned long default_nareas;
103
104 /**
105 * struct io_tlb_area - IO TLB memory area descriptor
106 *
107 * This is a single area with a single lock.
108 *
109  * @used:	The number of used IO TLB blocks.
110 * @index: The slot index to start searching in this area for next round.
111 * @lock: The lock to protect the above data structures in the map and
112 * unmap calls.
113 */
114 struct io_tlb_area {
115 unsigned long used;
116 unsigned int index;
117 spinlock_t lock;
118 };
119
120 /*
121  * Round up the number of slabs to the next power of 2. The last area is
122  * going to be smaller than the rest if default_nslabs is not a power of two.
123  * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
124  * otherwise a segment may span two or more areas. That would conflict with
125  * the tracking of free contiguous slots: free slots are treated as
126  * contiguous regardless of whether they cross an area boundary.
127 *
128 * Return true if default_nslabs is rounded up.
129 */
130 static bool round_up_default_nslabs(void)
131 {
132 if (!default_nareas)
133 return false;
134
135 if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
136 default_nslabs = IO_TLB_SEGSIZE * default_nareas;
137 else if (is_power_of_2(default_nslabs))
138 return false;
139 default_nslabs = roundup_pow_of_two(default_nslabs);
140 return true;
141 }
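/*
 * Illustrative walk-through (not part of the original source): assuming
 * IO_TLB_SEGSIZE is 128, booting with default_nareas = 8 and
 * default_nslabs = 3000 skips the first branch (3000 >= 8 * 128 = 1024),
 * finds that 3000 is not a power of two, rounds default_nslabs up to 4096
 * and returns true.
 */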
142
143 /**
144 * swiotlb_adjust_nareas() - adjust the number of areas and slots
145 * @nareas: Desired number of areas. Zero is treated as 1.
146 *
147 * Adjust the default number of areas in a memory pool.
148 * The default size of the memory pool may also change to meet minimum area
149 * size requirements.
150 */
151 static void swiotlb_adjust_nareas(unsigned int nareas)
152 {
153 if (!nareas)
154 nareas = 1;
155 else if (!is_power_of_2(nareas))
156 nareas = roundup_pow_of_two(nareas);
157
158 default_nareas = nareas;
159
160 pr_info("area num %d.\n", nareas);
161 if (round_up_default_nslabs())
162 		pr_info("SWIOTLB bounce buffer size rounded up to %luMB",
163 (default_nslabs << IO_TLB_SHIFT) >> 20);
164 }
165
166 /**
167 * limit_nareas() - get the maximum number of areas for a given memory pool size
168 * @nareas: Desired number of areas.
169 * @nslots: Total number of slots in the memory pool.
170 *
171 * Limit the number of areas to the maximum possible number of areas in
172 * a memory pool of the given size.
173 *
174 * Return: Maximum possible number of areas.
175 */
176 static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
177 {
178 if (nslots < nareas * IO_TLB_SEGSIZE)
179 return nslots / IO_TLB_SEGSIZE;
180 return nareas;
181 }
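/*
 * Example (illustrative values only): assuming IO_TLB_SEGSIZE is 128,
 * limit_nareas(64, 2048) returns 2048 / 128 = 16, because 64 areas of at
 * least one full segment each would need 8192 slots; a request that already
 * fits, e.g. limit_nareas(4, 2048), is returned unchanged.
 */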
182
183 static int __init
184 setup_io_tlb_npages(char *str)
185 {
186 if (isdigit(*str)) {
187 /* avoid tail segment of size < IO_TLB_SEGSIZE */
188 default_nslabs =
189 ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
190 }
191 if (*str == ',')
192 ++str;
193 if (isdigit(*str))
194 swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
195 if (*str == ',')
196 ++str;
197 if (!strcmp(str, "force"))
198 swiotlb_force_bounce = true;
199 else if (!strcmp(str, "noforce"))
200 swiotlb_force_disable = true;
201
202 return 0;
203 }
204 early_param("swiotlb", setup_io_tlb_npages);
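/*
 * Usage sketch, derived from the parsing above (sizes are only examples):
 * "swiotlb=65536" reserves 65536 slabs (128 MB with the usual 2 KB slots),
 * "swiotlb=65536,4" additionally splits the pool into 4 areas, and
 * "swiotlb=force" / "swiotlb=noforce" force bouncing for all DMA mappings
 * or disable the default software IO TLB entirely.
 */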
205
206 unsigned long swiotlb_size_or_default(void)
207 {
208 return default_nslabs << IO_TLB_SHIFT;
209 }
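/*
 * For reference (illustrative numbers): with the usual IO_TLB_DEFAULT_SIZE
 * of 64 MB and IO_TLB_SHIFT of 11 (2 KB slots), default_nslabs starts out
 * as 64 MB >> 11 = 32768, so swiotlb_size_or_default() reports 64 MB unless
 * the command line or an architecture hook has changed it.
 */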
210
211 void __init swiotlb_adjust_size(unsigned long size)
212 {
213 /*
214 	 * If the swiotlb parameter has not been specified, give architectures
215 	 * such as those supporting memory encryption a chance to adjust/expand
216 	 * the SWIOTLB size for their use.
217 */
218 if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
219 return;
220
221 size = ALIGN(size, IO_TLB_SIZE);
222 default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
223 if (round_up_default_nslabs())
224 size = default_nslabs << IO_TLB_SHIFT;
225 pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
226 }
227
228 void swiotlb_print_info(void)
229 {
230 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
231
232 if (!mem->nslabs) {
233 pr_warn("No low mem\n");
234 return;
235 }
236
237 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
238 (mem->nslabs << IO_TLB_SHIFT) >> 20);
239 }
240
241 static inline unsigned long io_tlb_offset(unsigned long val)
242 {
243 return val & (IO_TLB_SEGSIZE - 1);
244 }
245
246 static inline unsigned long nr_slots(u64 val)
247 {
248 return DIV_ROUND_UP(val, IO_TLB_SIZE);
249 }
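/*
 * Quick reference (assuming the usual IO_TLB_SHIFT of 11 and IO_TLB_SEGSIZE
 * of 128): nr_slots(5000) = DIV_ROUND_UP(5000, 2048) = 3 slots, and
 * io_tlb_offset(130) = 130 & 127 = 2, i.e. slot 130 is the third slot of
 * its segment.
 */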
250
251 /*
252 * Early SWIOTLB allocation may be too early to allow an architecture to
253 * perform the desired operations. This function allows the architecture to
254 * call SWIOTLB when the operations are possible. It needs to be called
255 * before the SWIOTLB memory is used.
256 */
257 void __init swiotlb_update_mem_attributes(void)
258 {
259 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
260 unsigned long bytes;
261
262 if (!mem->nslabs || mem->late_alloc)
263 return;
264 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
265 set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
266 }
267
268 static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
269 unsigned long nslabs, bool late_alloc, unsigned int nareas)
270 {
271 void *vaddr = phys_to_virt(start);
272 unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
273
274 mem->nslabs = nslabs;
275 mem->start = start;
276 mem->end = mem->start + bytes;
277 mem->late_alloc = late_alloc;
278 mem->nareas = nareas;
279 mem->area_nslabs = nslabs / mem->nareas;
280
281 for (i = 0; i < mem->nareas; i++) {
282 spin_lock_init(&mem->areas[i].lock);
283 mem->areas[i].index = 0;
284 mem->areas[i].used = 0;
285 }
286
287 for (i = 0; i < mem->nslabs; i++) {
288 mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
289 mem->nslabs - i);
290 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
291 mem->slots[i].alloc_size = 0;
292 mem->slots[i].pad_slots = 0;
293 }
294
295 memset(vaddr, 0, bytes);
296 mem->vaddr = vaddr;
297 return;
298 }
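/*
 * Free-list encoding sketch (illustrative, assuming IO_TLB_SEGSIZE = 128):
 * after the initialization loop above, the slots of one fresh segment carry
 * list values 128, 127, ..., 1, i.e. each slot records how many contiguous
 * free slots start at its index within the segment. The search and release
 * paths below maintain this invariant as slots are allocated and merged
 * back.
 */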
299
300 /**
301 * add_mem_pool() - add a memory pool to the allocator
302 * @mem: Software IO TLB allocator.
303 * @pool: Memory pool to be added.
304 */
305 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
306 {
307 #ifdef CONFIG_SWIOTLB_DYNAMIC
308 spin_lock(&mem->lock);
309 list_add_rcu(&pool->node, &mem->pools);
310 mem->nslabs += pool->nslabs;
311 spin_unlock(&mem->lock);
312 #else
313 mem->nslabs = pool->nslabs;
314 #endif
315 }
316
317 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
318 unsigned int flags,
319 int (*remap)(void *tlb, unsigned long nslabs))
320 {
321 size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
322 void *tlb;
323
324 /*
325 * By default allocate the bounce buffer memory from low memory, but
326 	 * allow picking a location anywhere for hypervisors with guest
327 * memory encryption.
328 */
329 if (flags & SWIOTLB_ANY)
330 tlb = memblock_alloc(bytes, PAGE_SIZE);
331 else
332 tlb = memblock_alloc_low(bytes, PAGE_SIZE);
333
334 if (!tlb) {
335 pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
336 __func__, bytes);
337 return NULL;
338 }
339
340 if (remap && remap(tlb, nslabs) < 0) {
341 memblock_free(tlb, PAGE_ALIGN(bytes));
342 pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
343 return NULL;
344 }
345
346 return tlb;
347 }
348
349 /*
350 * Statically reserve bounce buffer space and initialize bounce buffer data
351 * structures for the software IO TLB used to implement the DMA API.
352 */
353 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
354 int (*remap)(void *tlb, unsigned long nslabs))
355 {
356 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
357 unsigned long nslabs;
358 unsigned int nareas;
359 size_t alloc_size;
360 void *tlb;
361
362 if (!addressing_limit && !swiotlb_force_bounce)
363 return;
364 if (swiotlb_force_disable)
365 return;
366
367 io_tlb_default_mem.force_bounce =
368 swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
369
370 #ifdef CONFIG_SWIOTLB_DYNAMIC
371 if (!remap)
372 io_tlb_default_mem.can_grow = true;
373 if (flags & SWIOTLB_ANY)
374 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
375 else
376 io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
377 #endif
378
379 if (!default_nareas)
380 swiotlb_adjust_nareas(num_possible_cpus());
381
382 nslabs = default_nslabs;
383 nareas = limit_nareas(default_nareas, nslabs);
384 while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
385 if (nslabs <= IO_TLB_MIN_SLABS)
386 return;
387 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
388 nareas = limit_nareas(nareas, nslabs);
389 }
390
391 if (default_nslabs != nslabs) {
392 pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
393 default_nslabs, nslabs);
394 default_nslabs = nslabs;
395 }
396
397 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
398 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
399 if (!mem->slots) {
400 pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
401 __func__, alloc_size, PAGE_SIZE);
402 return;
403 }
404
405 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
406 nareas), SMP_CACHE_BYTES);
407 if (!mem->areas) {
408 pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
409 return;
410 }
411
412 swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
413 add_mem_pool(&io_tlb_default_mem, mem);
414
415 if (flags & SWIOTLB_VERBOSE)
416 swiotlb_print_info();
417 }
418
419 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
420 {
421 swiotlb_init_remap(addressing_limit, flags, NULL);
422 }
423
424 /*
425 * Systems with larger DMA zones (those that don't support ISA) can
426 * initialize the swiotlb later using the slab allocator if needed.
427 * This should be just like above, but with some error catching.
428 */
429 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
430 int (*remap)(void *tlb, unsigned long nslabs))
431 {
432 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
433 unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
434 unsigned int nareas;
435 unsigned char *vstart = NULL;
436 unsigned int order, area_order;
437 bool retried = false;
438 int rc = 0;
439
440 if (io_tlb_default_mem.nslabs)
441 return 0;
442
443 if (swiotlb_force_disable)
444 return 0;
445
446 io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
447
448 #ifdef CONFIG_SWIOTLB_DYNAMIC
449 if (!remap)
450 io_tlb_default_mem.can_grow = true;
451 if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
452 io_tlb_default_mem.phys_limit = zone_dma_limit;
453 else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
454 io_tlb_default_mem.phys_limit = max(DMA_BIT_MASK(32), zone_dma_limit);
455 else
456 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
457 #endif
458
459 if (!default_nareas)
460 swiotlb_adjust_nareas(num_possible_cpus());
461
462 retry:
463 order = get_order(nslabs << IO_TLB_SHIFT);
464 nslabs = SLABS_PER_PAGE << order;
465
466 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
467 vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
468 order);
469 if (vstart)
470 break;
471 order--;
472 nslabs = SLABS_PER_PAGE << order;
473 retried = true;
474 }
475
476 if (!vstart)
477 return -ENOMEM;
478
479 if (remap)
480 rc = remap(vstart, nslabs);
481 if (rc) {
482 free_pages((unsigned long)vstart, order);
483
484 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
485 if (nslabs < IO_TLB_MIN_SLABS)
486 return rc;
487 retried = true;
488 goto retry;
489 }
490
491 if (retried) {
492 pr_warn("only able to allocate %ld MB\n",
493 (PAGE_SIZE << order) >> 20);
494 }
495
496 nareas = limit_nareas(default_nareas, nslabs);
497 area_order = get_order(array_size(sizeof(*mem->areas), nareas));
498 mem->areas = (struct io_tlb_area *)
499 __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
500 if (!mem->areas)
501 goto error_area;
502
503 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
504 get_order(array_size(sizeof(*mem->slots), nslabs)));
505 if (!mem->slots)
506 goto error_slots;
507
508 set_memory_decrypted((unsigned long)vstart,
509 (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
510 swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
511 nareas);
512 add_mem_pool(&io_tlb_default_mem, mem);
513
514 swiotlb_print_info();
515 return 0;
516
517 error_slots:
518 free_pages((unsigned long)mem->areas, area_order);
519 error_area:
520 free_pages((unsigned long)vstart, order);
521 return -ENOMEM;
522 }
523
524 void __init swiotlb_exit(void)
525 {
526 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
527 unsigned long tbl_vaddr;
528 size_t tbl_size, slots_size;
529 unsigned int area_order;
530
531 if (swiotlb_force_bounce)
532 return;
533
534 if (!mem->nslabs)
535 return;
536
537 pr_info("tearing down default memory pool\n");
538 tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
539 tbl_size = PAGE_ALIGN(mem->end - mem->start);
540 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
541
542 set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
543 if (mem->late_alloc) {
544 area_order = get_order(array_size(sizeof(*mem->areas),
545 mem->nareas));
546 free_pages((unsigned long)mem->areas, area_order);
547 free_pages(tbl_vaddr, get_order(tbl_size));
548 free_pages((unsigned long)mem->slots, get_order(slots_size));
549 } else {
550 memblock_free(mem->areas,
551 array_size(sizeof(*mem->areas), mem->nareas));
552 memblock_phys_free(mem->start, tbl_size);
553 memblock_free(mem->slots, slots_size);
554 }
555
556 memset(mem, 0, sizeof(*mem));
557 }
558
559 #ifdef CONFIG_SWIOTLB_DYNAMIC
560
561 /**
562 * alloc_dma_pages() - allocate pages to be used for DMA
563 * @gfp: GFP flags for the allocation.
564 * @bytes: Size of the buffer.
565 * @phys_limit: Maximum allowed physical address of the buffer.
566 *
567  * Allocate pages from the buddy allocator. If successful, mark the allocated
568  * pages decrypted so that they can be used for DMA.
569 *
570 * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
571 * if the allocated physical address was above @phys_limit.
572 */
573 static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
574 {
575 unsigned int order = get_order(bytes);
576 struct page *page;
577 phys_addr_t paddr;
578 void *vaddr;
579
580 page = alloc_pages(gfp, order);
581 if (!page)
582 return NULL;
583
584 paddr = page_to_phys(page);
585 if (paddr + bytes - 1 > phys_limit) {
586 __free_pages(page, order);
587 return ERR_PTR(-EAGAIN);
588 }
589
590 vaddr = phys_to_virt(paddr);
591 if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
592 goto error;
593 return page;
594
595 error:
596 /* Intentional leak if pages cannot be encrypted again. */
597 if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
598 __free_pages(page, order);
599 return NULL;
600 }
601
602 /**
603 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
604 * @dev: Device for which a memory pool is allocated.
605 * @bytes: Size of the buffer.
606 * @phys_limit: Maximum allowed physical address of the buffer.
607 * @gfp: GFP flags for the allocation.
608 *
609 * Return: Allocated pages, or %NULL on allocation failure.
610 */
611 static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
612 u64 phys_limit, gfp_t gfp)
613 {
614 struct page *page;
615
616 /*
617 * Allocate from the atomic pools if memory is encrypted and
618 * the allocation is atomic, because decrypting may block.
619 */
620 if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
621 void *vaddr;
622
623 if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
624 return NULL;
625
626 return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
627 dma_coherent_ok);
628 }
629
630 gfp &= ~GFP_ZONEMASK;
631 if (phys_limit <= zone_dma_limit)
632 gfp |= __GFP_DMA;
633 else if (phys_limit <= DMA_BIT_MASK(32))
634 gfp |= __GFP_DMA32;
635
636 while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
637 if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
638 phys_limit < DMA_BIT_MASK(64) &&
639 !(gfp & (__GFP_DMA32 | __GFP_DMA)))
640 gfp |= __GFP_DMA32;
641 else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
642 !(gfp & __GFP_DMA))
643 gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
644 else
645 return NULL;
646 }
647
648 return page;
649 }
650
651 /**
652 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
653 * @vaddr: Virtual address of the buffer.
654 * @bytes: Size of the buffer.
655 */
656 static void swiotlb_free_tlb(void *vaddr, size_t bytes)
657 {
658 if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
659 dma_free_from_pool(NULL, vaddr, bytes))
660 return;
661
662 /* Intentional leak if pages cannot be encrypted again. */
663 if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
664 __free_pages(virt_to_page(vaddr), get_order(bytes));
665 }
666
667 /**
668 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
669 * @dev: Device for which a memory pool is allocated.
670 * @minslabs: Minimum number of slabs.
671 * @nslabs: Desired (maximum) number of slabs.
672 * @nareas: Number of areas.
673 * @phys_limit: Maximum DMA buffer physical address.
674 * @gfp: GFP flags for the allocations.
675 *
676 * Allocate and initialize a new IO TLB memory pool. The actual number of
677 * slabs may be reduced if allocation of @nslabs fails. If even
678 * @minslabs cannot be allocated, this function fails.
679 *
680 * Return: New memory pool, or %NULL on allocation failure.
681 */
682 static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
683 unsigned long minslabs, unsigned long nslabs,
684 unsigned int nareas, u64 phys_limit, gfp_t gfp)
685 {
686 struct io_tlb_pool *pool;
687 unsigned int slot_order;
688 struct page *tlb;
689 size_t pool_size;
690 size_t tlb_size;
691
692 if (nslabs > SLABS_PER_PAGE << MAX_PAGE_ORDER) {
693 nslabs = SLABS_PER_PAGE << MAX_PAGE_ORDER;
694 nareas = limit_nareas(nareas, nslabs);
695 }
696
697 pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
698 pool = kzalloc(pool_size, gfp);
699 if (!pool)
700 goto error;
701 pool->areas = (void *)pool + sizeof(*pool);
702
703 tlb_size = nslabs << IO_TLB_SHIFT;
704 while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
705 if (nslabs <= minslabs)
706 goto error_tlb;
707 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
708 nareas = limit_nareas(nareas, nslabs);
709 tlb_size = nslabs << IO_TLB_SHIFT;
710 }
711
712 slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
713 pool->slots = (struct io_tlb_slot *)
714 __get_free_pages(gfp, slot_order);
715 if (!pool->slots)
716 goto error_slots;
717
718 swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
719 return pool;
720
721 error_slots:
722 swiotlb_free_tlb(page_address(tlb), tlb_size);
723 error_tlb:
724 kfree(pool);
725 error:
726 return NULL;
727 }
728
729 /**
730 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
731 * @work: Pointer to dyn_alloc in struct io_tlb_mem.
732 */
733 static void swiotlb_dyn_alloc(struct work_struct *work)
734 {
735 struct io_tlb_mem *mem =
736 container_of(work, struct io_tlb_mem, dyn_alloc);
737 struct io_tlb_pool *pool;
738
739 pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
740 default_nareas, mem->phys_limit, GFP_KERNEL);
741 if (!pool) {
742 pr_warn_ratelimited("Failed to allocate new pool");
743 return;
744 }
745
746 add_mem_pool(mem, pool);
747 }
748
749 /**
750 * swiotlb_dyn_free() - RCU callback to free a memory pool
751 * @rcu: RCU head in the corresponding struct io_tlb_pool.
752 */
753 static void swiotlb_dyn_free(struct rcu_head *rcu)
754 {
755 struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
756 size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
757 size_t tlb_size = pool->end - pool->start;
758
759 free_pages((unsigned long)pool->slots, get_order(slots_size));
760 swiotlb_free_tlb(pool->vaddr, tlb_size);
761 kfree(pool);
762 }
763
764 /**
765 * __swiotlb_find_pool() - find the IO TLB pool for a physical address
766 * @dev: Device which has mapped the DMA buffer.
767 * @paddr: Physical address within the DMA buffer.
768 *
769 * Find the IO TLB memory pool descriptor which contains the given physical
770 * address, if any. This function is for use only when the dev is known to
771 * be using swiotlb. Use swiotlb_find_pool() for the more general case
772 * when this condition is not met.
773 *
774 * Return: Memory pool which contains @paddr, or %NULL if none.
775 */
776 struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
777 {
778 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
779 struct io_tlb_pool *pool;
780
781 rcu_read_lock();
782 list_for_each_entry_rcu(pool, &mem->pools, node) {
783 if (paddr >= pool->start && paddr < pool->end)
784 goto out;
785 }
786
787 list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
788 if (paddr >= pool->start && paddr < pool->end)
789 goto out;
790 }
791 pool = NULL;
792 out:
793 rcu_read_unlock();
794 return pool;
795 }
796
797 /**
798 * swiotlb_del_pool() - remove an IO TLB pool from a device
799 * @dev: Owning device.
800 * @pool: Memory pool to be removed.
801 */
802 static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
803 {
804 unsigned long flags;
805
806 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
807 list_del_rcu(&pool->node);
808 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
809
810 call_rcu(&pool->rcu, swiotlb_dyn_free);
811 }
812
813 #endif /* CONFIG_SWIOTLB_DYNAMIC */
814
815 /**
816 * swiotlb_dev_init() - initialize swiotlb fields in &struct device
817 * @dev: Device to be initialized.
818 */
819 void swiotlb_dev_init(struct device *dev)
820 {
821 dev->dma_io_tlb_mem = &io_tlb_default_mem;
822 #ifdef CONFIG_SWIOTLB_DYNAMIC
823 INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
824 spin_lock_init(&dev->dma_io_tlb_lock);
825 dev->dma_uses_io_tlb = false;
826 #endif
827 }
828
829 /**
830 * swiotlb_align_offset() - Get required offset into an IO TLB allocation.
831 * @dev: Owning device.
832 * @align_mask: Allocation alignment mask.
833 * @addr: DMA address.
834 *
835 * Return the minimum offset from the start of an IO TLB allocation which is
836 * required for a given buffer address and allocation alignment to keep the
837 * device happy.
838 *
839 * First, the address bits covered by min_align_mask must be identical in the
840 * original address and the bounce buffer address. High bits are preserved by
841 * choosing a suitable IO TLB slot, but bits below IO_TLB_SHIFT require extra
842 * padding bytes before the bounce buffer.
843 *
844 * Second, @align_mask specifies which bits of the first allocated slot must
845 * be zero. This may require allocating additional padding slots, and then the
846 * offset (in bytes) from the first such padding slot is returned.
847 */
848 static unsigned int swiotlb_align_offset(struct device *dev,
849 unsigned int align_mask, u64 addr)
850 {
851 return addr & dma_get_min_align_mask(dev) &
852 (align_mask | (IO_TLB_SIZE - 1));
853 }
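/*
 * Worked example (hypothetical values): for a device with
 * dma_get_min_align_mask() == 0xFFF, align_mask == 0 and an address ending
 * in 0xA10, the result is 0xA10 & 0xFFF & (0 | 0x7FF) = 0x210 with 2 KB
 * slots: the bits below IO_TLB_SHIFT become padding bytes at the start of
 * the bounce buffer, while the remaining min_align_mask bits are honoured
 * by slot selection instead.
 */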
854
855 /*
856 * Bounce: copy the swiotlb buffer from or back to the original dma location
857 */
858 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
859 enum dma_data_direction dir, struct io_tlb_pool *mem)
860 {
861 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
862 phys_addr_t orig_addr = mem->slots[index].orig_addr;
863 size_t alloc_size = mem->slots[index].alloc_size;
864 unsigned long pfn = PFN_DOWN(orig_addr);
865 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
866 int tlb_offset;
867
868 if (orig_addr == INVALID_PHYS_ADDR)
869 return;
870
871 if (dir == DMA_FROM_DEVICE && !dev_is_dma_coherent(dev))
872 arch_sync_dma_flush();
873
874 /*
875 * It's valid for tlb_offset to be negative. This can happen when the
876 * "offset" returned by swiotlb_align_offset() is non-zero, and the
877 * tlb_addr is pointing within the first "offset" bytes of the second
878 * or subsequent slots of the allocated swiotlb area. While it's not
879 * valid for tlb_addr to be pointing within the first "offset" bytes
880 * of the first slot, there's no way to check for such an error since
881 * this function can't distinguish the first slot from the second and
882 * subsequent slots.
883 */
884 tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
885 swiotlb_align_offset(dev, 0, orig_addr);
886
887 orig_addr += tlb_offset;
888 alloc_size -= tlb_offset;
889
890 if (size > alloc_size) {
891 dev_WARN_ONCE(dev, 1,
892 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
893 alloc_size, size);
894 size = alloc_size;
895 }
896
897 if (PageHighMem(pfn_to_page(pfn))) {
898 unsigned int offset = orig_addr & ~PAGE_MASK;
899 struct page *page;
900 unsigned int sz = 0;
901 unsigned long flags;
902
903 while (size) {
904 sz = min_t(size_t, PAGE_SIZE - offset, size);
905
906 local_irq_save(flags);
907 page = pfn_to_page(pfn);
908 if (dir == DMA_TO_DEVICE) {
909 /*
910 * Ideally, kmsan_check_highmem_page()
911 * could be used here to detect infoleaks,
912 * but callers may map uninitialized buffers
913 * that will be written by the device,
914 * causing false positives.
915 */
916 memcpy_from_page(vaddr, page, offset, sz);
917 } else {
918 kmsan_unpoison_memory(vaddr, sz);
919 memcpy_to_page(page, offset, vaddr, sz);
920 }
921 local_irq_restore(flags);
922
923 size -= sz;
924 pfn++;
925 vaddr += sz;
926 offset = 0;
927 }
928 } else if (dir == DMA_TO_DEVICE) {
929 /*
930 * Ideally, kmsan_check_memory() could be used here to detect
931 * infoleaks (uninitialized data being sent to device), but
932 * callers may map uninitialized buffers that will be written
933 * by the device, causing false positives.
934 */
935 memcpy(vaddr, phys_to_virt(orig_addr), size);
936 } else {
937 kmsan_unpoison_memory(vaddr, size);
938 memcpy(phys_to_virt(orig_addr), vaddr, size);
939 }
940 }
941
942 static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
943 {
944 return start + (idx << IO_TLB_SHIFT);
945 }
946
947 /*
948 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
949 */
950 static inline unsigned long get_max_slots(unsigned long boundary_mask)
951 {
952 return (boundary_mask >> IO_TLB_SHIFT) + 1;
953 }
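/*
 * Example (illustrative): a 16 MB segment boundary (boundary_mask ==
 * 0xFFFFFF) allows (0xFFFFFF >> 11) + 1 = 8192 slots of 2 KB per boundary
 * window. Shifting before adding 1 is what keeps boundary_mask == ~0UL
 * from wrapping around to zero.
 */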
954
955 static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
956 {
957 if (index >= mem->area_nslabs)
958 return 0;
959 return index;
960 }
961
962 /*
963 * Track the total used slots with a global atomic value in order to have
964 * correct information to determine the high water mark. The mem_used()
965 * function gives imprecise results because there's no locking across
966 * multiple areas.
967 */
968 #ifdef CONFIG_DEBUG_FS
969 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
970 {
971 unsigned long old_hiwater, new_used;
972
973 new_used = atomic_long_add_return(nslots, &mem->total_used);
974 old_hiwater = atomic_long_read(&mem->used_hiwater);
975 do {
976 if (new_used <= old_hiwater)
977 break;
978 } while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
979 &old_hiwater, new_used));
980 }
981
982 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
983 {
984 atomic_long_sub(nslots, &mem->total_used);
985 }
986
987 #else /* !CONFIG_DEBUG_FS */
988 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
989 {
990 }
991 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
992 {
993 }
994 #endif /* CONFIG_DEBUG_FS */
995
996 #ifdef CONFIG_SWIOTLB_DYNAMIC
997 #ifdef CONFIG_DEBUG_FS
998 static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
999 {
1000 atomic_long_add(nslots, &mem->transient_nslabs);
1001 }
1002
1003 static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
1004 {
1005 atomic_long_sub(nslots, &mem->transient_nslabs);
1006 }
1007
1008 #else /* !CONFIG_DEBUG_FS */
1009 static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
1010 {
1011 }
1012 static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
1013 {
1014 }
1015 #endif /* CONFIG_DEBUG_FS */
1016 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1017
1018 /**
1019 * swiotlb_search_pool_area() - search one memory area in one pool
1020 * @dev: Device which maps the buffer.
1021 * @pool: Memory pool to be searched.
1022 * @area_index: Index of the IO TLB memory area to be searched.
1023 * @orig_addr: Original (non-bounced) IO buffer address.
1024 * @alloc_size: Total requested size of the bounce buffer,
1025 * including initial alignment padding.
1026 * @alloc_align_mask: Required alignment of the allocated buffer.
1027 *
1028 * Find a suitable sequence of IO TLB entries for the request and allocate
1029 * a buffer from the given IO TLB memory area.
1030 * This function takes care of locking.
1031 *
1032 * Return: Index of the first allocated slot, or -1 on error.
1033 */
1034 static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool,
1035 int area_index, phys_addr_t orig_addr, size_t alloc_size,
1036 unsigned int alloc_align_mask)
1037 {
1038 struct io_tlb_area *area = pool->areas + area_index;
1039 unsigned long boundary_mask = dma_get_seg_boundary(dev);
1040 dma_addr_t tbl_dma_addr =
1041 phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
1042 unsigned long max_slots = get_max_slots(boundary_mask);
1043 unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
1044 unsigned int nslots = nr_slots(alloc_size), stride;
1045 unsigned int offset = swiotlb_align_offset(dev, 0, orig_addr);
1046 unsigned int index, slots_checked, count = 0, i;
1047 unsigned long flags;
1048 unsigned int slot_base;
1049 unsigned int slot_index;
1050
1051 BUG_ON(!nslots);
1052 BUG_ON(area_index >= pool->nareas);
1053
1054 /*
1055 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
1056 * page-aligned in the absence of any other alignment requirements.
1057 * 'alloc_align_mask' was later introduced to specify the alignment
1058 	 * explicitly; however, this is passed as zero for streaming mappings
1059 * and so we preserve the old behaviour there in case any drivers are
1060 * relying on it.
1061 */
1062 if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
1063 alloc_align_mask = PAGE_SIZE - 1;
1064
1065 /*
1066 * Ensure that the allocation is at least slot-aligned and update
1067 * 'iotlb_align_mask' to ignore bits that will be preserved when
1068 * offsetting into the allocation.
1069 */
1070 alloc_align_mask |= (IO_TLB_SIZE - 1);
1071 iotlb_align_mask &= ~alloc_align_mask;
1072
1073 /*
1074 	 * For mappings with an alignment requirement, don't bother looping over
1075 	 * unaligned slots once we have found an aligned one.
1076 */
1077 stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
1078
1079 spin_lock_irqsave(&area->lock, flags);
1080 if (unlikely(nslots > pool->area_nslabs - area->used))
1081 goto not_found;
1082
1083 slot_base = area_index * pool->area_nslabs;
1084 index = area->index;
1085
1086 for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
1087 phys_addr_t tlb_addr;
1088
1089 slot_index = slot_base + index;
1090 tlb_addr = slot_addr(tbl_dma_addr, slot_index);
1091
1092 if ((tlb_addr & alloc_align_mask) ||
1093 (orig_addr && (tlb_addr & iotlb_align_mask) !=
1094 (orig_addr & iotlb_align_mask))) {
1095 index = wrap_area_index(pool, index + 1);
1096 slots_checked++;
1097 continue;
1098 }
1099
1100 if (!iommu_is_span_boundary(slot_index, nslots,
1101 nr_slots(tbl_dma_addr),
1102 max_slots)) {
1103 if (pool->slots[slot_index].list >= nslots)
1104 goto found;
1105 }
1106 index = wrap_area_index(pool, index + stride);
1107 slots_checked += stride;
1108 }
1109
1110 not_found:
1111 spin_unlock_irqrestore(&area->lock, flags);
1112 return -1;
1113
1114 found:
1115 /*
1116 * If we find a slot that indicates we have 'nslots' number of
1117 * contiguous buffers, we allocate the buffers from that slot onwards
1118 * and set the list of free entries to '0' indicating unavailable.
1119 */
1120 for (i = slot_index; i < slot_index + nslots; i++) {
1121 pool->slots[i].list = 0;
1122 pool->slots[i].alloc_size = alloc_size - (offset +
1123 ((i - slot_index) << IO_TLB_SHIFT));
1124 }
1125 for (i = slot_index - 1;
1126 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
1127 pool->slots[i].list; i--)
1128 pool->slots[i].list = ++count;
1129
1130 /*
1131 * Update the indices to avoid searching in the next round.
1132 */
1133 area->index = wrap_area_index(pool, index + nslots);
1134 area->used += nslots;
1135 spin_unlock_irqrestore(&area->lock, flags);
1136
1137 inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
1138 return slot_index;
1139 }
1140
1141 #ifdef CONFIG_SWIOTLB_DYNAMIC
1142
1143 /**
1144 * swiotlb_search_area() - search one memory area in all pools
1145 * @dev: Device which maps the buffer.
1146 * @start_cpu: Start CPU number.
1147 * @cpu_offset: Offset from @start_cpu.
1148 * @orig_addr: Original (non-bounced) IO buffer address.
1149 * @alloc_size: Total requested size of the bounce buffer,
1150 * including initial alignment padding.
1151 * @alloc_align_mask: Required alignment of the allocated buffer.
1152 * @retpool: Used memory pool, updated on return.
1153 *
1154 * Search one memory area in all pools for a sequence of slots that match the
1155 * allocation constraints.
1156 *
1157 * Return: Index of the first allocated slot, or -1 on error.
1158 */
1159 static int swiotlb_search_area(struct device *dev, int start_cpu,
1160 int cpu_offset, phys_addr_t orig_addr, size_t alloc_size,
1161 unsigned int alloc_align_mask, struct io_tlb_pool **retpool)
1162 {
1163 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1164 struct io_tlb_pool *pool;
1165 int area_index;
1166 int index = -1;
1167
1168 rcu_read_lock();
1169 list_for_each_entry_rcu(pool, &mem->pools, node) {
1170 if (cpu_offset >= pool->nareas)
1171 continue;
1172 area_index = (start_cpu + cpu_offset) & (pool->nareas - 1);
1173 index = swiotlb_search_pool_area(dev, pool, area_index,
1174 orig_addr, alloc_size,
1175 alloc_align_mask);
1176 if (index >= 0) {
1177 *retpool = pool;
1178 break;
1179 }
1180 }
1181 rcu_read_unlock();
1182 return index;
1183 }
1184
1185 /**
1186 * swiotlb_find_slots() - search for slots in the whole swiotlb
1187 * @dev: Device which maps the buffer.
1188 * @orig_addr: Original (non-bounced) IO buffer address.
1189 * @alloc_size: Total requested size of the bounce buffer,
1190 * including initial alignment padding.
1191 * @alloc_align_mask: Required alignment of the allocated buffer.
1192 * @retpool: Used memory pool, updated on return.
1193 *
1194 * Search through the whole software IO TLB to find a sequence of slots that
1195 * match the allocation constraints.
1196 *
1197 * Return: Index of the first allocated slot, or -1 on error.
1198 */
1199 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1200 size_t alloc_size, unsigned int alloc_align_mask,
1201 struct io_tlb_pool **retpool)
1202 {
1203 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1204 struct io_tlb_pool *pool;
1205 unsigned long nslabs;
1206 unsigned long flags;
1207 u64 phys_limit;
1208 int cpu, i;
1209 int index;
1210
1211 if (alloc_size > IO_TLB_SEGSIZE * IO_TLB_SIZE)
1212 return -1;
1213
1214 cpu = raw_smp_processor_id();
1215 for (i = 0; i < default_nareas; ++i) {
1216 index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size,
1217 alloc_align_mask, &pool);
1218 if (index >= 0)
1219 goto found;
1220 }
1221
1222 if (!mem->can_grow)
1223 return -1;
1224
1225 schedule_work(&mem->dyn_alloc);
1226
1227 nslabs = nr_slots(alloc_size);
1228 phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
1229 pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
1230 GFP_NOWAIT);
1231 if (!pool)
1232 return -1;
1233
1234 index = swiotlb_search_pool_area(dev, pool, 0, orig_addr,
1235 alloc_size, alloc_align_mask);
1236 if (index < 0) {
1237 swiotlb_dyn_free(&pool->rcu);
1238 return -1;
1239 }
1240
1241 pool->transient = true;
1242 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
1243 list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
1244 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
1245 inc_transient_used(mem, pool->nslabs);
1246
1247 found:
1248 WRITE_ONCE(dev->dma_uses_io_tlb, true);
1249
1250 /*
1251 * The general barrier orders reads and writes against a presumed store
1252 * of the SWIOTLB buffer address by a device driver (to a driver private
1253 * data structure). It serves two purposes.
1254 *
1255 * First, the store to dev->dma_uses_io_tlb must be ordered before the
1256 * presumed store. This guarantees that the returned buffer address
1257 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
1258 *
1259 * Second, the load from mem->pools must be ordered before the same
1260 * presumed store. This guarantees that the returned buffer address
1261 * cannot be observed by another CPU before an update of the RCU list
1262 * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
1263 * atomicity).
1264 *
1265 * See also the comment in swiotlb_find_pool().
1266 */
1267 smp_mb();
1268
1269 *retpool = pool;
1270 return index;
1271 }
1272
1273 #else /* !CONFIG_SWIOTLB_DYNAMIC */
1274
1275 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1276 size_t alloc_size, unsigned int alloc_align_mask,
1277 struct io_tlb_pool **retpool)
1278 {
1279 struct io_tlb_pool *pool;
1280 int start, i;
1281 int index;
1282
1283 *retpool = pool = &dev->dma_io_tlb_mem->defpool;
1284 i = start = raw_smp_processor_id() & (pool->nareas - 1);
1285 do {
1286 index = swiotlb_search_pool_area(dev, pool, i, orig_addr,
1287 alloc_size, alloc_align_mask);
1288 if (index >= 0)
1289 return index;
1290 if (++i >= pool->nareas)
1291 i = 0;
1292 } while (i != start);
1293 return -1;
1294 }
1295
1296 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1297
1298 #ifdef CONFIG_DEBUG_FS
1299
1300 /**
1301 * mem_used() - get number of used slots in an allocator
1302 * @mem: Software IO TLB allocator.
1303 *
1304 * The result is accurate in this version of the function, because an atomic
1305 * counter is available if CONFIG_DEBUG_FS is set.
1306 *
1307 * Return: Number of used slots.
1308 */
1309 static unsigned long mem_used(struct io_tlb_mem *mem)
1310 {
1311 return atomic_long_read(&mem->total_used);
1312 }
1313
1314 #else /* !CONFIG_DEBUG_FS */
1315
1316 /**
1317 * mem_pool_used() - get number of used slots in a memory pool
1318 * @pool: Software IO TLB memory pool.
1319 *
1320 * The result is not accurate, see mem_used().
1321 *
1322 * Return: Approximate number of used slots.
1323 */
1324 static unsigned long mem_pool_used(struct io_tlb_pool *pool)
1325 {
1326 int i;
1327 unsigned long used = 0;
1328
1329 for (i = 0; i < pool->nareas; i++)
1330 used += pool->areas[i].used;
1331 return used;
1332 }
1333
1334 /**
1335 * mem_used() - get number of used slots in an allocator
1336 * @mem: Software IO TLB allocator.
1337 *
1338 * The result is not accurate, because there is no locking of individual
1339 * areas.
1340 *
1341 * Return: Approximate number of used slots.
1342 */
1343 static unsigned long mem_used(struct io_tlb_mem *mem)
1344 {
1345 #ifdef CONFIG_SWIOTLB_DYNAMIC
1346 struct io_tlb_pool *pool;
1347 unsigned long used = 0;
1348
1349 rcu_read_lock();
1350 list_for_each_entry_rcu(pool, &mem->pools, node)
1351 used += mem_pool_used(pool);
1352 rcu_read_unlock();
1353
1354 return used;
1355 #else
1356 return mem_pool_used(&mem->defpool);
1357 #endif
1358 }
1359
1360 #endif /* CONFIG_DEBUG_FS */
1361
1362 /**
1363 * swiotlb_tbl_map_single() - bounce buffer map a single contiguous physical area
1364 * @dev: Device which maps the buffer.
1365 * @orig_addr: Original (non-bounced) physical IO buffer address
1366 * @mapping_size: Requested size of the actual bounce buffer, excluding
1367 * any pre- or post-padding for alignment
1368 * @alloc_align_mask: Required start and end alignment of the allocated buffer
1369 * @dir: DMA direction
1370 * @attrs: Optional DMA attributes for the map operation
1371 *
1372 * Find and allocate a suitable sequence of IO TLB slots for the request.
1373 * The allocated space starts at an alignment specified by alloc_align_mask,
1374 * and the size of the allocated space is rounded up so that the total amount
1375 * of allocated space is a multiple of (alloc_align_mask + 1). If
1376 * alloc_align_mask is zero, the allocated space may be at any alignment and
1377 * the size is not rounded up.
1378 *
1379 * The returned address is within the allocated space and matches the bits
1380 * of orig_addr that are specified in the DMA min_align_mask for the device. As
1381 * such, this returned address may be offset from the beginning of the allocated
1382 * space. The bounce buffer space starting at the returned address for
1383 * mapping_size bytes is initialized to the contents of the original IO buffer
1384 * area. Any pre-padding (due to an offset) and any post-padding (due to
1385 * rounding-up the size) is not initialized.
1386 */
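/*
 * Sketch of the resulting layout (hypothetical values): mapping 1000 bytes
 * whose original address ends in 0xA10 for a device with min_align_mask
 * 0xFFF and alloc_align_mask 0 gives an offset of 0x210 from
 * swiotlb_align_offset(), so a single 2 KB slot is allocated and the
 * returned tlb_addr points 0x210 bytes into it; together with the slot
 * choice this keeps the address bits covered by min_align_mask equal to
 * those of orig_addr.
 */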
1387 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
1388 size_t mapping_size, unsigned int alloc_align_mask,
1389 enum dma_data_direction dir, unsigned long attrs)
1390 {
1391 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1392 unsigned int offset;
1393 struct io_tlb_pool *pool;
1394 unsigned int i;
1395 size_t size;
1396 int index;
1397 phys_addr_t tlb_addr;
1398 unsigned short pad_slots;
1399
1400 if (!mem || !mem->nslabs) {
1401 dev_warn_ratelimited(dev,
1402 "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
1403 return (phys_addr_t)DMA_MAPPING_ERROR;
1404 }
1405
1406 if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
1407 pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
1408
1409 /*
1410 * The default swiotlb memory pool is allocated with PAGE_SIZE
1411 * alignment. If a mapping is requested with larger alignment,
1412 * the mapping may be unable to use the initial slot(s) in all
1413 	 * sets of IO_TLB_SEGSIZE slots. In such a case, a mapping request
1414 	 * at or near the maximum mapping size would always fail.
1415 */
1416 dev_WARN_ONCE(dev, alloc_align_mask > ~PAGE_MASK,
1417 "Alloc alignment may prevent fulfilling requests with max mapping_size\n");
1418
1419 offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr);
1420 size = ALIGN(mapping_size + offset, alloc_align_mask + 1);
1421 index = swiotlb_find_slots(dev, orig_addr, size, alloc_align_mask, &pool);
1422 if (index == -1) {
1423 if (!(attrs & DMA_ATTR_NO_WARN))
1424 dev_warn_ratelimited(dev,
1425 "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
1426 size, mem->nslabs, mem_used(mem));
1427 return (phys_addr_t)DMA_MAPPING_ERROR;
1428 }
1429
1430 /*
1431 * If dma_skip_sync was set, reset it on first SWIOTLB buffer
1432 * mapping to always sync SWIOTLB buffers.
1433 */
1434 dma_reset_need_sync(dev);
1435
1436 /*
1437 * Save away the mapping from the original address to the DMA address.
1438 * This is needed when we sync the memory. Then we sync the buffer if
1439 * needed.
1440 */
1441 pad_slots = offset >> IO_TLB_SHIFT;
1442 offset &= (IO_TLB_SIZE - 1);
1443 index += pad_slots;
1444 pool->slots[index].pad_slots = pad_slots;
1445 for (i = 0; i < (nr_slots(size) - pad_slots); i++)
1446 pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
1447 tlb_addr = slot_addr(pool->start, index) + offset;
1448 /*
1449 * When the device is writing memory, i.e. dir == DMA_FROM_DEVICE, copy
1450 * the original buffer to the TLB buffer before initiating DMA in order
1451 * to preserve the original's data if the device does a partial write,
1452 * i.e. if the device doesn't overwrite the entire buffer. Preserving
1453 * the original data, even if it's garbage, is necessary to match
1454 * hardware behavior. Use of swiotlb is supposed to be transparent,
1455 * i.e. swiotlb must not corrupt memory by clobbering unwritten bytes.
1456 */
1457 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE, pool);
1458 return tlb_addr;
1459 }
1460
1461 static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr,
1462 struct io_tlb_pool *mem)
1463 {
1464 unsigned long flags;
1465 unsigned int offset = swiotlb_align_offset(dev, 0, tlb_addr);
1466 int index, nslots, aindex;
1467 struct io_tlb_area *area;
1468 int count, i;
1469
1470 index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
1471 index -= mem->slots[index].pad_slots;
1472 nslots = nr_slots(mem->slots[index].alloc_size + offset);
1473 aindex = index / mem->area_nslabs;
1474 area = &mem->areas[aindex];
1475
1476 /*
1477 * Return the buffer to the free list by setting the corresponding
1478 * entries to indicate the number of contiguous entries available.
1479 * While returning the entries to the free list, we merge the entries
1480 * with slots below and above the pool being returned.
1481 */
1482 BUG_ON(aindex >= mem->nareas);
1483
1484 spin_lock_irqsave(&area->lock, flags);
1485 if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
1486 count = mem->slots[index + nslots].list;
1487 else
1488 count = 0;
1489
1490 /*
1491 * Step 1: return the slots to the free list, merging the slots with
1492 	 * succeeding slots
1493 */
1494 for (i = index + nslots - 1; i >= index; i--) {
1495 mem->slots[i].list = ++count;
1496 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
1497 mem->slots[i].alloc_size = 0;
1498 mem->slots[i].pad_slots = 0;
1499 }
1500
1501 /*
1502 * Step 2: merge the returned slots with the preceding slots, if
1503 	 * available (non-zero)
1504 */
1505 for (i = index - 1;
1506 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
1507 i--)
1508 mem->slots[i].list = ++count;
1509 area->used -= nslots;
1510 spin_unlock_irqrestore(&area->lock, flags);
1511
1512 dec_used(dev->dma_io_tlb_mem, nslots);
1513 }
1514
1515 #ifdef CONFIG_SWIOTLB_DYNAMIC
1516
1517 /**
1518 * swiotlb_del_transient() - delete a transient memory pool
1519 * @dev: Device which mapped the buffer.
1520 * @tlb_addr: Physical address within a bounce buffer.
1521 * @pool: Pointer to the transient memory pool to be checked and deleted.
1522 *
1523 * Check whether the address belongs to a transient SWIOTLB memory pool.
1524 * If yes, then delete the pool.
1525 *
1526 * Return: %true if @tlb_addr belonged to a transient pool that was released.
1527 */
1528 static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr,
1529 struct io_tlb_pool *pool)
1530 {
1531 if (!pool->transient)
1532 return false;
1533
1534 dec_used(dev->dma_io_tlb_mem, pool->nslabs);
1535 swiotlb_del_pool(dev, pool);
1536 dec_transient_used(dev->dma_io_tlb_mem, pool->nslabs);
1537 return true;
1538 }
1539
1540 #else /* !CONFIG_SWIOTLB_DYNAMIC */
1541
1542 static inline bool swiotlb_del_transient(struct device *dev,
1543 phys_addr_t tlb_addr, struct io_tlb_pool *pool)
1544 {
1545 return false;
1546 }
1547
1548 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1549
1550 /*
1551 * tlb_addr is the physical address of the bounce buffer to unmap.
1552 */
1553 void __swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
1554 size_t mapping_size, enum dma_data_direction dir,
1555 unsigned long attrs, struct io_tlb_pool *pool)
1556 {
1557 /*
1558 * First, sync the memory before unmapping the entry
1559 */
1560 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
1561 (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
1562 swiotlb_bounce(dev, tlb_addr, mapping_size,
1563 DMA_FROM_DEVICE, pool);
1564
1565 if (swiotlb_del_transient(dev, tlb_addr, pool))
1566 return;
1567 swiotlb_release_slots(dev, tlb_addr, pool);
1568 }
1569
1570 void __swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
1571 size_t size, enum dma_data_direction dir,
1572 struct io_tlb_pool *pool)
1573 {
1574 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
1575 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE, pool);
1576 else
1577 BUG_ON(dir != DMA_FROM_DEVICE);
1578 }
1579
1580 void __swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
1581 size_t size, enum dma_data_direction dir,
1582 struct io_tlb_pool *pool)
1583 {
1584 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
1585 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE, pool);
1586 else
1587 BUG_ON(dir != DMA_TO_DEVICE);
1588 }
1589
1590 /*
1591 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
1592 * to the device copy the data into it as well.
1593 */
1594 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
1595 enum dma_data_direction dir, unsigned long attrs)
1596 {
1597 phys_addr_t swiotlb_addr;
1598 dma_addr_t dma_addr;
1599
1600 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
1601
1602 swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, 0, dir, attrs);
1603 if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
1604 return DMA_MAPPING_ERROR;
1605
1606 /* Ensure that the address returned is DMA'ble */
1607 dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
1608 if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
1609 __swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
1610 attrs | DMA_ATTR_SKIP_CPU_SYNC,
1611 swiotlb_find_pool(dev, swiotlb_addr));
1612 dev_WARN_ONCE(dev, 1,
1613 "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
1614 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
1615 return DMA_MAPPING_ERROR;
1616 }

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}
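
/*
 * Example (illustrative sketch, not part of this file): swiotlb_map() is the
 * slow path of a dma-direct style map operation.  A caller bounces only when
 * the buffer is not directly reachable by the device or when bouncing is
 * forced; the function name below is a hypothetical stand-in for the real
 * dma-direct code.
 *
 *	static dma_addr_t example_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		phys_addr_t phys = page_to_phys(page) + offset;
 *
 *		// Bounce through the software IO TLB if the device cannot
 *		// reach the buffer, or if bouncing is forced for this device.
 *		if (is_swiotlb_force_bounce(dev) ||
 *		    !dma_capable(dev, phys_to_dma(dev, phys), size, true))
 *			return swiotlb_map(dev, phys, size, dir, attrs);
 *
 *		return phys_to_dma(dev, phys);
 *	}
 */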

size_t swiotlb_max_mapping_size(struct device *dev)
{
	int min_align_mask = dma_get_min_align_mask(dev);
	int min_align = 0;

	/*
	 * swiotlb_find_slots() skips slots according to
	 * min align mask. This affects max mapping size.
	 * Take it into account here.
	 */
	if (min_align_mask)
		min_align = roundup(min_align_mask, IO_TLB_SIZE);

	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}
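
/*
 * Worked example (assuming the default IO_TLB_SHIFT of 11, i.e. 2 KiB slots,
 * and IO_TLB_SEGSIZE of 128): with no min_align_mask the largest mapping is
 * 2 KiB * 128 = 256 KiB.  A device with a min_align_mask of 4 KiB - 1 loses
 * roundup(4095, 2048) = 4 KiB of that, leaving 252 KiB.  Drivers normally see
 * this limit through dma_max_mapping_size(dev) rather than calling this
 * function directly.
 */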

/**
 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
 */
bool is_swiotlb_allocated(void)
{
	return io_tlb_default_mem.nslabs;
}

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
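
/*
 * Example (illustrative sketch, not part of this file): callers typically use
 * is_swiotlb_active() to decide whether bounce buffering can rescue a device
 * with a narrow DMA mask; the helper below is hypothetical.
 *
 *	// Either the device reaches all memory directly, or the software
 *	// IO TLB can bounce on its behalf.
 *	if (!dma_addressing_limited(dev) || is_swiotlb_active(dev))
 *		example_enable_large_transfers(dev);
 */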

/**
 * default_swiotlb_base() - get the base address of the default SWIOTLB
 *
 * Get the lowest physical address used by the default software IO TLB pool.
 * With CONFIG_SWIOTLB_DYNAMIC this also prevents the default pool from
 * growing any further, so the returned base stays meaningful.
 */
phys_addr_t default_swiotlb_base(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	io_tlb_default_mem.can_grow = false;
#endif
	return io_tlb_default_mem.defpool.start;
}

/**
 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
 *
 * Get the highest physical address used by the default software IO TLB pool.
 */
phys_addr_t default_swiotlb_limit(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	return io_tlb_default_mem.phys_limit;
#else
	return io_tlb_default_mem.defpool.end - 1;
#endif
}

#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_SWIOTLB_DYNAMIC
static unsigned long mem_transient_used(struct io_tlb_mem *mem)
{
	return atomic_long_read(&mem->transient_nslabs);
}

static int io_tlb_transient_used_get(void *data, u64 *val)
{
	struct io_tlb_mem *mem = data;

	*val = mem_transient_used(mem);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_transient_used, io_tlb_transient_used_get,
			 NULL, "%llu\n");
#endif /* CONFIG_SWIOTLB_DYNAMIC */

static int io_tlb_used_get(void *data, u64 *val)
{
	struct io_tlb_mem *mem = data;

	*val = mem_used(mem);
	return 0;
}

static int io_tlb_hiwater_get(void *data, u64 *val)
{
	struct io_tlb_mem *mem = data;

	*val = atomic_long_read(&mem->used_hiwater);
	return 0;
}

static int io_tlb_hiwater_set(void *data, u64 val)
{
	struct io_tlb_mem *mem = data;

	/* Only allow setting to zero */
	if (val != 0)
		return -EINVAL;

	atomic_long_set(&mem->used_hiwater, val);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
			 io_tlb_hiwater_set, "%llu\n");

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
					 const char *dirname)
{
	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
	if (!mem->nslabs)
		return;

	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
			    &fops_io_tlb_used);
	debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
			    &fops_io_tlb_hiwater);
#ifdef CONFIG_SWIOTLB_DYNAMIC
	debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
			    mem, &fops_io_tlb_transient_used);
#endif
}

static int __init swiotlb_create_default_debugfs(void)
{
	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
	return 0;
}

late_initcall(swiotlb_create_default_debugfs);
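
/*
 * With debugfs (typically mounted at /sys/kernel/debug) the default pool
 * exports its statistics under the "swiotlb" directory created above, and
 * each restricted pool gets a subdirectory named after its reserved-memory
 * node.  For example:
 *
 *	swiotlb/io_tlb_nslabs		total slots in the pool (read-only)
 *	swiotlb/io_tlb_used		slots currently in use
 *	swiotlb/io_tlb_used_hiwater	high-water mark; write 0 to reset
 *	swiotlb/io_tlb_transient_nslabs	transient slots (CONFIG_SWIOTLB_DYNAMIC)
 */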

#else /* !CONFIG_DEBUG_FS */

static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
						const char *dirname)
{
}

#endif /* CONFIG_DEBUG_FS */

#ifdef CONFIG_DMA_RESTRICTED_POOL

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_pool *pool;
	phys_addr_t tlb_addr;
	unsigned int align;
	int index;

	if (!mem)
		return NULL;

	align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
	index = swiotlb_find_slots(dev, 0, size, align, &pool);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(pool->start, index);
	if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
		dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
			      &tlb_addr);
		swiotlb_release_slots(dev, tlb_addr, pool);
		return NULL;
	}

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);
	struct io_tlb_pool *pool;

	pool = swiotlb_find_pool(dev, tlb_addr);
	if (!pool)
		return false;

	swiotlb_release_slots(dev, tlb_addr, pool);

	return true;
}
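
/*
 * Example (illustrative sketch, not part of this file): when a device is tied
 * to a restricted pool (mem->for_alloc below), coherent allocations come from
 * that pool instead of the page allocator.  A dma-direct style caller could
 * look roughly like this; the function name is a hypothetical stand-in:
 *
 *	static struct page *example_alloc_from_pool(struct device *dev, size_t size)
 *	{
 *		struct page *page = swiotlb_alloc(dev, size);
 *
 *		// Back out if the pool handed back memory the device cannot reach.
 *		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 *			swiotlb_free(dev, page, size);
 *			return NULL;
 *		}
 *		return page;
 *	}
 */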

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/* Set Per-device io tlb area to one */
	unsigned int nareas = 1;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		struct io_tlb_pool *pool;

		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;
		pool = &mem->defpool;

		pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
		if (!pool->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		pool->areas = kcalloc(nareas, sizeof(*pool->areas),
				      GFP_KERNEL);
		if (!pool->areas) {
			kfree(pool->slots);
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
					 false, nareas);
		mem->force_bounce = true;
		mem->for_alloc = true;
#ifdef CONFIG_SWIOTLB_DYNAMIC
		spin_lock_init(&mem->lock);
		INIT_LIST_HEAD_RCU(&mem->pools);
#endif
		add_mem_pool(mem, pool);

		rmem->priv = mem;

		swiotlb_create_debugfs_files(mem, rmem->name);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
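
/*
 * Example (illustrative device tree fragment, not part of this file): a
 * device is tied to a "restricted-dma-pool" region through a memory-region
 * phandle.  During DMA configuration the OF core attaches the region (via
 * of_reserved_mem_device_init_by_idx()), which ends up calling
 * rmem_swiotlb_device_init() above; from then on streaming DMA for the device
 * bounces through, and coherent memory is allocated from, the restricted
 * pool.  Node names and addresses below are made up.
 *
 *	reserved-memory {
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	crypto@60000000 {
 *		...
 *		memory-region = <&restricted_dma>;
 *	};
 */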
#endif /* CONFIG_DMA_RESTRICTED_POOL */