// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include "iommu-pages.h"
#include <linux/gfp.h>
#include <linux/mm.h>

#define IOPTDESC_MATCH(pg_elm, elm) \
	static_assert(offsetof(struct page, pg_elm) == \
		      offsetof(struct ioptdesc, elm))
IOPTDESC_MATCH(flags, __page_flags);
IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
IOPTDESC_MATCH(mapping, __page_mapping);
IOPTDESC_MATCH(private, _private);
IOPTDESC_MATCH(page_type, __page_type);
IOPTDESC_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
IOPTDESC_MATCH(memcg_data, memcg_data);
#endif
#undef IOPTDESC_MATCH
static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));

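/*
 * Because the field layout of struct ioptdesc matches struct page (checked by
 * the static_asserts above), a page-table allocation can be referred to either
 * by the virtual address returned by the allocator or by its descriptor. A
 * minimal sketch, using only helpers already used in this file
 * (virt_to_ioptdesc(), ioptdesc_folio(), folio_address()); "pt_va" stands for
 * an address returned by iommu_alloc_pages_node_sz():
 *
 *	struct ioptdesc *iopt = virt_to_ioptdesc(pt_va);
 *	void *same_va = folio_address(ioptdesc_folio(iopt));
 */
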
/**
 * iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from a
 *                             specific NUMA node
 * @nid: memory NUMA node id
 * @gfp: buddy allocator flags
 * @size: Memory size to allocate, rounded up to a power of 2
 *
 * Returns the virtual address of the allocated page. The page must be freed
 * either by calling iommu_free_pages() or via iommu_put_pages_list(). The
 * returned allocation is round_up_pow_two(size) bytes in size and is
 * physically aligned to its size.
 */
void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
{
	unsigned long pgcnt;
	struct folio *folio;
	unsigned int order;

	/* This uses page_address() on the memory. */
	if (WARN_ON(gfp & __GFP_HIGHMEM))
		return NULL;

	/*
	 * Currently sub-page allocations result in a full page being returned.
	 */
	order = get_order(size);

	/*
	 * __folio_alloc_node() does not handle NUMA_NO_NODE like
	 * alloc_pages_node() did.
	 */
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
	if (unlikely(!folio))
		return NULL;

	/*
	 * All page allocations that should be reported as "iommu-pagetables"
	 * to userspace must use one of the functions below. This includes
	 * allocations of page-tables and other per-iommu_domain configuration
	 * structures.
	 *
	 * This is necessary for proper accounting, as IOMMU state can be
	 * rather large, i.e. multiple gigabytes in size.
	 */
	pgcnt = 1UL << order;
	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);

	return folio_address(folio);
}
EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
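
/*
 * Usage sketch (illustrative only, not part of this file's API): a driver
 * allocating a zeroed page-table level on a given NUMA node and releasing it
 * again. The helper names and the SZ_2K size are made up for the example;
 * only the iommu-pages calls are real. Sub-page sizes currently still consume
 * a full page.
 *
 *	static u64 *example_alloc_table(int nid)
 *	{
 *		return iommu_alloc_pages_node_sz(nid, GFP_KERNEL, SZ_2K);
 *	}
 *
 *	static void example_free_table(u64 *table)
 *	{
 *		iommu_free_pages(table);
 *	}
 *
 * iommu_free_pages() below accepts a NULL pointer, so the caller does not
 * need to check for a failed allocation before freeing.
 */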

static void __iommu_free_desc(struct ioptdesc *iopt)
{
	struct folio *folio = ioptdesc_folio(iopt);
	const unsigned long pgcnt = 1UL << folio_order(folio);

	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
	folio_put(folio);
}

/**
 * iommu_free_pages - free pages
 * @virt: virtual address of the page to be freed.
 *
 * The page must have been allocated by iommu_alloc_pages_node_sz().
 */
void iommu_free_pages(void *virt)
{
	if (!virt)
		return;
	__iommu_free_desc(virt_to_ioptdesc(virt));
}
EXPORT_SYMBOL_GPL(iommu_free_pages);

/**
 * iommu_put_pages_list - free a list of pages.
 * @list: The list of pages to be freed
 *
 * Frees a list of pages allocated by iommu_alloc_pages_node_sz(). On return
 * the passed list is invalid; the caller must use IOMMU_PAGES_LIST_INIT to
 * reinitialize the list if it expects to use it again.
 */
void iommu_put_pages_list(struct iommu_pages_list *list)
{
	struct ioptdesc *iopt, *tmp;

	list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm)
		__iommu_free_desc(iopt);
}
EXPORT_SYMBOL_GPL(iommu_put_pages_list);
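
/*
 * Usage sketch (illustrative only): gathering page-table pages on a local
 * freelist during domain teardown and then releasing them with a single
 * iommu_put_pages_list() call. The helper name is hypothetical; the
 * open-coded list_add_tail() relies only on the iopt_freelist_elm field used
 * above, and IOMMU_PAGES_LIST_INIT() is assumed to initialize the embedded
 * list head as the kerneldoc describes.
 *
 *	static void example_free_two(void *pt_a, void *pt_b)
 *	{
 *		struct iommu_pages_list freelist =
 *			IOMMU_PAGES_LIST_INIT(freelist);
 *
 *		list_add_tail(&virt_to_ioptdesc(pt_a)->iopt_freelist_elm,
 *			      &freelist.pages);
 *		list_add_tail(&virt_to_ioptdesc(pt_b)->iopt_freelist_elm,
 *			      &freelist.pages);
 *
 *		iommu_put_pages_list(&freelist);
 *	}
 *
 * After iommu_put_pages_list() returns, "freelist" must be reinitialized
 * before it can be used again.
 */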