/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H

#include <linux/vmstat.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * All page allocations that should be reported as "iommu-pagetables" to
 * userspace must use one of the functions below.  This includes allocations
 * of page-tables and other per-iommu_domain configuration structures.
 *
 * This is necessary for proper accounting, as IOMMU state can be rather
 * large, e.g. multiple gigabytes in size.
 */
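
/*
 * Illustrative usage (a sketch; the GFP choice and error handling are the
 * caller's business, not mandated by this header): a driver allocates its
 * table pages through this API instead of calling alloc_pages() directly,
 * so that the memory is charged to NR_IOMMU_PAGES and NR_SECONDARY_PAGETABLE:
 *
 *	u64 *root = iommu_alloc_page(GFP_KERNEL);
 *
 *	if (!root)
 *		return -ENOMEM;
 *	...
 *	iommu_free_page(root);
 */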

/**
 * __iommu_alloc_account - account for a newly allocated page.
 * @page: head struct page of the page.
 * @order: order of the page
 */
static inline void __iommu_alloc_account(struct page *page, int order)
{
	const long pgcnt = 1L << order;

	mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
	mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
}

/**
 * __iommu_free_account - account a page that is about to be freed.
 * @page: head struct page of the page.
 * @order: order of the page
 */
static inline void __iommu_free_account(struct page *page, int order)
{
	const long pgcnt = 1L << order;

	mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
	mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
}

/**
 * __iommu_alloc_pages - allocate a zeroed page of a given order.
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * returns the head struct page of the allocated page.
 */
static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
{
	struct page *page;

	page = alloc_pages(gfp | __GFP_ZERO, order);
	if (unlikely(!page))
		return NULL;

	__iommu_alloc_account(page, order);

	return page;
}

/**
 * __iommu_free_pages - free a page of a given order
 * @page: head struct page of the page
 * @order: page order
 */
static inline void __iommu_free_pages(struct page *page, int order)
{
	if (!page)
		return;

	__iommu_free_account(page, order);
	__free_pages(page, order);
}

/**
 * iommu_alloc_pages_node - allocate a zeroed page of a given order from a
 * specific NUMA node.
 * @nid: memory NUMA node id
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * returns the virtual address of the allocated page
 */
static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
{
	struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);

	if (unlikely(!page))
		return NULL;

	__iommu_alloc_account(page, order);

	return page_address(page);
}
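
/*
 * Example (a sketch; "dev" and the use of dev_to_node() are assumptions made
 * for illustration): allocating a table page on the device's own NUMA node
 * keeps hardware page-table walks node-local:
 *
 *	void *table = iommu_alloc_pages_node(dev_to_node(dev), GFP_KERNEL, 0);
 *
 *	if (!table)
 *		return -ENOMEM;
 */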

/**
 * iommu_alloc_pages - allocate a zeroed page of a given order
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * returns the virtual address of the allocated page
 */
static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
	struct page *page = __iommu_alloc_pages(gfp, order);

	if (unlikely(!page))
		return NULL;

	return page_address(page);
}

/**
 * iommu_alloc_page_node - allocate a zeroed page on a specific NUMA node.
 * @nid: memory NUMA node id
 * @gfp: buddy allocator flags
 *
 * returns the virtual address of the allocated page
 */
static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
{
	return iommu_alloc_pages_node(nid, gfp, 0);
}

/**
 * iommu_alloc_page - allocate a zeroed page
 * @gfp: buddy allocator flags
 *
 * returns the virtual address of the allocated page
 */
static inline void *iommu_alloc_page(gfp_t gfp)
{
	return iommu_alloc_pages(gfp, 0);
}

/**
 * iommu_free_pages - free a page of a given order
 * @virt: virtual address of the page to be freed.
 * @order: page order
 */
static inline void iommu_free_pages(void *virt, int order)
{
	if (!virt)
		return;

	__iommu_free_pages(virt_to_page(virt), order);
}

/**
 * iommu_free_page - free a page
 * @virt: virtual address of the page to be freed.
 */
static inline void iommu_free_page(void *virt)
{
	iommu_free_pages(virt, 0);
}
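
/*
 * Example (a sketch; the order-2 request, i.e. four contiguous zeroed pages,
 * is arbitrary): a higher-order allocation must be freed with the same order
 * it was allocated with, since the order sizes both the accounting and the
 * buddy free:
 *
 *	void *tbl = iommu_alloc_pages(GFP_KERNEL, 2);
 *
 *	if (tbl)
 *		iommu_free_pages(tbl, 2);
 */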

/**
 * iommu_put_pages_list - free a list of pages.
 * @page: the head of the LRU list to be freed.
 *
 * There are no locking requirements for these pages, as they are going to be
 * put on a free list as soon as the refcount reaches 0. Pages are put on this
 * LRU list once they are removed from the IOMMU page tables. However, they
 * can still be accessed through debugfs.
 */
static inline void iommu_put_pages_list(struct list_head *page)
{
	while (!list_empty(page)) {
		struct page *p = list_entry(page->prev, struct page, lru);

		list_del(&p->lru);
		__iommu_free_account(p, 0);
		put_page(p);
	}
}
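
/*
 * Example (a sketch; "freelist" and the unmap/flush flow are assumptions made
 * for illustration): a driver gathers table pages retired from its page
 * tables on a local list and releases them in one batch, typically only
 * after the IOTLB has been flushed so the hardware can no longer walk them:
 *
 *	LIST_HEAD(freelist);
 *
 *	... for each struct page *p unlinked from the page tables ...
 *	list_add_tail(&p->lru, &freelist);
 *
 *	... after the IOTLB flush ...
 *	iommu_put_pages_list(&freelist);
 */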

#endif	/* __IOMMU_PAGES_H */