/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H

#include <linux/vmstat.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * All page allocations that should be reported to userspace as
 * "iommu-pagetables" must use one of the functions below.  This includes
 * allocations of page-tables and other per-iommu_domain configuration
 * structures.
 *
 * This is necessary for proper accounting, as IOMMU state can be rather
 * large, e.g. multiple gigabytes in size.
 */

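/*
 * Example (an illustrative sketch, not taken from an in-tree driver): a
 * hypothetical driver would pair the allocation and free helpers below so
 * that the accounting stays balanced over the lifetime of a page-table page:
 *
 *	u64 *pte = iommu_alloc_page_node(dev_to_node(dev), GFP_KERNEL);
 *
 *	if (!pte)
 *		return -ENOMEM;
 *	...
 *	iommu_free_page(pte);
 */
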
/**
 * __iommu_alloc_account - account for a newly allocated page.
 * @page: head struct page of the page.
 * @order: order of the page
 */
static inline void __iommu_alloc_account(struct page *page, int order)
{
	const long pgcnt = 1L << order;

	mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
}

/**
 * __iommu_free_account - account for a page that is about to be freed.
 * @page: head struct page of the page.
 * @order: order of the page
 */
static inline void __iommu_free_account(struct page *page, int order)
{
	const long pgcnt = 1L << order;

	mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
}

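/*
 * Note: NR_IOMMU_PAGES is a regular per-node vmstat item, so the totals
 * accounted by the two helpers above are visible from userspace via the
 * vmstat interfaces (/proc/vmstat and the per-node vmstat files); the
 * exact item name is whatever mm/vmstat.c assigns to NR_IOMMU_PAGES.
 */
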
/**
 * __iommu_alloc_pages - allocate a zeroed page of a given order.
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * Return: the head struct page of the allocated page.
 */
static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
{
	struct page *page;

	page = alloc_pages(gfp | __GFP_ZERO, order);
	if (unlikely(!page))
		return NULL;

	__iommu_alloc_account(page, order);

	return page;
}

/**
 * __iommu_free_pages - free a page of a given order
 * @page: head struct page of the page
 * @order: page order
 */
static inline void __iommu_free_pages(struct page *page, int order)
{
	if (!page)
		return;

	__iommu_free_account(page, order);
	__free_pages(page, order);
}

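/*
 * Sketch of a higher-order use of the struct-page helpers above (the
 * order-9 size, i.e. 2 MiB with 4 KiB base pages, and the surrounding
 * driver context are hypothetical):
 *
 *	struct page *p = __iommu_alloc_pages(GFP_KERNEL, 9);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	... use page_address(p) as 2 MiB of page-table backing ...
 *	__iommu_free_pages(p, 9);
 */
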
/**
 * iommu_alloc_pages_node - allocate a zeroed page of a given order from a
 * specific NUMA node.
 * @nid: memory NUMA node id
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * Return: the virtual address of the allocated page.
 */
static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
{
	struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);

	if (unlikely(!page))
		return NULL;

	__iommu_alloc_account(page, order);

	return page_address(page);
}

/**
 * iommu_alloc_pages - allocate a zeroed page of a given order
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * Return: the virtual address of the allocated page.
 */
static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
	struct page *page = __iommu_alloc_pages(gfp, order);

	if (unlikely(!page))
		return NULL;

	return page_address(page);
}

/**
 * iommu_alloc_page_node - allocate a zeroed page on a specific NUMA node.
 * @nid: memory NUMA node id
 * @gfp: buddy allocator flags
 *
 * Return: the virtual address of the allocated page.
 */
static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
{
	return iommu_alloc_pages_node(nid, gfp, 0);
}

/**
 * iommu_alloc_page - allocate a zeroed page
 * @gfp: buddy allocator flags
 *
 * Return: the virtual address of the allocated page.
 */
static inline void *iommu_alloc_page(gfp_t gfp)
{
	return iommu_alloc_pages(gfp, 0);
}

/**
 * iommu_free_pages - free a page of a given order
 * @virt: virtual address of the page to be freed.
 * @order: page order
 */
static inline void iommu_free_pages(void *virt, int order)
{
	if (!virt)
		return;

	__iommu_free_pages(virt_to_page(virt), order);
}

/**
 * iommu_free_page - free a single page
 * @virt: virtual address of the page to be freed.
 */
static inline void iommu_free_page(void *virt)
{
	iommu_free_pages(virt, 0);
}

/**
 * iommu_put_pages_list - free a list of pages.
 * @page: the head of the lru list to be freed.
 *
 * There are no locking requirements for these pages: they are put on a free
 * list as soon as their refcount reaches 0. Pages are placed on this LRU list
 * once they have been removed from the IOMMU page tables; however, they can
 * still be accessed through debugfs.
 */
static inline void iommu_put_pages_list(struct list_head *page)
{
	while (!list_empty(page)) {
		struct page *p = list_entry(page->prev, struct page, lru);

		list_del(&p->lru);
		__iommu_free_account(p, 0);
		put_page(p);
	}
}

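/*
 * Usage sketch for iommu_put_pages_list() (the variable names and the
 * flush step are hypothetical): pages unlinked from a live page table are
 * batched on a local list and released only after the IOTLB has been
 * invalidated, so that concurrent hardware walks never see freed memory:
 *
 *	LIST_HEAD(freelist);
 *
 *	list_add_tail(&pt_page->lru, &freelist);
 *	... invalidate the IOTLB for the unmapped range ...
 *	iommu_put_pages_list(&freelist);
 */
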
#endif	/* __IOMMU_PAGES_H */