/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H

#include <linux/vmstat.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * All page allocations that should be reported to userspace as
 * "iommu-pagetables" must use one of the functions below.  This includes
 * allocations of page-tables and other per-iommu_domain configuration
 * structures.
 *
 * This is necessary for proper accounting, as IOMMU state can be rather
 * large, e.g. multiple gigabytes in size.
 */

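/*
 * Minimal usage sketch (hypothetical caller code; "dom" and "pt_root" are
 * made-up names, not part of this header):
 *
 *	u64 *pt_root = iommu_alloc_page_node(dom->nid, GFP_KERNEL);
 *
 *	if (!pt_root)
 *		return -ENOMEM;
 *	...
 *	iommu_free_page(pt_root);	// balances the accounting done at alloc
 */
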
/**
 * __iommu_alloc_account - account for a newly allocated page.
 * @page: head struct page of the page.
 * @order: order of the page
 */
static inline void __iommu_alloc_account(struct page *page, int order)
{
	const long pgcnt = 1l << order;

	mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
	mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
}

36bd3520a9SPasha Tatashin /**
37bd3520a9SPasha Tatashin  * __iommu_free_account - account a page that is about to be freed.
38bd3520a9SPasha Tatashin  * @page: head struct page of the page.
39bd3520a9SPasha Tatashin  * @order: order of the page
40bd3520a9SPasha Tatashin  */
__iommu_free_account(struct page * page,int order)41bd3520a9SPasha Tatashin static inline void __iommu_free_account(struct page *page, int order)
42bd3520a9SPasha Tatashin {
43bd3520a9SPasha Tatashin 	const long pgcnt = 1l << order;
44bd3520a9SPasha Tatashin 
45bd3520a9SPasha Tatashin 	mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
46*212c5c07SPasha Tatashin 	mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
47bd3520a9SPasha Tatashin }
48bd3520a9SPasha Tatashin 
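/*
 * Both counters touched by the helpers above are visible to userspace:
 * NR_IOMMU_PAGES is a per-node vmstat item (exported via /proc/vmstat),
 * and NR_SECONDARY_PAGETABLE is reported as "SecPageTables" in
 * /proc/meminfo and attributed to the allocating cgroup. For example:
 *
 *	$ grep SecPageTables /proc/meminfo
 */
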
/**
 * __iommu_alloc_pages - allocate a zeroed page of a given order.
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * returns the head struct page of the allocated page.
 */
static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
{
	struct page *page;

	page = alloc_pages(gfp | __GFP_ZERO, order);
	if (unlikely(!page))
		return NULL;

	__iommu_alloc_account(page, order);

	return page;
}

/**
 * __iommu_free_pages - free a page of a given order
 * @page: head struct page of the page
 * @order: page order
 */
static inline void __iommu_free_pages(struct page *page, int order)
{
	if (!page)
		return;

	__iommu_free_account(page, order);
	__free_pages(page, order);
}

/**
 * iommu_alloc_pages_node - allocate a zeroed page of a given order from a
 * specific NUMA node.
 * @nid: memory NUMA node id
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * returns the virtual address of the allocated page
 */
static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
{
	struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);

	if (unlikely(!page))
		return NULL;

	__iommu_alloc_account(page, order);

	return page_address(page);
}

/**
 * iommu_alloc_pages - allocate a zeroed page of a given order
 * @gfp: buddy allocator flags
 * @order: page order
 *
 * returns the virtual address of the allocated page
 */
static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
	struct page *page = __iommu_alloc_pages(gfp, order);

	if (unlikely(!page))
		return NULL;

	return page_address(page);
}

/**
 * iommu_alloc_page_node - allocate a zeroed page on a specific NUMA node.
 * @nid: memory NUMA node id
 * @gfp: buddy allocator flags
 *
 * returns the virtual address of the allocated page
 */
static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
{
	return iommu_alloc_pages_node(nid, gfp, 0);
}

/**
 * iommu_alloc_page - allocate a zeroed page
 * @gfp: buddy allocator flags
 *
 * returns the virtual address of the allocated page
 */
static inline void *iommu_alloc_page(gfp_t gfp)
{
	return iommu_alloc_pages(gfp, 0);
}

/**
 * iommu_free_pages - free a page of a given order
 * @virt: virtual address of the page to be freed.
 * @order: page order
 */
static inline void iommu_free_pages(void *virt, int order)
{
	if (!virt)
		return;

	__iommu_free_pages(virt_to_page(virt), order);
}

/**
 * iommu_free_page - free a single page
 * @virt: virtual address of the page to be freed.
 */
static inline void iommu_free_page(void *virt)
{
	iommu_free_pages(virt, 0);
}

/**
 * iommu_put_pages_list - free a list of pages.
 * @page: the head of the lru list to be freed.
 *
 * There are no locking requirements for these pages, as they are going to be
 * put on a free list as soon as the refcount reaches 0. Pages are put on this
 * LRU list once they are removed from the IOMMU page tables. However, they
 * can still be accessed through debugfs.
 */
static inline void iommu_put_pages_list(struct list_head *page)
{
	while (!list_empty(page)) {
		struct page *p = list_entry(page->prev, struct page, lru);

		list_del(&p->lru);
		__iommu_free_account(p, 0);
		put_page(p);
	}
}

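/*
 * Minimal sketch of the intended teardown pattern (hypothetical names;
 * domain_flush_iotlb() stands in for whatever invalidation the driver
 * performs). Note that the accounting above assumes order-0 pages:
 *
 *	LIST_HEAD(freelist);
 *
 *	// unlink the order-0 table page and queue it for batched freeing
 *	list_add_tail(&pt_page->lru, &freelist);
 *	...
 *	domain_flush_iotlb(dom);	// hypothetical IOTLB invalidation
 *	iommu_put_pages_list(&freelist);
 */
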
#endif	/* __IOMMU_PAGES_H */