xref: /linux/drivers/iommu/iommu-pages.h (revision ce5cfb0fa20dc6454da039612e34325b7b4a8243)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2024, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H

#include <linux/iommu.h>

/**
 * struct ioptdesc - Memory descriptor for IOMMU page tables
 * @iopt_freelist_elm: List element for a struct iommu_pages_list
 *
 * This struct overlays struct page for now. Do not modify without a good
 * understanding of the issues.
 */
struct ioptdesc {
	unsigned long __page_flags;

	struct list_head iopt_freelist_elm;
	unsigned long __page_mapping;
	union {
		u8 incoherent;
		pgoff_t __index;
	};
	void *_private;

	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

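/*
 * Illustrative sketch, not part of the original header: because struct
 * ioptdesc overlays struct page, layout drift could be caught at compile
 * time with a check along these lines (assuming struct page is visible
 * via <linux/mm_types.h>):
 *
 *	static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
 */
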
static inline struct ioptdesc *folio_ioptdesc(struct folio *folio)
{
	return (struct ioptdesc *)folio;
}

static inline struct folio *ioptdesc_folio(struct ioptdesc *iopt)
{
	return (struct folio *)iopt;
}

static inline struct ioptdesc *virt_to_ioptdesc(void *virt)
{
	return folio_ioptdesc(virt_to_folio(virt));
}
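
/*
 * Example (illustrative sketch, not part of the original header): the
 * helpers above are plain casts, so a driver can hop between a table's
 * virtual address, its descriptor, and its folio:
 *
 *	void *table = iommu_alloc_pages_sz(GFP_KERNEL, SZ_4K);
 *	struct ioptdesc *desc = virt_to_ioptdesc(table);
 *
 *	WARN_ON(folio_ioptdesc(ioptdesc_folio(desc)) != desc);
 */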

void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size);
void iommu_free_pages(void *virt);
void iommu_put_pages_list(struct iommu_pages_list *list);

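/*
 * Example (illustrative sketch; "dev" is a hypothetical IOMMU device):
 * allocate a zeroed page-table page near the IOMMU's NUMA node and
 * release it on teardown:
 *
 *	void *tbl = iommu_alloc_pages_node_sz(dev_to_node(dev), GFP_KERNEL,
 *					      SZ_4K);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	iommu_free_pages(tbl);
 */
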
/**
 * iommu_pages_list_add - add the page to an iommu_pages_list
 * @list: List to add the page to
 * @virt: Address returned from iommu_alloc_pages_node_sz()
 */
static inline void iommu_pages_list_add(struct iommu_pages_list *list,
					void *virt)
{
	list_add_tail(&virt_to_ioptdesc(virt)->iopt_freelist_elm, &list->pages);
}

/**
 * iommu_pages_list_splice - Move all the pages on @from onto @to
 * @from: Source list of pages
 * @to: Destination list of pages
 *
 * @from must be re-initialized after calling this function if it is to be
 * used again.
 */
static inline void iommu_pages_list_splice(struct iommu_pages_list *from,
					   struct iommu_pages_list *to)
{
	list_splice(&from->pages, &to->pages);
}

/**
 * iommu_pages_list_empty - True if the list is empty
 * @list: List to check
 */
static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
{
	return list_empty(&list->pages);
}
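
/*
 * Example (illustrative sketch; the gather/flush steps are hypothetical):
 * unmap paths typically batch dead page tables on a list so they can all
 * be freed after the IOTLB flush, using the IOMMU_PAGES_LIST_INIT()
 * initializer from <linux/iommu.h>:
 *
 *	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
 *
 *	iommu_pages_list_add(&freelist, table_va);
 *	... gather more tables, then flush the IOTLB ...
 *	if (!iommu_pages_list_empty(&freelist))
 *		iommu_put_pages_list(&freelist);
 */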

/**
 * iommu_alloc_pages_sz - Allocate a zeroed page of a given size without a
 *                        NUMA node preference
 * @gfp: buddy allocator flags
 * @size: Memory size to allocate, this is rounded up to a power of 2
 *
 * Returns the virtual address of the allocated page.
 */
static inline void *iommu_alloc_pages_sz(gfp_t gfp, size_t size)
{
	return iommu_alloc_pages_node_sz(NUMA_NO_NODE, gfp, size);
}

int iommu_pages_start_incoherent(void *virt, struct device *dma_dev);
int iommu_pages_start_incoherent_list(struct iommu_pages_list *list,
				      struct device *dma_dev);

#ifdef CONFIG_X86
#define IOMMU_PAGES_USE_DMA_API 0
#include <linux/cacheflush.h>

static inline void iommu_pages_flush_incoherent(struct device *dma_dev,
						void *virt, size_t offset,
						size_t len)
{
	clflush_cache_range(virt + offset, len);
}
static inline void
iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
				 struct device *dma_dev)
{
	/*
	 * For performance, leave the incoherent flag alone, which turns this
	 * into a NOP. On x86 the rest of the stop/free flow ignores the flag.
	 */
}
static inline void iommu_pages_free_incoherent(void *virt,
					       struct device *dma_dev)
{
	iommu_free_pages(virt);
}
#else
#define IOMMU_PAGES_USE_DMA_API 1
#include <linux/dma-mapping.h>

static inline void iommu_pages_flush_incoherent(struct device *dma_dev,
						void *virt, size_t offset,
						size_t len)
{
	dma_sync_single_for_device(dma_dev, (uintptr_t)virt + offset, len,
				   DMA_TO_DEVICE);
}
void iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
				      struct device *dma_dev);
void iommu_pages_free_incoherent(void *virt, struct device *dma_dev);
#endif
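
/*
 * Example (illustrative sketch; "dev" and "pte" are hypothetical): on a
 * non-cache-coherent IOMMU a table is registered once, flushed after each
 * CPU write, and freed through the matching helper:
 *
 *	void *tbl = iommu_alloc_pages_sz(GFP_KERNEL, SZ_4K);
 *
 *	if (iommu_pages_start_incoherent(tbl, dev))
 *		goto err_free;
 *	((u64 *)tbl)[0] = pte;
 *	iommu_pages_flush_incoherent(dev, tbl, 0, sizeof(u64));
 *	...
 *	iommu_pages_free_incoherent(tbl, dev);
 */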

#endif /* __IOMMU_PAGES_H */