xref: /linux/include/linux/iommufd.h (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2021 Intel Corporation
4  * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
5  */
6 #ifndef __LINUX_IOMMUFD_H
7 #define __LINUX_IOMMUFD_H
8 
9 #include <linux/err.h>
10 #include <linux/errno.h>
11 #include <linux/refcount.h>
12 #include <linux/types.h>
13 #include <linux/xarray.h>
14 
/*
 * Forward declarations: only pointers to these types appear in this header,
 * so the full definitions are not needed here.
 */
struct device;
struct file;
struct iommu_group;
struct iommu_user_data;
struct iommu_user_data_array;
struct iommufd_access;
struct iommufd_ctx;
struct iommufd_device;
struct iommufd_viommu_ops;
struct page;
25 
/*
 * Type tag identifying what kind of iommufd object sits behind a userspace
 * ID handle (see struct iommufd_object below).
 */
enum iommufd_object_type {
	IOMMUFD_OBJ_NONE,
	/*
	 * ANY shares the value of NONE; presumably used as a wildcard when
	 * looking up an object of any type — confirm in iommufd core.
	 */
	IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE,
	IOMMUFD_OBJ_DEVICE,
	IOMMUFD_OBJ_HWPT_PAGING,
	IOMMUFD_OBJ_HWPT_NESTED,
	IOMMUFD_OBJ_IOAS,
	IOMMUFD_OBJ_ACCESS,
	IOMMUFD_OBJ_FAULT,
	IOMMUFD_OBJ_VIOMMU,
	IOMMUFD_OBJ_VDEVICE,
#ifdef CONFIG_IOMMUFD_TEST
	IOMMUFD_OBJ_SELFTEST,
#endif
	IOMMUFD_OBJ_MAX,
};
42 
/* Base struct for all objects with a userspace ID handle. */
struct iommufd_object {
	/*
	 * NOTE(review): a second refcount distinct from @users; the name
	 * suggests it tracks short-lived references that block final
	 * destruction — confirm semantics in the iommufd core.
	 */
	refcount_t shortterm_users;
	/* General reference count on the object */
	refcount_t users;
	/* Which IOMMUFD_OBJ_* kind this object is */
	enum iommufd_object_type type;
	/* The ID handle exposed to userspace */
	unsigned int id;
};
50 
/*
 * Lifecycle of a kernel-driver-owned iommufd device: bind creates the
 * iommufd_device object (presumably returning its object ID through @id —
 * confirm in iommufd core), unbind destroys it.
 */
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
					   struct device *dev, u32 *id);
void iommufd_device_unbind(struct iommufd_device *idev);

/* Attach/replace/detach the device to/from the page table object in *pt_id */
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
void iommufd_device_detach(struct iommufd_device *idev);

/* Accessors for the owning context and the ID of a bound device */
struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
u32 iommufd_device_to_id(struct iommufd_device *idev);
61 
/* Callbacks supplied by an iommufd_access user at creation time */
struct iommufd_access_ops {
	/*
	 * NOTE(review): presumably set when the user intends to call
	 * iommufd_access_pin_pages() — confirm against the iommufd core.
	 */
	u8 needs_pin_pages : 1;
	/*
	 * Called when an IOVA range used by the access is unmapped;
	 * @data is presumably the pointer passed to iommufd_access_create().
	 */
	void (*unmap)(void *data, unsigned long iova, unsigned long length);
};
66 
/* Flag bits for the @flags argument of iommufd_access_rw() */
enum {
	IOMMUFD_ACCESS_RW_READ = 0,
	IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
	/* Set if the caller is in a kthread then rw will use kthread_use_mm() */
	IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,

	/* Only for use by selftest */
	__IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2,
};
76 
/*
 * Lifecycle of an iommufd_access: create registers @ops/@data with the
 * context (presumably returning the object ID through @id — confirm),
 * destroy tears it down.
 */
struct iommufd_access *
iommufd_access_create(struct iommufd_ctx *ictx,
		      const struct iommufd_access_ops *ops, void *data, u32 *id);
void iommufd_access_destroy(struct iommufd_access *access);
/* Attach/replace/detach the access to/from the IOAS identified by @ioas_id */
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
void iommufd_access_detach(struct iommufd_access *access);

/* Take a reference on a context; pair with iommufd_ctx_put() */
void iommufd_ctx_get(struct iommufd_ctx *ictx);
86 
/**
 * struct iommufd_viommu - base struct embedded in a driver's vIOMMU object
 * @obj: base iommufd object; must be the first member (enforced by the
 *       static_asserts in iommufd_viommu_alloc())
 * @ictx: iommufd context that owns this vIOMMU
 * @iommu_dev: the physical IOMMU instance backing this vIOMMU
 * @hwpt: paging hw_pagetable — NOTE(review): presumably the nesting parent
 *        domain; confirm in iommufd core
 * @ops: driver-provided operations, see struct iommufd_viommu_ops
 * @vdevs: xarray of virtual devices, indexed by virtual device ID
 *         (see iommufd_viommu_find_dev())
 * @type: NOTE(review): presumably the userspace-requested vIOMMU type
 *        (IOMMU_VIOMMU_TYPE_*) — confirm
 */
struct iommufd_viommu {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommu_device *iommu_dev;
	struct iommufd_hwpt_paging *hwpt;

	const struct iommufd_viommu_ops *ops;

	struct xarray vdevs;

	unsigned int type;
};
99 
/**
 * struct iommufd_viommu_ops - vIOMMU specific operations
 * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory
 *           of the vIOMMU will be freed by iommufd core after calling this op
 * @alloc_domain_nested: Allocate an IOMMU_DOMAIN_NESTED on a vIOMMU that holds
 *                       a nesting parent domain (IOMMU_DOMAIN_PAGING).
 *                       @user_data must be defined in
 *                       include/uapi/linux/iommufd.h.
 *                       It must fully initialize the new iommu_domain before
 *                       returning. Upon failure, ERR_PTR must be returned.
 * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
 *                    any IOMMU hardware specific cache: TLB and device cache.
 *                    The @array passes in the cache invalidation requests, in
 *                    form of a driver data structure. A driver must update the
 *                    array->entry_num to report the number of handled requests.
 *                    The data structure of the array entry must be defined in
 *                    include/uapi/linux/iommufd.h
 */
struct iommufd_viommu_ops {
	void (*destroy)(struct iommufd_viommu *viommu);
	struct iommu_domain *(*alloc_domain_nested)(
		struct iommufd_viommu *viommu, u32 flags,
		const struct iommu_user_data *user_data);
	int (*cache_invalidate)(struct iommufd_viommu *viommu,
				struct iommu_user_data_array *array);
};
125 
#if IS_ENABLED(CONFIG_IOMMUFD)
/* Obtain/reference a context from an iommufd file or fd */
struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
void iommufd_ctx_put(struct iommufd_ctx *ictx);
bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);

/* Pin/unpin the user pages backing [iova, iova + length) for an access */
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
			     unsigned long length, struct page **out_pages,
			     unsigned int flags);
void iommufd_access_unpin_pages(struct iommufd_access *access,
				unsigned long iova, unsigned long length);
/* Copy @len bytes to/from the IOVA range; @flags is IOMMUFD_ACCESS_RW_* bits */
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
		      void *data, size_t len, unsigned int flags);
/* Helpers for the VFIO compatibility (legacy container) interface */
int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
#else /* !CONFIG_IOMMUFD */
/*
 * Inline stubs so always-built callers compile when iommufd is disabled.
 * NOTE(review): only a subset of the declarations above have stubs here;
 * the rest are presumably referenced only from code that is itself gated
 * on CONFIG_IOMMUFD — confirm before adding new callers.
 */
static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
{
}

static inline int iommufd_access_pin_pages(struct iommufd_access *access,
					   unsigned long iova,
					   unsigned long length,
					   struct page **out_pages,
					   unsigned int flags)
{
	return -EOPNOTSUPP;
}

static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
					      unsigned long iova,
					      unsigned long length)
{
}

static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
		      void *data, size_t len, unsigned int flags)
{
	return -EOPNOTSUPP;
}

static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}

static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_IOMMUFD */
183 
#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
/* Allocate a core-managed object of @size bytes tagged with @type */
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type);
/* Look up the struct device for the virtual device @vdev_id on @viommu */
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
				       unsigned long vdev_id);
#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
/* Stubs when the iommufd driver core is not built */
static inline struct iommufd_object *
_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size,
		      enum iommufd_object_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct device *
iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
{
	return NULL;
}
#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
204 
205 /*
206  * Helpers for IOMMU driver to allocate driver structures that will be freed by
207  * the iommufd core. The free op will be called prior to freeing the memory.
208  */
209 #define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops)             \
210 	({                                                                     \
211 		drv_struct *ret;                                               \
212 									       \
213 		static_assert(__same_type(struct iommufd_viommu,               \
214 					  ((drv_struct *)NULL)->member));      \
215 		static_assert(offsetof(drv_struct, member.obj) == 0);          \
216 		ret = (drv_struct *)_iommufd_object_alloc(                     \
217 			ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU);         \
218 		if (!IS_ERR(ret))                                              \
219 			ret->member.ops = viommu_ops;                          \
220 		ret;                                                           \
221 	})
222 #endif
223