xref: /linux/include/linux/iommu.h (revision 7203ca412fc8e8a0588e9adc0f777d3163f8dff3)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4  * Author: Joerg Roedel <joerg.roedel@amd.com>
5  */
6 
7 #ifndef __LINUX_IOMMU_H
8 #define __LINUX_IOMMU_H
9 
10 #include <linux/scatterlist.h>
11 #include <linux/device.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/err.h>
15 #include <linux/of.h>
16 #include <linux/iova_bitmap.h>
17 #include <uapi/linux/iommufd.h>
18 
19 #define IOMMU_READ	(1 << 0)
20 #define IOMMU_WRITE	(1 << 1)
21 #define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
22 #define IOMMU_NOEXEC	(1 << 3)
23 #define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
24 /*
25  * Where the bus hardware includes a privilege level as part of its access type
26  * markings, and certain devices are capable of issuing transactions marked as
27  * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
28  * given permission flags only apply to accesses at the higher privilege level,
29  * and that unprivileged transactions should have as little access as possible.
30  * This would usually imply the same permissions as kernel mappings on the CPU,
31  * if the IOMMU page table format is equivalent.
32  */
33 #define IOMMU_PRIV	(1 << 5)
34 
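/*
 * Example (editor's sketch, not part of the upstream header): the permission
 * flags above are OR'd together into the @prot argument of iommu_map(),
 * declared further below. A hypothetical caller mapping one cache-coherent,
 * read/write page might do:
 *
 *	int ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			    IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */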
35 struct iommu_ops;
36 struct iommu_group;
37 struct bus_type;
38 struct device;
39 struct iommu_domain;
40 struct iommu_domain_ops;
41 struct iommu_dirty_ops;
42 struct notifier_block;
43 struct iommu_sva;
44 struct iommu_dma_cookie;
45 struct iommu_dma_msi_cookie;
46 struct iommu_fault_param;
47 struct iommufd_ctx;
48 struct iommufd_viommu;
49 struct msi_desc;
50 struct msi_msg;
51 
52 #define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
53 #define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
54 #define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
55 #define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */
56 
57 /* Generic fault types, can be expanded for IRQ remapping faults */
58 enum iommu_fault_type {
59 	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
60 };
61 
62 /**
63  * struct iommu_fault_page_request - Page Request data
64  * @flags: encodes whether the corresponding fields are valid and whether this
65  *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
66  *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
67  *         must have the same PASID value as the page request. When it is clear,
68  *         the page response should not have a PASID.
69  * @pasid: Process Address Space ID
70  * @grpid: Page Request Group Index
71  * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
72  * @addr: page address
73  * @private_data: device-specific private information
74  */
75 struct iommu_fault_page_request {
76 #define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
77 #define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
78 #define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
79 	u32	flags;
80 	u32	pasid;
81 	u32	grpid;
82 	u32	perm;
83 	u64	addr;
84 	u64	private_data[2];
85 };
86 
87 /**
88  * struct iommu_fault - Generic fault data
89  * @type: fault type from &enum iommu_fault_type
90  * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
91  */
92 struct iommu_fault {
93 	u32 type;
94 	struct iommu_fault_page_request prm;
95 };
96 
97 /**
98  * enum iommu_page_response_code - Return status of fault handlers
99  * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
100  *	populated, retry the access. This is "Success" in PCI PRI.
101  * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
102  *	this device if possible. This is "Response Failure" in PCI PRI.
103  * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
104  *	access. This is "Invalid Request" in PCI PRI.
105  */
106 enum iommu_page_response_code {
107 	IOMMU_PAGE_RESP_SUCCESS = 0,
108 	IOMMU_PAGE_RESP_INVALID,
109 	IOMMU_PAGE_RESP_FAILURE,
110 };
111 
112 /**
113  * struct iommu_page_response - Generic page response information
114  * @pasid: Process Address Space ID
115  * @grpid: Page Request Group Index
116  * @code: response code from &enum iommu_page_response_code
117  */
118 struct iommu_page_response {
119 	u32	pasid;
120 	u32	grpid;
121 	u32	code;
122 };
123 
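/*
 * Example (editor's sketch): an I/O page fault handler typically answers a
 * page request by filling a struct iommu_page_response from the fault it
 * received; "fault" here is a hypothetical struct iommu_fault pointer:
 *
 *	struct iommu_page_response resp = {
 *		.pasid = fault->prm.pasid,
 *		.grpid = fault->prm.grpid,
 *		.code  = IOMMU_PAGE_RESP_SUCCESS,
 *	};
 *
 * The PASID is only meaningful to the response when the request had
 * IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID set in its flags.
 */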
124 struct iopf_fault {
125 	struct iommu_fault fault;
126 	/* node for pending lists */
127 	struct list_head list;
128 };
129 
130 struct iopf_group {
131 	struct iopf_fault last_fault;
132 	struct list_head faults;
133 	size_t fault_count;
134 	/* list node for iommu_fault_param::faults */
135 	struct list_head pending_node;
136 	struct work_struct work;
137 	struct iommu_attach_handle *attach_handle;
138 	/* The device's fault data parameter. */
139 	struct iommu_fault_param *fault_param;
140 	/* Used by handler provider to hook the group on its own lists. */
141 	struct list_head node;
142 	u32 cookie;
143 };
144 
145 /**
146  * struct iopf_queue - IO Page Fault queue
147  * @wq: the fault workqueue
148  * @devices: devices attached to this queue
149  * @lock: protects the device list
150  */
151 struct iopf_queue {
152 	struct workqueue_struct *wq;
153 	struct list_head devices;
154 	struct mutex lock;
155 };
156 
157 /* iommu fault flags */
158 #define IOMMU_FAULT_READ	0x0
159 #define IOMMU_FAULT_WRITE	0x1
160 
161 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
162 			struct device *, unsigned long, int, void *);
163 
164 struct iommu_domain_geometry {
165 	dma_addr_t aperture_start; /* First address that can be mapped    */
166 	dma_addr_t aperture_end;   /* Last address that can be mapped     */
167 	bool force_aperture;       /* DMA only allowed in mappable range? */
168 };
169 
170 enum iommu_domain_cookie_type {
171 	IOMMU_COOKIE_NONE,
172 	IOMMU_COOKIE_DMA_IOVA,
173 	IOMMU_COOKIE_DMA_MSI,
174 	IOMMU_COOKIE_FAULT_HANDLER,
175 	IOMMU_COOKIE_SVA,
176 	IOMMU_COOKIE_IOMMUFD,
177 };
178 
179 /* Domain feature flags */
180 #define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
181 #define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
182 					      implementation              */
183 #define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
184 #define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */
185 
186 #define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
187 #define __IOMMU_DOMAIN_PLATFORM	(1U << 5)
188 
189 #define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
190 					      on a stage-2 translation        */
191 
192 #define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
193 /*
194  * These are the possible domain types
195  *
196  *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
197  *				  devices
198  *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
199  *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
200  *				  for VMs
201  *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
202  *				  This flag allows IOMMU drivers to implement
203  *				  certain optimizations for these domains
204  *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
205  *				  invalidation.
206  *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
207  *				  represented by mm_struct's.
208  *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that implement their
209  *				  own DMA API handling. Do not use in new drivers.
210  */
211 #define IOMMU_DOMAIN_BLOCKED	(0U)
212 #define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
213 #define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
214 #define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
215 				 __IOMMU_DOMAIN_DMA_API)
216 #define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
217 				 __IOMMU_DOMAIN_DMA_API |	\
218 				 __IOMMU_DOMAIN_DMA_FQ)
219 #define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
220 #define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
221 #define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)
222 
223 struct iommu_domain {
224 	unsigned type;
225 	enum iommu_domain_cookie_type cookie_type;
226 	const struct iommu_domain_ops *ops;
227 	const struct iommu_dirty_ops *dirty_ops;
228 	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
229 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
230 	struct iommu_domain_geometry geometry;
231 	int (*iopf_handler)(struct iopf_group *group);
232 
233 	union { /* cookie */
234 		struct iommu_dma_cookie *iova_cookie;
235 		struct iommu_dma_msi_cookie *msi_cookie;
236 		struct iommufd_hw_pagetable *iommufd_hwpt;
237 		struct {
238 			iommu_fault_handler_t handler;
239 			void *handler_token;
240 		};
241 		struct {	/* IOMMU_DOMAIN_SVA */
242 			struct mm_struct *mm;
243 			int users;
244 			/*
245 			 * Next iommu_domain in mm->iommu_mm->sva-domains list
246 			 * protected by iommu_sva_lock.
247 			 */
248 			struct list_head next;
249 		};
250 	};
251 };
252 
253 static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
254 {
255 	return domain->type & __IOMMU_DOMAIN_DMA_API;
256 }
257 
258 enum iommu_cap {
259 	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
260 	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
261 	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
262 					   DMA protection and we should too */
263 	/*
264 	 * Per-device flag indicating if enforce_cache_coherency() will work on
265 	 * this device.
266 	 */
267 	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
268 	/*
269 	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
270 	 * usefully support the non-strict DMA flush queue.
271 	 */
272 	IOMMU_CAP_DEFERRED_FLUSH,
273 	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
274 };
275 
276 /* These are the possible reserved region types */
277 enum iommu_resv_type {
278 	/* Memory regions which must be mapped 1:1 at all times */
279 	IOMMU_RESV_DIRECT,
280 	/*
281 	 * Memory regions which are advertised to be 1:1 but are
282 	 * commonly considered relaxable in some conditions,
283 	 * for instance in device assignment use case (USB, Graphics)
284 	 */
285 	IOMMU_RESV_DIRECT_RELAXABLE,
286 	/* Arbitrary "never map this or give it to a device" address ranges */
287 	IOMMU_RESV_RESERVED,
288 	/* Hardware MSI region (untranslated) */
289 	IOMMU_RESV_MSI,
290 	/* Software-managed MSI translation window */
291 	IOMMU_RESV_SW_MSI,
292 };
293 
294 /**
295  * struct iommu_resv_region - descriptor for a reserved memory region
296  * @list: Linked list pointers
297  * @start: System physical start address of the region
298  * @length: Length of the region in bytes
299  * @prot: IOMMU Protection flags (READ/WRITE/...)
300  * @type: Type of the reserved region
301  * @free: Callback to free associated memory allocations
302  */
303 struct iommu_resv_region {
304 	struct list_head	list;
305 	phys_addr_t		start;
306 	size_t			length;
307 	int			prot;
308 	enum iommu_resv_type	type;
309 	void (*free)(struct device *dev, struct iommu_resv_region *region);
310 };
311 
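/*
 * Example (editor's sketch): a driver's ->get_resv_regions() callback usually
 * builds entries with iommu_alloc_resv_region() (declared further below) and
 * appends them to the caller's list. The MSI doorbell base and size here are
 * made-up values:
 *
 *	struct iommu_resv_region *region;
 *
 *	region = iommu_alloc_resv_region(0x08000000, SZ_1M,
 *					 IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
 *					 IOMMU_RESV_SW_MSI, GFP_KERNEL);
 *	if (region)
 *		list_add_tail(&region->list, list);
 */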
312 struct iommu_iort_rmr_data {
313 	struct iommu_resv_region rr;
314 
315 	/* Stream IDs associated with IORT RMR entry */
316 	const u32 *sids;
317 	u32 num_sids;
318 };
319 
320 #define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
321 #define IOMMU_FIRST_GLOBAL_PASID	(1U) /* Starting range for allocation */
322 #define IOMMU_PASID_INVALID	(-1U)
323 typedef unsigned int ioasid_t;
324 
325 /* Read but do not clear any dirty bits */
326 #define IOMMU_DIRTY_NO_CLEAR (1 << 0)
327 
328 /*
329  * Pages allocated through iommu_alloc_pages_node_sz() can be placed on this
330  * list using iommu_pages_list_add(). Note: ONLY pages from
331  * iommu_alloc_pages_node_sz() can be used this way!
332  */
333 struct iommu_pages_list {
334 	struct list_head pages;
335 };
336 
337 #define IOMMU_PAGES_LIST_INIT(name) \
338 	((struct iommu_pages_list){ .pages = LIST_HEAD_INIT(name.pages) })
339 
340 #ifdef CONFIG_IOMMU_API
341 
342 /**
343  * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
344  *
345  * @start: IOVA representing the start of the range to be flushed
346  * @end: IOVA representing the end of the range to be flushed (inclusive)
347  * @pgsize: The interval at which to perform the flush
348  * @freelist: Removed pages to free after sync
349  * @queued: Indicates that the flush will be queued
350  *
351  * This structure is intended to be updated by multiple calls to the
352  * ->unmap() function in struct iommu_ops before eventually being passed
353  * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
354  * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
355  * them. @queued is set to indicate when ->iotlb_flush_all() will be called
356  * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
357  */
358 struct iommu_iotlb_gather {
359 	unsigned long		start;
360 	unsigned long		end;
361 	size_t			pgsize;
362 	struct iommu_pages_list	freelist;
363 	bool			queued;
364 };
365 
366 /**
367  * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
368  * @bitmap: IOVA bitmap
369  * @gather: Range information for a pending IOTLB flush
370  */
371 struct iommu_dirty_bitmap {
372 	struct iova_bitmap *bitmap;
373 	struct iommu_iotlb_gather *gather;
374 };
375 
376 /**
377  * struct iommu_dirty_ops - domain specific dirty tracking operations
378  * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
379  * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
380  *                        into a bitmap, with a bit represented as a page.
381  *                        Reads the dirty PTE bits and clears it from IO
382  *                        pagetables.
383  */
384 struct iommu_dirty_ops {
385 	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
386 	int (*read_and_clear_dirty)(struct iommu_domain *domain,
387 				    unsigned long iova, size_t size,
388 				    unsigned long flags,
389 				    struct iommu_dirty_bitmap *dirty);
390 };
391 
392 /**
393  * struct iommu_user_data - iommu driver specific user space data info
394  * @type: The data type of the user buffer
395  * @uptr: Pointer to the user buffer for copy_from_user()
396  * @len: The length of the user buffer in bytes
397  *
398  * A user space data buffer is defined by a uAPI struct in include/uapi/linux/iommufd.h.
399  * @type, @uptr and @len should be copied directly from the iommufd core uAPI struct.
400  */
401 struct iommu_user_data {
402 	unsigned int type;
403 	void __user *uptr;
404 	size_t len;
405 };
406 
407 /**
408  * struct iommu_user_data_array - iommu driver specific user space data array
409  * @type: The data type of all the entries in the user buffer array
410  * @uptr: Pointer to the user buffer array
411  * @entry_len: The fixed-width length of an entry in the array, in bytes
412  * @entry_num: The number of total entries in the array
413  *
414  * The user buffer includes an array of requests with format defined in
415  * include/uapi/linux/iommufd.h
416  */
417 struct iommu_user_data_array {
418 	unsigned int type;
419 	void __user *uptr;
420 	size_t entry_len;
421 	u32 entry_num;
422 };
423 
424 /**
425  * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
426  * @dst_data: Pointer to an iommu driver specific user data that is defined in
427  *            include/uapi/linux/iommufd.h
428  * @src_data: Pointer to a struct iommu_user_data for user space data info
429  * @data_type: The data type of the @dst_data. Must match with @src_data.type
430  * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
431  * @min_len: Initial length of user data structure for backward compatibility.
432  *           This should be offsetofend using the last member in the user data
433  *           struct that was initially added to include/uapi/linux/iommufd.h
434  */
435 static inline int __iommu_copy_struct_from_user(
436 	void *dst_data, const struct iommu_user_data *src_data,
437 	unsigned int data_type, size_t data_len, size_t min_len)
438 {
439 	if (WARN_ON(!dst_data || !src_data))
440 		return -EINVAL;
441 	if (src_data->type != data_type)
442 		return -EINVAL;
443 	if (src_data->len < min_len || data_len < src_data->len)
444 		return -EINVAL;
445 	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
446 				     src_data->len);
447 }
448 
449 /**
450  * iommu_copy_struct_from_user - Copy iommu driver specific user space data
451  * @kdst: Pointer to an iommu driver specific user data that is defined in
452  *        include/uapi/linux/iommufd.h
453  * @user_data: Pointer to a struct iommu_user_data for user space data info
454  * @data_type: The data type of the @kdst. Must match with @user_data->type
455  * @min_last: The last member of the data structure @kdst points to in the
456  *            initial version.
457  * Return 0 for success, otherwise -error.
458  */
459 #define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
460 	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
461 				      sizeof(*kdst),                      \
462 				      offsetofend(typeof(*kdst), min_last))
463 
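/*
 * Example (editor's sketch): a driver's user-data aware domain allocation path
 * copies its uAPI structure with the macro above. The VT-d stage-1 structure
 * and data type from the iommufd uAPI are used purely as an illustration:
 *
 *	struct iommu_hwpt_vtd_s1 vtd;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&vtd, user_data,
 *					  IOMMU_HWPT_DATA_VTD_S1, __reserved);
 *	if (ret)
 *		return ERR_PTR(ret);
 */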
464 /**
465  * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
466  *                                       data from an iommu_user_data_array
467  * @dst_data: Pointer to an iommu driver specific user data that is defined in
468  *            include/uapi/linux/iommufd.h
469  * @src_array: Pointer to a struct iommu_user_data_array for a user space array
470  * @data_type: The data type of the @dst_data. Must match with @src_array.type
471  * @index: Index to the location in the array to copy user data from
472  * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
473  * @min_len: Initial length of user data structure for backward compatibility.
474  *           This should be offsetofend using the last member in the user data
475  *           struct that was initially added to include/uapi/linux/iommufd.h
476  */
477 static inline int __iommu_copy_struct_from_user_array(
478 	void *dst_data, const struct iommu_user_data_array *src_array,
479 	unsigned int data_type, unsigned int index, size_t data_len,
480 	size_t min_len)
481 {
482 	struct iommu_user_data src_data;
483 
484 	if (WARN_ON(!src_array || index >= src_array->entry_num))
485 		return -EINVAL;
486 	if (!src_array->entry_num)
487 		return -EINVAL;
488 	src_data.uptr = src_array->uptr + src_array->entry_len * index;
489 	src_data.len = src_array->entry_len;
490 	src_data.type = src_array->type;
491 
492 	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
493 					     data_len, min_len);
494 }
495 
496 /**
497  * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
498  *                                     data from an iommu_user_data_array
499  * @kdst: Pointer to an iommu driver specific user data that is defined in
500  *        include/uapi/linux/iommufd.h
501  * @user_array: Pointer to a struct iommu_user_data_array for a user space
502  *              array
503  * @data_type: The data type of the @kdst. Must match with @user_array->type
504  * @index: Index to the location in the array to copy user data from
505  * @min_last: The last member of the data structure @kdst points to in the
506  *            initial version.
507  *
508  * Copy a single entry from a user array. Return 0 for success, otherwise
509  * -error.
510  */
511 #define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
512 					  min_last)                           \
513 	__iommu_copy_struct_from_user_array(                                  \
514 		kdst, user_array, data_type, index, sizeof(*(kdst)),          \
515 		offsetofend(typeof(*(kdst)), min_last))
516 
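/*
 * Example (editor's sketch): a ->cache_invalidate_user() implementation walks
 * the user array entry by entry and reports back how many entries it handled.
 * The VT-d invalidation structure and data type are used only as an
 * illustration:
 *
 *	struct iommu_hwpt_vtd_s1_invalidate inv;
 *	u32 index;
 *	int ret = 0;
 *
 *	for (index = 0; index < array->entry_num; index++) {
 *		ret = iommu_copy_struct_from_user_array(&inv, array,
 *				IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
 *				index, __reserved);
 *		if (ret)
 *			break;
 *		... perform one invalidation ...
 *	}
 *	array->entry_num = index;
 */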
517 /**
518  * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
519  *         space data from an iommu_user_data_array
520  * @kdst: Pointer to an iommu driver specific user data that is defined in
521  *        include/uapi/linux/iommufd.h
522  * @kdst_entry_size: sizeof(*kdst)
523  * @user_array: Pointer to a struct iommu_user_data_array for a user space
524  *              array
525  * @data_type: The data type of the @kdst. Must match with @user_array->type
526  *
527  * Copy the entire user array. kdst must have room for kdst_entry_size *
528  * user_array->entry_num bytes. Return 0 for success, otherwise -error.
529  */
530 static inline int
531 iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
532 				       struct iommu_user_data_array *user_array,
533 				       unsigned int data_type)
534 {
535 	unsigned int i;
536 	int ret;
537 
538 	if (user_array->type != data_type)
539 		return -EINVAL;
540 	if (!user_array->entry_num)
541 		return -EINVAL;
542 	if (likely(user_array->entry_len == kdst_entry_size)) {
543 		if (copy_from_user(kdst, user_array->uptr,
544 				   user_array->entry_num *
545 					   user_array->entry_len))
546 			return -EFAULT;
547 	}
548 
549 	/* Copy item by item */
550 	for (i = 0; i != user_array->entry_num; i++) {
551 		ret = copy_struct_from_user(
552 			kdst + kdst_entry_size * i, kdst_entry_size,
553 			user_array->uptr + user_array->entry_len * i,
554 			user_array->entry_len);
555 		if (ret)
556 			return ret;
557 	}
558 	return 0;
559 }
560 
561 /**
562  * __iommu_copy_struct_to_user - Report iommu driver specific user space data
563  * @dst_data: Pointer to a struct iommu_user_data for user space data location
564  * @src_data: Pointer to an iommu driver specific user data that is defined in
565  *            include/uapi/linux/iommufd.h
566  * @data_type: The data type of the @src_data. Must match with @dst_data.type
567  * @data_len: Length of current user data structure, i.e. sizeof(struct _src)
568  * @min_len: Initial length of user data structure for backward compatibility.
569  *           This should be offsetofend using the last member in the user data
570  *           struct that was initially added to include/uapi/linux/iommufd.h
571  */
572 static inline int
573 __iommu_copy_struct_to_user(const struct iommu_user_data *dst_data,
574 			    void *src_data, unsigned int data_type,
575 			    size_t data_len, size_t min_len)
576 {
577 	if (WARN_ON(!dst_data || !src_data))
578 		return -EINVAL;
579 	if (dst_data->type != data_type)
580 		return -EINVAL;
581 	if (dst_data->len < min_len || data_len < dst_data->len)
582 		return -EINVAL;
583 	return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data,
584 				   data_len, NULL);
585 }
586 
587 /**
588  * iommu_copy_struct_to_user - Report iommu driver specific user space data
589  * @user_data: Pointer to a struct iommu_user_data for user space data location
590  * @ksrc: Pointer to an iommu driver specific user data that is defined in
591  *        include/uapi/linux/iommufd.h
592  * @data_type: The data type of the @ksrc. Must match with @user_data->type
593  * @min_last: The last member of the data structure @ksrc points to in the
594  *            initial version.
595  * Return 0 for success, otherwise -error.
596  */
597 #define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last)        \
598 	__iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \
599 				    offsetofend(typeof(*ksrc), min_last))
600 
601 /**
602  * struct iommu_ops - iommu ops and capabilities
603  * @capable: check capability
604  * @hw_info: report iommu hardware information. The data buffer returned by this
605  *           op is allocated in the iommu driver and freed by the caller after
606  *           use. @type passes in a requested type and returns the supported type.
607  *           The driver should reject an unsupported data @type input
608  * @domain_alloc: Do not use in new drivers
609  * @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to
610  *                         use identity_domain instead. This should only be used
611  *                         if dynamic logic is necessary.
612  * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
613  *                     input parameters as defined in
614  *                     include/uapi/linux/iommufd.h. The @user_data can be
615  *                     optionally provided; the new domain must support
616  *                     __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
617  *                     returned.
618  * @domain_alloc_paging: Allocate an iommu_domain that can be used for
619  *                       UNMANAGED, DMA, and DMA_FQ domain types. This is the
620  *                       same as invoking domain_alloc_paging_flags() with
621  *                       @flags=0, @user_data=NULL. A driver should implement
622  *                       only one of the two ops.
623  * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
624  * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
625  * @probe_device: Add device to iommu driver handling
626  * @release_device: Remove device from iommu driver handling
627  * @probe_finalize: Do final setup work after the device is added to an IOMMU
628  *                  group and attached to the groups domain
629  * @device_group: find iommu group for a particular device
630  * @get_resv_regions: Request list of reserved regions for a device
631  * @of_xlate: add OF master IDs to iommu grouping
632  * @is_attach_deferred: Check if domain attach should be deferred from iommu
633  *                      driver init to device driver init (default no)
634  * @page_response: handle page request response
635  * @def_domain_type: device default domain type, return value:
636  *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
637  *		- IOMMU_DOMAIN_DMA: must use a dma domain
638  *		- 0: use the default setting
639  * @default_domain_ops: the default ops for domains
640  * @get_viommu_size: Get the size of a driver-level vIOMMU structure for a given
641  *                   @dev corresponding to @viommu_type. The driver should return
642  *                   0 if the vIOMMU type isn't supported. The driver is required
643  *                   to use the VIOMMU_STRUCT_SIZE macro to sanitize the
644  *                   driver-level vIOMMU structure relative to the core one
645  * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical
646  *               IOMMU instance @viommu->iommu_dev, as the set of virtualization
647  *               resources shared/passed to the user space IOMMU instance.
648  *               Associate it with a nesting @parent_domain. The driver is
649  *               required to set @viommu->ops to point to its own viommu_ops
650  * @owner: Driver module providing these ops
651  * @identity_domain: An always available, always attachable identity
652  *                   translation.
653  * @blocked_domain: An always available, always attachable blocking
654  *                  translation.
655  * @default_domain: If not NULL this will always be set as the default domain.
656  *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
657  *                  Do not use in new drivers.
658  * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
659  *                    no user domain for each PASID and the I/O page faults are
660  *                    forwarded through the user domain attached to the device
661  *                    RID.
662  */
663 struct iommu_ops {
664 	bool (*capable)(struct device *dev, enum iommu_cap);
665 	void *(*hw_info)(struct device *dev, u32 *length,
666 			 enum iommu_hw_info_type *type);
667 
668 	/* Domain allocation and freeing by the iommu driver */
669 #if IS_ENABLED(CONFIG_FSL_PAMU)
670 	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
671 #endif
672 	struct iommu_domain *(*domain_alloc_identity)(struct device *dev);
673 	struct iommu_domain *(*domain_alloc_paging_flags)(
674 		struct device *dev, u32 flags,
675 		const struct iommu_user_data *user_data);
676 	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
677 	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
678 						 struct mm_struct *mm);
679 	struct iommu_domain *(*domain_alloc_nested)(
680 		struct device *dev, struct iommu_domain *parent, u32 flags,
681 		const struct iommu_user_data *user_data);
682 
683 	struct iommu_device *(*probe_device)(struct device *dev);
684 	void (*release_device)(struct device *dev);
685 	void (*probe_finalize)(struct device *dev);
686 	struct iommu_group *(*device_group)(struct device *dev);
687 
688 	/* Request/Free a list of reserved regions for a device */
689 	void (*get_resv_regions)(struct device *dev, struct list_head *list);
690 
691 	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
692 	bool (*is_attach_deferred)(struct device *dev);
693 
694 	/* Per device IOMMU features */
695 	void (*page_response)(struct device *dev, struct iopf_fault *evt,
696 			      struct iommu_page_response *msg);
697 
698 	int (*def_domain_type)(struct device *dev);
699 
700 	size_t (*get_viommu_size)(struct device *dev,
701 				  enum iommu_viommu_type viommu_type);
702 	int (*viommu_init)(struct iommufd_viommu *viommu,
703 			   struct iommu_domain *parent_domain,
704 			   const struct iommu_user_data *user_data);
705 
706 	const struct iommu_domain_ops *default_domain_ops;
707 	struct module *owner;
708 	struct iommu_domain *identity_domain;
709 	struct iommu_domain *blocked_domain;
710 	struct iommu_domain *release_domain;
711 	struct iommu_domain *default_domain;
712 	u8 user_pasid_table:1;
713 };
714 
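/*
 * Example (editor's sketch): a minimal driver only fills in the ops it
 * actually supports; all "my_iommu_*" symbols are hypothetical:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable		= my_iommu_capable,
 *		.domain_alloc_paging	= my_iommu_domain_alloc_paging,
 *		.probe_device		= my_iommu_probe_device,
 *		.release_device		= my_iommu_release_device,
 *		.device_group		= generic_device_group,
 *		.of_xlate		= my_iommu_of_xlate,
 *		.owner			= THIS_MODULE,
 *		.default_domain_ops	= &my_iommu_domain_ops,
 *	};
 */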
715 /**
716  * struct iommu_domain_ops - domain specific operations
717  * @attach_dev: attach an iommu domain to a device
718  *  Return:
719  * * 0		- success
720  * * EINVAL	- can indicate that device and domain are incompatible due to
721  *		  some previous configuration of the domain, in which case the
722  *		  driver shouldn't log an error, since it is legitimate for a
723  *		  caller to test reuse of existing domains. Otherwise, it may
724  *		  still represent some other fundamental problem
725  * * ENOMEM	- out of memory
726  * * ENOSPC	- non-ENOMEM type of resource allocation failures
727  * * EBUSY	- device is attached to a domain and cannot be changed
728  * * ENODEV	- device specific errors, not able to be attached
729  * * <others>	- treated as ENODEV by the caller. Use is discouraged
730  * @set_dev_pasid: set or replace an iommu domain for a PASID of a device. On
731  *                 error, the PASID of the device should be left in the old config.
732  * @map_pages: map a physically contiguous set of pages of the same size to
733  *             an iommu domain.
734  * @unmap_pages: unmap a number of pages of the same size from an iommu domain
735  * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
736  * @iotlb_sync_map: Sync mappings created recently using @map_pages to the hardware
737  * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
738  *            queue
739  * @cache_invalidate_user: Flush hardware cache for user space IO page table.
740  *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
741  *                         passes in the cache invalidation requests, in form
742  *                         of a driver data structure. The driver must update
743  *                         array->entry_num to report the number of handled
744  *                         invalidation requests. The driver data structure
745  *                         must be defined in include/uapi/linux/iommufd.h
746  * @iova_to_phys: translate iova to physical address
747  * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
748  *                           including no-snoop TLPs on PCIe or other platform
749  *                           specific mechanisms.
750  * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
751  * @free: Release the domain after use.
752  */
753 struct iommu_domain_ops {
754 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev,
755 			  struct iommu_domain *old);
756 	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
757 			     ioasid_t pasid, struct iommu_domain *old);
758 
759 	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
760 			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
761 			 int prot, gfp_t gfp, size_t *mapped);
762 	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
763 			      size_t pgsize, size_t pgcount,
764 			      struct iommu_iotlb_gather *iotlb_gather);
765 
766 	void (*flush_iotlb_all)(struct iommu_domain *domain);
767 	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
768 			      size_t size);
769 	void (*iotlb_sync)(struct iommu_domain *domain,
770 			   struct iommu_iotlb_gather *iotlb_gather);
771 	int (*cache_invalidate_user)(struct iommu_domain *domain,
772 				     struct iommu_user_data_array *array);
773 
774 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
775 				    dma_addr_t iova);
776 
777 	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
778 	int (*set_pgtable_quirks)(struct iommu_domain *domain,
779 				  unsigned long quirks);
780 
781 	void (*free)(struct iommu_domain *domain);
782 };
783 
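/*
 * Example (editor's sketch): the matching domain ops for the iommu_ops sketch
 * earlier in this file; again every "my_iommu_*" symbol is hypothetical:
 *
 *	static const struct iommu_domain_ops my_iommu_domain_ops = {
 *		.attach_dev	 = my_iommu_attach_dev,
 *		.map_pages	 = my_iommu_map_pages,
 *		.unmap_pages	 = my_iommu_unmap_pages,
 *		.flush_iotlb_all = my_iommu_flush_iotlb_all,
 *		.iotlb_sync	 = my_iommu_iotlb_sync,
 *		.iova_to_phys	 = my_iommu_iova_to_phys,
 *		.free		 = my_iommu_domain_free,
 *	};
 */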
784 /**
785  * struct iommu_device - IOMMU core representation of one IOMMU hardware
786  *			 instance
787  * @list: Used by the iommu-core to keep a list of registered iommus
788  * @ops: iommu-ops for talking to this iommu
789  * @dev: struct device for sysfs handling
790  * @singleton_group: Used internally for drivers that have only one group
791  * @max_pasids: number of supported PASIDs
792  * @ready: set once iommu_device_register() has completed successfully
793  */
794 struct iommu_device {
795 	struct list_head list;
796 	const struct iommu_ops *ops;
797 	struct fwnode_handle *fwnode;
798 	struct device *dev;
799 	struct iommu_group *singleton_group;
800 	u32 max_pasids;
801 	bool ready;
802 };
803 
804 /**
805  * struct iommu_fault_param - per-device IOMMU fault data
806  * @lock: protect pending faults list
807  * @users: user counter to manage the lifetime of the data
808  * @rcu: rcu head for kfree_rcu()
809  * @dev: the device that owns this param
810  * @queue: IOPF queue
811  * @queue_list: index into queue->devices
812  * @partial: faults that are part of a Page Request Group for which the last
813  *           request hasn't been submitted yet.
814  * @faults: holds the pending faults which need response
815  */
816 struct iommu_fault_param {
817 	struct mutex lock;
818 	refcount_t users;
819 	struct rcu_head rcu;
820 
821 	struct device *dev;
822 	struct iopf_queue *queue;
823 	struct list_head queue_list;
824 
825 	struct list_head partial;
826 	struct list_head faults;
827 };
828 
829 /**
830  * struct dev_iommu - Collection of per-device IOMMU data
831  *
832  * @fault_param: IOMMU detected device fault reporting data
833  * @fwspec:	 IOMMU fwspec data
834  * @iommu_dev:	 IOMMU device this device is linked to
835  * @priv:	 IOMMU Driver private data
836  * @max_pasids:  number of PASIDs this device can consume
837  * @attach_deferred: the dma domain attachment is deferred
838  * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
839  * @require_direct: device requires IOMMU_RESV_DIRECT regions
840  * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
841  *
842  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
843  *	struct iommu_group	*iommu_group;
844  */
845 struct dev_iommu {
846 	struct mutex lock;
847 	struct iommu_fault_param __rcu	*fault_param;
848 	struct iommu_fwspec		*fwspec;
849 	struct iommu_device		*iommu_dev;
850 	void				*priv;
851 	u32				max_pasids;
852 	u32				attach_deferred:1;
853 	u32				pci_32bit_workaround:1;
854 	u32				require_direct:1;
855 	u32				shadow_on_flush:1;
856 };
857 
858 int iommu_device_register(struct iommu_device *iommu,
859 			  const struct iommu_ops *ops,
860 			  struct device *hwdev);
861 void iommu_device_unregister(struct iommu_device *iommu);
862 int  iommu_device_sysfs_add(struct iommu_device *iommu,
863 			    struct device *parent,
864 			    const struct attribute_group **groups,
865 			    const char *fmt, ...) __printf(4, 5);
866 void iommu_device_sysfs_remove(struct iommu_device *iommu);
867 int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
868 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
869 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
870 
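/*
 * Example (editor's sketch): a hypothetical driver registers its hardware
 * instance from probe using the functions declared above; "data" embeds a
 * struct iommu_device and "my_iommu_ops" is the driver's ops table:
 *
 *	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, "%s",
 *				     dev_name(dev));
 *	if (ret)
 *		return ret;
 *
 *	ret = iommu_device_register(&data->iommu, &my_iommu_ops, dev);
 *	if (ret)
 *		iommu_device_sysfs_remove(&data->iommu);
 */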
871 static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
872 {
873 	return (struct iommu_device *)dev_get_drvdata(dev);
874 }
875 
876 /**
877  * iommu_get_iommu_dev - Get iommu_device for a device
878  * @dev: an end-point device
879  *
880  * Note that this function must be called from within an iommu_ops callback
881  * to retrieve the iommu_device for a device; the core code guarantees it
882  * will not invoke the op without an attached iommu.
883  */
884 static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
885 {
886 	return dev->iommu->iommu_dev;
887 }
888 
889 #define iommu_get_iommu_dev(dev, type, member) \
890 	container_of(__iommu_get_iommu_dev(dev), type, member)
891 
892 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
893 {
894 	*gather = (struct iommu_iotlb_gather) {
895 		.start	= ULONG_MAX,
896 		.freelist = IOMMU_PAGES_LIST_INIT(gather->freelist),
897 	};
898 }
899 
900 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
901 extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
902 struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
903 static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
904 {
905 	return iommu_paging_domain_alloc_flags(dev, 0);
906 }
907 extern void iommu_domain_free(struct iommu_domain *domain);
908 extern int iommu_attach_device(struct iommu_domain *domain,
909 			       struct device *dev);
910 extern void iommu_detach_device(struct iommu_domain *domain,
911 				struct device *dev);
912 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
913 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
914 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
915 		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
916 int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
917 		phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
918 int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
919 		size_t size);
920 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
921 			  size_t size);
922 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
923 			       unsigned long iova, size_t size,
924 			       struct iommu_iotlb_gather *iotlb_gather);
925 extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
926 			    struct scatterlist *sg, unsigned int nents,
927 			    int prot, gfp_t gfp);
928 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
929 extern void iommu_set_fault_handler(struct iommu_domain *domain,
930 			iommu_fault_handler_t handler, void *token);
931 
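/*
 * Example (editor's sketch): the usual consumer flow built from the API above;
 * the device, IOVA and page are hypothetical:
 *
 *	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);
 *	int ret;
 *
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret)
 *		goto out_free;
 *
 *	ret = iommu_map(domain, iova, page_to_phys(page), SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 * out_free:
 *	iommu_domain_free(domain);
 */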
932 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
933 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
934 extern void iommu_set_default_passthrough(bool cmd_line);
935 extern void iommu_set_default_translated(bool cmd_line);
936 extern bool iommu_default_passthrough(void);
937 extern struct iommu_resv_region *
938 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
939 			enum iommu_resv_type type, gfp_t gfp);
940 extern int iommu_get_group_resv_regions(struct iommu_group *group,
941 					struct list_head *head);
942 
943 extern int iommu_attach_group(struct iommu_domain *domain,
944 			      struct iommu_group *group);
945 extern void iommu_detach_group(struct iommu_domain *domain,
946 			       struct iommu_group *group);
947 extern struct iommu_group *iommu_group_alloc(void);
948 extern void *iommu_group_get_iommudata(struct iommu_group *group);
949 extern void iommu_group_set_iommudata(struct iommu_group *group,
950 				      void *iommu_data,
951 				      void (*release)(void *iommu_data));
952 extern int iommu_group_set_name(struct iommu_group *group, const char *name);
953 extern int iommu_group_add_device(struct iommu_group *group,
954 				  struct device *dev);
955 extern void iommu_group_remove_device(struct device *dev);
956 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
957 				    int (*fn)(struct device *, void *));
958 extern struct iommu_group *iommu_group_get(struct device *dev);
959 extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
960 extern void iommu_group_put(struct iommu_group *group);
961 
962 extern int iommu_group_id(struct iommu_group *group);
963 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
964 
965 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
966 		unsigned long quirks);
967 
968 void iommu_set_dma_strict(void);
969 
970 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
971 			      unsigned long iova, int flags);
972 
973 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
974 {
975 	if (domain->ops->flush_iotlb_all)
976 		domain->ops->flush_iotlb_all(domain);
977 }
978 
979 static inline void iommu_iotlb_sync(struct iommu_domain *domain,
980 				  struct iommu_iotlb_gather *iotlb_gather)
981 {
982 	if (domain->ops->iotlb_sync)
983 		domain->ops->iotlb_sync(domain, iotlb_gather);
984 
985 	iommu_iotlb_gather_init(iotlb_gather);
986 }
987 
988 /**
989  * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
990  *
991  * @gather: TLB gather data
992  * @iova: start of page to invalidate
993  * @size: size of page to invalidate
994  *
995  * Helper for IOMMU drivers to check whether a new range and the gathered range
996  * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
997  * than merging the two, which might lead to unnecessary invalidations.
998  */
999 static inline
1000 bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
1001 				    unsigned long iova, size_t size)
1002 {
1003 	unsigned long start = iova, end = start + size - 1;
1004 
1005 	return gather->end != 0 &&
1006 		(end + 1 < gather->start || start > gather->end + 1);
1007 }
1008 
1009 
1010 /**
1011  * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
1012  * @gather: TLB gather data
1013  * @iova: start of page to invalidate
1014  * @size: size of page to invalidate
1015  *
1016  * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
1017  * where only the address range matters, and simply minimising intermediate
1018  * syncs is preferred.
1019  */
1020 static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
1021 						unsigned long iova, size_t size)
1022 {
1023 	unsigned long end = iova + size - 1;
1024 
1025 	if (gather->start > iova)
1026 		gather->start = iova;
1027 	if (gather->end < end)
1028 		gather->end = end;
1029 }
1030 
1031 /**
1032  * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
1033  * @domain: IOMMU domain to be invalidated
1034  * @gather: TLB gather data
1035  * @iova: start of page to invalidate
1036  * @size: size of page to invalidate
1037  *
1038  * Helper for IOMMU drivers to build invalidation commands based on individual
1039  * pages, or with page size/table level hints which cannot be gathered if they
1040  * differ.
1041  */
1042 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
1043 					       struct iommu_iotlb_gather *gather,
1044 					       unsigned long iova, size_t size)
1045 {
1046 	/*
1047 	 * If the new page is disjoint from the current range or is mapped at
1048 	 * a different granularity, then sync the TLB so that the gather
1049 	 * structure can be rewritten.
1050 	 */
1051 	if ((gather->pgsize && gather->pgsize != size) ||
1052 	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
1053 		iommu_iotlb_sync(domain, gather);
1054 
1055 	gather->pgsize = size;
1056 	iommu_iotlb_gather_add_range(gather, iova, size);
1057 }
1058 
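/*
 * Example (editor's sketch): a driver's ->unmap_pages() records what it tore
 * down so the core can batch the TLB invalidation and issue it later from
 * ->iotlb_sync(); "my_pgtable_unmap" is a hypothetical page-table helper:
 *
 *	static size_t my_iommu_unmap_pages(struct iommu_domain *domain,
 *					   unsigned long iova, size_t pgsize,
 *					   size_t pgcount,
 *					   struct iommu_iotlb_gather *gather)
 *	{
 *		size_t unmapped = my_pgtable_unmap(domain, iova, pgsize, pgcount);
 *
 *		iommu_iotlb_gather_add_page(domain, gather, iova, unmapped);
 *		return unmapped;
 *	}
 */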
1059 static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
1060 {
1061 	return gather && gather->queued;
1062 }
1063 
1064 static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
1065 					   struct iova_bitmap *bitmap,
1066 					   struct iommu_iotlb_gather *gather)
1067 {
1068 	if (gather)
1069 		iommu_iotlb_gather_init(gather);
1070 
1071 	dirty->bitmap = bitmap;
1072 	dirty->gather = gather;
1073 }
1074 
1075 static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
1076 					     unsigned long iova,
1077 					     unsigned long length)
1078 {
1079 	if (dirty->bitmap)
1080 		iova_bitmap_set(dirty->bitmap, iova, length);
1081 
1082 	if (dirty->gather)
1083 		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
1084 }
1085 
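/*
 * Example (editor's sketch): within a driver's ->read_and_clear_dirty() walk,
 * every IOVA range found dirty is reported through the helper above; the page
 * table walk and "pgsize" are hypothetical:
 *
 *	if (pte_was_dirty)
 *		iommu_dirty_bitmap_record(dirty, iova, pgsize);
 *
 * Unless IOMMU_DIRTY_NO_CLEAR was passed in @flags, the driver also clears the
 * dirty bit in the PTE, and the caller later flushes the recorded gather.
 */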
1086 /* PCI device grouping function */
1087 extern struct iommu_group *pci_device_group(struct device *dev);
1088 /* Generic device grouping function */
1089 extern struct iommu_group *generic_device_group(struct device *dev);
1090 /* FSL-MC device grouping function */
1091 struct iommu_group *fsl_mc_device_group(struct device *dev);
1092 extern struct iommu_group *generic_single_device_group(struct device *dev);
1093 
1094 /**
1095  * struct iommu_fwspec - per-device IOMMU instance data
1096  * @iommu_fwnode: firmware handle for this device's IOMMU
1097  * @flags: IOMMU_FWSPEC_* flags
1098  * @num_ids: number of associated device IDs
1099  * @ids: IDs which this device may present to the IOMMU
1100  *
1101  * Note that the IDs (and any other information, really) stored in this structure should be
1102  * considered private to the IOMMU device driver and are not to be used directly by IOMMU
1103  * consumers.
1104  */
1105 struct iommu_fwspec {
1106 	struct fwnode_handle	*iommu_fwnode;
1107 	u32			flags;
1108 	unsigned int		num_ids;
1109 	u32			ids[];
1110 };
1111 
1112 /* ATS is supported */
1113 #define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)
1114 /* CANWBS is supported */
1115 #define IOMMU_FWSPEC_PCI_RC_CANWBS		(1 << 1)
1116 
1117 /*
1118  * An iommu attach handle represents a relationship between an iommu domain
1119  * and a PASID or RID of a device. It is allocated and managed by the component
1120  * that manages the domain and is stored in the iommu group during the time the
1121  * domain is attached.
1122  */
1123 struct iommu_attach_handle {
1124 	struct iommu_domain		*domain;
1125 };
1126 
1127 /**
1128  * struct iommu_sva - handle to a device-mm bond
1129  */
1130 struct iommu_sva {
1131 	struct iommu_attach_handle	handle;
1132 	struct device			*dev;
1133 	refcount_t			users;
1134 };
1135 
1136 struct iommu_mm_data {
1137 	u32			pasid;
1138 	struct mm_struct	*mm;
1139 	struct list_head	sva_domains;
1140 	struct list_head	mm_list_elm;
1141 };
1142 
1143 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
1144 int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
1145 
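/*
 * Example (editor's sketch): a driver's ->of_xlate() usually just records the
 * master ID handed over by the firmware; the single-cell layout is an
 * assumption about the hypothetical binding:
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     const struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 */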
1146 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
1147 {
1148 	if (dev->iommu)
1149 		return dev->iommu->fwspec;
1150 	else
1151 		return NULL;
1152 }
1153 
1154 static inline void dev_iommu_fwspec_set(struct device *dev,
1155 					struct iommu_fwspec *fwspec)
1156 {
1157 	dev->iommu->fwspec = fwspec;
1158 }
1159 
1160 static inline void *dev_iommu_priv_get(struct device *dev)
1161 {
1162 	if (dev->iommu)
1163 		return dev->iommu->priv;
1164 	else
1165 		return NULL;
1166 }
1167 
1168 void dev_iommu_priv_set(struct device *dev, void *priv);
1169 
1170 extern struct mutex iommu_probe_device_lock;
1171 int iommu_probe_device(struct device *dev);
1172 
1173 int iommu_device_use_default_domain(struct device *dev);
1174 void iommu_device_unuse_default_domain(struct device *dev);
1175 
1176 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
1177 void iommu_group_release_dma_owner(struct iommu_group *group);
1178 bool iommu_group_dma_owner_claimed(struct iommu_group *group);
1179 
1180 int iommu_device_claim_dma_owner(struct device *dev, void *owner);
1181 void iommu_device_release_dma_owner(struct device *dev);
1182 
1183 int iommu_attach_device_pasid(struct iommu_domain *domain,
1184 			      struct device *dev, ioasid_t pasid,
1185 			      struct iommu_attach_handle *handle);
1186 void iommu_detach_device_pasid(struct iommu_domain *domain,
1187 			       struct device *dev, ioasid_t pasid);
1188 ioasid_t iommu_alloc_global_pasid(struct device *dev);
1189 void iommu_free_global_pasid(ioasid_t pasid);
1190 #else /* CONFIG_IOMMU_API */
1191 
1192 struct iommu_ops {};
1193 struct iommu_group {};
1194 struct iommu_fwspec {};
1195 struct iommu_device {};
1196 struct iommu_fault_param {};
1197 struct iommu_iotlb_gather {};
1198 struct iommu_dirty_bitmap {};
1199 struct iommu_dirty_ops {};
1200 
1201 static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
1202 {
1203 	return false;
1204 }
1205 
1206 static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
1207 						     unsigned int flags)
1208 {
1209 	return ERR_PTR(-ENODEV);
1210 }
1211 
1212 static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
1213 {
1214 	return ERR_PTR(-ENODEV);
1215 }
1216 
1217 static inline void iommu_domain_free(struct iommu_domain *domain)
1218 {
1219 }
1220 
1221 static inline int iommu_attach_device(struct iommu_domain *domain,
1222 				      struct device *dev)
1223 {
1224 	return -ENODEV;
1225 }
1226 
1227 static inline void iommu_detach_device(struct iommu_domain *domain,
1228 				       struct device *dev)
1229 {
1230 }
1231 
1232 static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1233 {
1234 	return NULL;
1235 }
1236 
1237 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
1238 			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1239 {
1240 	return -ENODEV;
1241 }
1242 
1243 static inline size_t iommu_unmap(struct iommu_domain *domain,
1244 				 unsigned long iova, size_t size)
1245 {
1246 	return 0;
1247 }
1248 
1249 static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
1250 				      unsigned long iova, int gfp_order,
1251 				      struct iommu_iotlb_gather *iotlb_gather)
1252 {
1253 	return 0;
1254 }
1255 
1256 static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
1257 				   unsigned long iova, struct scatterlist *sg,
1258 				   unsigned int nents, int prot, gfp_t gfp)
1259 {
1260 	return -ENODEV;
1261 }
1262 
1263 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
1264 {
1265 }
1266 
1267 static inline void iommu_iotlb_sync(struct iommu_domain *domain,
1268 				  struct iommu_iotlb_gather *iotlb_gather)
1269 {
1270 }
1271 
1272 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1273 {
1274 	return 0;
1275 }
1276 
1277 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
1278 				iommu_fault_handler_t handler, void *token)
1279 {
1280 }
1281 
1282 static inline void iommu_get_resv_regions(struct device *dev,
1283 					struct list_head *list)
1284 {
1285 }
1286 
1287 static inline void iommu_put_resv_regions(struct device *dev,
1288 					struct list_head *list)
1289 {
1290 }
1291 
1292 static inline int iommu_get_group_resv_regions(struct iommu_group *group,
1293 					       struct list_head *head)
1294 {
1295 	return -ENODEV;
1296 }
1297 
1298 static inline void iommu_set_default_passthrough(bool cmd_line)
1299 {
1300 }
1301 
1302 static inline void iommu_set_default_translated(bool cmd_line)
1303 {
1304 }
1305 
1306 static inline bool iommu_default_passthrough(void)
1307 {
1308 	return true;
1309 }
1310 
1311 static inline int iommu_attach_group(struct iommu_domain *domain,
1312 				     struct iommu_group *group)
1313 {
1314 	return -ENODEV;
1315 }
1316 
1317 static inline void iommu_detach_group(struct iommu_domain *domain,
1318 				      struct iommu_group *group)
1319 {
1320 }
1321 
1322 static inline struct iommu_group *iommu_group_alloc(void)
1323 {
1324 	return ERR_PTR(-ENODEV);
1325 }
1326 
1327 static inline void *iommu_group_get_iommudata(struct iommu_group *group)
1328 {
1329 	return NULL;
1330 }
1331 
1332 static inline void iommu_group_set_iommudata(struct iommu_group *group,
1333 					     void *iommu_data,
1334 					     void (*release)(void *iommu_data))
1335 {
1336 }
1337 
1338 static inline int iommu_group_set_name(struct iommu_group *group,
1339 				       const char *name)
1340 {
1341 	return -ENODEV;
1342 }
1343 
1344 static inline int iommu_group_add_device(struct iommu_group *group,
1345 					 struct device *dev)
1346 {
1347 	return -ENODEV;
1348 }
1349 
1350 static inline void iommu_group_remove_device(struct device *dev)
1351 {
1352 }
1353 
1354 static inline int iommu_group_for_each_dev(struct iommu_group *group,
1355 					   void *data,
1356 					   int (*fn)(struct device *, void *))
1357 {
1358 	return -ENODEV;
1359 }
1360 
1361 static inline struct iommu_group *iommu_group_get(struct device *dev)
1362 {
1363 	return NULL;
1364 }
1365 
1366 static inline void iommu_group_put(struct iommu_group *group)
1367 {
1368 }
1369 
1370 static inline int iommu_group_id(struct iommu_group *group)
1371 {
1372 	return -ENODEV;
1373 }
1374 
1375 static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
1376 		unsigned long quirks)
1377 {
1378 	return 0;
1379 }
1380 
1381 static inline int iommu_device_register(struct iommu_device *iommu,
1382 					const struct iommu_ops *ops,
1383 					struct device *hwdev)
1384 {
1385 	return -ENODEV;
1386 }
1387 
1388 static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
1389 {
1390 	return NULL;
1391 }
1392 
1393 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
1394 {
1395 }
1396 
1397 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
1398 					       struct iommu_iotlb_gather *gather,
1399 					       unsigned long iova, size_t size)
1400 {
1401 }
1402 
1403 static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
1404 {
1405 	return false;
1406 }
1407 
1408 static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
1409 					   struct iova_bitmap *bitmap,
1410 					   struct iommu_iotlb_gather *gather)
1411 {
1412 }
1413 
1414 static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
1415 					     unsigned long iova,
1416 					     unsigned long length)
1417 {
1418 }
1419 
1420 static inline void iommu_device_unregister(struct iommu_device *iommu)
1421 {
1422 }
1423 
1424 static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
1425 					  struct device *parent,
1426 					  const struct attribute_group **groups,
1427 					  const char *fmt, ...)
1428 {
1429 	return -ENODEV;
1430 }
1431 
1432 static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
1433 {
1434 }
1435 
1436 static inline int iommu_device_link(struct device *dev, struct device *link)
1437 {
1438 	return -EINVAL;
1439 }
1440 
1441 static inline void iommu_device_unlink(struct device *dev, struct device *link)
1442 {
1443 }
1444 
1445 static inline int iommu_fwspec_init(struct device *dev,
1446 				    struct fwnode_handle *iommu_fwnode)
1447 {
1448 	return -ENODEV;
1449 }
1450 
1451 static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
1452 				       int num_ids)
1453 {
1454 	return -ENODEV;
1455 }
1456 
1457 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
1458 {
1459 	return NULL;
1460 }
1461 
1462 static inline int iommu_device_use_default_domain(struct device *dev)
1463 {
1464 	return 0;
1465 }
1466 
1467 static inline void iommu_device_unuse_default_domain(struct device *dev)
1468 {
1469 }
1470 
1471 static inline int
1472 iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
1473 {
1474 	return -ENODEV;
1475 }
1476 
1477 static inline void iommu_group_release_dma_owner(struct iommu_group *group)
1478 {
1479 }
1480 
1481 static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
1482 {
1483 	return false;
1484 }
1485 
1486 static inline void iommu_device_release_dma_owner(struct device *dev)
1487 {
1488 }
1489 
1490 static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
1491 {
1492 	return -ENODEV;
1493 }
1494 
1495 static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
1496 					    struct device *dev, ioasid_t pasid,
1497 					    struct iommu_attach_handle *handle)
1498 {
1499 	return -ENODEV;
1500 }
1501 
1502 static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
1503 					     struct device *dev, ioasid_t pasid)
1504 {
1505 }
1506 
1507 static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
1508 {
1509 	return IOMMU_PASID_INVALID;
1510 }
1511 
1512 static inline void iommu_free_global_pasid(ioasid_t pasid) {}
1513 #endif /* CONFIG_IOMMU_API */
1514 
1515 #ifdef CONFIG_IRQ_MSI_IOMMU
1516 #ifdef CONFIG_IOMMU_API
1517 int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
1518 #else
1519 static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
1520 					phys_addr_t msi_addr)
1521 {
1522 	return 0;
1523 }
1524 #endif /* CONFIG_IOMMU_API */
1525 #endif /* CONFIG_IRQ_MSI_IOMMU */
1526 
1527 #if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
1528 void iommu_group_mutex_assert(struct device *dev);
1529 #else
1530 static inline void iommu_group_mutex_assert(struct device *dev)
1531 {
1532 }
1533 #endif
1534 
1535 /**
1536  * iommu_map_sgtable - Map the given buffer to the IOMMU domain
1537  * @domain:	The IOMMU domain to perform the mapping
1538  * @iova:	The start address to map the buffer
1539  * @sgt:	The sg_table object describing the buffer
1540  * @prot:	IOMMU protection bits
1541  *
1542  * Creates a mapping at @iova for the buffer described by a scatterlist
1543  * stored in the given sg_table object in the provided IOMMU domain.
1544  */
1545 static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
1546 			unsigned long iova, struct sg_table *sgt, int prot)
1547 {
1548 	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
1549 			    GFP_KERNEL);
1550 }
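/*
 * Illustrative usage sketch (not part of the upstream header): a driver that
 * already holds an attached paging domain and a pinned sg_table could map
 * the buffer and later tear it down roughly as follows. "dom", "my_sgt" and
 * "iova" are assumed to exist for the sake of the example; only
 * iommu_map_sgtable(), iommu_unmap() and the IOMMU_* prot flags come from
 * this header.
 *
 *	ssize_t mapped = iommu_map_sgtable(dom, iova, my_sgt,
 *					   IOMMU_READ | IOMMU_WRITE);
 *
 *	if (mapped < 0)
 *		return mapped;
 *	... perform DMA against [iova, iova + mapped) ...
 *	iommu_unmap(dom, iova, mapped);
 */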
1551 
1552 #ifdef CONFIG_IOMMU_DEBUGFS
1553 extern	struct dentry *iommu_debugfs_dir;
1554 void iommu_debugfs_setup(void);
1555 #else
1556 static inline void iommu_debugfs_setup(void) {}
1557 #endif
1558 
1559 #ifdef CONFIG_IOMMU_DMA
1560 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
1561 #else /* CONFIG_IOMMU_DMA */
1562 static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
1563 {
1564 	return -ENODEV;
1565 }
1566 #endif	/* CONFIG_IOMMU_DMA */
1567 
1568 /*
1569  * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
1570  * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
1571  * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
1572  */
1573 #define TEGRA_STREAM_ID_BYPASS 0x7f
1574 
1575 static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
1576 {
1577 #ifdef CONFIG_IOMMU_API
1578 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1579 
1580 	if (fwspec && fwspec->num_ids == 1) {
1581 		*stream_id = fwspec->ids[0] & 0xffff;
1582 		return true;
1583 	}
1584 #endif
1585 
1586 	return false;
1587 }
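/*
 * Illustrative usage sketch (not part of the upstream header): a Tegra client
 * driver can start from the bypass ID, overwrite it only when the helper
 * reports a usable fwspec, and then program the result into its own
 * registers. "priv->regs" and MY_SID_REG are hypothetical names used only
 * for this example.
 *
 *	u32 sid = TEGRA_STREAM_ID_BYPASS;
 *
 *	tegra_dev_iommu_get_stream_id(dev, &sid);
 *	writel(sid, priv->regs + MY_SID_REG);
 */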
1588 
1589 #ifdef CONFIG_IOMMU_MM_DATA
1590 static inline void mm_pasid_init(struct mm_struct *mm)
1591 {
1592 	/*
1593 	 * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
1594 	 * the new mm and the old one point to a same iommu_mm instance. When either
1595 	 * one of the two mms gets released, the iommu_mm instance is freed, leaving
1596 	 * the other mm running into a use-after-free/double-free problem. To avoid
1597 	 * the problem, zeroing the iommu_mm pointer of a new mm is needed here.
1598 	 */
1599 	mm->iommu_mm = NULL;
1600 }
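/*
 * Simplified sketch of the fork path the comment above refers to (the real
 * sequence lives in kernel/fork.c): the child mm starts out as a byte copy
 * of the parent, so the explicit reset below is what keeps the two mms from
 * sharing, and later double-freeing, a single iommu_mm allocation.
 *
 *	memcpy(new_mm, old_mm, sizeof(*new_mm));
 *	mm_pasid_init(new_mm);
 */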
1601 
1602 static inline bool mm_valid_pasid(struct mm_struct *mm)
1603 {
1604 	return READ_ONCE(mm->iommu_mm);
1605 }
1606 
1607 static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
1608 {
1609 	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
1610 
1611 	if (!iommu_mm)
1612 		return IOMMU_PASID_INVALID;
1613 	return iommu_mm->pasid;
1614 }
1615 
1616 void mm_pasid_drop(struct mm_struct *mm);
1617 struct iommu_sva *iommu_sva_bind_device(struct device *dev,
1618 					struct mm_struct *mm);
1619 void iommu_sva_unbind_device(struct iommu_sva *handle);
1620 u32 iommu_sva_get_pasid(struct iommu_sva *handle);
1621 void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end);
1622 #else
1623 static inline struct iommu_sva *
1624 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
1625 {
1626 	return ERR_PTR(-ENODEV);
1627 }
1628 
1629 static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
1630 {
1631 }
1632 
1633 static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
1634 {
1635 	return IOMMU_PASID_INVALID;
1636 }
1637 static inline void mm_pasid_init(struct mm_struct *mm) {}
1638 static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
1639 
1640 static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
1641 {
1642 	return IOMMU_PASID_INVALID;
1643 }
1644 
1645 static inline void mm_pasid_drop(struct mm_struct *mm) {}
1646 static inline void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end) {}
1647 #endif /* CONFIG_IOMMU_MM_DATA */
1648 
1649 #ifdef CONFIG_IOMMU_IOPF
1650 int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
1651 void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
1652 int iopf_queue_flush_dev(struct device *dev);
1653 struct iopf_queue *iopf_queue_alloc(const char *name);
1654 void iopf_queue_free(struct iopf_queue *queue);
1655 int iopf_queue_discard_partial(struct iopf_queue *queue);
1656 void iopf_free_group(struct iopf_group *group);
1657 int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
1658 void iopf_group_response(struct iopf_group *group,
1659 			 enum iommu_page_response_code status);
1660 #else
1661 static inline int
1662 iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
1663 {
1664 	return -ENODEV;
1665 }
1666 
1667 static inline void
1668 iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
1669 {
1670 }
1671 
1672 static inline int iopf_queue_flush_dev(struct device *dev)
1673 {
1674 	return -ENODEV;
1675 }
1676 
1677 static inline struct iopf_queue *iopf_queue_alloc(const char *name)
1678 {
1679 	return NULL;
1680 }
1681 
1682 static inline void iopf_queue_free(struct iopf_queue *queue)
1683 {
1684 }
1685 
1686 static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
1687 {
1688 	return -ENODEV;
1689 }
1690 
1691 static inline void iopf_free_group(struct iopf_group *group)
1692 {
1693 }
1694 
1695 static inline int
1696 iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
1697 {
1698 	return -ENODEV;
1699 }
1700 
1701 static inline void iopf_group_response(struct iopf_group *group,
1702 				       enum iommu_page_response_code status)
1703 {
1704 }
1705 #endif /* CONFIG_IOMMU_IOPF */
1706 #endif /* __LINUX_IOMMU_H */
1707