/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
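
/*
 * Example (editor's illustrative sketch, not part of the kernel API docs):
 * the prot flags above are OR'd together and passed to iommu_map(). A caller
 * mapping one 4K page for coherent read/write DMA might do:
 *
 *	int ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			    IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
 *			    GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */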

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_dma_msi_cookie;
struct iommu_fault_param;
struct iommufd_ctx;
struct iommufd_viommu;
struct msi_desc;
struct msi_msg;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types; can be expanded, e.g. for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in the group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
	u32	flags;
	u32	pasid;
	u32	grpid;
	u32	perm;
	u64	addr;
	u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32	pasid;
	u32	grpid;
	u32	code;
};
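
/*
 * Example (editor's illustrative sketch): an IOPF handler answers a page
 * request by echoing back the PASID and group ID from the fault; the PASID
 * is only meaningful in the response when the request had
 * IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID set:
 *
 *	struct iommu_fault_page_request *prm = &fault->prm;
 *	struct iommu_page_response resp = {
 *		.pasid = prm->pasid,
 *		.grpid = prm->grpid,
 *		.code  = IOMMU_PAGE_RESP_SUCCESS,
 *	};
 */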

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	size_t fault_count;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_attach_handle *attach_handle;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
	/* Used by the handler provider to hook the group onto its own lists. */
	struct list_head node;
	u32 cookie;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

enum iommu_domain_cookie_type {
	IOMMU_COOKIE_NONE,
	IOMMU_COOKIE_DMA_IOVA,
	IOMMU_COOKIE_DMA_MSI,
	IOMMU_COOKIE_FAULT_HANDLER,
	IOMMU_COOKIE_SVA,
	IOMMU_COOKIE_IOMMUFD,
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  dma_api stuff. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	enum iommu_domain_cookie_type cookie_type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	int (*iopf_handler)(struct iopf_group *group);

	union { /* cookie */
		struct iommu_dma_cookie *iova_cookie;
		struct iommu_dma_msi_cookie *msi_cookie;
		struct iommufd_hw_pagetable *iommufd_hwpt;
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva-domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}
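
/*
 * Example (editor's illustrative sketch): the type field is a bitwise
 * composition of the __IOMMU_DOMAIN_* feature flags, so helpers can test
 * capabilities instead of enumerating types. iommu_is_dma_domain() above
 * matches both IOMMU_DOMAIN_DMA and IOMMU_DOMAIN_DMA_FQ; a hypothetical
 * paging check could be written the same way:
 *
 *	static bool domain_has_paging(struct iommu_domain *domain)
 *	{
 *		return domain->type & __IOMMU_DOMAIN_PAGING;
 *	}
 */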

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use cases (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

#define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID	(-1U)
typedef unsigned int ioasid_t;

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR (1 << 0)

/*
 * Pages allocated through iommu_alloc_pages_node_sz() can be placed on this
 * list using iommu_pages_list_add(). Note: ONLY pages from
 * iommu_alloc_pages_node_sz() can be used this way!
 */
struct iommu_pages_list {
	struct list_head pages;
};

#define IOMMU_PAGES_LIST_INIT(name) \
	((struct iommu_pages_list){ .pages = LIST_HEAD_INIT(name.pages) })
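
/*
 * Example (editor's illustrative sketch): declaring and initializing a list
 * for page-table pages awaiting free; per the note above, only pages from
 * iommu_alloc_pages_node_sz() may be added to it with iommu_pages_list_add():
 *
 *	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
 */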

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct iommu_pages_list	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk the IOMMU page tables and marshal dirtied PTEs
 *                        into a bitmap, with one bit representing a page.
 *                        Reads the dirty PTE bits and clears them in the IO
 *                        page tables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user space data payload is a uAPI structure defined in
 * include/uapi/linux/iommufd.h. @type, @uptr and @len should be copied
 * directly from an iommufd core uAPI struct.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure that @kdst points to in
 *            its initial version.
 *
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
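
/*
 * Example (editor's illustrative sketch with hypothetical driver names): a
 * driver's ->domain_alloc_nested() would typically pull its uAPI structure
 * out of @user_data this way. "struct mydrv_alloc_nested",
 * "IOMMU_HWPT_DATA_MYDRV" and "last_member" stand in for a real driver's
 * definitions in include/uapi/linux/iommufd.h:
 *
 *	struct mydrv_alloc_nested arg;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&arg, user_data,
 *					  IOMMU_HWPT_DATA_MYDRV, last_member);
 *	if (ret)
 *		return ERR_PTR(ret);
 */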

/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure that @kdst points to in
 *            its initial version.
 *
 * Copy a single entry from a user array. Return 0 for success, otherwise
 * -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                           \
	__iommu_copy_struct_from_user_array(                                  \
		kdst, user_array, data_type, index, sizeof(*(kdst)),          \
		offsetofend(typeof(*(kdst)), min_last))
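
/*
 * Example (editor's illustrative sketch with hypothetical driver names): a
 * driver's ->cache_invalidate_user() might walk the request array entry by
 * entry, with "struct mydrv_inv", "IOMMU_HWPT_INVALIDATE_DATA_MYDRV" and
 * "last_member" standing in for a real driver's uAPI definitions:
 *
 *	u32 i;
 *
 *	for (i = 0; i != array->entry_num; i++) {
 *		struct mydrv_inv inv;
 *		int ret = iommu_copy_struct_from_user_array(&inv, array,
 *				IOMMU_HWPT_INVALIDATE_DATA_MYDRV, i,
 *				last_member);
 *		if (ret)
 *			break;
 *		(issue the invalidation described by "inv" here)
 *	}
 *	array->entry_num = i;	(report how many entries were handled)
 */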

/**
 * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
 *         space data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @kdst_entry_size: sizeof(*kdst)
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 *
 * Copy the entire user array. kdst must have room for kdst_entry_size *
 * user_array->entry_num bytes. Return 0 for success, otherwise -error.
 */
static inline int
iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
				       struct iommu_user_data_array *user_array,
				       unsigned int data_type)
{
	unsigned int i;
	int ret;

	if (user_array->type != data_type)
		return -EINVAL;
	if (!user_array->entry_num)
		return -EINVAL;
	if (likely(user_array->entry_len == kdst_entry_size)) {
		if (copy_from_user(kdst, user_array->uptr,
				   user_array->entry_num *
					   user_array->entry_len))
			return -EFAULT;
		/* Fast path copied the whole array in one go */
		return 0;
	}

	/* Copy item by item */
	for (i = 0; i != user_array->entry_num; i++) {
		ret = copy_struct_from_user(
			kdst + kdst_entry_size * i, kdst_entry_size,
			user_array->uptr + user_array->entry_len * i,
			user_array->entry_len);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: Do not use in new drivers
 * @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to
 *                         use identity_domain instead. This should only be used
 *                         if dynamic logic is necessary.
 * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
 *                     input parameters as defined in
 *                     include/uapi/linux/iommufd.h. The @user_data can be
 *                     optionally provided, the new domain must support
 *                     __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
 *                     returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types. This is the
 *                       same as invoking domain_alloc_paging_flags() with
 *                       @flags=0, @user_data=NULL. A driver should implement
 *                       only one of the two ops.
 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
 * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance
 *                behind the @dev, as the set of virtualization resources
 *                shared/passed to a user space IOMMU instance, and associate
 *                it with a nesting @parent_domain. The @viommu_type must be
 *                defined in include/uapi/linux/iommufd.h. The driver must call
 *                the iommufd_viommu_alloc() helper for a bundled allocation of
 *                the core and the driver structures, using the given @ictx
 *                pointer.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
 *                    no user domain for each PASID and the I/O page faults are
 *                    forwarded through the user domain attached to the device
 *                    RID.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
#if IS_ENABLED(CONFIG_FSL_PAMU)
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
#endif
	struct iommu_domain *(*domain_alloc_identity)(struct device *dev);
	struct iommu_domain *(*domain_alloc_paging_flags)(
		struct device *dev, u32 flags,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
						 struct mm_struct *mm);
	struct iommu_domain *(*domain_alloc_nested)(
		struct device *dev, struct iommu_domain *parent, u32 flags,
		const struct iommu_user_data *user_data);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	struct iommufd_viommu *(*viommu_alloc)(
		struct device *dev, struct iommu_domain *parent_domain,
		struct iommufd_ctx *ictx, unsigned int viommu_type);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
	u8 user_pasid_table:1;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set or replace an iommu domain for a PASID of a device.
 *                 On error, the PASID of the device must be left in its old
 *                 configuration.
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map_pages to the
 *                  hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *            queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in form
 *                         of a driver data structure. The driver must update
 *                         array->entry_num to report the number of handled
 *                         invalidation requests. The driver data structure
 *                         must be defined in include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid, struct iommu_domain *old);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};
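
/*
 * Example (editor's illustrative sketch, hypothetical "mydrv" driver): a
 * paging-domain implementation typically wires these ops up once and shares
 * them through iommu_ops->default_domain_ops:
 *
 *	static const struct iommu_domain_ops mydrv_domain_ops = {
 *		.attach_dev	 = mydrv_attach_dev,
 *		.map_pages	 = mydrv_map_pages,
 *		.unmap_pages	 = mydrv_unmap_pages,
 *		.flush_iotlb_all = mydrv_flush_iotlb_all,
 *		.iotlb_sync	 = mydrv_iotlb_sync,
 *		.iova_to_phys	 = mydrv_iova_to_phys,
 *		.free		 = mydrv_domain_free,
 *	};
 */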

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu instance
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 * @ready: set once iommu_device_register() has completed successfully
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
	bool ready;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu	*fault_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
	u32				pci_32bit_workaround:1;
	u32				require_direct:1;
	u32				shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from the iommu_ops
 * to retrieve the iommu_device for a device, since the core code
 * guarantees it will not invoke the op without an attached iommu.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	container_of(__iommu_get_iommu_dev(dev), type, member)

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = IOMMU_PAGES_LIST_INIT(gather->freelist),
	};
}

extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return iommu_paging_domain_alloc_flags(dev, 0);
}
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
		phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
		size_t size);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

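/*
 * Example (editor's illustrative sketch): the basic consumer flow for the
 * functions above, error handling abbreviated:
 *
 *	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);
 *
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *	if (iommu_attach_device(domain, dev))
 *		goto err_free;
 *	iommu_map(domain, iova, paddr, size,
 *		  IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	(use the mapping, e.g. check it with iommu_iova_to_phys())
 *	iommu_unmap(domain, iova, size);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
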
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
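
/*
 * Example (editor's illustrative sketch): a driver's ->unmap_pages()
 * implementation feeds each invalidated range into the gather so the core
 * can issue one deferred sync later, where mydrv_clear_pte() is a
 * hypothetical page-table helper:
 *
 *	while (pgcount--) {
 *		mydrv_clear_pte(domain, iova);
 *		iommu_iotlb_gather_add_page(domain, iotlb_gather, iova,
 *					    pgsize);
 *		iova += pgsize;
 *	}
 *
 * The core eventually calls iommu_iotlb_sync(), which invokes the driver's
 * ->iotlb_sync() on the accumulated range and reinitialises the gather.
 */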

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
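
/*
 * Example (editor's illustrative sketch): a driver's ->read_and_clear_dirty()
 * walks its page tables and reports dirty PTEs through the helper above,
 * leaving the hardware dirty bits set when IOMMU_DIRTY_NO_CLEAR is passed.
 * mydrv_clear_pte_dirty() is a hypothetical helper:
 *
 *	if (pte_is_dirty) {
 *		if (!(flags & IOMMU_DIRTY_NO_CLEAR))
 *			mydrv_clear_pte_dirty(ptep);
 *		iommu_dirty_bitmap_record(dirty, iova, pgsize);
 *	}
 */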

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and are
 * not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)
/* CANWBS is supported */
#define IOMMU_FWSPEC_PCI_RC_CANWBS		(1 << 1)

/*
 * An iommu attach handle represents a relationship between an iommu domain
 * and a PASID or RID of a device. It is allocated and managed by the component
 * that manages the domain and is stored in the iommu group during the time the
 * domain is attached.
 */
struct iommu_attach_handle {
	struct iommu_domain		*domain;
};

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct iommu_attach_handle	handle;
	struct device			*dev;
	refcount_t			users;
};

struct iommu_mm_data {
	u32			pasid;
	struct list_head	sva_domains;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);

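/*
 * Example (editor's illustrative sketch, hypothetical "mydrv" driver): an
 * IOMMU driver's ->of_xlate() callback typically records the master ID from
 * the firmware arguments:
 *
 *	static int mydrv_of_xlate(struct device *dev,
 *				  const struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 *
 * iommu_fwspec_init() is normally called earlier, by the firmware parsing
 * glue, before ->of_xlate() runs.
 */
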
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
1143 #else /* CONFIG_IOMMU_API */
1144 
1145 struct iommu_ops {};
1146 struct iommu_group {};
1147 struct iommu_fwspec {};
1148 struct iommu_device {};
1149 struct iommu_fault_param {};
1150 struct iommu_iotlb_gather {};
1151 struct iommu_dirty_bitmap {};
1152 struct iommu_dirty_ops {};
1153 
device_iommu_capable(struct device * dev,enum iommu_cap cap)1154 static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
1155 {
1156 	return false;
1157 }
1158 
iommu_paging_domain_alloc_flags(struct device * dev,unsigned int flags)1159 static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
1160 						     unsigned int flags)
1161 {
1162 	return ERR_PTR(-ENODEV);
1163 }
1164 
iommu_paging_domain_alloc(struct device * dev)1165 static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
1166 {
1167 	return ERR_PTR(-ENODEV);
1168 }
1169 
iommu_domain_free(struct iommu_domain * domain)1170 static inline void iommu_domain_free(struct iommu_domain *domain)
1171 {
1172 }
1173 
iommu_attach_device(struct iommu_domain * domain,struct device * dev)1174 static inline int iommu_attach_device(struct iommu_domain *domain,
1175 				      struct device *dev)
1176 {
1177 	return -ENODEV;
1178 }
1179 
iommu_detach_device(struct iommu_domain * domain,struct device * dev)1180 static inline void iommu_detach_device(struct iommu_domain *domain,
1181 				       struct device *dev)
1182 {
1183 }
1184 
iommu_get_domain_for_dev(struct device * dev)1185 static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1186 {
1187 	return NULL;
1188 }
1189 
iommu_map(struct iommu_domain * domain,unsigned long iova,phys_addr_t paddr,size_t size,int prot,gfp_t gfp)1190 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
1191 			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1192 {
1193 	return -ENODEV;
1194 }
1195 
iommu_unmap(struct iommu_domain * domain,unsigned long iova,size_t size)1196 static inline size_t iommu_unmap(struct iommu_domain *domain,
1197 				 unsigned long iova, size_t size)
1198 {
1199 	return 0;
1200 }
1201 
iommu_unmap_fast(struct iommu_domain * domain,unsigned long iova,int gfp_order,struct iommu_iotlb_gather * iotlb_gather)1202 static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
1203 				      unsigned long iova, int gfp_order,
1204 				      struct iommu_iotlb_gather *iotlb_gather)
1205 {
1206 	return 0;
1207 }
1208 
iommu_map_sg(struct iommu_domain * domain,unsigned long iova,struct scatterlist * sg,unsigned int nents,int prot,gfp_t gfp)1209 static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
1210 				   unsigned long iova, struct scatterlist *sg,
1211 				   unsigned int nents, int prot, gfp_t gfp)
1212 {
1213 	return -ENODEV;
1214 }
1215 
iommu_flush_iotlb_all(struct iommu_domain * domain)1216 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
1217 {
1218 }
1219 
iommu_iotlb_sync(struct iommu_domain * domain,struct iommu_iotlb_gather * iotlb_gather)1220 static inline void iommu_iotlb_sync(struct iommu_domain *domain,
1221 				  struct iommu_iotlb_gather *iotlb_gather)
1222 {
1223 }
1224 
iommu_iova_to_phys(struct iommu_domain * domain,dma_addr_t iova)1225 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1226 {
1227 	return 0;
1228 }
1229 
iommu_set_fault_handler(struct iommu_domain * domain,iommu_fault_handler_t handler,void * token)1230 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
1231 				iommu_fault_handler_t handler, void *token)
1232 {
1233 }
1234 
iommu_get_resv_regions(struct device * dev,struct list_head * list)1235 static inline void iommu_get_resv_regions(struct device *dev,
1236 					struct list_head *list)
1237 {
1238 }
1239 
iommu_put_resv_regions(struct device * dev,struct list_head * list)1240 static inline void iommu_put_resv_regions(struct device *dev,
1241 					struct list_head *list)
1242 {
1243 }
1244 
iommu_get_group_resv_regions(struct iommu_group * group,struct list_head * head)1245 static inline int iommu_get_group_resv_regions(struct iommu_group *group,
1246 					       struct list_head *head)
1247 {
1248 	return -ENODEV;
1249 }
1250 
iommu_set_default_passthrough(bool cmd_line)1251 static inline void iommu_set_default_passthrough(bool cmd_line)
1252 {
1253 }
1254 
iommu_set_default_translated(bool cmd_line)1255 static inline void iommu_set_default_translated(bool cmd_line)
1256 {
1257 }
1258 
iommu_default_passthrough(void)1259 static inline bool iommu_default_passthrough(void)
1260 {
1261 	return true;
1262 }
1263 
iommu_attach_group(struct iommu_domain * domain,struct iommu_group * group)1264 static inline int iommu_attach_group(struct iommu_domain *domain,
1265 				     struct iommu_group *group)
1266 {
1267 	return -ENODEV;
1268 }
1269 
iommu_detach_group(struct iommu_domain * domain,struct iommu_group * group)1270 static inline void iommu_detach_group(struct iommu_domain *domain,
1271 				      struct iommu_group *group)
1272 {
1273 }
1274 
iommu_group_alloc(void)1275 static inline struct iommu_group *iommu_group_alloc(void)
1276 {
1277 	return ERR_PTR(-ENODEV);
1278 }
1279 
iommu_group_get_iommudata(struct iommu_group * group)1280 static inline void *iommu_group_get_iommudata(struct iommu_group *group)
1281 {
1282 	return NULL;
1283 }
1284 
iommu_group_set_iommudata(struct iommu_group * group,void * iommu_data,void (* release)(void * iommu_data))1285 static inline void iommu_group_set_iommudata(struct iommu_group *group,
1286 					     void *iommu_data,
1287 					     void (*release)(void *iommu_data))
1288 {
1289 }
1290 
iommu_group_set_name(struct iommu_group * group,const char * name)1291 static inline int iommu_group_set_name(struct iommu_group *group,
1292 				       const char *name)
1293 {
1294 	return -ENODEV;
1295 }
1296 
iommu_group_add_device(struct iommu_group * group,struct device * dev)1297 static inline int iommu_group_add_device(struct iommu_group *group,
1298 					 struct device *dev)
1299 {
1300 	return -ENODEV;
1301 }
1302 
iommu_group_remove_device(struct device * dev)1303 static inline void iommu_group_remove_device(struct device *dev)
1304 {
1305 }
1306 
iommu_group_for_each_dev(struct iommu_group * group,void * data,int (* fn)(struct device *,void *))1307 static inline int iommu_group_for_each_dev(struct iommu_group *group,
1308 					   void *data,
1309 					   int (*fn)(struct device *, void *))
1310 {
1311 	return -ENODEV;
1312 }
1313 
iommu_group_get(struct device * dev)1314 static inline struct iommu_group *iommu_group_get(struct device *dev)
1315 {
1316 	return NULL;
1317 }
1318 
iommu_group_put(struct iommu_group * group)1319 static inline void iommu_group_put(struct iommu_group *group)
1320 {
1321 }
1322 
iommu_group_id(struct iommu_group * group)1323 static inline int iommu_group_id(struct iommu_group *group)
1324 {
1325 	return -ENODEV;
1326 }
1327 
iommu_set_pgtable_quirks(struct iommu_domain * domain,unsigned long quirks)1328 static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
1329 		unsigned long quirks)
1330 {
1331 	return 0;
1332 }

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode)
{
	return -ENODEV;
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid,
					    struct iommu_attach_handle *handle)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IRQ_MSI_IOMMU
#ifdef CONFIG_IOMMU_API
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
#else
static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
					phys_addr_t msi_addr)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */
#endif /* CONFIG_IRQ_MSI_IOMMU */
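
/*
 * Context sketch (editorial addition, not part of this header): an MSI
 * controller driver calls iommu_dma_prepare_msi() while setting up an
 * interrupt, so that the MSI doorbell's physical address is reachable
 * through the device's IOMMU domain. "doorbell_phys" below is an assumed,
 * driver-specific address.
 *
 *	ret = iommu_dma_prepare_msi(desc, doorbell_phys);
 *	if (ret)
 *		return ret;
 */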

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 *
 * Return: the number of bytes mapped on success, or a negative error code
 * on failure.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
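
/*
 * Illustrative usage sketch (editorial addition, not part of this header):
 * a caller that has built an sg_table and reserved a suitable IOVA range
 * might map it as below; "dom", "iova" and "sgt" are assumed locals. A
 * negative return value means the range could not be fully mapped.
 *
 *	ssize_t mapped = iommu_map_sgtable(dom, iova, &sgt,
 *					   IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < 0)
 *		return mapped;
 */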

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
#else /* CONFIG_IOMMU_DMA */
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}
#endif /* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
 * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
 * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}
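
/*
 * Illustrative sketch (editorial addition): a Tegra client driver would
 * typically fall back to bypass when no usable fwspec is present. The
 * register offset CLIENT_SID_REG below is hypothetical.
 *
 *	u32 sid;
 *
 *	if (!tegra_dev_iommu_get_stream_id(dev, &sid))
 *		sid = TEGRA_STREAM_ID_BYPASS;
 *	writel(sid, base + CLIENT_SID_REG);
 */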

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), the new mm is memcpy'd from the old one, which
	 * makes both mms point to the same iommu_mm instance. When either
	 * mm is released, the iommu_mm instance is freed, leaving the other
	 * mm exposed to a use-after-free/double-free. To avoid this, zero
	 * the iommu_mm pointer of the new mm here.
	 */
	mm->iommu_mm = NULL;
}
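
/*
 * Sketch of the scenario described above (editorial addition, heavily
 * simplified): dup_mm() starts from a byte copy of the parent mm, so both
 * structs briefly share one iommu_mm pointer until it is cleared.
 *
 *	memcpy(new_mm, old_mm, sizeof(*new_mm));
 *	mm_pasid_init(new_mm);
 */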

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_MM_DATA */
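
/*
 * Illustrative SVA usage sketch (editorial addition): bind the current
 * process address space to an SVA-capable device, program the returned
 * PASID into the hardware, and unbind when done. my_hw_set_pasid() is a
 * hypothetical device-specific helper.
 *
 *	struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	my_hw_set_pasid(dev, iommu_sva_get_pasid(handle));
 *	...
 *	iommu_sva_unbind_device(handle);
 */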

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline int
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
	return -ENODEV;
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
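
/*
 * Illustrative IOPF queue lifecycle (editorial addition, error handling
 * abbreviated): an IOMMU driver that supports I/O page faults allocates a
 * queue, adds faulting devices to it, and tears everything down in the
 * reverse order. The queue name is arbitrary.
 *
 *	struct iopf_queue *q = iopf_queue_alloc("example-iopf");
 *
 *	if (!q)
 *		return -ENOMEM;
 *	if (iopf_queue_add_device(q, dev))
 *		goto err_free;
 *	...
 *	iopf_queue_remove_device(q, dev);
 *	iopf_queue_free(q);
 */
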
#endif /* __LINUX_IOMMU_H */