/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
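
/*
 * Example (illustrative sketch, not from the mainline sources): the
 * permission flags above are OR'd together into the @prot argument of
 * iommu_map() and friends. A supervisor-privileged, uncacheable doorbell
 * page might hypothetically be mapped as:
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO | IOMMU_PRIV;
 *
 *	ret = iommu_map(domain, iova, doorbell_phys, PAGE_SIZE, prot,
 *			GFP_KERNEL);
 */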

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_fault_param;
struct iommufd_ctx;
struct iommufd_viommu;
struct msi_desc;
struct msi_msg;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types, can be expanded, e.g. for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
	u32 flags;
	u32 pasid;
	u32 grpid;
	u32 perm;
	u64 addr;
	u64 private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32 pasid;
	u32 grpid;
	u32 code;
};

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	size_t fault_count;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_attach_handle *attach_handle;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
	/* Used by handler provider to hook the group on its own lists. */
	struct list_head node;
	u32 cookie;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  dma_api stuff. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	int (*iopf_handler)(struct iopf_group *group);

#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
	int (*sw_msi)(struct iommu_domain *domain, struct msi_desc *desc,
		      phys_addr_t msi_addr);
#endif

	union { /* Pointer usable by owner of the domain */
		struct iommufd_hw_pagetable *iommufd_hwpt; /* iommufd */
	};
	union { /* Fault handler */
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva-domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline void iommu_domain_set_sw_msi(
	struct iommu_domain *domain,
	int (*sw_msi)(struct iommu_domain *domain, struct msi_desc *desc,
		      phys_addr_t msi_addr))
{
#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
	domain->sw_msi = sw_msi;
#endif
}

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};
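
/*
 * Example (illustrative sketch, not from the mainline sources): an IOMMU
 * driver's ->get_resv_regions() callback typically allocates regions with
 * iommu_alloc_resv_region() (declared further down) and appends them to
 * @list; the core releases them later via iommu_put_resv_regions(). The
 * mydrv_* name and the MSI window base/size are hypothetical:
 *
 *	static void mydrv_get_resv_regions(struct device *dev,
 *					   struct list_head *list)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x8000000, SZ_1M,
 *						 IOMMU_READ | IOMMU_WRITE |
 *						 IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI, GFP_KERNEL);
 *		if (region)
 *			list_add_tail(&region->list, list);
 *	}
 */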

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID			(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID		(-1U)
typedef unsigned int ioasid_t;

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR (1 << 0)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the IO
 *                        page tables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user space data is a uAPI structure defined in
 * include/uapi/linux/iommufd.h; @type, @uptr and @len should be copied
 * verbatim from the corresponding iommufd core uAPI struct.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	/* Validate the pointers before dereferencing @src_data */
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
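
/*
 * Example (illustrative sketch, not from the mainline sources): a driver
 * implementing a user-data uAPI would invoke the helper like this.
 * "struct iommu_hwpt_mydrv", the IOMMU_HWPT_DATA_MYDRV type constant and
 * the "asid" member are hypothetical stand-ins for a real structure from
 * include/uapi/linux/iommufd.h:
 *
 *	struct iommu_hwpt_mydrv data;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&data, user_data,
 *					  IOMMU_HWPT_DATA_MYDRV, asid);
 *	if (ret)
 *		return ERR_PTR(ret);
 *
 * Here "asid" must be the last member of the initial version of the uAPI
 * struct, so that older, shorter user structures still copy correctly.
 */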

/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 *
 * Copy a single entry from a user array. Return 0 for success, otherwise
 * -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                            \
	__iommu_copy_struct_from_user_array(                                   \
		kdst, user_array, data_type, index, sizeof(*(kdst)),           \
		offsetofend(typeof(*(kdst)), min_last))

/**
 * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
 *                                          space data from an
 *                                          iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @kdst_entry_size: sizeof(*kdst)
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 *
 * Copy the entire user array. kdst must have room for kdst_entry_size *
 * user_array->entry_num bytes. Return 0 for success, otherwise -error.
 */
static inline int
iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
				       struct iommu_user_data_array *user_array,
				       unsigned int data_type)
{
	unsigned int i;
	int ret;

	if (user_array->type != data_type)
		return -EINVAL;
	if (!user_array->entry_num)
		return -EINVAL;
	/* Fast path: entries have the expected size, bulk copy them all */
	if (likely(user_array->entry_len == kdst_entry_size)) {
		if (copy_from_user(kdst, user_array->uptr,
				   user_array->entry_num *
					   user_array->entry_len))
			return -EFAULT;
		return 0;
	}

	/* Otherwise copy item by item, tolerating a differing entry size */
	for (i = 0; i != user_array->entry_num; i++) {
		ret = copy_struct_from_user(
			kdst + kdst_entry_size * i, kdst_entry_size,
			user_array->uptr + user_array->entry_len * i,
			user_array->entry_len);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain on success, otherwise
 *                NULL. The domain is not fully initialized until the caller,
 *                iommu_domain_alloc(), returns.
 * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
 *                     input parameters as defined in
 *                     include/uapi/linux/iommufd.h. The @user_data can be
 *                     optionally provided, the new domain must support
 *                     __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
 *                     returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types. This is the
 *                       same as invoking domain_alloc_paging_flags() with
 *                       @flags=0, @user_data=NULL. A driver should implement
 *                       only one of the two ops.
 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
 * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the groups domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                           iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind
 *                the @dev, as the set of virtualization resources shared/passed
 *                to user space IOMMU instance, and associate it with a nesting
 *                @parent_domain. The @viommu_type must be defined in
 *                include/uapi/linux/iommufd.h. Callers are required to use the
 *                iommufd_viommu_alloc() helper for a bundled allocation of the
 *                core and the driver structures, using the given @ictx pointer.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @release_domain: If not NULL, a domain attached to a device while it is
 *                  being released, so that in-flight DMA is quiesced before
 *                  @release_device runs.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
 *                    no user domain for each PASID and the I/O page faults are
 *                    forwarded through the user domain attached to the device
 *                    RID.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_paging_flags)(
		struct device *dev, u32 flags,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
						 struct mm_struct *mm);
	struct iommu_domain *(*domain_alloc_nested)(
		struct device *dev, struct iommu_domain *parent, u32 flags,
		const struct iommu_user_data *user_data);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	struct iommufd_viommu *(*viommu_alloc)(
		struct device *dev, struct iommu_domain *parent_domain,
		struct iommufd_ctx *ictx, unsigned int viommu_type);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
	u8 user_pasid_table:1;
};
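
/*
 * Example (illustrative sketch, not from the mainline sources): the
 * skeleton of a driver's ops tables. All mydrv_* names are hypothetical;
 * the domain ops structure is described below. generic_device_group() is
 * the real helper declared later in this header:
 *
 *	static const struct iommu_domain_ops mydrv_domain_ops = {
 *		.attach_dev	= mydrv_attach_dev,
 *		.map_pages	= mydrv_map_pages,
 *		.unmap_pages	= mydrv_unmap_pages,
 *		.iotlb_sync	= mydrv_iotlb_sync,
 *		.iova_to_phys	= mydrv_iova_to_phys,
 *		.free		= mydrv_domain_free,
 *	};
 *
 *	static const struct iommu_ops mydrv_iommu_ops = {
 *		.capable		= mydrv_capable,
 *		.domain_alloc_paging	= mydrv_domain_alloc_paging,
 *		.probe_device		= mydrv_probe_device,
 *		.release_device		= mydrv_release_device,
 *		.device_group		= generic_device_group,
 *		.get_resv_regions	= mydrv_get_resv_regions,
 *		.of_xlate		= mydrv_of_xlate,
 *		.pgsize_bitmap		= SZ_4K | SZ_2M | SZ_1G,
 *		.owner			= THIS_MODULE,
 *		.default_domain_ops	= &mydrv_domain_ops,
 *	};
 */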

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set or replace an iommu domain for a PASID of a device. On
 *                 error, the PASID of the device must be left in its old
 *                 configuration.
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in form
 *                         of a driver data structure. The driver must update
 *                         array->entry_num to report the number of handled
 *                         invalidation requests. The driver data structure
 *                         must be defined in include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid, struct iommu_domain *old);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle used to match endpoint devices to this iommu
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @lock: protects this structure
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec: IOMMU fwspec data
 * @iommu_dev: IOMMU device this device is linked to
 * @priv: IOMMU Driver private data
 * @max_pasids: number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu *fault_param;
	struct iommu_fwspec *fwspec;
	struct iommu_device *iommu_dev;
	void *priv;
	u32 max_pasids;
	u32 attach_deferred:1;
	u32 pci_32bit_workaround:1;
	u32 require_direct:1;
	u32 shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from within the iommu_ops; the
 * core code guarantees it will not invoke an op on a device without an
 * attached iommu, so the iommu_device is valid here.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	 container_of(__iommu_get_iommu_dev(dev), type, member)
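
/*
 * Example (illustrative sketch, not from the mainline sources): drivers
 * usually embed struct iommu_device in their own per-instance structure
 * and recover it with the macro above from inside an op. The mydrv_*
 * names are hypothetical:
 *
 *	struct mydrv_iommu {
 *		void __iomem *regs;
 *		struct iommu_device iommu;
 *	};
 *
 *	static bool mydrv_capable(struct device *dev, enum iommu_cap cap)
 *	{
 *		struct mydrv_iommu *m =
 *			iommu_get_iommu_dev(dev, struct mydrv_iommu, iommu);
 *
 *		return cap == IOMMU_CAP_CACHE_COHERENCY &&
 *		       readl(m->regs) != 0;
 *	}
 */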

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return iommu_paging_domain_alloc_flags(dev, 0);
}
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);
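
/*
 * Example (illustrative sketch, not from the mainline sources): the basic
 * IOMMU-API consumer flow for an unmanaged paging domain, with error
 * handling mostly elided. @iova and @paddr are hypothetical addresses:
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	domain = iommu_paging_domain_alloc(dev);
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *
 *	ret = iommu_attach_device(domain, dev);
 *	ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *
 *	... device performs DMA to @iova ...
 *
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */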

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
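
/*
 * Example (illustrative sketch, not from the mainline sources): inside a
 * driver's ->unmap_pages() implementation, each invalidated page is
 * accumulated into the gather structure; the core then issues a single
 * ->iotlb_sync() for the whole batch. mydrv_clear_pte() is hypothetical:
 *
 *	static size_t mydrv_unmap_pages(struct iommu_domain *domain,
 *					unsigned long iova, size_t pgsize,
 *					size_t pgcount,
 *					struct iommu_iotlb_gather *gather)
 *	{
 *		size_t unmapped;
 *
 *		for (unmapped = 0; unmapped < pgsize * pgcount;
 *		     unmapped += pgsize) {
 *			mydrv_clear_pte(domain, iova + unmapped);
 *			iommu_iotlb_gather_add_page(domain, gather,
 *						    iova + unmapped, pgsize);
 *		}
 *		return unmapped;
 *	}
 */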

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
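
/*
 * Example (illustrative sketch, not from the mainline sources): a driver's
 * ->read_and_clear_dirty() walks its page tables over [iova, iova + size)
 * and reports dirtied ranges through the helper above, leaving the dirty
 * bits set when IOMMU_DIRTY_NO_CLEAR is passed in @flags.
 * mydrv_test_and_clear_dirty() is hypothetical:
 *
 *	static int mydrv_read_and_clear_dirty(struct iommu_domain *domain,
 *					      unsigned long iova, size_t size,
 *					      unsigned long flags,
 *					      struct iommu_dirty_bitmap *dirty)
 *	{
 *		unsigned long end = iova + size;
 *
 *		for (; iova < end; iova += SZ_4K)
 *			if (mydrv_test_and_clear_dirty(domain, iova, flags))
 *				iommu_dirty_bitmap_record(dirty, iova, SZ_4K);
 *		return 0;
 *	}
 */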

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and
 * are not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	struct fwnode_handle *iommu_fwnode;
	u32 flags;
	unsigned int num_ids;
	u32 ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS		(1 << 0)
/* CANWBS is supported */
#define IOMMU_FWSPEC_PCI_RC_CANWBS	(1 << 1)

/*
 * An iommu attach handle represents a relationship between an iommu domain
 * and a PASID or RID of a device. It is allocated and managed by the component
 * that manages the domain and is stored in the iommu group during the time the
 * domain is attached.
 */
struct iommu_attach_handle {
	struct iommu_domain *domain;
};

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct iommu_attach_handle handle;
	struct device *dev;
	refcount_t users;
};

struct iommu_mm_data {
	u32 pasid;
	struct list_head sva_domains;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
								   unsigned int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

/* Stub signature matches the real iommu_unmap_fast() declaration above */
static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
					   unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode)
{
	return -ENODEV;
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid,
					    struct iommu_attach_handle *handle)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IRQ_MSI_IOMMU
#ifdef CONFIG_IOMMU_API
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
#else
static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
					phys_addr_t msi_addr)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */
#endif /* CONFIG_IRQ_MSI_IOMMU */

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif
1507
/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 *
 * Return: the number of bytes mapped on success, or a negative error code
 * on failure.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
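
/*
 * Example usage (illustrative sketch only, never compiled): a hypothetical
 * driver mapping a pre-built sg_table at a caller-chosen IOVA with
 * read/write permissions. example_map_buffer() is made up for illustration
 * and is not part of this header's API.
 */
#if 0
static int example_map_buffer(struct iommu_domain *domain, unsigned long iova,
			      struct sg_table *sgt)
{
	ssize_t mapped;

	mapped = iommu_map_sgtable(domain, iova, sgt,
				   IOMMU_READ | IOMMU_WRITE);
	if (mapped < 0)
		return mapped;	/* negative errno propagated from iommu_map_sg() */

	/* On success, @mapped bytes of the buffer are now mapped at @iova. */
	return 0;
}
#endif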

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
#else /* CONFIG_IOMMU_DMA */
static inline int iommu_get_msi_cookie(struct iommu_domain *domain,
					dma_addr_t base)
{
	return -ENODEV;
}
#endif /* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly
 * programmed into some registers. These are always paired with a Tegra SMMU
 * or ARM SMMU, for which the contents of the struct iommu_fwspec are known.
 * Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}
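
/*
 * Example usage (illustrative sketch only, never compiled): a hypothetical
 * Tegra platform driver that falls back to bypass when no single stream ID
 * can be derived from the device's iommu_fwspec. example_program_sid() is
 * made up for illustration.
 */
#if 0
static void example_program_sid(struct device *dev)
{
	u32 sid;

	if (!tegra_dev_iommu_get_stream_id(dev, &sid))
		sid = TEGRA_STREAM_ID_BYPASS;	/* no usable fwspec ID */

	/* ... write @sid into the device's stream ID register ... */
}
#endif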

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), a new mm will be memcpy'd from an old one and
	 * that makes the new mm and the old one point to the same iommu_mm
	 * instance. When either one of the two mms gets released, the
	 * iommu_mm instance is freed, leaving the other mm running into a
	 * use-after-free/double-free problem. To avoid the problem, zeroing
	 * the iommu_mm pointer of a new mm is needed here.
	 */
	mm->iommu_mm = NULL;
}
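
/*
 * Illustrative flow (a sketch, not kernel code) of the hazard described in
 * the comment above:
 *
 *	dup_mm()                        // fork path
 *	  memcpy(new_mm, old_mm, ...)   // new_mm->iommu_mm aliases old_mm's
 *	  mm_pasid_init(new_mm)         // resets new_mm->iommu_mm to NULL
 *
 * Without the reset, releasing either mm would free the shared iommu_mm and
 * leave the surviving mm with a dangling pointer.
 */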

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_MM_DATA */
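
/*
 * Example usage (illustrative sketch only, never compiled): a hypothetical
 * SVA-capable driver binding a process address space to a device and handing
 * the resulting PASID to hardware. example_sva_start() is made up for
 * illustration.
 */
#if 0
static int example_sva_start(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* ... program @pasid into the device's work submission path ... */
	return 0;
}
#endif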

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline int
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
	return -ENODEV;
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
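
/*
 * Example usage (illustrative sketch only, never compiled): the typical
 * lifecycle of an I/O page fault queue from an IOMMU driver's perspective.
 * Error handling is abbreviated and example_iopf_setup() is made up for
 * illustration.
 */
#if 0
static struct iopf_queue *example_iopf_setup(struct device *dev)
{
	struct iopf_queue *queue;

	queue = iopf_queue_alloc(dev_name(dev));
	if (!queue)
		return NULL;

	if (iopf_queue_add_device(queue, dev)) {
		iopf_queue_free(queue);
		return NULL;
	}

	/*
	 * Faults reported via iommu_report_device_fault() are now queued and
	 * handlers complete them with iopf_group_response(). On teardown:
	 *
	 *	iopf_queue_remove_device(queue, dev);
	 *	iopf_queue_free(queue);
	 */
	return queue;
}
#endif
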
#endif /* __LINUX_IOMMU_H */