/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
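
/*
 * Example (illustrative sketch, not part of this API): a caller granting a
 * device read/write access to a cache-coherent buffer would typically OR
 * these flags together as the prot argument of iommu_map():
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_4K, prot, GFP_KERNEL);
 */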

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_fault_param;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types, can be expanded for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
	u32	flags;
	u32	pasid;
	u32	grpid;
	u32	perm;
	u64	addr;
	u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32	pasid;
	u32	grpid;
	u32	code;
};

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	size_t fault_count;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_attach_handle *attach_handle;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
	/* Used by handler provider to hook the group on its own lists. */
	struct list_head node;
	u32 cookie;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  dma_api stuff. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	int (*iopf_handler)(struct iopf_group *group);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva-domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID	(-1U)
typedef unsigned int ioasid_t;

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR	(1 << 0)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the IO
 *                        pagetables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user data is a uAPI structure defined in include/uapi/linux/iommufd.h.
 * @type, @uptr and @len should simply be copied from the iommufd core uAPI
 * structure.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
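
/*
 * Usage sketch (hypothetical driver struct and data type, for illustration
 * only): a driver whose uAPI struct "struct iommu_hwpt_foo" initially ended
 * at member "flags" would copy it in with:
 *
 *	struct iommu_hwpt_foo data;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&data, user_data,
 *					  IOMMU_HWPT_DATA_FOO, flags);
 *	if (ret)
 *		return ret;
 *
 * Members appended in later uAPI versions are zero-filled when user space
 * passes the shorter initial struct.
 */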

/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                            \
	__iommu_copy_struct_from_user_array(                                   \
		kdst, user_array, data_type, index, sizeof(*(kdst)),           \
		offsetofend(typeof(*(kdst)), min_last))
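
/*
 * Usage sketch (illustrative only; "inv", "last_member" and
 * IOMMU_HWPT_INVALIDATE_DATA_FOO are hypothetical): a
 * ->cache_invalidate_user() implementation would typically walk the array
 * one entry at a time:
 *
 *	for (i = 0; i < array->entry_num; i++) {
 *		ret = iommu_copy_struct_from_user_array(&inv, array,
 *						IOMMU_HWPT_INVALIDATE_DATA_FOO,
 *						i, last_member);
 *		if (ret)
 *			break;
 *		... queue one invalidation based on "inv" ...
 *	}
 */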

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain on success, otherwise
 *                NULL. The domain is not fully initialized until the caller
 *                of iommu_domain_alloc() returns.
 * @domain_alloc_user: Allocate an iommu domain corresponding to the input
 *                     parameters as defined in include/uapi/linux/iommufd.h.
 *                     Unlike @domain_alloc, it is called only by IOMMUFD and
 *                     must fully initialize the new domain before return.
 *                     Upon success, if the @user_data is valid and the @parent
 *                     points to a kernel-managed domain, the new domain must be
 *                     IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
 *                     NULL while the @user_data can be optionally provided, and
 *                     the new domain must support __IOMMU_DOMAIN_PAGING.
 *                     Upon failure, ERR_PTR must be returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types.
 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                               iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
 *                    no user domain for each PASID and the I/O page faults are
 *                    forwarded through the user domain attached to the device
 *                    RID.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_user)(
		struct device *dev, u32 flags, struct iommu_domain *parent,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
						 struct mm_struct *mm);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid,
				 struct iommu_domain *domain);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
	u8 user_pasid_table:1;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set an iommu domain for a pasid of a device
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map_pages to the
 *                  hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in form
 *                         of a driver data structure. The driver must update
 *                         array->entry_num to report the number of handled
 *                         invalidation requests. The driver data structure
 *                         must be defined in include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware node handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu *fault_param;
	struct iommu_fwspec *fwspec;
	struct iommu_device *iommu_dev;
	void *priv;
	u32 max_pasids;
	u32 attach_deferred:1;
	u32 pci_32bit_workaround:1;
	u32 require_direct:1;
	u32 shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from the iommu_ops
 * to retrieve the iommu_device for a device; the core code
 * guarantees that it will not invoke the op without an attached iommu.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	 container_of(__iommu_get_iommu_dev(dev), type, member)

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
struct iommu_domain *iommu_paging_domain_alloc(struct device *dev);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);
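
/*
 * Typical IOMMU-API flow (sketch only; error handling elided, assumes a
 * valid device and page-aligned iova/paddr/size):
 *
 *	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, size,
 *		  IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	(device performs DMA through the mapping here)
 *	iommu_unmap(domain, iova, size);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */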

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
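
/*
 * Sketch of how a driver's ->unmap_pages() implementation might feed the
 * gather structure (clear_pte() is a hypothetical driver-specific helper):
 *
 *	while (pgcount--) {
 *		clear_pte(domain, iova);
 *		iommu_iotlb_gather_add_page(domain, gather, iova, pgsize);
 *		iova += pgsize;
 *	}
 *
 * The core later passes the same gather to ->iotlb_sync(), which flushes
 * the accumulated range in one go.
 */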

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
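
/*
 * Sketch of a ->read_and_clear_dirty() walk (pte_test_and_clear_dirty() is
 * a hypothetical driver-specific helper):
 *
 *	if (pte_test_and_clear_dirty(pte))
 *		iommu_dirty_bitmap_record(dirty, iova, pgsize);
 *
 * Recording through the helper both sets the bits in the IOVA bitmap and,
 * when a gather was supplied, accumulates the range for a later IOTLB flush.
 */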

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this structure should be
 * considered private to the IOMMU device driver and are not to be used directly by IOMMU
 * consumers.
 */
struct iommu_fwspec {
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/*
 * An iommu attach handle represents a relationship between an iommu domain
 * and a PASID or RID of a device. It is allocated and managed by the component
 * that manages the domain and is stored in the iommu group while the domain
 * is attached.
 */
struct iommu_attach_handle {
	struct iommu_domain		*domain;
};

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct iommu_attach_handle	handle;
	struct device			*dev;
	refcount_t			users;
};

struct iommu_mm_data {
	u32			pasid;
	struct list_head	sva_domains;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool iommu_present(const struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
					   unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid,
					    struct iommu_attach_handle *handle)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
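
/*
 * Illustrative use (assumes a populated sg_table and a domain the device is
 * attached to):
 *
 *	ssize_t mapped = iommu_map_sgtable(domain, iova, sgt,
 *					   IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < 0)
 *		return mapped;
 *	(on success, "mapped" bytes of IOVA space start at iova)
 */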

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif /* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
 * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
 * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), a new mm will be memcpy'd from an old one and that
	 * makes the new mm and the old one point to the same iommu_mm
	 * instance. When either one of the two mms gets released, the
	 * iommu_mm instance is freed, leaving the other mm running into a
	 * use-after-free/double-free problem. To avoid the problem, zeroing
	 * the iommu_mm pointer of a new mm is needed here.
	 */
	mm->iommu_mm = NULL;
}

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_MM_DATA */

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline int
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
	return -ENODEV;
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
#endif /* __LINUX_IOMMU_H */