1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 4 * Author: Joerg Roedel <joerg.roedel@amd.com> 5 */ 6 7 #ifndef __LINUX_IOMMU_H 8 #define __LINUX_IOMMU_H 9 10 #include <linux/scatterlist.h> 11 #include <linux/device.h> 12 #include <linux/types.h> 13 #include <linux/errno.h> 14 #include <linux/err.h> 15 #include <linux/of.h> 16 #include <linux/iova_bitmap.h> 17 #include <uapi/linux/iommufd.h> 18 19 #define IOMMU_READ (1 << 0) 20 #define IOMMU_WRITE (1 << 1) 21 #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ 22 #define IOMMU_NOEXEC (1 << 3) 23 #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ 24 /* 25 * Where the bus hardware includes a privilege level as part of its access type 26 * markings, and certain devices are capable of issuing transactions marked as 27 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other 28 * given permission flags only apply to accesses at the higher privilege level, 29 * and that unprivileged transactions should have as little access as possible. 30 * This would usually imply the same permissions as kernel mappings on the CPU, 31 * if the IOMMU page table format is equivalent. 32 */ 33 #define IOMMU_PRIV (1 << 5) 34 35 struct iommu_ops; 36 struct iommu_group; 37 struct bus_type; 38 struct device; 39 struct iommu_domain; 40 struct iommu_domain_ops; 41 struct iommu_dirty_ops; 42 struct notifier_block; 43 struct iommu_sva; 44 struct iommu_dma_cookie; 45 struct iommu_dma_msi_cookie; 46 struct iommu_fault_param; 47 struct iommufd_ctx; 48 struct iommufd_viommu; 49 struct msi_desc; 50 struct msi_msg; 51 52 #define IOMMU_FAULT_PERM_READ (1 << 0) /* read */ 53 #define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */ 54 #define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */ 55 #define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */ 56 57 /* Generic fault types, can be expanded IRQ remapping fault */ 58 enum iommu_fault_type { 59 IOMMU_FAULT_PAGE_REQ = 1, /* page request fault */ 60 }; 61 62 /** 63 * struct iommu_fault_page_request - Page Request data 64 * @flags: encodes whether the corresponding fields are valid and whether this 65 * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values). 66 * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response 67 * must have the same PASID value as the page request. When it is clear, 68 * the page response should not have a PASID. 69 * @pasid: Process Address Space ID 70 * @grpid: Page Request Group Index 71 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values) 72 * @addr: page address 73 * @private_data: device-specific private information 74 */ 75 struct iommu_fault_page_request { 76 #define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0) 77 #define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1) 78 #define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 2) 79 u32 flags; 80 u32 pasid; 81 u32 grpid; 82 u32 perm; 83 u64 addr; 84 u64 private_data[2]; 85 }; 86 87 /** 88 * struct iommu_fault - Generic fault data 89 * @type: fault type from &enum iommu_fault_type 90 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ 91 */ 92 struct iommu_fault { 93 u32 type; 94 struct iommu_fault_page_request prm; 95 }; 96 97 /** 98 * enum iommu_page_response_code - Return status of fault handlers 99 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables 100 * populated, retry the access. This is "Success" in PCI PRI. 101 * @IOMMU_PAGE_RESP_FAILURE: General error. 
Drop all subsequent faults from 102 * this device if possible. This is "Response Failure" in PCI PRI. 103 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the 104 * access. This is "Invalid Request" in PCI PRI. 105 */ 106 enum iommu_page_response_code { 107 IOMMU_PAGE_RESP_SUCCESS = 0, 108 IOMMU_PAGE_RESP_INVALID, 109 IOMMU_PAGE_RESP_FAILURE, 110 }; 111 112 /** 113 * struct iommu_page_response - Generic page response information 114 * @pasid: Process Address Space ID 115 * @grpid: Page Request Group Index 116 * @code: response code from &enum iommu_page_response_code 117 */ 118 struct iommu_page_response { 119 u32 pasid; 120 u32 grpid; 121 u32 code; 122 }; 123 124 struct iopf_fault { 125 struct iommu_fault fault; 126 /* node for pending lists */ 127 struct list_head list; 128 }; 129 130 struct iopf_group { 131 struct iopf_fault last_fault; 132 struct list_head faults; 133 size_t fault_count; 134 /* list node for iommu_fault_param::faults */ 135 struct list_head pending_node; 136 struct work_struct work; 137 struct iommu_attach_handle *attach_handle; 138 /* The device's fault data parameter. */ 139 struct iommu_fault_param *fault_param; 140 /* Used by handler provider to hook the group on its own lists. */ 141 struct list_head node; 142 u32 cookie; 143 }; 144 145 /** 146 * struct iopf_queue - IO Page Fault queue 147 * @wq: the fault workqueue 148 * @devices: devices attached to this queue 149 * @lock: protects the device list 150 */ 151 struct iopf_queue { 152 struct workqueue_struct *wq; 153 struct list_head devices; 154 struct mutex lock; 155 }; 156 157 /* iommu fault flags */ 158 #define IOMMU_FAULT_READ 0x0 159 #define IOMMU_FAULT_WRITE 0x1 160 161 typedef int (*iommu_fault_handler_t)(struct iommu_domain *, 162 struct device *, unsigned long, int, void *); 163 164 struct iommu_domain_geometry { 165 dma_addr_t aperture_start; /* First address that can be mapped */ 166 dma_addr_t aperture_end; /* Last address that can be mapped */ 167 bool force_aperture; /* DMA only allowed in mappable range? */ 168 }; 169 170 enum iommu_domain_cookie_type { 171 IOMMU_COOKIE_NONE, 172 IOMMU_COOKIE_DMA_IOVA, 173 IOMMU_COOKIE_DMA_MSI, 174 IOMMU_COOKIE_FAULT_HANDLER, 175 IOMMU_COOKIE_SVA, 176 IOMMU_COOKIE_IOMMUFD, 177 }; 178 179 /* Domain feature flags */ 180 #define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */ 181 #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API 182 implementation */ 183 #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ 184 #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */ 185 186 #define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */ 187 #define __IOMMU_DOMAIN_PLATFORM (1U << 5) 188 189 #define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested 190 on a stage-2 translation */ 191 192 #define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ 193 /* 194 * These are the possible domain types 195 * 196 * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate 197 * devices 198 * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses 199 * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used 200 * for VMs 201 * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. 202 * This flag allows IOMMU drivers to implement 203 * certain optimizations for these domains 204 * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB 205 * invalidation.
206 * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses 207 * represented by mm_struct's. 208 * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own 209 * dma_api stuff. Do not use in new drivers. 210 */ 211 #define IOMMU_DOMAIN_BLOCKED (0U) 212 #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) 213 #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) 214 #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ 215 __IOMMU_DOMAIN_DMA_API) 216 #define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \ 217 __IOMMU_DOMAIN_DMA_API | \ 218 __IOMMU_DOMAIN_DMA_FQ) 219 #define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA) 220 #define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM) 221 #define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED) 222 223 struct iommu_domain { 224 unsigned type; 225 enum iommu_domain_cookie_type cookie_type; 226 const struct iommu_domain_ops *ops; 227 const struct iommu_dirty_ops *dirty_ops; 228 const struct iommu_ops *owner; /* Whose domain_alloc we came from */ 229 unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ 230 struct iommu_domain_geometry geometry; 231 int (*iopf_handler)(struct iopf_group *group); 232 233 union { /* cookie */ 234 struct iommu_dma_cookie *iova_cookie; 235 struct iommu_dma_msi_cookie *msi_cookie; 236 struct iommufd_hw_pagetable *iommufd_hwpt; 237 struct { 238 iommu_fault_handler_t handler; 239 void *handler_token; 240 }; 241 struct { /* IOMMU_DOMAIN_SVA */ 242 struct mm_struct *mm; 243 int users; 244 /* 245 * Next iommu_domain in mm->iommu_mm->sva-domains list 246 * protected by iommu_sva_lock. 247 */ 248 struct list_head next; 249 }; 250 }; 251 }; 252 253 static inline bool iommu_is_dma_domain(struct iommu_domain *domain) 254 { 255 return domain->type & __IOMMU_DOMAIN_DMA_API; 256 } 257 258 enum iommu_cap { 259 IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */ 260 IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ 261 IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for 262 DMA protection and we should too */ 263 /* 264 * Per-device flag indicating if enforce_cache_coherency() will work on 265 * this device. 266 */ 267 IOMMU_CAP_ENFORCE_CACHE_COHERENCY, 268 /* 269 * IOMMU driver does not issue TLB maintenance during .unmap, so can 270 * usefully support the non-strict DMA flush queue. 271 */ 272 IOMMU_CAP_DEFERRED_FLUSH, 273 IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */ 274 }; 275 276 /* These are the possible reserved region types */ 277 enum iommu_resv_type { 278 /* Memory regions which must be mapped 1:1 at all times */ 279 IOMMU_RESV_DIRECT, 280 /* 281 * Memory regions which are advertised to be 1:1 but are 282 * commonly considered relaxable in some conditions, 283 * for instance in device assignment use case (USB, Graphics) 284 */ 285 IOMMU_RESV_DIRECT_RELAXABLE, 286 /* Arbitrary "never map this or give it to a device" address ranges */ 287 IOMMU_RESV_RESERVED, 288 /* Hardware MSI region (untranslated) */ 289 IOMMU_RESV_MSI, 290 /* Software-managed MSI translation window */ 291 IOMMU_RESV_SW_MSI, 292 }; 293 294 /** 295 * struct iommu_resv_region - descriptor for a reserved memory region 296 * @list: Linked list pointers 297 * @start: System physical start address of the region 298 * @length: Length of the region in bytes 299 * @prot: IOMMU Protection flags (READ/WRITE/...) 
300 * @type: Type of the reserved region 301 * @free: Callback to free associated memory allocations 302 */ 303 struct iommu_resv_region { 304 struct list_head list; 305 phys_addr_t start; 306 size_t length; 307 int prot; 308 enum iommu_resv_type type; 309 void (*free)(struct device *dev, struct iommu_resv_region *region); 310 }; 311 312 struct iommu_iort_rmr_data { 313 struct iommu_resv_region rr; 314 315 /* Stream IDs associated with IORT RMR entry */ 316 const u32 *sids; 317 u32 num_sids; 318 }; 319 320 #define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */ 321 #define IOMMU_FIRST_GLOBAL_PASID (1U) /*starting range for allocation */ 322 #define IOMMU_PASID_INVALID (-1U) 323 typedef unsigned int ioasid_t; 324 325 /* Read but do not clear any dirty bits */ 326 #define IOMMU_DIRTY_NO_CLEAR (1 << 0) 327 328 /* 329 * Pages allocated through iommu_alloc_pages_node_sz() can be placed on this 330 * list using iommu_pages_list_add(). Note: ONLY pages from 331 * iommu_alloc_pages_node_sz() can be used this way! 332 */ 333 struct iommu_pages_list { 334 struct list_head pages; 335 }; 336 337 #define IOMMU_PAGES_LIST_INIT(name) \ 338 ((struct iommu_pages_list){ .pages = LIST_HEAD_INIT(name.pages) }) 339 340 #ifdef CONFIG_IOMMU_API 341 342 /** 343 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush 344 * 345 * @start: IOVA representing the start of the range to be flushed 346 * @end: IOVA representing the end of the range to be flushed (inclusive) 347 * @pgsize: The interval at which to perform the flush 348 * @freelist: Removed pages to free after sync 349 * @queued: Indicates that the flush will be queued 350 * 351 * This structure is intended to be updated by multiple calls to the 352 * ->unmap() function in struct iommu_ops before eventually being passed 353 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after 354 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to 355 * them. @queued is set to indicate when ->iotlb_flush_all() will be called 356 * later instead of ->iotlb_sync(), so drivers may optimise accordingly. 357 */ 358 struct iommu_iotlb_gather { 359 unsigned long start; 360 unsigned long end; 361 size_t pgsize; 362 struct iommu_pages_list freelist; 363 bool queued; 364 }; 365 366 /** 367 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state 368 * @bitmap: IOVA bitmap 369 * @gather: Range information for a pending IOTLB flush 370 */ 371 struct iommu_dirty_bitmap { 372 struct iova_bitmap *bitmap; 373 struct iommu_iotlb_gather *gather; 374 }; 375 376 /** 377 * struct iommu_dirty_ops - domain specific dirty tracking operations 378 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain 379 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled 380 * into a bitmap, with a bit represented as a page. 381 * Reads the dirty PTE bits and clears it from IO 382 * pagetables. 
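 *
 * A hedged sketch of a @read_and_clear_dirty implementation: the driver walks
 * its own page tables for the requested range and reports dirty spans via
 * iommu_dirty_bitmap_record(), honouring IOMMU_DIRTY_NO_CLEAR. The helpers
 * mydrv_for_each_pte(), mydrv_pte_dirty() and mydrv_pte_clear_dirty() are
 * hypothetical, not part of this header:
 *
 *	mydrv_for_each_pte(domain, iova, size, pte, pte_iova, pte_size) {
 *		if (!mydrv_pte_dirty(pte))
 *			continue;
 *		if (!(flags & IOMMU_DIRTY_NO_CLEAR))
 *			mydrv_pte_clear_dirty(pte);
 *		iommu_dirty_bitmap_record(dirty, pte_iova, pte_size);
 *	}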
383 */ 384 struct iommu_dirty_ops { 385 int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled); 386 int (*read_and_clear_dirty)(struct iommu_domain *domain, 387 unsigned long iova, size_t size, 388 unsigned long flags, 389 struct iommu_dirty_bitmap *dirty); 390 }; 391 392 /** 393 * struct iommu_user_data - iommu driver specific user space data info 394 * @type: The data type of the user buffer 395 * @uptr: Pointer to the user buffer for copy_from_user() 396 * @len: The length of the user buffer in bytes 397 * 398 * A user space data is an uAPI that is defined in include/uapi/linux/iommufd.h 399 * @type, @uptr and @len should be just copied from an iommufd core uAPI struct. 400 */ 401 struct iommu_user_data { 402 unsigned int type; 403 void __user *uptr; 404 size_t len; 405 }; 406 407 /** 408 * struct iommu_user_data_array - iommu driver specific user space data array 409 * @type: The data type of all the entries in the user buffer array 410 * @uptr: Pointer to the user buffer array 411 * @entry_len: The fixed-width length of an entry in the array, in bytes 412 * @entry_num: The number of total entries in the array 413 * 414 * The user buffer includes an array of requests with format defined in 415 * include/uapi/linux/iommufd.h 416 */ 417 struct iommu_user_data_array { 418 unsigned int type; 419 void __user *uptr; 420 size_t entry_len; 421 u32 entry_num; 422 }; 423 424 /** 425 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data 426 * @dst_data: Pointer to an iommu driver specific user data that is defined in 427 * include/uapi/linux/iommufd.h 428 * @src_data: Pointer to a struct iommu_user_data for user space data info 429 * @data_type: The data type of the @dst_data. Must match with @src_data.type 430 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst) 431 * @min_len: Initial length of user data structure for backward compatibility. 432 * This should be offsetofend using the last member in the user data 433 * struct that was initially added to include/uapi/linux/iommufd.h 434 */ 435 static inline int __iommu_copy_struct_from_user( 436 void *dst_data, const struct iommu_user_data *src_data, 437 unsigned int data_type, size_t data_len, size_t min_len) 438 { 439 if (WARN_ON(!dst_data || !src_data)) 440 return -EINVAL; 441 if (src_data->type != data_type) 442 return -EINVAL; 443 if (src_data->len < min_len || data_len < src_data->len) 444 return -EINVAL; 445 return copy_struct_from_user(dst_data, data_len, src_data->uptr, 446 src_data->len); 447 } 448 449 /** 450 * iommu_copy_struct_from_user - Copy iommu driver specific user space data 451 * @kdst: Pointer to an iommu driver specific user data that is defined in 452 * include/uapi/linux/iommufd.h 453 * @user_data: Pointer to a struct iommu_user_data for user space data info 454 * @data_type: The data type of the @kdst. Must match with @user_data->type 455 * @min_last: The last member of the data structure @kdst points in the initial 456 * version. 457 * Return 0 for success, otherwise -error. 
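 *
 * A minimal usage sketch, as a driver's domain allocation path might use it.
 * The struct name, type value and member below are hypothetical, not actual
 * definitions from include/uapi/linux/iommufd.h:
 *
 *	struct iommu_hwpt_mydrv_data data;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&data, user_data,
 *					  IOMMU_HWPT_DATA_MYDRV, last_member);
 *	if (ret)
 *		return ERR_PTR(ret);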
458 */ 459 #define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \ 460 __iommu_copy_struct_from_user(kdst, user_data, data_type, \ 461 sizeof(*kdst), \ 462 offsetofend(typeof(*kdst), min_last)) 463 464 /** 465 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space 466 * data from an iommu_user_data_array 467 * @dst_data: Pointer to an iommu driver specific user data that is defined in 468 * include/uapi/linux/iommufd.h 469 * @src_array: Pointer to a struct iommu_user_data_array for a user space array 470 * @data_type: The data type of the @dst_data. Must match with @src_array.type 471 * @index: Index to the location in the array to copy user data from 472 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst) 473 * @min_len: Initial length of user data structure for backward compatibility. 474 * This should be offsetofend using the last member in the user data 475 * struct that was initially added to include/uapi/linux/iommufd.h 476 */ 477 static inline int __iommu_copy_struct_from_user_array( 478 void *dst_data, const struct iommu_user_data_array *src_array, 479 unsigned int data_type, unsigned int index, size_t data_len, 480 size_t min_len) 481 { 482 struct iommu_user_data src_data; 483 484 if (WARN_ON(!src_array || index >= src_array->entry_num)) 485 return -EINVAL; 486 if (!src_array->entry_num) 487 return -EINVAL; 488 src_data.uptr = src_array->uptr + src_array->entry_len * index; 489 src_data.len = src_array->entry_len; 490 src_data.type = src_array->type; 491 492 return __iommu_copy_struct_from_user(dst_data, &src_data, data_type, 493 data_len, min_len); 494 } 495 496 /** 497 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space 498 * data from an iommu_user_data_array 499 * @kdst: Pointer to an iommu driver specific user data that is defined in 500 * include/uapi/linux/iommufd.h 501 * @user_array: Pointer to a struct iommu_user_data_array for a user space 502 * array 503 * @data_type: The data type of the @kdst. Must match with @user_array->type 504 * @index: Index to the location in the array to copy user data from 505 * @min_last: The last member of the data structure @kdst points in the 506 * initial version. 507 * 508 * Copy a single entry from a user array. Return 0 for success, otherwise 509 * -error. 510 */ 511 #define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \ 512 min_last) \ 513 __iommu_copy_struct_from_user_array( \ 514 kdst, user_array, data_type, index, sizeof(*(kdst)), \ 515 offsetofend(typeof(*(kdst)), min_last)) 516 517 /** 518 * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user 519 * space data from an iommu_user_data_array 520 * @kdst: Pointer to an iommu driver specific user data that is defined in 521 * include/uapi/linux/iommufd.h 522 * @kdst_entry_size: sizeof(*kdst) 523 * @user_array: Pointer to a struct iommu_user_data_array for a user space 524 * array 525 * @data_type: The data type of the @kdst. Must match with @user_array->type 526 * 527 * Copy the entire user array. kdst must have room for kdst_entry_size * 528 * user_array->entry_num bytes. Return 0 for success, otherwise -error. 
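 *
 * A usage sketch for the whole-array form; "struct mydrv_cmd" and
 * IOMMU_MYDRV_CMD_TYPE are hypothetical names used only for illustration:
 *
 *	struct mydrv_cmd *cmds;
 *	int rc;
 *
 *	cmds = kcalloc(user_array->entry_num, sizeof(*cmds), GFP_KERNEL);
 *	if (!cmds)
 *		return -ENOMEM;
 *	rc = iommu_copy_struct_from_full_user_array(cmds, sizeof(*cmds),
 *						    user_array,
 *						    IOMMU_MYDRV_CMD_TYPE);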
529 */ 530 static inline int 531 iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, 532 struct iommu_user_data_array *user_array, 533 unsigned int data_type) 534 { 535 unsigned int i; 536 int ret; 537 538 if (user_array->type != data_type) 539 return -EINVAL; 540 if (!user_array->entry_num) 541 return -EINVAL; 542 if (likely(user_array->entry_len == kdst_entry_size)) { 543 if (copy_from_user(kdst, user_array->uptr, 544 user_array->entry_num * 545 user_array->entry_len)) 546 return -EFAULT; return 0; 547 } 548 549 /* Copy item by item */ 550 for (i = 0; i != user_array->entry_num; i++) { 551 ret = copy_struct_from_user( 552 kdst + kdst_entry_size * i, kdst_entry_size, 553 user_array->uptr + user_array->entry_len * i, 554 user_array->entry_len); 555 if (ret) 556 return ret; 557 } 558 return 0; 559 } 560 561 /** 562 * __iommu_copy_struct_to_user - Report iommu driver specific user space data 563 * @dst_data: Pointer to a struct iommu_user_data for user space data location 564 * @src_data: Pointer to an iommu driver specific user data that is defined in 565 * include/uapi/linux/iommufd.h 566 * @data_type: The data type of the @src_data. Must match with @dst_data.type 567 * @data_len: Length of current user data structure, i.e. sizeof(struct _src) 568 * @min_len: Initial length of user data structure for backward compatibility. 569 * This should be offsetofend using the last member in the user data 570 * struct that was initially added to include/uapi/linux/iommufd.h 571 */ 572 static inline int 573 __iommu_copy_struct_to_user(const struct iommu_user_data *dst_data, 574 void *src_data, unsigned int data_type, 575 size_t data_len, size_t min_len) 576 { 577 if (WARN_ON(!dst_data || !src_data)) 578 return -EINVAL; 579 if (dst_data->type != data_type) 580 return -EINVAL; 581 if (dst_data->len < min_len || data_len < dst_data->len) 582 return -EINVAL; 583 return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data, 584 data_len, NULL); 585 } 586 587 /** 588 * iommu_copy_struct_to_user - Report iommu driver specific user space data 589 * @user_data: Pointer to a struct iommu_user_data for user space data location 590 * @ksrc: Pointer to an iommu driver specific user data that is defined in 591 * include/uapi/linux/iommufd.h 592 * @data_type: The data type of the @ksrc. Must match with @user_data->type 593 * @min_last: The last member of the data structure @ksrc points in the initial 594 * version. 595 * Return 0 for success, otherwise -error. 596 */ 597 #define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last) \ 598 __iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \ 599 offsetofend(typeof(*ksrc), min_last)) 600 601 /** 602 * struct iommu_ops - iommu ops and capabilities 603 * @capable: check capability 604 * @hw_info: report iommu hardware information. The data buffer returned by this 605 * op is allocated in the iommu driver and freed by the caller after 606 * use. @type can input a requested type and output a supported type. 607 * Driver should reject an unsupported data @type input 608 * @domain_alloc: Do not use in new drivers 609 * @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to 610 * use identity_domain instead. This should only be used 611 * if dynamic logic is necessary. 612 * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the 613 * input parameters as defined in 614 * include/uapi/linux/iommufd.h.
The @user_data can be 615 * optionally provided, the new domain must support 616 * __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be 617 * returned. 618 * @domain_alloc_paging: Allocate an iommu_domain that can be used for 619 * UNMANAGED, DMA, and DMA_FQ domain types. This is the 620 * same as invoking domain_alloc_paging_flags() with 621 * @flags=0, @user_data=NULL. A driver should implement 622 * only one of the two ops. 623 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. 624 * @domain_alloc_nested: Allocate an iommu_domain for nested translation. 625 * @probe_device: Add device to iommu driver handling 626 * @release_device: Remove device from iommu driver handling 627 * @probe_finalize: Do final setup work after the device is added to an IOMMU 628 * group and attached to the groups domain 629 * @device_group: find iommu group for a particular device 630 * @get_resv_regions: Request list of reserved regions for a device 631 * @of_xlate: add OF master IDs to iommu grouping 632 * @is_attach_deferred: Check if domain attach should be deferred from iommu 633 * driver init to device driver init (default no) 634 * @page_response: handle page request response 635 * @def_domain_type: device default domain type, return value: 636 * - IOMMU_DOMAIN_IDENTITY: must use an identity domain 637 * - IOMMU_DOMAIN_DMA: must use a dma domain 638 * - 0: use the default setting 639 * @default_domain_ops: the default ops for domains 640 * @get_viommu_size: Get the size of a driver-level vIOMMU structure for a given 641 * @dev corresponding to @viommu_type. Driver should return 0 642 * if vIOMMU isn't supported accordingly. It is required for 643 * driver to use the VIOMMU_STRUCT_SIZE macro to sanitize the 644 * driver-level vIOMMU structure related to the core one 645 * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical 646 * IOMMU instance @viommu->iommu_dev, as the set of virtualization 647 * resources shared/passed to user space IOMMU instance. Associate 648 * it with a nesting @parent_domain. It is required for driver to 649 * set @viommu->ops pointing to its own viommu_ops 650 * @owner: Driver module providing these ops 651 * @identity_domain: An always available, always attachable identity 652 * translation. 653 * @blocked_domain: An always available, always attachable blocking 654 * translation. 655 * @default_domain: If not NULL this will always be set as the default domain. 656 * This should be an IDENTITY/BLOCKED/PLATFORM domain. 657 * Do not use in new drivers. 658 * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is 659 * no user domain for each PASID and the I/O page faults are 660 * forwarded through the user domain attached to the device 661 * RID. 
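 *
 * For orientation, a skeletal instance as a driver might define it; all
 * mydrv_* symbols are hypothetical, and only ops declared in the structure
 * below are referenced:
 *
 *	static const struct iommu_ops mydrv_iommu_ops = {
 *		.capable		= mydrv_capable,
 *		.probe_device		= mydrv_probe_device,
 *		.release_device		= mydrv_release_device,
 *		.device_group		= generic_device_group,
 *		.domain_alloc_paging	= mydrv_domain_alloc_paging,
 *		.identity_domain	= &mydrv_identity_domain,
 *		.blocked_domain		= &mydrv_blocked_domain,
 *		.default_domain_ops	= &mydrv_domain_ops,
 *		.owner			= THIS_MODULE,
 *	};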
662 */ 663 struct iommu_ops { 664 bool (*capable)(struct device *dev, enum iommu_cap); 665 void *(*hw_info)(struct device *dev, u32 *length, 666 enum iommu_hw_info_type *type); 667 668 /* Domain allocation and freeing by the iommu driver */ 669 #if IS_ENABLED(CONFIG_FSL_PAMU) 670 struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); 671 #endif 672 struct iommu_domain *(*domain_alloc_identity)(struct device *dev); 673 struct iommu_domain *(*domain_alloc_paging_flags)( 674 struct device *dev, u32 flags, 675 const struct iommu_user_data *user_data); 676 struct iommu_domain *(*domain_alloc_paging)(struct device *dev); 677 struct iommu_domain *(*domain_alloc_sva)(struct device *dev, 678 struct mm_struct *mm); 679 struct iommu_domain *(*domain_alloc_nested)( 680 struct device *dev, struct iommu_domain *parent, u32 flags, 681 const struct iommu_user_data *user_data); 682 683 struct iommu_device *(*probe_device)(struct device *dev); 684 void (*release_device)(struct device *dev); 685 void (*probe_finalize)(struct device *dev); 686 struct iommu_group *(*device_group)(struct device *dev); 687 688 /* Request/Free a list of reserved regions for a device */ 689 void (*get_resv_regions)(struct device *dev, struct list_head *list); 690 691 int (*of_xlate)(struct device *dev, const struct of_phandle_args *args); 692 bool (*is_attach_deferred)(struct device *dev); 693 694 /* Per device IOMMU features */ 695 void (*page_response)(struct device *dev, struct iopf_fault *evt, 696 struct iommu_page_response *msg); 697 698 int (*def_domain_type)(struct device *dev); 699 700 size_t (*get_viommu_size)(struct device *dev, 701 enum iommu_viommu_type viommu_type); 702 int (*viommu_init)(struct iommufd_viommu *viommu, 703 struct iommu_domain *parent_domain, 704 const struct iommu_user_data *user_data); 705 706 const struct iommu_domain_ops *default_domain_ops; 707 struct module *owner; 708 struct iommu_domain *identity_domain; 709 struct iommu_domain *blocked_domain; 710 struct iommu_domain *release_domain; 711 struct iommu_domain *default_domain; 712 u8 user_pasid_table:1; 713 }; 714 715 /** 716 * struct iommu_domain_ops - domain specific operations 717 * @attach_dev: attach an iommu domain to a device 718 * Return: 719 * * 0 - success 720 * * EINVAL - can indicate that device and domain are incompatible due to 721 * some previous configuration of the domain, in which case the 722 * driver shouldn't log an error, since it is legitimate for a 723 * caller to test reuse of existing domains. Otherwise, it may 724 * still represent some other fundamental problem 725 * * ENOMEM - out of memory 726 * * ENOSPC - non-ENOMEM type of resource allocation failures 727 * * EBUSY - device is attached to a domain and cannot be changed 728 * * ENODEV - device specific errors, not able to be attached 729 * * <others> - treated as ENODEV by the caller. Use is discouraged 730 * @set_dev_pasid: set or replace an iommu domain to a pasid of device. The pasid of 731 * the device should be left in the old config in error case. 732 * @map_pages: map a physically contiguous set of pages of the same size to 733 * an iommu domain. 
734 * @unmap_pages: unmap a number of pages of the same size from an iommu domain 735 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain 736 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware 737 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush 738 * queue 739 * @cache_invalidate_user: Flush hardware cache for user space IO page table. 740 * The @domain must be IOMMU_DOMAIN_NESTED. The @array 741 * passes in the cache invalidation requests, in form 742 * of a driver data structure. The driver must update 743 * array->entry_num to report the number of handled 744 * invalidation requests. The driver data structure 745 * must be defined in include/uapi/linux/iommufd.h 746 * @iova_to_phys: translate iova to physical address 747 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE, 748 * including no-snoop TLPs on PCIe or other platform 749 * specific mechanisms. 750 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) 751 * @free: Release the domain after use. 752 */ 753 struct iommu_domain_ops { 754 int (*attach_dev)(struct iommu_domain *domain, struct device *dev, 755 struct iommu_domain *old); 756 int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev, 757 ioasid_t pasid, struct iommu_domain *old); 758 759 int (*map_pages)(struct iommu_domain *domain, unsigned long iova, 760 phys_addr_t paddr, size_t pgsize, size_t pgcount, 761 int prot, gfp_t gfp, size_t *mapped); 762 size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, 763 size_t pgsize, size_t pgcount, 764 struct iommu_iotlb_gather *iotlb_gather); 765 766 void (*flush_iotlb_all)(struct iommu_domain *domain); 767 int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, 768 size_t size); 769 void (*iotlb_sync)(struct iommu_domain *domain, 770 struct iommu_iotlb_gather *iotlb_gather); 771 int (*cache_invalidate_user)(struct iommu_domain *domain, 772 struct iommu_user_data_array *array); 773 774 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, 775 dma_addr_t iova); 776 777 bool (*enforce_cache_coherency)(struct iommu_domain *domain); 778 int (*set_pgtable_quirks)(struct iommu_domain *domain, 779 unsigned long quirks); 780 781 void (*free)(struct iommu_domain *domain); 782 }; 783 784 /** 785 * struct iommu_device - IOMMU core representation of one IOMMU hardware 786 * instance 787 * @list: Used by the iommu-core to keep a list of registered iommus 788 * @ops: iommu-ops for talking to this iommu 789 * @dev: struct device for sysfs handling 790 * @singleton_group: Used internally for drivers that have only one group 791 * @max_pasids: number of supported PASIDs 792 * @ready: set once iommu_device_register() has completed successfully 793 */ 794 struct iommu_device { 795 struct list_head list; 796 const struct iommu_ops *ops; 797 struct fwnode_handle *fwnode; 798 struct device *dev; 799 struct iommu_group *singleton_group; 800 u32 max_pasids; 801 bool ready; 802 }; 803 804 /** 805 * struct iommu_fault_param - per-device IOMMU fault data 806 * @lock: protect pending faults list 807 * @users: user counter to manage the lifetime of the data 808 * @rcu: rcu head for kfree_rcu() 809 * @dev: the device that owns this param 810 * @queue: IOPF queue 811 * @queue_list: index into queue->devices 812 * @partial: faults that are part of a Page Request Group for which the last 813 * request hasn't been submitted yet. 
814 * @faults: holds the pending faults which need response 815 */ 816 struct iommu_fault_param { 817 struct mutex lock; 818 refcount_t users; 819 struct rcu_head rcu; 820 821 struct device *dev; 822 struct iopf_queue *queue; 823 struct list_head queue_list; 824 825 struct list_head partial; 826 struct list_head faults; 827 }; 828 829 /** 830 * struct dev_iommu - Collection of per-device IOMMU data 831 * 832 * @fault_param: IOMMU detected device fault reporting data 833 * @fwspec: IOMMU fwspec data 834 * @iommu_dev: IOMMU device this device is linked to 835 * @priv: IOMMU Driver private data 836 * @max_pasids: number of PASIDs this device can consume 837 * @attach_deferred: the dma domain attachment is deferred 838 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs 839 * @require_direct: device requires IOMMU_RESV_DIRECT regions 840 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables 841 * 842 * TODO: migrate other per device data pointers under iommu_dev_data, e.g. 843 * struct iommu_group *iommu_group; 844 */ 845 struct dev_iommu { 846 struct mutex lock; 847 struct iommu_fault_param __rcu *fault_param; 848 struct iommu_fwspec *fwspec; 849 struct iommu_device *iommu_dev; 850 void *priv; 851 u32 max_pasids; 852 u32 attach_deferred:1; 853 u32 pci_32bit_workaround:1; 854 u32 require_direct:1; 855 u32 shadow_on_flush:1; 856 }; 857 858 int iommu_device_register(struct iommu_device *iommu, 859 const struct iommu_ops *ops, 860 struct device *hwdev); 861 void iommu_device_unregister(struct iommu_device *iommu); 862 int iommu_device_sysfs_add(struct iommu_device *iommu, 863 struct device *parent, 864 const struct attribute_group **groups, 865 const char *fmt, ...) __printf(4, 5); 866 void iommu_device_sysfs_remove(struct iommu_device *iommu); 867 int iommu_device_link(struct iommu_device *iommu, struct device *link); 868 void iommu_device_unlink(struct iommu_device *iommu, struct device *link); 869 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain); 870 871 static inline struct iommu_device *dev_to_iommu_device(struct device *dev) 872 { 873 return (struct iommu_device *)dev_get_drvdata(dev); 874 } 875 876 /** 877 * iommu_get_iommu_dev - Get iommu_device for a device 878 * @dev: an end-point device 879 * 880 * Note that this function must be called from the iommu_ops 881 * to retrieve the iommu_device for a device; the core code 882 * guarantees that it will not invoke the op without an attached iommu.
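 *
 * Typical use from a driver that embeds struct iommu_device in its own
 * per-instance structure ("struct mydrv_iommu" and its "iommu" member are
 * hypothetical):
 *
 *	struct mydrv_iommu *mydrv =
 *		iommu_get_iommu_dev(dev, struct mydrv_iommu, iommu);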
883 */ 884 static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev) 885 { 886 return dev->iommu->iommu_dev; 887 } 888 889 #define iommu_get_iommu_dev(dev, type, member) \ 890 container_of(__iommu_get_iommu_dev(dev), type, member) 891 892 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) 893 { 894 *gather = (struct iommu_iotlb_gather) { 895 .start = ULONG_MAX, 896 .freelist = IOMMU_PAGES_LIST_INIT(gather->freelist), 897 }; 898 } 899 900 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap); 901 extern bool iommu_group_has_isolated_msi(struct iommu_group *group); 902 struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags); 903 static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) 904 { 905 return iommu_paging_domain_alloc_flags(dev, 0); 906 } 907 extern void iommu_domain_free(struct iommu_domain *domain); 908 extern int iommu_attach_device(struct iommu_domain *domain, 909 struct device *dev); 910 extern void iommu_detach_device(struct iommu_domain *domain, 911 struct device *dev); 912 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); 913 struct iommu_domain *iommu_driver_get_domain_for_dev(struct device *dev); 914 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); 915 extern int iommu_map(struct iommu_domain *domain, unsigned long iova, 916 phys_addr_t paddr, size_t size, int prot, gfp_t gfp); 917 int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova, 918 phys_addr_t paddr, size_t size, int prot, gfp_t gfp); 919 int iommu_sync_map(struct iommu_domain *domain, unsigned long iova, 920 size_t size); 921 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, 922 size_t size); 923 extern size_t iommu_unmap_fast(struct iommu_domain *domain, 924 unsigned long iova, size_t size, 925 struct iommu_iotlb_gather *iotlb_gather); 926 extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 927 struct scatterlist *sg, unsigned int nents, 928 int prot, gfp_t gfp); 929 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); 930 extern void iommu_set_fault_handler(struct iommu_domain *domain, 931 iommu_fault_handler_t handler, void *token); 932 933 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); 934 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); 935 extern void iommu_set_default_passthrough(bool cmd_line); 936 extern void iommu_set_default_translated(bool cmd_line); 937 extern bool iommu_default_passthrough(void); 938 extern struct iommu_resv_region * 939 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, 940 enum iommu_resv_type type, gfp_t gfp); 941 extern int iommu_get_group_resv_regions(struct iommu_group *group, 942 struct list_head *head); 943 944 extern int iommu_attach_group(struct iommu_domain *domain, 945 struct iommu_group *group); 946 extern void iommu_detach_group(struct iommu_domain *domain, 947 struct iommu_group *group); 948 extern struct iommu_group *iommu_group_alloc(void); 949 extern void *iommu_group_get_iommudata(struct iommu_group *group); 950 extern void iommu_group_set_iommudata(struct iommu_group *group, 951 void *iommu_data, 952 void (*release)(void *iommu_data)); 953 extern int iommu_group_set_name(struct iommu_group *group, const char *name); 954 extern int iommu_group_add_device(struct iommu_group *group, 955 struct device *dev); 956 extern void 
iommu_group_remove_device(struct device *dev); 957 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, 958 int (*fn)(struct device *, void *)); 959 extern struct iommu_group *iommu_group_get(struct device *dev); 960 extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group); 961 extern void iommu_group_put(struct iommu_group *group); 962 963 extern int iommu_group_id(struct iommu_group *group); 964 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); 965 966 int iommu_set_pgtable_quirks(struct iommu_domain *domain, 967 unsigned long quirks); 968 969 void iommu_set_dma_strict(void); 970 971 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, 972 unsigned long iova, int flags); 973 974 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) 975 { 976 if (domain->ops->flush_iotlb_all) 977 domain->ops->flush_iotlb_all(domain); 978 } 979 980 static inline void iommu_iotlb_sync(struct iommu_domain *domain, 981 struct iommu_iotlb_gather *iotlb_gather) 982 { 983 if (domain->ops->iotlb_sync) 984 domain->ops->iotlb_sync(domain, iotlb_gather); 985 986 iommu_iotlb_gather_init(iotlb_gather); 987 } 988 989 /** 990 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint 991 * 992 * @gather: TLB gather data 993 * @iova: start of page to invalidate 994 * @size: size of page to invalidate 995 * 996 * Helper for IOMMU drivers to check whether a new range and the gathered range 997 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better 998 * than merging the two, which might lead to unnecessary invalidations. 999 */ 1000 static inline 1001 bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather, 1002 unsigned long iova, size_t size) 1003 { 1004 unsigned long start = iova, end = start + size - 1; 1005 1006 return gather->end != 0 && 1007 (end + 1 < gather->start || start > gather->end + 1); 1008 } 1009 1010 1011 /** 1012 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation 1013 * @gather: TLB gather data 1014 * @iova: start of page to invalidate 1015 * @size: size of page to invalidate 1016 * 1017 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands 1018 * where only the address range matters, and simply minimising intermediate 1019 * syncs is preferred. 1020 */ 1021 static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather, 1022 unsigned long iova, size_t size) 1023 { 1024 unsigned long end = iova + size - 1; 1025 1026 if (gather->start > iova) 1027 gather->start = iova; 1028 if (gather->end < end) 1029 gather->end = end; 1030 } 1031 1032 /** 1033 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation 1034 * @domain: IOMMU domain to be invalidated 1035 * @gather: TLB gather data 1036 * @iova: start of page to invalidate 1037 * @size: size of page to invalidate 1038 * 1039 * Helper for IOMMU drivers to build invalidation commands based on individual 1040 * pages, or with page size/table level hints which cannot be gathered if they 1041 * differ. 1042 */ 1043 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, 1044 struct iommu_iotlb_gather *gather, 1045 unsigned long iova, size_t size) 1046 { 1047 /* 1048 * If the new page is disjoint from the current range or is mapped at 1049 * a different granularity, then sync the TLB so that the gather 1050 * structure can be rewritten. 
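 *
 * A typical caller is a driver's ->unmap_pages() implementation, which clears
 * its page table entries and then records the unmapped range for a later
 * sync (sketch only; mydrv_clear_ptes() is hypothetical):
 *
 *	unmapped = mydrv_clear_ptes(domain, iova, pgsize, pgcount);
 *	iommu_iotlb_gather_add_page(domain, iotlb_gather, iova, unmapped);
 *	return unmapped;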
1051 */ 1052 if ((gather->pgsize && gather->pgsize != size) || 1053 iommu_iotlb_gather_is_disjoint(gather, iova, size)) 1054 iommu_iotlb_sync(domain, gather); 1055 1056 gather->pgsize = size; 1057 iommu_iotlb_gather_add_range(gather, iova, size); 1058 } 1059 1060 static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) 1061 { 1062 return gather && gather->queued; 1063 } 1064 1065 static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, 1066 struct iova_bitmap *bitmap, 1067 struct iommu_iotlb_gather *gather) 1068 { 1069 if (gather) 1070 iommu_iotlb_gather_init(gather); 1071 1072 dirty->bitmap = bitmap; 1073 dirty->gather = gather; 1074 } 1075 1076 static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, 1077 unsigned long iova, 1078 unsigned long length) 1079 { 1080 if (dirty->bitmap) 1081 iova_bitmap_set(dirty->bitmap, iova, length); 1082 1083 if (dirty->gather) 1084 iommu_iotlb_gather_add_range(dirty->gather, iova, length); 1085 } 1086 1087 /* PCI device grouping function */ 1088 extern struct iommu_group *pci_device_group(struct device *dev); 1089 /* Generic device grouping function */ 1090 extern struct iommu_group *generic_device_group(struct device *dev); 1091 /* FSL-MC device grouping function */ 1092 struct iommu_group *fsl_mc_device_group(struct device *dev); 1093 extern struct iommu_group *generic_single_device_group(struct device *dev); 1094 1095 /** 1096 * struct iommu_fwspec - per-device IOMMU instance data 1097 * @iommu_fwnode: firmware handle for this device's IOMMU 1098 * @flags: IOMMU_FWSPEC_* flags 1099 * @num_ids: number of associated device IDs 1100 * @ids: IDs which this device may present to the IOMMU 1101 * 1102 * Note that the IDs (and any other information, really) stored in this structure should be 1103 * considered private to the IOMMU device driver and are not to be used directly by IOMMU 1104 * consumers. 1105 */ 1106 struct iommu_fwspec { 1107 struct fwnode_handle *iommu_fwnode; 1108 u32 flags; 1109 unsigned int num_ids; 1110 u32 ids[]; 1111 }; 1112 1113 /* ATS is supported */ 1114 #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) 1115 /* CANWBS is supported */ 1116 #define IOMMU_FWSPEC_PCI_RC_CANWBS (1 << 1) 1117 1118 /* 1119 * An iommu attach handle represents a relationship between an iommu domain 1120 * and a PASID or RID of a device. It is allocated and managed by the component 1121 * that manages the domain and is stored in the iommu group during the time the 1122 * domain is attached. 
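 *
 * Components needing per-attachment state embed it, as struct iommu_sva below
 * does, and recover their own structure with container_of() ("struct
 * mydrv_attach" is a hypothetical example):
 *
 *	struct mydrv_attach {
 *		struct iommu_attach_handle handle;
 *		void *priv;
 *	};
 *
 *	attach = container_of(handle, struct mydrv_attach, handle);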
1123 */ 1124 struct iommu_attach_handle { 1125 struct iommu_domain *domain; 1126 }; 1127 1128 /** 1129 * struct iommu_sva - handle to a device-mm bond 1130 */ 1131 struct iommu_sva { 1132 struct iommu_attach_handle handle; 1133 struct device *dev; 1134 refcount_t users; 1135 }; 1136 1137 struct iommu_mm_data { 1138 u32 pasid; 1139 struct mm_struct *mm; 1140 struct list_head sva_domains; 1141 struct list_head mm_list_elm; 1142 }; 1143 1144 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode); 1145 int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids); 1146 1147 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) 1148 { 1149 if (dev->iommu) 1150 return dev->iommu->fwspec; 1151 else 1152 return NULL; 1153 } 1154 1155 static inline void dev_iommu_fwspec_set(struct device *dev, 1156 struct iommu_fwspec *fwspec) 1157 { 1158 dev->iommu->fwspec = fwspec; 1159 } 1160 1161 static inline void *dev_iommu_priv_get(struct device *dev) 1162 { 1163 if (dev->iommu) 1164 return dev->iommu->priv; 1165 else 1166 return NULL; 1167 } 1168 1169 void dev_iommu_priv_set(struct device *dev, void *priv); 1170 1171 extern struct mutex iommu_probe_device_lock; 1172 int iommu_probe_device(struct device *dev); 1173 1174 int iommu_device_use_default_domain(struct device *dev); 1175 void iommu_device_unuse_default_domain(struct device *dev); 1176 1177 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner); 1178 void iommu_group_release_dma_owner(struct iommu_group *group); 1179 bool iommu_group_dma_owner_claimed(struct iommu_group *group); 1180 1181 int iommu_device_claim_dma_owner(struct device *dev, void *owner); 1182 void iommu_device_release_dma_owner(struct device *dev); 1183 1184 int iommu_attach_device_pasid(struct iommu_domain *domain, 1185 struct device *dev, ioasid_t pasid, 1186 struct iommu_attach_handle *handle); 1187 void iommu_detach_device_pasid(struct iommu_domain *domain, 1188 struct device *dev, ioasid_t pasid); 1189 ioasid_t iommu_alloc_global_pasid(struct device *dev); 1190 void iommu_free_global_pasid(ioasid_t pasid); 1191 1192 /* PCI device reset functions */ 1193 int pci_dev_reset_iommu_prepare(struct pci_dev *pdev); 1194 void pci_dev_reset_iommu_done(struct pci_dev *pdev); 1195 #else /* CONFIG_IOMMU_API */ 1196 1197 struct iommu_ops {}; 1198 struct iommu_group {}; 1199 struct iommu_fwspec {}; 1200 struct iommu_device {}; 1201 struct iommu_fault_param {}; 1202 struct iommu_iotlb_gather {}; 1203 struct iommu_dirty_bitmap {}; 1204 struct iommu_dirty_ops {}; 1205 1206 static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap) 1207 { 1208 return false; 1209 } 1210 1211 static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, 1212 unsigned int flags) 1213 { 1214 return ERR_PTR(-ENODEV); 1215 } 1216 1217 static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) 1218 { 1219 return ERR_PTR(-ENODEV); 1220 } 1221 1222 static inline void iommu_domain_free(struct iommu_domain *domain) 1223 { 1224 } 1225 1226 static inline int iommu_attach_device(struct iommu_domain *domain, 1227 struct device *dev) 1228 { 1229 return -ENODEV; 1230 } 1231 1232 static inline void iommu_detach_device(struct iommu_domain *domain, 1233 struct device *dev) 1234 { 1235 } 1236 1237 static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 1238 { 1239 return NULL; 1240 } 1241 1242 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, 
1243 phys_addr_t paddr, size_t size, int prot, gfp_t gfp) 1244 { 1245 return -ENODEV; 1246 } 1247 1248 static inline size_t iommu_unmap(struct iommu_domain *domain, 1249 unsigned long iova, size_t size) 1250 { 1251 return 0; 1252 } 1253 1254 static inline size_t iommu_unmap_fast(struct iommu_domain *domain, 1255 unsigned long iova, int gfp_order, 1256 struct iommu_iotlb_gather *iotlb_gather) 1257 { 1258 return 0; 1259 } 1260 1261 static inline ssize_t iommu_map_sg(struct iommu_domain *domain, 1262 unsigned long iova, struct scatterlist *sg, 1263 unsigned int nents, int prot, gfp_t gfp) 1264 { 1265 return -ENODEV; 1266 } 1267 1268 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) 1269 { 1270 } 1271 1272 static inline void iommu_iotlb_sync(struct iommu_domain *domain, 1273 struct iommu_iotlb_gather *iotlb_gather) 1274 { 1275 } 1276 1277 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 1278 { 1279 return 0; 1280 } 1281 1282 static inline void iommu_set_fault_handler(struct iommu_domain *domain, 1283 iommu_fault_handler_t handler, void *token) 1284 { 1285 } 1286 1287 static inline void iommu_get_resv_regions(struct device *dev, 1288 struct list_head *list) 1289 { 1290 } 1291 1292 static inline void iommu_put_resv_regions(struct device *dev, 1293 struct list_head *list) 1294 { 1295 } 1296 1297 static inline int iommu_get_group_resv_regions(struct iommu_group *group, 1298 struct list_head *head) 1299 { 1300 return -ENODEV; 1301 } 1302 1303 static inline void iommu_set_default_passthrough(bool cmd_line) 1304 { 1305 } 1306 1307 static inline void iommu_set_default_translated(bool cmd_line) 1308 { 1309 } 1310 1311 static inline bool iommu_default_passthrough(void) 1312 { 1313 return true; 1314 } 1315 1316 static inline int iommu_attach_group(struct iommu_domain *domain, 1317 struct iommu_group *group) 1318 { 1319 return -ENODEV; 1320 } 1321 1322 static inline void iommu_detach_group(struct iommu_domain *domain, 1323 struct iommu_group *group) 1324 { 1325 } 1326 1327 static inline struct iommu_group *iommu_group_alloc(void) 1328 { 1329 return ERR_PTR(-ENODEV); 1330 } 1331 1332 static inline void *iommu_group_get_iommudata(struct iommu_group *group) 1333 { 1334 return NULL; 1335 } 1336 1337 static inline void iommu_group_set_iommudata(struct iommu_group *group, 1338 void *iommu_data, 1339 void (*release)(void *iommu_data)) 1340 { 1341 } 1342 1343 static inline int iommu_group_set_name(struct iommu_group *group, 1344 const char *name) 1345 { 1346 return -ENODEV; 1347 } 1348 1349 static inline int iommu_group_add_device(struct iommu_group *group, 1350 struct device *dev) 1351 { 1352 return -ENODEV; 1353 } 1354 1355 static inline void iommu_group_remove_device(struct device *dev) 1356 { 1357 } 1358 1359 static inline int iommu_group_for_each_dev(struct iommu_group *group, 1360 void *data, 1361 int (*fn)(struct device *, void *)) 1362 { 1363 return -ENODEV; 1364 } 1365 1366 static inline struct iommu_group *iommu_group_get(struct device *dev) 1367 { 1368 return NULL; 1369 } 1370 1371 static inline void iommu_group_put(struct iommu_group *group) 1372 { 1373 } 1374 1375 static inline int iommu_group_id(struct iommu_group *group) 1376 { 1377 return -ENODEV; 1378 } 1379 1380 static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain, 1381 unsigned long quirks) 1382 { 1383 return 0; 1384 } 1385 1386 static inline int iommu_device_register(struct iommu_device *iommu, 1387 const struct iommu_ops *ops, 1388 struct device *hwdev) 1389 { 
1390 return -ENODEV; 1391 } 1392 1393 static inline struct iommu_device *dev_to_iommu_device(struct device *dev) 1394 { 1395 return NULL; 1396 } 1397 1398 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) 1399 { 1400 } 1401 1402 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, 1403 struct iommu_iotlb_gather *gather, 1404 unsigned long iova, size_t size) 1405 { 1406 } 1407 1408 static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) 1409 { 1410 return false; 1411 } 1412 1413 static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, 1414 struct iova_bitmap *bitmap, 1415 struct iommu_iotlb_gather *gather) 1416 { 1417 } 1418 1419 static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, 1420 unsigned long iova, 1421 unsigned long length) 1422 { 1423 } 1424 1425 static inline void iommu_device_unregister(struct iommu_device *iommu) 1426 { 1427 } 1428 1429 static inline int iommu_device_sysfs_add(struct iommu_device *iommu, 1430 struct device *parent, 1431 const struct attribute_group **groups, 1432 const char *fmt, ...) 1433 { 1434 return -ENODEV; 1435 } 1436 1437 static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) 1438 { 1439 } 1440 1441 static inline int iommu_device_link(struct device *dev, struct device *link) 1442 { 1443 return -EINVAL; 1444 } 1445 1446 static inline void iommu_device_unlink(struct device *dev, struct device *link) 1447 { 1448 } 1449 1450 static inline int iommu_fwspec_init(struct device *dev, 1451 struct fwnode_handle *iommu_fwnode) 1452 { 1453 return -ENODEV; 1454 } 1455 1456 static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, 1457 int num_ids) 1458 { 1459 return -ENODEV; 1460 } 1461 1462 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) 1463 { 1464 return NULL; 1465 } 1466 1467 static inline int iommu_device_use_default_domain(struct device *dev) 1468 { 1469 return 0; 1470 } 1471 1472 static inline void iommu_device_unuse_default_domain(struct device *dev) 1473 { 1474 } 1475 1476 static inline int 1477 iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) 1478 { 1479 return -ENODEV; 1480 } 1481 1482 static inline void iommu_group_release_dma_owner(struct iommu_group *group) 1483 { 1484 } 1485 1486 static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group) 1487 { 1488 return false; 1489 } 1490 1491 static inline void iommu_device_release_dma_owner(struct device *dev) 1492 { 1493 } 1494 1495 static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner) 1496 { 1497 return -ENODEV; 1498 } 1499 1500 static inline int iommu_attach_device_pasid(struct iommu_domain *domain, 1501 struct device *dev, ioasid_t pasid, 1502 struct iommu_attach_handle *handle) 1503 { 1504 return -ENODEV; 1505 } 1506 1507 static inline void iommu_detach_device_pasid(struct iommu_domain *domain, 1508 struct device *dev, ioasid_t pasid) 1509 { 1510 } 1511 1512 static inline ioasid_t iommu_alloc_global_pasid(struct device *dev) 1513 { 1514 return IOMMU_PASID_INVALID; 1515 } 1516 1517 static inline void iommu_free_global_pasid(ioasid_t pasid) {} 1518 1519 static inline int pci_dev_reset_iommu_prepare(struct pci_dev *pdev) 1520 { 1521 return 0; 1522 } 1523 1524 static inline void pci_dev_reset_iommu_done(struct pci_dev *pdev) 1525 { 1526 } 1527 #endif /* CONFIG_IOMMU_API */ 1528 1529 #ifdef CONFIG_IRQ_MSI_IOMMU 1530 #ifdef CONFIG_IOMMU_API 1531 int iommu_dma_prepare_msi(struct 
msi_desc *desc, phys_addr_t msi_addr); 1532 #else 1533 static inline int iommu_dma_prepare_msi(struct msi_desc *desc, 1534 phys_addr_t msi_addr) 1535 { 1536 return 0; 1537 } 1538 #endif /* CONFIG_IOMMU_API */ 1539 #endif /* CONFIG_IRQ_MSI_IOMMU */ 1540 1541 #if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API) 1542 void iommu_group_mutex_assert(struct device *dev); 1543 #else 1544 static inline void iommu_group_mutex_assert(struct device *dev) 1545 { 1546 } 1547 #endif 1548 1549 /** 1550 * iommu_map_sgtable - Map the given buffer to the IOMMU domain 1551 * @domain: The IOMMU domain to perform the mapping 1552 * @iova: The start address to map the buffer 1553 * @sgt: The sg_table object describing the buffer 1554 * @prot: IOMMU protection bits 1555 * 1556 * Creates a mapping at @iova for the buffer described by a scatterlist 1557 * stored in the given sg_table object in the provided IOMMU domain. 1558 */ 1559 static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain, 1560 unsigned long iova, struct sg_table *sgt, int prot) 1561 { 1562 return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot, 1563 GFP_KERNEL); 1564 } 1565 1566 #ifdef CONFIG_IOMMU_DEBUGFS 1567 extern struct dentry *iommu_debugfs_dir; 1568 void iommu_debugfs_setup(void); 1569 #else 1570 static inline void iommu_debugfs_setup(void) {} 1571 #endif 1572 1573 #ifdef CONFIG_IOMMU_DMA 1574 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); 1575 #else /* CONFIG_IOMMU_DMA */ 1576 static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) 1577 { 1578 return -ENODEV; 1579 } 1580 #endif /* CONFIG_IOMMU_DMA */ 1581 1582 /* 1583 * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into 1584 * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents 1585 * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals. 1586 */ 1587 #define TEGRA_STREAM_ID_BYPASS 0x7f 1588 1589 static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id) 1590 { 1591 #ifdef CONFIG_IOMMU_API 1592 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 1593 1594 if (fwspec && fwspec->num_ids == 1) { 1595 *stream_id = fwspec->ids[0] & 0xffff; 1596 return true; 1597 } 1598 #endif 1599 1600 return false; 1601 } 1602 1603 #ifdef CONFIG_IOMMU_MM_DATA 1604 static inline void mm_pasid_init(struct mm_struct *mm) 1605 { 1606 /* 1607 * During dup_mm(), a new mm will be memcpy'd from an old one and that makes 1608 * the new mm and the old one point to a same iommu_mm instance. When either 1609 * one of the two mms gets released, the iommu_mm instance is freed, leaving 1610 * the other mm running into a use-after-free/double-free problem. To avoid 1611 * the problem, zeroing the iommu_mm pointer of a new mm is needed here. 
1612 */ 1613 mm->iommu_mm = NULL; 1614 } 1615 1616 static inline bool mm_valid_pasid(struct mm_struct *mm) 1617 { 1618 return READ_ONCE(mm->iommu_mm); 1619 } 1620 1621 static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) 1622 { 1623 struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm); 1624 1625 if (!iommu_mm) 1626 return IOMMU_PASID_INVALID; 1627 return iommu_mm->pasid; 1628 } 1629 1630 void mm_pasid_drop(struct mm_struct *mm); 1631 struct iommu_sva *iommu_sva_bind_device(struct device *dev, 1632 struct mm_struct *mm); 1633 void iommu_sva_unbind_device(struct iommu_sva *handle); 1634 u32 iommu_sva_get_pasid(struct iommu_sva *handle); 1635 void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end); 1636 #else 1637 static inline struct iommu_sva * 1638 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) 1639 { 1640 return ERR_PTR(-ENODEV); 1641 } 1642 1643 static inline void iommu_sva_unbind_device(struct iommu_sva *handle) 1644 { 1645 } 1646 1647 static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) 1648 { 1649 return IOMMU_PASID_INVALID; 1650 } 1651 static inline void mm_pasid_init(struct mm_struct *mm) {} 1652 static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; } 1653 1654 static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) 1655 { 1656 return IOMMU_PASID_INVALID; 1657 } 1658 1659 static inline void mm_pasid_drop(struct mm_struct *mm) {} 1660 static inline void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end) {} 1661 #endif /* CONFIG_IOMMU_SVA */ 1662 1663 #ifdef CONFIG_IOMMU_IOPF 1664 int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev); 1665 void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev); 1666 int iopf_queue_flush_dev(struct device *dev); 1667 struct iopf_queue *iopf_queue_alloc(const char *name); 1668 void iopf_queue_free(struct iopf_queue *queue); 1669 int iopf_queue_discard_partial(struct iopf_queue *queue); 1670 void iopf_free_group(struct iopf_group *group); 1671 int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt); 1672 void iopf_group_response(struct iopf_group *group, 1673 enum iommu_page_response_code status); 1674 #else 1675 static inline int 1676 iopf_queue_add_device(struct iopf_queue *queue, struct device *dev) 1677 { 1678 return -ENODEV; 1679 } 1680 1681 static inline void 1682 iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev) 1683 { 1684 } 1685 1686 static inline int iopf_queue_flush_dev(struct device *dev) 1687 { 1688 return -ENODEV; 1689 } 1690 1691 static inline struct iopf_queue *iopf_queue_alloc(const char *name) 1692 { 1693 return NULL; 1694 } 1695 1696 static inline void iopf_queue_free(struct iopf_queue *queue) 1697 { 1698 } 1699 1700 static inline int iopf_queue_discard_partial(struct iopf_queue *queue) 1701 { 1702 return -ENODEV; 1703 } 1704 1705 static inline void iopf_free_group(struct iopf_group *group) 1706 { 1707 } 1708 1709 static inline int 1710 iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) 1711 { 1712 return -ENODEV; 1713 } 1714 1715 static inline void iopf_group_response(struct iopf_group *group, 1716 enum iommu_page_response_code status) 1717 { 1718 } 1719 #endif /* CONFIG_IOMMU_IOPF */ 1720 #endif /* __LINUX_IOMMU_H */ 1721