/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 */

#ifndef _ASM_IOMMU_H
#define _ASM_IOMMU_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/pci-bridge.h>
#include <asm/asm-const.h>

#define IOMMU_PAGE_SHIFT_4K	12
#define IOMMU_PAGE_SIZE_4K	(ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K	(~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)

#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))

#define DIRECT64_PROPNAME	"linux,direct64-ddr-window-info"
#define DMA64_PROPNAME		"linux,dma64-ddr-window-info"

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;

struct iommu_table_ops {
	/*
	 * When called with direction==DMA_NONE, this is equivalent
	 * to clear(). uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges the existing TCE with a new TCE plus direction bits;
	 * returns the old TCE and DMA direction mask.
	 * @hpa is a physical address.
	 */
	int (*xchg_no_kill)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);

	void (*tce_kill)(struct iommu_table *tbl,
			unsigned long index,
			unsigned long pages);

	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	void (*flush)(struct iommu_table *tbl);
	void (*free)(struct iommu_table *tbl);
};

/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;
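
/*
 * Illustrative sketch (not part of this header): a platform backend
 * supplies an iommu_table_ops instance wired to its own TCE helpers.
 * The my_plat_*() names below are hypothetical placeholders, not
 * kernel API:
 *
 *	static struct iommu_table_ops my_plat_iommu_ops = {
 *		.set	= my_plat_tce_build,
 *		.clear	= my_plat_tce_free,
 *		.get	= my_plat_tce_get,
 *		.flush	= my_plat_tce_flush,
 *	};
 */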

/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * allows up to 2**12 pages (4096 * 4096) = 16 MB
 */
#define IOMAP_MAX_ORDER		13

#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

struct iommu_pool {
	unsigned long start;
	unsigned long end;
	unsigned long hint;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct iommu_table {
	unsigned long  it_busno;	/* Bus number this table belongs to */
	unsigned long  it_size;		/* Size of iommu table in entries */
	unsigned long  it_indirect_levels;
	unsigned long  it_level_size;
	unsigned long  it_allocated_size;
	unsigned long  it_offset;	/* Offset into global table */
	unsigned long  it_base;		/* mapped address of tce table */
	unsigned long  it_index;	/* which iommu table this is */
	unsigned long  it_type;		/* type: PCI or Virtual Bus */
	unsigned long  it_blocksize;	/* Entries in each block (cacheline) */
	unsigned long  poolsize;
	unsigned long  nr_pools;
	struct iommu_pool large_pool;
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;		/* A simple allocation bitmap for now */
	unsigned long  it_page_shift;	/* table iommu page size */
	struct list_head it_group_list;	/* List of iommu_table_group_link */
	__be64 *it_userspace;		/* userspace view of the table */
	struct iommu_table_ops *it_ops;
	struct kref    it_kref;
	int it_nid;
	unsigned long it_reserved_start; /* Start of not-DMA-able (MMIO) area */
	unsigned long it_reserved_end;
};

#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), true))

/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
}
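
/*
 * Illustrative sketch: converting a byte size into IOMMU pages for a
 * given table using the helpers above.  Assumes "tbl" points to an
 * initialized iommu_table; variable names are placeholders:
 *
 *	unsigned long bytes  = IOMMU_PAGE_ALIGN(size, tbl);
 *	unsigned long npages = bytes >> tbl->it_page_shift;
 *	int order = get_iommu_order(size, tbl);
 */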

struct scatterlist;

#ifdef CONFIG_PPC64

static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}

static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}

extern int dma_iommu_dma_supported(struct device *dev, u64 mask);

extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
extern int iommu_tce_table_put(struct iommu_table *tbl);

/* Initializes an iommu_table based on values set in the passed-in
 * structure
 */
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
		int nid, unsigned long res_start, unsigned long res_end);
bool iommu_table_in_use(struct iommu_table *tbl);

#define IOMMU_TABLE_GROUP_MAX_TABLES	2

struct iommu_table_group;

struct iommu_table_group_ops {
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Switch ownership from platform code to external user (e.g. VFIO) */
	long (*take_ownership)(struct iommu_table_group *table_group);
	/* Switch ownership from external user (e.g. VFIO) back to core */
	void (*release_ownership)(struct iommu_table_group *table_group);
};

struct iommu_table_group_link {
	struct list_head next;
	struct rcu_head rcu;
	struct iommu_table_group *table_group;
};

struct iommu_table_group {
	/* IOMMU properties */
	__u32 tce32_start;
	__u32 tce32_size;
	__u64 pgsizes;		/* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;

	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;
};

#ifdef CONFIG_IOMMU_API

extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev);
extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);

extern struct iommu_table_group_ops spapr_tce_table_group_ops;
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev)
{
	return 0;
}
#endif /* !CONFIG_IOMMU_API */

u64 dma_iommu_get_required_mask(struct device *dev);
#else

static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}

static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

#endif /* CONFIG_PPC64 */

extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    struct scatterlist *sglist, int nelems,
			    unsigned long mask,
			    enum dma_data_direction direction,
			    unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
			       struct scatterlist *sglist,
			       int nelems,
			       enum dma_data_direction direction,
			       unsigned long attrs);

extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
				  size_t size, dma_addr_t *dma_handle,
				  unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
				void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
				 struct page *page, unsigned long offset,
				 size_t size, unsigned long mask,
				 enum dma_data_direction direction,
				 unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction,
			     unsigned long attrs);

void __init iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
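
/*
 * Illustrative sketch: mapping and unmapping a single page through the
 * helpers above.  "dev", "tbl" and "page" are placeholders supplied by
 * the caller; this is not a verbatim kernel sequence:
 *
 *	dma_addr_t dma = iommu_map_page(dev, tbl, page, 0, PAGE_SIZE,
 *					dev->coherent_dma_mask,
 *					DMA_TO_DEVICE, 0);
 *	if (dma == DMA_MAPPING_ERROR)
 *		return -EIO;
 *	...
 *	iommu_unmap_page(tbl, dma, PAGE_SIZE, DMA_TO_DEVICE, 0);
 */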

#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif

/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

extern void iommu_flush_tce(struct iommu_table *tbl);

extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);

#ifdef CONFIG_PPC_CELL_NATIVE
extern bool iommu_fixed_is_weak;
#else
#define iommu_fixed_is_weak false
#endif

extern const struct dma_map_ops dma_iommu_ops;

#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */