/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _ASM_IOMMU_H
#define _ASM_IOMMU_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/pci-bridge.h>
#include <asm/asm-const.h>

#define IOMMU_PAGE_SHIFT_4K	12
#define IOMMU_PAGE_SIZE_4K	(ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K	(~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)

#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;

struct iommu_table_ops {
	/*
	 * When called with direction==DMA_NONE, it is equal to clear().
	 * uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges the existing TCE with a new TCE plus direction bits;
	 * the old TCE and DMA direction mask are returned through *hpa
	 * and *direction.  *hpa is a physical address.
	 */
	int (*exchange)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);
	/* Real mode */
	int (*exchange_rm)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);

	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	void (*flush)(struct iommu_table *tbl);
	void (*free)(struct iommu_table *tbl);
};

/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;
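/*
 * Illustrative sketch only (not part of this header's API; the helper
 * name is hypothetical): how a caller might use the fixed
 * IOMMU_PAGE_*_4K constants above to count the 4K IOMMU pages spanned
 * by a buffer.
 */
static inline unsigned long iommu_example_nr_pages_4k(unsigned long uaddr,
						      unsigned long size)
{
	unsigned long first, last;

	if (!size)
		return 0;

	first = uaddr >> IOMMU_PAGE_SHIFT_4K;
	last = (uaddr + size - 1) >> IOMMU_PAGE_SHIFT_4K;

	/* Number of 4K IOMMU pages touched, inclusive of both ends */
	return last - first + 1;
}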
/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * allows up to 2**12 = 4096 pages of 4096 bytes each, i.e. 16 MB.
 */
#define IOMAP_MAX_ORDER		13

#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

struct iommu_pool {
	unsigned long start;
	unsigned long end;
	unsigned long hint;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct iommu_table {
	unsigned long  it_busno;	/* Bus number this table belongs to */
	unsigned long  it_size;		/* Size of iommu table in entries */
	unsigned long  it_indirect_levels;
	unsigned long  it_level_size;
	unsigned long  it_allocated_size;
	unsigned long  it_offset;	/* Offset into global table */
	unsigned long  it_base;		/* mapped address of tce table */
	unsigned long  it_index;	/* which iommu table this is */
	unsigned long  it_type;		/* type: PCI or Virtual Bus */
	unsigned long  it_blocksize;	/* Entries in each block (cacheline) */
	unsigned long  poolsize;
	unsigned long  nr_pools;
	struct iommu_pool large_pool;
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;		/* A simple allocation bitmap for now */
	unsigned long  it_page_shift;	/* table iommu page size */
	struct list_head it_group_list;	/* List of iommu_table_group_link */
	__be64 *it_userspace;		/* userspace view of the table */
	struct iommu_table_ops *it_ops;
	struct kref    it_kref;
	int it_nid;
};

#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), true))

/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
}

struct scatterlist;

#ifdef CONFIG_PPC64

#define IOMMU_MAPPING_ERROR		(~(dma_addr_t)0x0)

static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}

static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}

extern int dma_iommu_dma_supported(struct device *dev, u64 mask);

extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
extern int iommu_tce_table_put(struct iommu_table *tbl);

/* Initializes an iommu_table based on values set in the passed-in
 * structure
 */
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
					    int nid);
#define IOMMU_TABLE_GROUP_MAX_TABLES	2

struct iommu_table_group;
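/*
 * Illustrative sketch only (not part of this header's API; the helper
 * name is hypothetical): how a table entry index relates to the bus
 * address it maps, assuming the usual convention that it_offset is the
 * DMA window offset counted in IOMMU pages of 1 << it_page_shift bytes.
 */
static inline dma_addr_t iommu_example_entry_to_bus(struct iommu_table *tbl,
						    unsigned long entry)
{
	/* Entry number plus window offset, scaled to bytes */
	return (dma_addr_t)(tbl->it_offset + entry) << tbl->it_page_shift;
}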
struct iommu_table_group_ops {
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Switch ownership from platform code to external user (e.g. VFIO) */
	void (*take_ownership)(struct iommu_table_group *table_group);
	/* Switch ownership from external user (e.g. VFIO) back to core */
	void (*release_ownership)(struct iommu_table_group *table_group);
};

struct iommu_table_group_link {
	struct list_head next;
	struct rcu_head rcu;
	struct iommu_table_group *table_group;
};

struct iommu_table_group {
	/* IOMMU properties */
	__u32 tce32_start;
	__u32 tce32_size;
	__u64 pgsizes; /* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;

	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;
};

#ifdef CONFIG_IOMMU_API

extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct device *dev);
extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
		unsigned long *hpa, enum dma_data_direction *direction);
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct device *dev)
{
	return 0;
}

static inline void iommu_del_device(struct device *dev)
{
}

static inline int __init tce_iommu_bus_notifier_init(void)
{
	return 0;
}
#endif /* !CONFIG_IOMMU_API */

int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);

#else

static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}

static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

#endif /* CONFIG_PPC64 */

extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    struct scatterlist *sglist, int nelems,
			    unsigned long mask,
			    enum dma_data_direction direction,
			    unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
			       struct scatterlist *sglist,
			       int nelems,
			       enum dma_data_direction direction,
			       unsigned long attrs);

extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
				  size_t size, dma_addr_t *dma_handle,
				  unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
				void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
				 struct page *page, unsigned long offset,
				 size_t size, unsigned long mask,
				 enum dma_data_direction direction,
				 unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction,
			     unsigned long attrs);

extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);

#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_save(void)
{
	if (ppc_md.iommu_save)
		ppc_md.iommu_save();
}

static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif
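/*
 * Illustrative sketch only (not part of this header's API; the helper
 * name is hypothetical): the usual pairing of the scatter/gather helpers
 * declared above.  The all-ones mask and DMA_TO_DEVICE direction are
 * arbitrary example values, and treating a 0 return from
 * ppc_iommu_map_sg() as failure is an assumption of this sketch.
 */
static inline int iommu_example_map_then_unmap_sg(struct device *dev,
						  struct iommu_table *tbl,
						  struct scatterlist *sgl,
						  int nelems)
{
	int mapped;

	mapped = ppc_iommu_map_sg(dev, tbl, sgl, nelems, ~0UL,
				  DMA_TO_DEVICE, 0);
	if (!mapped)
		return 0;

	/* ... the device would consume the mapped list here ... */

	/* Unmap with the same nelems that was passed to the map call */
	ppc_iommu_unmap_sg(tbl, sgl, nelems, DMA_TO_DEVICE, 0);
	return mapped;
}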
/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
extern void iommu_release_ownership(struct iommu_table *tbl);

extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);

#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */
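/*
 * Illustrative sketch only, kept as a comment so nothing is compiled:
 * the ownership handshake an external user such as VFIO is expected to
 * follow around exclusive use of a table.
 *
 *	if (iommu_take_ownership(tbl))	// detach table from kernel DMA use
 *		return ret;		// propagate the error and give up
 *	// ... program TCEs on behalf of userspace via it_ops->exchange ...
 *	iommu_release_ownership(tbl);	// hand the table back to the core
 *
 * The exact error returned by iommu_take_ownership() when the table is
 * still in use is not assumed here; callers should simply propagate it.
 */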