/*
 * powerpc IOMMU (TCE table) definitions and DMA-mapping API.
 *
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _ASM_IOMMU_H
#define _ASM_IOMMU_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>

/* Fixed 4K IOMMU page geometry (the historical default page size). */
#define IOMMU_PAGE_SHIFT_4K      12
#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)

/*
 * Per-table IOMMU page geometry, derived from tblptr->it_page_shift,
 * for tables whose page size differs from the 4K default.
 */
#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;

/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * allows up to 2**12 pages (4096 * 4096) = 16 MB
 */
#define IOMAP_MAX_ORDER		13

/* The table's allocation bitmap is hashed into 2^IOMMU_POOL_HASHBITS pools. */
#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

/*
 * One allocation region within an iommu_table.  Splitting the table's
 * bitmap into several pools, each with its own lock, lets concurrent
 * allocators contend on different locks.
 */
struct iommu_pool {
	unsigned long start;	/* first table entry belonging to this pool */
	unsigned long end;	/* end of this pool's entry range */
	unsigned long hint;	/* where the next search should start */
	spinlock_t lock;	/* serializes allocation within this pool */
} ____cacheline_aligned_in_smp;

/*
 * One TCE (translation control entry) table: the per-bus/per-device
 * DMA address translation table plus its allocation state.
 */
struct iommu_table {
	unsigned long it_busno;	/* Bus number this table belongs to */
	unsigned long it_size;	/* Size of iommu table in entries */
	unsigned long it_offset;	/* Offset into global table */
	unsigned long it_base;	/* mapped address of tce table */
	unsigned long it_index;	/* which iommu table this is */
	unsigned long it_type;	/* type: PCI or Virtual Bus */
	unsigned long it_blocksize;	/* Entries in each block (cacheline) */
	unsigned long poolsize;	/* entries covered by each pool */
	unsigned long nr_pools;	/* number of pools in use */
	struct iommu_pool large_pool;	/* pool used for larger allocations */
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;	/* A simple allocation bitmap for now */
	unsigned long it_page_shift;	/* table iommu page size */
#ifdef CONFIG_IOMMU_API
	struct iommu_group *it_group;	/* IOMMU group exposed to VFIO */
#endif
	/* toggle DMA bypass for this table — presumably a platform-provided
	 * direct-window switch; confirm against the platform code */
	void (*set_bypass)(struct iommu_table *tbl, bool enable);
};

/*
 * Pure 2^n version of get_order: allocation order for @size bytes in
 * units of the table's IOMMU page size (rounded up).
 */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
}


struct scatterlist;

/* Stash the device's iommu table pointer in its archdata. */
static inline void set_iommu_table_base(struct device *dev, void *base)
{
	dev->archdata.dma_data.iommu_table_base = base;
}

/* Retrieve the iommu table pointer previously stored in archdata. */
static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.dma_data.iommu_table_base;
}

/* Frees table for an individual device node */
extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);

/* Initializes an iommu_table based in values set in the passed-in
 * structure
 */
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
					    int nid);
#ifdef CONFIG_IOMMU_API
extern void iommu_register_group(struct iommu_table *tbl,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct device *dev);
extern void iommu_del_device(struct device *dev);
#else
/* No-op stubs when the generic IOMMU API is compiled out. */
static inline void iommu_register_group(struct iommu_table *tbl,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct device *dev)
{
	return 0;
}

static inline void iommu_del_device(struct device *dev)
{
}
#endif /* !CONFIG_IOMMU_API */

/* Set the device's table base and attach it to its IOMMU group (if any). */
static inline void set_iommu_table_base_and_group(struct device *dev,
						  void *base)
{
	set_iommu_table_base(dev, base);
	iommu_add_device(dev);
}

/* Scatter/gather map/unmap through the given TCE table. */
extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    struct scatterlist *sglist, int nelems,
			    unsigned long mask,
			    enum dma_data_direction direction,
			    struct dma_attrs *attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
			       struct scatterlist *sglist,
			       int nelems,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs);

/* Coherent (consistent) allocation and single-page mapping helpers. */
extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
				  size_t size, dma_addr_t *dma_handle,
				  unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
				void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
				 struct page *page, unsigned long offset,
				 size_t size, unsigned long mask,
				 enum dma_data_direction direction,
				 struct dma_attrs *attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction,
			     struct dma_attrs *attrs);

/* Early (boot-time) platform IOMMU initialization entry points. */
extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(void);
extern void iommu_init_early_pasemi(void);

extern void alloc_dart_table(void);
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
/*
 * Power-management suspend/resume hooks: forward to the platform's
 * machdep callbacks when provided, otherwise do nothing.
 */
static inline void iommu_save(void)
{
	if (ppc_md.iommu_save)
		ppc_md.iommu_save();
}

static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif

/* The API to support IOMMU operations for VFIO */
/* Validate a clear request (ioba/npages/tce_value) before touching TCEs. */
extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages);
/* Validate a put request (ioba/tce) before building the entry. */
extern int iommu_tce_put_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce);
/* Install a single TCE at @entry pointing at @hwaddr. */
extern int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
		unsigned long hwaddr, enum dma_data_direction direction);
/* Clear a single TCE; returns the previous entry value. */
extern unsigned long iommu_clear_tce(struct iommu_table *tbl,
		unsigned long entry);
/* Clear a range of TCEs and release the pages they referenced. */
extern int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
/* Build a TCE from a userspace-supplied value (VFIO path). */
extern int iommu_put_tce_user_mode(struct iommu_table *tbl,
		unsigned long entry, unsigned long tce);

extern void iommu_flush_tce(struct iommu_table *tbl);
/* Transfer table ownership between the kernel and an external driver
 * (e.g. VFIO); release returns it to normal kernel DMA use. */
extern int iommu_take_ownership(struct iommu_table *tbl);
extern void iommu_release_ownership(struct iommu_table *tbl);

/* Decode the DMA direction encoded in a TCE's permission bits. */
extern enum dma_data_direction iommu_tce_direction(unsigned long tce);

#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */