1 /* 2 * Copyright (C) 2005-2007, PA Semi, Inc 3 * 4 * Maintained by: Olof Johansson <olof@lixom.net> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20 #undef DEBUG 21 22 #include <linux/types.h> 23 #include <linux/spinlock.h> 24 #include <linux/pci.h> 25 #include <asm/iommu.h> 26 #include <asm/machdep.h> 27 #include <asm/abs_addr.h> 28 29 30 #define IOBMAP_PAGE_SHIFT 12 31 #define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT) 32 #define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1) 33 34 #define IOBMAP_PAGE_FACTOR (PAGE_SHIFT - IOBMAP_PAGE_SHIFT) 35 36 #define IOB_BASE 0xe0000000 37 #define IOB_SIZE 0x3000 38 /* Configuration registers */ 39 #define IOBCAP_REG 0x10 40 #define IOBCOM_REG 0x40 41 /* Enable IOB address translation */ 42 #define IOBCOM_ATEN 0x00000100 43 44 /* Address decode configuration register */ 45 #define IOB_AD_REG 0x53 46 /* IOBCOM_AD_REG fields */ 47 #define IOB_AD_VGPRT 0x00000e00 48 #define IOB_AD_VGAEN 0x00000100 49 /* Direct mapping settings */ 50 #define IOB_AD_MPSEL_MASK 0x00000030 51 #define IOB_AD_MPSEL_B38 0x00000000 52 #define IOB_AD_MPSEL_B40 0x00000010 53 #define IOB_AD_MPSEL_B42 0x00000020 54 /* Translation window size / enable */ 55 #define IOB_AD_TRNG_MASK 0x00000003 56 #define IOB_AD_TRNG_256M 0x00000000 57 #define IOB_AD_TRNG_2G 0x00000001 58 #define IOB_AD_TRNG_128G 0x00000003 59 60 #define IOB_TABLEBASE_REG 0x55 61 
/* Base of the 64 4-byte L1 registers */
#define IOB_XLT_L1_REGBASE	0xac0

/* Register to invalidate TLB entries */
#define IOB_AT_INVAL_TLB_REG	0xb40

/* The top two bits of the level 1 entry contains valid and type flags */
#define IOBMAP_L1E_V		0x40000000
#define IOBMAP_L1E_V_B		0x80000000

/* For big page entries, the bottom two bits contains flags */
#define IOBMAP_L1E_BIG_CACHED	0x00000002
#define IOBMAP_L1E_BIG_PRIORITY	0x00000001

/* For regular level 2 entries, top 2 bits contain valid and cache flags */
#define IOBMAP_L2E_V		0x80000000
#define IOBMAP_L2E_V_CACHED	0xc0000000

/* ioremap'ed IOB register block.  Note: declared u32 *, so the *_REG
 * offsets above are word (4-byte) offsets, not byte offsets. */
static u32 *iob;
/* "Empty" L1 entry value (0 = invalid); set in iob_init() */
static u32 iob_l1_emptyval;
/* "Empty" L2 entry value: a valid mapping of the dummy spare page
 * allocated in iob_init(), so stray DMA hits harmless memory */
static u32 iob_l2_emptyval;
/* Virtual address of the contiguous L2 table (allocated in alloc_iobmap_l2) */
static u32 *iob_l2_base;

static struct iommu_table iommu_table_iobmap;
static int iommu_table_iobmap_inited;

/*
 * iobmap_build - ppc_md.tce_build hook: map @npages kernel pages starting
 * at kernel virtual address @uaddr into the L2 table at entry @index.
 *
 * @index and @npages arrive in kernel-page units and are rescaled by
 * IOBMAP_PAGE_FACTOR into 4K IOB-page units before walking the table.
 * @direction is unused here; entries are always installed the same way.
 */
static void iobmap_build(struct iommu_table *tbl, long index,
			 long npages, unsigned long uaddr,
			 enum dma_data_direction direction)
{
	u32 *ip;
	u32 rpn;
	unsigned long bus_addr;

	pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);

	/* Bus address of the first page; computed before index is rescaled */
	bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;

	/* Rescale from kernel pages to 4K IOB pages */
	npages <<= IOBMAP_PAGE_FACTOR;
	index <<= IOBMAP_PAGE_FACTOR;

	ip = ((u32 *)tbl->it_base) + index;

	while (npages--) {
		/* Real (absolute) page number of the backing memory */
		rpn = virt_to_abs(uaddr) >> IOBMAP_PAGE_SHIFT;

		*(ip++) = IOBMAP_L2E_V | rpn;
		/* invalidate tlb, can be optimized more */
		/* NOTE(review): the invalidate register is fed bus_addr >> 14;
		 * presumably one TLB tag covers 16K -- confirm with IOB docs. */
		out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);

		uaddr += IOBMAP_PAGE_SIZE;
		bus_addr += IOBMAP_PAGE_SIZE;
	}
}


/*
 * iobmap_free - ppc_md.tce_free hook: unmap @npages kernel pages at entry
 * @index by pointing their L2 entries back at the dummy page
 * (iob_l2_emptyval), invalidating the IOB TLB for each one.
 */
static void iobmap_free(struct iommu_table *tbl, long index,
			long npages)
{
	u32 *ip;
	unsigned long bus_addr;

	pr_debug("iobmap: free at: %lx, %lx\n", index, npages);

	/* Bus address of the first page; computed before index is rescaled */
	bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;

	/* Rescale from kernel pages to 4K IOB pages */
	npages <<= IOBMAP_PAGE_FACTOR;
	index <<= IOBMAP_PAGE_FACTOR;

	ip = ((u32 *)tbl->it_base) + index;

	while (npages--) {
		*(ip++) = iob_l2_emptyval;
		/* invalidate tlb, can be optimized more */
		out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
		bus_addr += IOBMAP_PAGE_SIZE;
	}
}


/*
 * Describe the single translation window (2GB at bus address 0, one entry
 * per kernel page, backed by the preallocated L2 table) to the generic
 * powerpc IOMMU allocator.
 */
static void iommu_table_iobmap_setup(void)
{
	pr_debug(" -> %s\n", __func__);
	iommu_table_iobmap.it_busno = 0;
	iommu_table_iobmap.it_offset = 0;
	/* it_size is in number of entries */
	iommu_table_iobmap.it_size = 0x80000000 >> PAGE_SHIFT;

	/* Initialize the common IOMMU code */
	iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
	iommu_table_iobmap.it_index = 0;
	/* XXXOJN tune this to avoid IOB cache invals.
	 * Should probably be 8 (64 bytes)
	 */
	iommu_table_iobmap.it_blocksize = 4;
	iommu_init_table(&iommu_table_iobmap, 0);
	pr_debug(" <- %s\n", __func__);
}



/*
 * ppc_md.pci_dma_bus_setup hook: lazily set up the shared iommu table the
 * first time any bus is probed, then attach that one table to the bus's
 * OF node (all buses share the single window).
 */
static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
	struct device_node *dn;

	pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);

	if (!iommu_table_iobmap_inited) {
		iommu_table_iobmap_inited = 1;
		iommu_table_iobmap_setup();
	}

	dn = pci_bus_to_OF_node(bus);

	if (dn)
		PCI_DN(dn)->iommu_table = &iommu_table_iobmap;

}


/*
 * ppc_md.pci_dma_dev_setup hook: select per-device DMA ops.
 */
static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
{
	pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));

	/* DMA device is untranslated, but all other PCI-e goes through
	 * the IOMMU
	 */
	if (dev->vendor == 0x1959 && dev->device == 0xa007)
		dev->dev.archdata.dma_ops = &dma_direct_ops;
	else
		dev->dev.archdata.dma_data = &iommu_table_iobmap;
}

/* No-op hooks installed when the IOMMU is disabled */
static void pci_dma_bus_setup_null(struct pci_bus *b) { }
static void pci_dma_dev_setup_null(struct pci_dev *d) { }

/*
 * iob_init - bring up the IOB address translation hardware: allocate the
 * dummy page backing "empty" L2 entries, map the register block, point
 * the 64 L1 entries at the preallocated L2 table, select the 2GB
 * translation window and enable translation.
 *
 * @dn is currently unused.  Always returns 0; panics on allocation or
 * ioremap failure (this runs too early to recover).
 */
int iob_init(struct device_node *dn)
{
	unsigned long tmp;
	u32 regword;
	int i;

	pr_debug(" -> %s\n", __func__);

	/* Allocate a spare page to map all invalid IOTLB pages. */
	tmp = lmb_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
	if (!tmp)
		panic("IOBMAP: Cannot allocate spare page!");
	/* Empty l1 is marked invalid */
	iob_l1_emptyval = 0;
	/* Empty l2 is mapped to dummy page */
	iob_l2_emptyval = IOBMAP_L2E_V | (tmp >> IOBMAP_PAGE_SHIFT);

	iob = ioremap(IOB_BASE, IOB_SIZE);
	if (!iob)
		panic("IOBMAP: Cannot map registers!");

	/* setup direct mapping of the L1 entries */
	for (i = 0; i < 64; i++) {
		/* Each L1 covers 32MB, i.e. 8K entries = 32K of ram */
		regword = IOBMAP_L1E_V | (__pa(iob_l2_base + i*0x2000) >> 12);
		out_le32(iob+IOB_XLT_L1_REGBASE+i, regword);
	}

	/* set 2GB translation window, based at 0 */
	regword = in_le32(iob+IOB_AD_REG);
	regword &= ~IOB_AD_TRNG_MASK;
	regword |= IOB_AD_TRNG_2G;
	out_le32(iob+IOB_AD_REG, regword);

	/* Enable translation */
	regword = in_le32(iob+IOBCOM_REG);
	regword |= IOBCOM_ATEN;
	out_le32(iob+IOBCOM_REG, regword);

	pr_debug(" <- %s\n", __func__);

	return 0;
}


/* These are called very early.
 */

/*
 * iommu_init_early_pasemi - decide between direct DMA (IOMMU compiled
 * out, or "linux,iommu-off" present under /chosen in the device tree)
 * and translated DMA through the IOB, and install the matching ppc_md
 * hooks and pci_dma_ops.
 */
void iommu_init_early_pasemi(void)
{
	int iommu_off;

#ifndef CONFIG_PPC_PASEMI_IOMMU
	iommu_off = 1;
#else
	/* Honor the firmware's request to disable the IOMMU */
	iommu_off = of_chosen &&
			get_property(of_chosen, "linux,iommu-off", NULL);
#endif
	if (iommu_off) {
		/* Direct I/O, IOMMU off */
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null;
		pci_dma_ops = &dma_direct_ops;

		return;
	}

	iob_init(NULL);

	ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pasemi;
	ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pasemi;
	ppc_md.tce_build = iobmap_build;
	ppc_md.tce_free = iobmap_free;
	pci_dma_ops = &dma_iommu_ops;
}

/*
 * alloc_iobmap_l2 - early boot-time allocation of the level 2 translation
 * table.  iob_init() later points the hardware L1 entries at this table,
 * so this must have run first.  The table is placed below 2GB.
 */
void __init alloc_iobmap_l2(void)
{
#ifndef CONFIG_PPC_PASEMI_IOMMU
	/* Nothing to do when the IOMMU is configured out */
	return;
#endif
	/* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
	iob_l2_base = (u32 *)abs_to_virt(lmb_alloc_base(1UL<<21, 1UL<<21, 0x80000000));

	printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
}