Lines Matching full:iommu

3  * IOMMU implementation for Cell Broadband Processor Architecture
24 #include <asm/iommu.h>
95 /* IOMMU sizing */
104 struct cbe_iommu *iommu; member
131 static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, in invalidate_tce_cache() argument
138 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; in invalidate_tce_cache()
195 invalidate_tce_cache(window->iommu, io_pte, npages); in tce_build_cell()
218 __pa(window->iommu->pad_page) | in tce_free_cell()
229 invalidate_tce_cache(window->iommu, io_pte, npages); in tce_free_cell()
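
The tce_build_cell()/tce_free_cell() lines above show the core pattern: write a run of I/O page-table entries (freed entries are pointed at the pad page rather than left dangling, per line 218), then call invalidate_tce_cache() so the IOC drops any cached copies. A minimal stand-alone C model of that write-then-invalidate idiom follows; the bit layout, table size, and names are illustrative, not the IOC's real format.

    #include <stdint.h>
    #include <stdio.h>

    #define IOPTE_VALID   (1ULL << 63)  /* placeholder valid bit, not the IOC layout */
    #define IO_PAGE_SHIFT 12            /* 4KiB I/O pages */

    static uint64_t io_ptab[256];       /* toy I/O page table */

    /* Model of tce_build_cell(): fill npages consecutive PTEs, then tell
     * the (imaginary) hardware to invalidate its cached copies. */
    static void toy_tce_build(long index, long npages, uint64_t base_pa)
    {
        for (long i = 0; i < npages; i++)
            io_ptab[index + i] = IOPTE_VALID |
                                 (base_pa + ((uint64_t)i << IO_PAGE_SHIFT));
        printf("invalidate cached PTEs %ld..%ld\n", index, index + npages - 1);
    }

    int main(void)
    {
        toy_tce_build(4, 3, 0x10000000ULL);
        for (int i = 4; i < 7; i++)
            printf("ptab[%d] = 0x%016llx\n", i, (unsigned long long)io_ptab[i]);
        return 0;
    }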
235 struct cbe_iommu *iommu = data; in ioc_interrupt() local
237 stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); in ioc_interrupt()
241 printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat); in ioc_interrupt()
253 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); in ioc_interrupt()
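
ioc_interrupt() reads IOC_IO_ExcpStat, logs the DMA exception, and writes the same status value back, a common acknowledge idiom for latched fault registers. A toy user-space model, assuming write-one-to-clear semantics (the real register's semantics are not visible in the listing):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy latched exception-status register, assumed write-one-to-clear. */
    static uint64_t excp_stat;

    static void toy_ioc_interrupt(void)
    {
        uint64_t stat = excp_stat;          /* in_be64(xlate_regs + ExcpStat) */
        if (stat)
            fprintf(stderr, "iommu: DMA exception 0x%016llx\n",
                    (unsigned long long)stat);
        excp_stat &= ~stat;                 /* writing stat back acknowledges it */
    }

    int main(void)
    {
        excp_stat = 0x8000000000000001ULL;  /* pretend a fault was latched */
        toy_ioc_interrupt();
        printf("after ack: 0x%016llx\n", (unsigned long long)excp_stat);
        return 0;
    }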
270 printk(KERN_ERR "iommu: can't get address for %pOF\n", in cell_iommu_find_ioc()
298 static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu, in cell_iommu_setup_stab() argument
307 pr_debug("%s: iommu[%d]: segments: %lu\n", in cell_iommu_setup_stab()
308 __func__, iommu->nid, segments); in cell_iommu_setup_stab()
312 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); in cell_iommu_setup_stab()
314 iommu->stab = page_address(page); in cell_iommu_setup_stab()
315 memset(iommu->stab, 0, stab_size); in cell_iommu_setup_stab()
318 static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu, in cell_iommu_alloc_ptab() argument
335 pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, in cell_iommu_alloc_ptab()
336 iommu->nid, ptab_size, get_order(ptab_size)); in cell_iommu_alloc_ptab()
337 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); in cell_iommu_alloc_ptab()
346 pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", in cell_iommu_alloc_ptab()
347 __func__, iommu->nid, iommu->stab, ptab, in cell_iommu_alloc_ptab()
364 pr_debug("Setting up IOMMU stab:\n"); in cell_iommu_alloc_ptab()
370 iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * in cell_iommu_alloc_ptab()
372 pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); in cell_iommu_alloc_ptab()
378 static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu) in cell_iommu_enable_hardware() argument
384 if (cell_iommu_find_ioc(iommu->nid, &xlate_base)) in cell_iommu_enable_hardware()
386 __func__, iommu->nid); in cell_iommu_enable_hardware()
388 iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size); in cell_iommu_enable_hardware()
389 iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset; in cell_iommu_enable_hardware()
394 /* set up interrupts for the iommu. */ in cell_iommu_enable_hardware()

395 reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); in cell_iommu_enable_hardware()
396 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, in cell_iommu_enable_hardware()
398 out_be64(iommu->xlate_regs + IOC_IO_ExcpMask, in cell_iommu_enable_hardware()
402 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); in cell_iommu_enable_hardware()
405 ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu); in cell_iommu_enable_hardware()
408 /* set the IOC segment table origin register (and turn on the iommu) */ in cell_iommu_enable_hardware()
409 reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW; in cell_iommu_enable_hardware()
410 out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg); in cell_iommu_enable_hardware()
411 in_be64(iommu->xlate_regs + IOC_IOST_Origin); in cell_iommu_enable_hardware()
414 reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE; in cell_iommu_enable_hardware()
415 out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg); in cell_iommu_enable_hardware()
418 static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu, in cell_iommu_setup_hardware() argument
421 cell_iommu_setup_stab(iommu, base, size, 0, 0); in cell_iommu_setup_hardware()
422 iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, in cell_iommu_setup_hardware()
424 cell_iommu_enable_hardware(iommu); in cell_iommu_setup_hardware()
433 printk(KERN_WARNING "iommu: missing ioid for %pOF, using 0\n", in cell_iommu_get_ioid()
447 cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, in cell_iommu_setup_window() argument
457 window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); in cell_iommu_setup_window()
463 window->iommu = iommu; in cell_iommu_setup_window()
466 window->table.it_base = (unsigned long)iommu->ptab; in cell_iommu_setup_window()
467 window->table.it_index = iommu->nid; in cell_iommu_setup_window()
474 if (!iommu_init_table(&window->table, iommu->nid, 0, 0)) in cell_iommu_setup_window()
475 panic("Failed to initialize iommu table"); in cell_iommu_setup_window()
483 list_add(&window->list, &iommu->windows); in cell_iommu_setup_window()
488 /* We need to map and reserve the first IOMMU page since it's used in cell_iommu_setup_window()
495 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); in cell_iommu_setup_window()
497 iommu->pad_page = page_address(page); in cell_iommu_setup_window()
498 clear_page(iommu->pad_page); in cell_iommu_setup_window()
502 (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); in cell_iommu_setup_window()
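
cell_iommu_setup_window() reserves the first IOMMU page and backs it (and, per tce_free_cell() at line 218, every freed entry) with a dedicated zeroed pad page. A toy sketch of that reserve-and-pad idea; the table size and the __pa() stand-in are illustrative:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static uint64_t io_ptab[8];         /* toy I/O page table          */
    static uint8_t  pad_page[4096];     /* toy pad page                */

    static void toy_free_entry(int idx, uint64_t pad_pa)
    {
        io_ptab[idx] = pad_pa;          /* like tce_free_cell() above  */
    }

    int main(void)
    {
        memset(pad_page, 0, sizeof(pad_page));           /* clear_page()    */
        uint64_t pad_pa = (uint64_t)(uintptr_t)pad_page; /* __pa() stand-in */

        io_ptab[0] = pad_pa;            /* reserve the first IOMMU page */
        toy_free_entry(3, pad_pa);
        printf("ptab[0]=0x%llx ptab[3]=0x%llx\n",
               (unsigned long long)io_ptab[0], (unsigned long long)io_ptab[3]);
        return 0;
    }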
528 struct cbe_iommu *iommu; in cell_get_iommu_table() local
531 * node's iommu. We -might- do something smarter later though it may in cell_get_iommu_table()
534 iommu = cell_iommu_for_node(dev_to_node(dev)); in cell_get_iommu_table()
535 if (iommu == NULL || list_empty(&iommu->windows)) { in cell_get_iommu_table()
536 dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n", in cell_get_iommu_table()
540 window = list_entry(iommu->windows.next, struct iommu_window, list); in cell_get_iommu_table()
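
cell_get_iommu_table() resolves a device to its NUMA node's iommu and hands back the first window on that iommu's list, failing loudly when the node has none. A trivial stand-alone model with two hard-coded nodes:

    #include <stddef.h>
    #include <stdio.h>

    struct toy_window { int valid; };
    struct toy_iommu  { int nid; struct toy_window win; };

    /* Node 0 has a window, node 1 does not. */
    static struct toy_iommu iommus[2] = { {0, {1}}, {1, {0}} };

    static struct toy_window *toy_get_table(int dev_node)
    {
        for (size_t i = 0; i < 2; i++)
            if (iommus[i].nid == dev_node && iommus[i].win.valid)
                return &iommus[i].win;
        fprintf(stderr, "iommu: missing iommu for node %d\n", dev_node);
        return NULL;
    }

    int main(void)
    {
        printf("node 0 -> %p\n", (void *)toy_get_table(0));
        printf("node 1 -> %p\n", (void *)toy_get_table(1));
        return 0;
    }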
605 struct cbe_iommu *iommu; in cell_iommu_alloc() local
611 printk(KERN_ERR "iommu: failed to get node for %pOF\n", in cell_iommu_alloc()
615 pr_debug("iommu: setting up iommu for node %d (%pOF)\n", in cell_iommu_alloc()
618 /* XXX todo: If we can have multiple windows on the same IOMMU, which in cell_iommu_alloc()
620 * iommu for that node is already setup. in cell_iommu_alloc()
623 * multiple window support since the cell iommu supports per-page ioids in cell_iommu_alloc()
627 printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n", in cell_iommu_alloc()
634 iommu = &iommus[i]; in cell_iommu_alloc()
635 iommu->stab = NULL; in cell_iommu_alloc()
636 iommu->nid = nid; in cell_iommu_alloc()
637 snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i); in cell_iommu_alloc()
638 INIT_LIST_HEAD(&iommu->windows); in cell_iommu_alloc()
640 return iommu; in cell_iommu_alloc()
646 struct cbe_iommu *iommu; in cell_iommu_init_one() local
649 iommu = cell_iommu_alloc(np); in cell_iommu_init_one()
650 if (!iommu) in cell_iommu_init_one()
660 cell_iommu_setup_hardware(iommu, base, size); in cell_iommu_init_one()
663 cell_iommu_setup_window(iommu, np, base, size, in cell_iommu_init_one()
682 pr_debug("iommu: cleaning up iommu on node %d\n", node); in cell_disable_iommus()
700 /* When no iommu is present, we use direct DMA ops */ in cell_iommu_init_disabled()
734 * all of physical memory. If not, we force enable IOMMU in cell_iommu_init_disabled()
737 printk(KERN_WARNING "iommu: force-enabled, dma window" in cell_iommu_init_disabled()
748 printk("iommu: disabled, direct DMA offset is 0x%lx\n", in cell_iommu_init_disabled()
755 * Fixed IOMMU mapping support
757 * This code adds support for setting up a fixed IOMMU mapping on certain
764  * we set up the fixed mapping immediately above the normal IOMMU window.
767 * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
773 * mapping above the normal IOMMU window as we would run out of address space.
774 * Instead we move the normal IOMMU window to coincide with the hash page
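
The comment block above pins down the layout: the dynamic IOMMU window occupies 0-2GB of device address space and the fixed mapping sits immediately above it at 2GB-6GB, so a 64-bit capable device reaches physical address pa at bus address fbase + pa. A worked example of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dbase = 0,             dsize = 2ULL << 30; /* dynamic: 0-2GB */
        uint64_t fbase = dbase + dsize, fsize = 4ULL << 30; /* fixed: 2GB-6GB */
        uint64_t pa = 0x80001000;

        printf("dynamic 0x%llx-0x%llx, fixed 0x%llx-0x%llx\n",
               (unsigned long long)dbase, (unsigned long long)(dbase + dsize),
               (unsigned long long)fbase, (unsigned long long)(fbase + fsize));
        printf("pa 0x%llx -> fixed bus addr 0x%llx\n",
               (unsigned long long)pa, (unsigned long long)(fbase + pa));
        return 0;
    }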
807 dev_dbg(dev, "iommu: no dma-ranges found\n"); in cell_iommu_get_fixed_address()
834 dev_dbg(dev, "iommu: no suitable range found!\n"); in cell_iommu_get_fixed_address()
857 pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n", in insert_16M_pte()
863 static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu, in cell_iommu_setup_fixed_ptab() argument
869 ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24); in cell_iommu_setup_fixed_ptab()
873 pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); in cell_iommu_setup_fixed_ptab()
879 pr_info("IOMMU: Using weak ordering for fixed mapping\n"); in cell_iommu_setup_fixed_ptab()
881 pr_info("IOMMU: Using strong ordering for fixed mapping\n"); in cell_iommu_setup_fixed_ptab()
889 pr_debug("iommu: fixed/dynamic overlap, skipping\n"); in cell_iommu_setup_fixed_ptab()
902 struct cbe_iommu *iommu; in cell_iommu_fixed_mapping_init() local
910 pr_debug("iommu: fixed mapping disabled, no axons found\n"); in cell_iommu_fixed_mapping_init()
919 pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); in cell_iommu_fixed_mapping_init()
924 * dynamic region, so find the top of the largest IOMMU window in cell_iommu_fixed_mapping_init()
947 pr_debug("iommu: htab is NULL, on LPAR? Huh?\n"); in cell_iommu_fixed_mapping_init()
956 pr_debug("iommu: hash window not segment aligned\n"); in cell_iommu_fixed_mapping_init()
965 pr_debug("iommu: hash window doesn't fit in" in cell_iommu_fixed_mapping_init()
977 iommu = cell_iommu_alloc(np); in cell_iommu_fixed_mapping_init()
978 BUG_ON(!iommu); in cell_iommu_fixed_mapping_init()
987 printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " in cell_iommu_fixed_mapping_init()
988 "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, in cell_iommu_fixed_mapping_init()
991 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); in cell_iommu_fixed_mapping_init()
992 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, in cell_iommu_fixed_mapping_init()
994 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, in cell_iommu_fixed_mapping_init()
996 cell_iommu_enable_hardware(iommu); in cell_iommu_fixed_mapping_init()
997 cell_iommu_setup_window(iommu, np, dbase, dsize, 0); in cell_iommu_fixed_mapping_init()
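
cell_iommu_fixed_mapping_init() wires the pieces together in a fixed order: one segment table covering both windows, a 4K-page ptab for the dynamic window, a 16M-page ptab for the fixed one, then the hardware enable, then the Linux iommu_table over the dynamic window. A stub model that just prints the sequence; the helper names are invented:

    #include <stdio.h>

    static void toy_setup_stab(void)       { puts("stab: dynamic + fixed ranges"); }
    static void toy_alloc_ptab_4k(void)    { puts("ptab: dynamic window, 4K pages"); }
    static void toy_setup_fixed_ptab(void) { puts("ptab: fixed window, 16M pages"); }
    static void toy_enable_hardware(void)  { puts("enable: IOST origin + TE bit"); }
    static void toy_setup_window(void)     { puts("window: Linux iommu_table"); }

    int main(void)
    {
        /* Same ordering as the listing above: translation is only switched
         * on once both page tables are in place. */
        toy_setup_stab();
        toy_alloc_ptab_4k();
        toy_setup_fixed_ptab();
        toy_enable_hardware();
        toy_setup_window();
        return 0;
    }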
1034 /* If IOMMU is disabled or we have little enough RAM to not need in cell_iommu_init()
1037 * Note: should we make sure we have the IOMMU actually disabled ? in cell_iommu_init()
1050 /* Create an iommu for each /axon node. */ in cell_iommu_init()
1057 /* Create an iommu for each toplevel /pci-internal node for in cell_iommu_init()
1066 /* Setup default PCI iommu ops */ in cell_iommu_init()
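
The tail of the listing shows the top-level policy in cell_iommu_init(): skip IOMMU setup when it is disabled or RAM is small enough not to need it, otherwise create one iommu per /axon (or top-level /pci-internal) node and install the default PCI iommu ops. A toy rendering of that decision; the flag and threshold are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool iommu_forced_off = false;
        unsigned long long ram = 1ULL << 30;        /* pretend 1GiB of RAM  */
        unsigned long long dma_limit = 2ULL << 30;  /* invented DMA reach   */

        if (iommu_forced_off || ram <= dma_limit)
            puts("iommu: disabled, using direct DMA");
        else
            puts("iommu: enabled, creating one per /axon node");
        return 0;
    }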