/*
 *  PS3 address space management.
 *
 *  Copyright (C) 2006 Sony Computer Entertainment Inc.
 *  Copyright 2006 Sony Corp.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/firmware.h>
#include <asm/lmb.h>
#include <asm/udbg.h>
#include <asm/ps3.h>
#include <asm/lv1call.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...) do { if (0) printk(fmt); } while (0)
#endif

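/*
 * Note: the non-DEBUG variant still routes its arguments through printk()
 * inside an 'if (0)', so format strings stay type-checked by the compiler
 * even when DEBUG is not defined, while the call itself compiles away.
 */
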
enum {
#if defined(CONFIG_PS3_USE_LPAR_ADDR)
	USE_LPAR_ADDR = 1,
#else
	USE_LPAR_ADDR = 0,
#endif
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};

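/*
 * make_page_sizes() packs two page-size exponents (e.g. 24 for 16M pages)
 * into the high-order bytes of the 64-bit page_sizes value passed to
 * lv1_construct_virtual_address_space(): the first exponent lands in bits
 * 63:56, the second in bits 55:48.
 */
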
static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}

enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines                                            */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 */

struct mem_region {
	unsigned long base;
	unsigned long size;
	unsigned long offset;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
	unsigned long total;
	unsigned long vas_id;
	unsigned long htab_size;
	struct mem_region rm;
	struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void _debug_dump_map(const struct map *m, const char *func, int line)
{
	DBG("%s:%d: map.total     = %lxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size   = %lxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id    = %lu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base   = %lxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size   = %lxh\n", func, line, m->r1.size);
}

static struct map map;

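/*
 * Sketch of the linux physical -> lpar translation implemented below, with
 * purely hypothetical numbers for illustration (and assuming
 * CONFIG_PS3_USE_LPAR_ADDR is not set; with it set, physical and lpar
 * addresses are identical): if the HV reports map.rm.size = 0x8000000 of
 * real mode memory starting at lpar address 0, and the hotplug region was
 * allocated at map.r1.base = 0x700000000, then
 * map.r1.offset = map.r1.base - map.rm.size, and a physical address of,
 * say, 0x8100000 translates to 0x8100000 + map.r1.offset = 0x700100000.
 * Physical addresses below map.rm.size (or outside map.total) pass through
 * unchanged.
 */
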
/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
	if (USE_LPAR_ADDR)
		return phys_addr;
	else
		return (phys_addr < map.rm.size || phys_addr >= map.total)
			? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);

/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
	int result;
	unsigned long start_address;
	unsigned long size;
	unsigned long access_right;
	unsigned long max_page_size;
	unsigned long flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
			&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}

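/*
 * Teardown below switches back to address space id 0 with
 * lv1_select_virtual_address_space(0) before destructing the space created
 * in ps3_mm_vas_create(), so the space being destructed is no longer the
 * active one.
 */
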
/**
 * ps3_mm_vas_destroy - destroy the current HV virtual address space
 */

void ps3_mm_vas_destroy(void)
{
	if (map.vas_id) {
		lv1_select_virtual_address_space(0);
		lv1_destruct_virtual_address_space(map.vas_id);
		map.vas_id = 0;
	}
}

/*============================================================================*/
/* memory hotplug routines                                                    */
/*============================================================================*/

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	unsigned long muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual     %lxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
		(unsigned long)(size - r->size),
		(size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

void ps3_mm_region_destroy(struct mem_region *r)
{
	if (r->base) {
		lv1_release_memory(r->base);
		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}
}

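/*
 * Memory bring-up happens in two steps: ps3_mm_init() (see the system
 * startup routines at the end of this file) creates the r1 region with the
 * hypervisor, and ps3_mm_add_memory(), run later as a core_initcall,
 * hot-adds and onlines those pages once the core mm is up (note the
 * BUG_ON(!mem_init_done) below).
 */
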
/**
 * ps3_mm_add_memory - hot add memory
 */

static int __init ps3_mm_add_memory(void)
{
	int result;
	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return 0;

	BUG_ON(!mem_init_done);

	start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

	if (result) {
		DBG("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	result = online_pages(start_pfn, nr_pages);

	if (result)
		DBG("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);

	return result;
}

core_initcall(ps3_mm_add_memory);

/*============================================================================*/
/* dma routines                                                               */
/*============================================================================*/

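/*
 * ioc bus addresses used below are dense: the lpar gap between the end of
 * the real mode region and the start of r1 is collapsed, so a bus address
 * is r->bus_addr plus the lpar address, with map.r1.offset subtracted for
 * addresses that fall in the r1 region.
 */
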
/**
 * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
	return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
		: lpar_addr - map.r1.offset);
}

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void _dma_dump_region(const struct ps3_dma_region *r, const char *func,
	int line)
{
	DBG("%s:%d: dev       %u:%u\n", func, line, r->did.bus_id,
		r->did.dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr  %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len       %lxh\n", func, line, r->len);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 * @usage_count: Number of active mappings currently using this chunk.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
	int line)
{
	DBG("%s:%d: r.dev       %u:%u\n", func, line,
		c->region->did.bus_id, c->region->did.dev_id);
	DBG("%s:%d: r.bus_addr  %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len       %lxh\n", func, line, c->region->len);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr  %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len       %lxh\n", func, line, c->len);
}

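/*
 * dma_find_chunk() below only returns a chunk when the page-aligned request
 * lies entirely within a single already-mapped chunk; a request that only
 * partially overlaps an existing chunk trips the BUG(), since this simple
 * page manager does not split or merge chunks.
 */
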
static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr
			&& aligned_bus < c->bus_addr + c->len
			&& aligned_bus + aligned_len <= c->bus_addr + c->len) {
			return c;
		}
		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr) {
			continue;
		}
		/* above */
		if (aligned_bus >= c->bus_addr + c->len) {
			continue;
		}

		/* we don't handle the multi-chunk case for now */

		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}

static int dma_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->did.bus_id,
			c->region->did.dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}

	kfree(c);
	return result;
}

/**
 * dma_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	result = lv1_map_device_dma_region(c->region->did.bus_id,
		c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
		0xf800000000000000UL);

	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}

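/*
 * The ioc region created below is sized to cover all of map.total, rounded
 * up to the region's io page size, so the bus window is large enough for
 * any lpar address the kernel may hand to the device.
 */
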
/**
 * dma_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_region_create(struct ps3_dma_region *r)
{
	int result;

	r->len = _ALIGN_UP(map.total, 1 << r->page_size);
	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->len, r->page_size, r->region_type, &r->bus_addr);

	dma_dump_region(r);

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

/**
 * dma_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->len = r->bus_addr = 0;

	return result;
}

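/*
 * Mapped areas are reference counted: dma_map_area() reuses an existing
 * chunk and bumps usage_count when the request is already covered, and
 * dma_unmap_area() drops the count and releases the chunk (and its HV
 * mapping) when it reaches zero.  All chunk_list manipulation is done under
 * r->chunk_list.lock with interrupts disabled.
 */
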
/**
 * dma_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;

	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr  %lxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
		_ALIGN_UP(len, 1 << r->page_size), &c);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

/**
 * dma_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}

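/*
 * Two mapping strategies are provided.  With CONFIG_PS3_DYNAMIC_DMA the
 * per-area routines above map and unmap io pages on demand.  Without it,
 * the "linear" routines below map all of memory into the ioc once at region
 * creation time, and the per-area map/unmap calls reduce to address
 * arithmetic.
 */
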
/**
 * dma_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long tmp;

	/* force 16M dma pages for linear mapping */

	if (r->page_size != PS3_DMA_16M) {
		pr_info("%s:%d: forcing 16M pages for linear map\n",
			__func__, __LINE__);
		r->page_size = PS3_DMA_16M;
	}

	result = dma_region_create(r);
	BUG_ON(result);

	result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
	BUG_ON(result);

	if (USE_LPAR_ADDR)
		result = dma_map_area(r, map.r1.base, map.r1.size,
			&tmp);
	else
		result = dma_map_area(r, map.rm.size, map.r1.size,
			&tmp);

	BUG_ON(result);

	return result;
}

/**
 * dma_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_region_free_linear(struct ps3_dma_region *r)
{
	int result;

	result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
	BUG_ON(result);

	result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
		map.r1.size);
	BUG_ON(result);

	result = dma_region_free(r);
	BUG_ON(result);

	return result;
}

/**
 * dma_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_region_create_linear().
 */

static int dma_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}

/**
 * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_region_free_linear().
 */

static int dma_unmap_area_linear(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	return 0;
}

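/*
 * The ps3_dma_* routines below are the public entry points of this file;
 * they simply select the dynamic or linear implementation at compile time
 * via USE_DYNAMIC_DMA (CONFIG_PS3_DYNAMIC_DMA).
 */
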
int ps3_dma_region_create(struct ps3_dma_region *r)
{
	return (USE_DYNAMIC_DMA)
		? dma_region_create(r)
		: dma_region_create_linear(r);
}

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	return (USE_DYNAMIC_DMA)
		? dma_region_free(r)
		: dma_region_free_linear(r);
}

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr)
{
	return (USE_DYNAMIC_DMA)
		? dma_map_area(r, virt_addr, len, bus_addr)
		: dma_map_area_linear(r, virt_addr, len, bus_addr);
}

int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
		: dma_unmap_area_linear(r, bus_addr, len);
}

/*============================================================================*/
/* system startup routines                                                    */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	lmb_add(map.rm.base, map.rm.size);
	lmb_analyze();

	/* arrange to do this in ps3_mm_add_memory */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
	map.total = map.rm.size;
}