#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
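
/*
 * Illustrative usage sketch (an assumption for documentation, not code that
 * is compiled here): callers normally reach __kmap_atomic()/__kunmap_atomic()
 * below through the generic kmap_atomic()/kunmap_atomic() wrappers, roughly:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 * Nothing between the map and the unmap may sleep or take a page fault;
 * "buf" is a hypothetical destination buffer used only for illustration.
 */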

void *__kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx_pop();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
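
/*
 * Illustrative usage sketch (an assumption for documentation, not code that
 * is compiled here): kmap_atomic_pfn() above is meant for memory that has no
 * struct page, i.e. a frame known only by its page frame number.  A
 * hypothetical caller would pair it with kunmap_atomic(), roughly:
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	memcpy(dst, vaddr + offset, len);
 *	kunmap_atomic(vaddr);
 *
 * "pfn", "dst", "offset" and "len" are made-up names for illustration; the
 * mapping must be released before doing anything that can sleep.
 */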