#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

/* PTE of the first kmap fixmap slot; cached by kmap_init() */
static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
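/*
 * Illustrative sketch only, not part of the original file: a minimal
 * caller of the sleeping kmap()/kunmap() pair above. The helper name is
 * hypothetical; clear_page() stands in for whatever work is done on the
 * temporary mapping.
 */
#if 0
static void example_clear_highmem_page(struct page *page)
{
	void *addr;

	addr = kmap(page);	/* may sleep, so process context only */
	clear_page(addr);	/* operate on the temporary mapping */
	kunmap(page);		/* drop the mapping again */
}
#endif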
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { /* FIXME: lowmem address, nothing mapped */
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
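/*
 * Illustrative sketch only, not part of the original file: the atomic
 * variant for short, tight code paths. Between kmap_atomic() and
 * __kunmap_atomic() preemption and pagefaults are disabled, so the body
 * must not sleep. The helper name is hypothetical.
 */
#if 0
static void example_fill_page_atomic(struct page *page, int val)
{
	void *addr;

	addr = kmap_atomic(page);	/* per-CPU fixmap slot, no locks */
	memset(addr, val, PAGE_SIZE);	/* must not sleep in here */
	__kunmap_atomic(addr);		/* pops the per-CPU kmap stack */
}
#endif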
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
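/*
 * Illustrative sketch only, not part of the original file: reading one
 * word from a frame that may have no struct page (e.g. device memory)
 * via kmap_atomic_pfn() above. The helper name is hypothetical; the
 * mapping is torn down with __kunmap_atomic() on the returned address.
 */
#if 0
static u32 example_peek_pfn(unsigned long pfn)
{
	u32 *addr = kmap_atomic_pfn(pfn);	/* no struct page needed */
	u32 val = *addr;

	__kunmap_atomic(addr);
	return val;
}
#endif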