11da177e4SLinus Torvalds #include <linux/config.h> 21da177e4SLinus Torvalds #include <linux/module.h> 31da177e4SLinus Torvalds #include <linux/highmem.h> 41da177e4SLinus Torvalds #include <asm/tlbflush.h> 51da177e4SLinus Torvalds 61da177e4SLinus Torvalds void *__kmap(struct page *page) 71da177e4SLinus Torvalds { 81da177e4SLinus Torvalds void *addr; 91da177e4SLinus Torvalds 101da177e4SLinus Torvalds might_sleep(); 111da177e4SLinus Torvalds if (!PageHighMem(page)) 121da177e4SLinus Torvalds return page_address(page); 131da177e4SLinus Torvalds addr = kmap_high(page); 141da177e4SLinus Torvalds flush_tlb_one((unsigned long)addr); 151da177e4SLinus Torvalds 161da177e4SLinus Torvalds return addr; 171da177e4SLinus Torvalds } 181da177e4SLinus Torvalds 191da177e4SLinus Torvalds void __kunmap(struct page *page) 201da177e4SLinus Torvalds { 211da177e4SLinus Torvalds if (in_interrupt()) 221da177e4SLinus Torvalds BUG(); 231da177e4SLinus Torvalds if (!PageHighMem(page)) 241da177e4SLinus Torvalds return; 251da177e4SLinus Torvalds kunmap_high(page); 261da177e4SLinus Torvalds } 271da177e4SLinus Torvalds 281da177e4SLinus Torvalds /* 291da177e4SLinus Torvalds * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because 301da177e4SLinus Torvalds * no global lock is needed and because the kmap code must perform a global TLB 311da177e4SLinus Torvalds * invalidation when the kmap pool wraps. 321da177e4SLinus Torvalds * 331da177e4SLinus Torvalds * However when holding an atomic kmap is is not legal to sleep, so atomic 341da177e4SLinus Torvalds * kmaps are appropriate for short, tight code paths only. 
351da177e4SLinus Torvalds */ 361da177e4SLinus Torvalds 371da177e4SLinus Torvalds void *__kmap_atomic(struct page *page, enum km_type type) 381da177e4SLinus Torvalds { 391da177e4SLinus Torvalds enum fixed_addresses idx; 401da177e4SLinus Torvalds unsigned long vaddr; 411da177e4SLinus Torvalds 421da177e4SLinus Torvalds /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ 431da177e4SLinus Torvalds inc_preempt_count(); 441da177e4SLinus Torvalds if (!PageHighMem(page)) 451da177e4SLinus Torvalds return page_address(page); 461da177e4SLinus Torvalds 471da177e4SLinus Torvalds idx = type + KM_TYPE_NR*smp_processor_id(); 481da177e4SLinus Torvalds vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 491da177e4SLinus Torvalds #ifdef CONFIG_DEBUG_HIGHMEM 501da177e4SLinus Torvalds if (!pte_none(*(kmap_pte-idx))) 511da177e4SLinus Torvalds BUG(); 521da177e4SLinus Torvalds #endif 531da177e4SLinus Torvalds set_pte(kmap_pte-idx, mk_pte(page, kmap_prot)); 541da177e4SLinus Torvalds local_flush_tlb_one((unsigned long)vaddr); 551da177e4SLinus Torvalds 561da177e4SLinus Torvalds return (void*) vaddr; 571da177e4SLinus Torvalds } 581da177e4SLinus Torvalds 591da177e4SLinus Torvalds void __kunmap_atomic(void *kvaddr, enum km_type type) 601da177e4SLinus Torvalds { 611da177e4SLinus Torvalds #ifdef CONFIG_DEBUG_HIGHMEM 621da177e4SLinus Torvalds unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 631da177e4SLinus Torvalds enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); 641da177e4SLinus Torvalds 651da177e4SLinus Torvalds if (vaddr < FIXADDR_START) { // FIXME 661da177e4SLinus Torvalds dec_preempt_count(); 671da177e4SLinus Torvalds preempt_check_resched(); 681da177e4SLinus Torvalds return; 691da177e4SLinus Torvalds } 701da177e4SLinus Torvalds 711da177e4SLinus Torvalds if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx)) 721da177e4SLinus Torvalds BUG(); 731da177e4SLinus Torvalds 741da177e4SLinus Torvalds /* 751da177e4SLinus Torvalds * force other mappings to Oops if they'll 
try to access 761da177e4SLinus Torvalds * this pte without first remap it 771da177e4SLinus Torvalds */ 781da177e4SLinus Torvalds pte_clear(&init_mm, vaddr, kmap_pte-idx); 791da177e4SLinus Torvalds local_flush_tlb_one(vaddr); 801da177e4SLinus Torvalds #endif 811da177e4SLinus Torvalds 821da177e4SLinus Torvalds dec_preempt_count(); 831da177e4SLinus Torvalds preempt_check_resched(); 841da177e4SLinus Torvalds } 851da177e4SLinus Torvalds 86*60080265SRalf Baechle /* 87*60080265SRalf Baechle * This is the same as kmap_atomic() but can map memory that doesn't 88*60080265SRalf Baechle * have a struct page associated with it. 89*60080265SRalf Baechle */ 90*60080265SRalf Baechle void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) 91*60080265SRalf Baechle { 92*60080265SRalf Baechle enum fixed_addresses idx; 93*60080265SRalf Baechle unsigned long vaddr; 94*60080265SRalf Baechle 95*60080265SRalf Baechle inc_preempt_count(); 96*60080265SRalf Baechle 97*60080265SRalf Baechle idx = type + KM_TYPE_NR*smp_processor_id(); 98*60080265SRalf Baechle vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 99*60080265SRalf Baechle set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); 100*60080265SRalf Baechle flush_tlb_one(vaddr); 101*60080265SRalf Baechle 102*60080265SRalf Baechle return (void*) vaddr; 103*60080265SRalf Baechle } 104*60080265SRalf Baechle 1051da177e4SLinus Torvalds struct page *__kmap_atomic_to_page(void *ptr) 1061da177e4SLinus Torvalds { 1071da177e4SLinus Torvalds unsigned long idx, vaddr = (unsigned long)ptr; 1081da177e4SLinus Torvalds pte_t *pte; 1091da177e4SLinus Torvalds 1101da177e4SLinus Torvalds if (vaddr < FIXADDR_START) 1111da177e4SLinus Torvalds return virt_to_page(ptr); 1121da177e4SLinus Torvalds 1131da177e4SLinus Torvalds idx = virt_to_fix(vaddr); 1141da177e4SLinus Torvalds pte = kmap_pte - (idx - FIX_KMAP_BEGIN); 1151da177e4SLinus Torvalds return pte_page(*pte); 1161da177e4SLinus Torvalds } 1171da177e4SLinus Torvalds 1181da177e4SLinus Torvalds EXPORT_SYMBOL(__kmap); 
/* Module-visible entry points of the MIPS highmem mapping API. */
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);