/* SPDX-License-Identifier: GPL-2.0 */
/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/fixmap.h>

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables would have to be allocated in one
 * physical chunk of RAM.
 *
 * With 4K pages we use one full pte table for PKMAP. With 16K/64K/256K
 * pages a pte table covers enough memory (32MB/512MB/2GB respectively)
 * that both FIXMAP and PKMAP can be placed in a single pte table; in
 * that case we use 512 pages for PKMAP.
 */

#define PKMAP_ORDER	PTE_SHIFT
#define LAST_PKMAP	(1 << PKMAP_ORDER)

#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
								& PMD_MASK)

#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
extern void __kunmap_atomic(void *kvaddr);

/*
 * kmap() may sleep while waiting for a free pkmap slot, so it must only
 * be called from process context. Lowmem pages are permanently mapped
 * and are returned directly via page_address().
 */
static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/* The atomic variant is usable in interrupt and other atomic contexts. */
static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

#define flush_cache_kmaps()	do { flush_icache(); flush_dcache(); } while (0)

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */
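
/*
 * Editor's sketch of the pkmap layout math, not part of the original
 * header. The concrete value of PTE_SHIFT is an assumption here (it
 * depends on the page size and pte entry size); assuming 4K pages and
 * PTE_SHIFT == 10, the window works out as:
 *
 *	LAST_PKMAP   = 1 << 10			 = 1024 slots
 *	window size  = 1024 * 4K		 = 4MB of virtual space
 *	PKMAP_BASE   = (FIXADDR_START - 4K * 1025) & PMD_MASK
 *
 * The extra page in "LAST_PKMAP + 1" leaves a guard gap below FIXADDR_START,
 * and the PMD_MASK alignment keeps the whole window inside one pte table.
 * PKMAP_NR() and PKMAP_ADDR() are inverses over that window:
 *
 *	PKMAP_NR(PKMAP_ADDR(n)) == n	for 0 <= n < LAST_PKMAP
 */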
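
/*
 * Editor's usage sketch, not part of the original header: mapping a
 * highmem page for CPU access with the interfaces declared above.
 * alloc_page(), __free_page(), memset() and PAGE_SIZE are standard
 * kernel APIs; the fragment assumes it runs inside a function that can
 * return an errno, and error handling is intentionally minimal.
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *	void *vaddr;
 *
 *	if (!page)
 *		return -ENOMEM;
 *
 *	vaddr = kmap(page);		// may sleep: process context only
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 *	// In atomic context, use the per-CPU variant instead and release
 *	// it before leaving that context:
 *	vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	__kunmap_atomic(vaddr);
 *
 *	__free_page(page);
 */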