--- pgtable.h (8389a7b909f252e74ea92b2794de8d660cfee96e)
+++ pgtable.h (6bd33e1ece528f67646db33bf97406b747dafda0)
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 Regents of the University of California
  */

 #ifndef _ASM_RISCV_PGTABLE_H
 #define _ASM_RISCV_PGTABLE_H

 #include <linux/mmzone.h>
+#include <linux/sizes.h>

 #include <asm/pgtable-bits.h>

 #ifndef __ASSEMBLY__

 /* Page Upper Directory not used in RISC-V */
 #include <asm-generic/pgtable-nopud.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>

 #ifdef CONFIG_64BIT
 #include <asm/pgtable-64.h>
 #else
 #include <asm/pgtable-32.h>
 #endif /* CONFIG_64BIT */

+#ifdef CONFIG_MMU
 /* Number of entries in the page global directory */
 #define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
 /* Number of entries in the page table */
 #define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))

 /* Number of PGD entries that a user-mode program can use */
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0

 /* Page protection bits */
 #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

 #define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
 #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
 #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
 #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)

[... 35 unchanged lines hidden ...]

 #define __S001 PAGE_READ
 #define __S010 PAGE_SHARED
 #define __S011 PAGE_SHARED
 #define __S100 PAGE_EXEC
 #define __S101 PAGE_READ_EXEC
 #define __S110 PAGE_SHARED_EXEC
 #define __S111 PAGE_SHARED_EXEC

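The __Pxxx/__Sxxx rows above populate core mm's protection_map[]: the three digits are the exec, write and read bits of the requested mapping (read being the least significant), and the __S variants are used for MAP_SHARED mappings. A minimal user-space sketch of the index decoding; the flag values are illustrative stand-ins, not copied from the kernel headers:

```c
#include <stdio.h>

/* Illustrative stand-ins for the low vm_flags bits that index protection_map[]. */
#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

/* Print which __Pxxx/__Sxxx slot a mapping request selects. */
static void which_slot(unsigned long vm_flags)
{
	printf("__%c%u%u%u\n",
	       (vm_flags & VM_SHARED) ? 'S' : 'P',
	       !!(vm_flags & VM_EXEC),   /* first digit: exec */
	       !!(vm_flags & VM_WRITE),  /* second digit: write */
	       !!(vm_flags & VM_READ));  /* third digit: read */
}

int main(void)
{
	/* A MAP_SHARED, PROT_READ|PROT_WRITE mapping selects __S011,
	 * which the table above resolves to PAGE_SHARED. */
	which_slot(VM_SHARED | VM_READ | VM_WRITE);
	return 0;
}
```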
-#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END (PAGE_OFFSET - 1)
-#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
-
-/*
- * Roughly size the vmemmap space to be large enough to fit enough
- * struct pages to map half the virtual address space. Then
- * position vmemmap directly below the VMALLOC region.
- */
-#define VMEMMAP_SHIFT \
-	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END (VMALLOC_START - 1)
-#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
-
-#define vmemmap ((struct page *)VMEMMAP_START)
-
-#define FIXADDR_TOP (VMEMMAP_START)
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE PMD_SIZE
-#else
-#define FIXADDR_SIZE PGDIR_SIZE
-#endif
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero,
- * used for zero-mapped memory areas, etc.
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 static inline int pmd_present(pmd_t pmd)
 {
 	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
 }

 static inline int pmd_none(pmd_t pmd)
 {
 	return (pmd_val(pmd) == 0);

[... 53 unchanged lines hidden ...]

 #define pte_page(x) pfn_to_page(pte_pfn(x))

 /* Constructs a page table entry */
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 {
 	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
 }

-static inline pte_t mk_pte(struct page *page, pgprot_t prot)
-{
-	return pfn_pte(page_to_pfn(page), prot);
-}
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
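Both forms of mk_pte() reduce to pfn_pte() above: the page frame number goes in the high bits of the PTE and the protection bits in the low bits. A minimal stand-alone sketch of that composition, assuming the usual RISC-V value of _PAGE_PFN_SHIFT == 10 (the low ten PTE bits hold the V/R/W/X/U/G/A/D flags plus two software bits); the flag values below are assumptions for illustration, not copied from asm/pgtable-bits.h:

```c
#include <inttypes.h>
#include <stdio.h>

/* Assumed values for illustration; the real ones live in asm/pgtable-bits.h. */
#define _PAGE_PFN_SHIFT 10
#define _PAGE_PRESENT   (1ULL << 0)
#define _PAGE_READ      (1ULL << 1)
#define _PAGE_WRITE     (1ULL << 2)

/* Mirrors pfn_pte(): place the frame number above the flag bits. */
static uint64_t sketch_pfn_pte(uint64_t pfn, uint64_t prot)
{
	return (pfn << _PAGE_PFN_SHIFT) | prot;
}

int main(void)
{
	uint64_t pfn = 0x80200;	/* physical address 0x80200000 with 4 KiB pages */
	uint64_t pte = sketch_pfn_pte(pfn, _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE);

	printf("pte = 0x%" PRIx64 "\n", pte);	/* prints pte = 0x20080007 */
	return 0;
}
```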

 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

 static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
 {
 	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
 }

[... 224 unchanged lines hidden ...]

 #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
 #define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
 #define __swp_entry(type, offset) ((swp_entry_t) \
 	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })

-#ifdef CONFIG_FLATMEM
-#define kern_addr_valid(addr) (1) /* FIXME */
-#endif
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END (PAGE_OFFSET - 1)
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)

-extern void *dtb_early_va;
-extern void setup_bootmem(void);
-extern void paging_init(void);
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END (VMALLOC_START - 1)
+#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)

+#define vmemmap ((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE SZ_16M
+#define PCI_IO_END VMEMMAP_START
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP PCI_IO_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE PMD_SIZE
+#else
+#define FIXADDR_SIZE PGDIR_SIZE
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
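With the relocated definitions above, the top of the kernel address space stacks up, from high to low, as the linear map at PAGE_OFFSET, then VMALLOC, vmemmap, the PCI I/O window and the fixmap. The vmemmap sizing comment works out as: half the virtual address space is 2^(VA_BITS-1) bytes, i.e. 2^(VA_BITS-1-PAGE_SHIFT) pages, each needing at most 2^STRUCT_PAGE_MAX_SHIFT bytes of struct page. A worked sketch under assumed Sv39 defaults (PAGE_OFFSET = 0xffffffe000000000, 128 GiB KERN_VIRT_SIZE, CONFIG_VA_BITS = 39, 4 KiB pages, 64-byte struct page, 2 MiB PMD_SIZE); these constants are assumptions for illustration, not taken from the headers:

```c
#include <inttypes.h>
#include <stdio.h>

/* Assumed Sv39 parameters; adjust for other configurations. */
#define PAGE_OFFSET           0xffffffe000000000ULL
#define KERN_VIRT_SIZE        (1ULL << 37)	/* 128 GiB */
#define CONFIG_VA_BITS        39
#define PAGE_SHIFT            12
#define STRUCT_PAGE_MAX_SHIFT 6		/* 64-byte struct page */
#define PCI_IO_SIZE           (16ULL << 20)	/* SZ_16M */
#define PMD_SIZE              (1ULL << 21)	/* CONFIG_64BIT fixmap size */

int main(void)
{
	uint64_t vmalloc_size  = KERN_VIRT_SIZE >> 1;			/* 64 GiB */
	uint64_t vmalloc_start = PAGE_OFFSET - vmalloc_size;
	int      vmemmap_shift = CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT;
	uint64_t vmemmap_size  = 1ULL << vmemmap_shift;			/* 2^32 = 4 GiB */
	uint64_t vmemmap_start = vmalloc_start - vmemmap_size;
	uint64_t pci_io_start  = vmemmap_start - PCI_IO_SIZE;
	uint64_t fixaddr_start = pci_io_start - PMD_SIZE;

	printf("VMALLOC_START = 0x%016" PRIx64 "\n", vmalloc_start);	/* 0xffffffd000000000 */
	printf("VMEMMAP_START = 0x%016" PRIx64 "\n", vmemmap_start);	/* 0xffffffcf00000000 */
	printf("PCI_IO_START  = 0x%016" PRIx64 "\n", pci_io_start);	/* 0xffffffceff000000 */
	printf("FIXADDR_START = 0x%016" PRIx64 "\n", fixaddr_start);	/* 0xffffffcefee00000 */
	return 0;
}
```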
 /*
  * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
  * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
  */
 #ifdef CONFIG_64BIT
 #define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
 #else
 #define TASK_SIZE FIXADDR_START
 #endif

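A quick check of the RV64 figure in the comment, again assuming Sv39 defaults (4 KiB pages, 8-byte pgd_t, PGDIR_SHIFT = 30), which give PTRS_PER_PGD = 512 and a 1 GiB PGDIR_SIZE:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long pgdir_size   = 1ULL << 30;	/* 1 GiB mapped per PGD entry (assumed Sv39) */
	unsigned long long ptrs_per_pgd = 4096 / 8;	/* PAGE_SIZE / sizeof(pgd_t) = 512 */
	unsigned long long task_size    = pgdir_size * ptrs_per_pgd / 2;

	/* 0x4000000000 (256 GiB), matching the comment; PGDIR_SIZE divides it evenly. */
	printf("TASK_SIZE = 0x%llx\n", task_size);
	return 0;
}
```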
+#else /* CONFIG_MMU */
+
+#define PAGE_KERNEL __pgprot(0)
+#define swapper_pg_dir NULL
+#define VMALLOC_START 0
+
+#define TASK_SIZE 0xffffffffUL
+
+#endif /* !CONFIG_MMU */
+
+#define kern_addr_valid(addr) (1) /* FIXME */
+
+extern void *dtb_early_va;
+void setup_bootmem(void);
+void paging_init(void);
+
+#define FIRST_USER_ADDRESS 0
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
 #include <asm-generic/pgtable.h>

 #endif /* !__ASSEMBLY__ */

 #endif /* _ASM_RISCV_PGTABLE_H */