--- ioremap.c (072f58c6ce29cf6cf429480fcd1b1e87d1d5ed18)
+++ ioremap.c (0e4c12b45aa88e74fdda117896d2b61c4e510cb9)
 /*
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
  *
  * (C) Copyright 1995 1996 Linus Torvalds
  */

[... 13 unchanged lines hidden ...]

 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 #include <asm/pat.h>
 #include <asm/setup.h>

 #include "physaddr.h"

+struct ioremap_mem_flags {
+        bool system_ram;
+        bool desc_other;
+};
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        enum page_cache_mode pcm)
 {
         unsigned long nrpages = size >> PAGE_SHIFT;

[... 13 unchanged lines hidden ...]

         case _PAGE_CACHE_MODE_WB:
                 err = _set_memory_wb(vaddr, nrpages);
                 break;
         }

         return err;
 }

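As a quick orientation aside (not part of this diff): a caller that needs the kernel's linear direct mapping of some pages to carry a different cache attribute passes the direct-map virtual address and the desired page_cache_mode. The sketch below is hypothetical; `phys` and `size` are made-up variables and the chosen cache mode is only an example.

```c
/*
 * Hypothetical call-site sketch for ioremap_change_attr(): make the
 * direct mapping of a page-aligned physical range write-combining so it
 * does not conflict with a WC I/O mapping of the same pages.
 * 'phys' and 'size' are assumptions, not values from this diff.
 */
unsigned long vaddr = (unsigned long)__va(phys);

if (ioremap_change_attr(vaddr, size, _PAGE_CACHE_MODE_WC))
        pr_warn("could not update direct-map attributes\n");
```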
-static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
-                               void *arg)
+static bool __ioremap_check_ram(struct resource *res)
 {
+        unsigned long start_pfn, stop_pfn;
         unsigned long i;

-        for (i = 0; i < nr_pages; ++i)
-                if (pfn_valid(start_pfn + i) &&
-                    !PageReserved(pfn_to_page(start_pfn + i)))
-                        return 1;
+        if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
+                return false;

-        return 0;
+        start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        stop_pfn = (res->end + 1) >> PAGE_SHIFT;
+        if (stop_pfn > start_pfn) {
+                for (i = 0; i < (stop_pfn - start_pfn); ++i)
+                        if (pfn_valid(start_pfn + i) &&
+                            !PageReserved(pfn_to_page(start_pfn + i)))
+                                return true;
+        }
+
+        return false;
 }

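The reworked helper only looks at page frames that lie entirely inside the resource: the start address is rounded up to a page boundary and `end + 1` is rounded down before the loop runs. A stand-alone sketch with made-up numbers (ordinary user-space C, not kernel code) makes the rounding concrete:

```c
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

int main(void)
{
        unsigned long start = 0x1800;   /* resource start, mid-page */
        unsigned long end   = 0x3fff;   /* resource end, inclusive  */

        /* Same rounding as __ioremap_check_ram() above. */
        unsigned long start_pfn = (start + PAGE_SIZE - 1) >> PAGE_SHIFT; /* 2 */
        unsigned long stop_pfn  = (end + 1) >> PAGE_SHIFT;               /* 4 */

        /* PFNs 2 and 3 are fully inside [0x1800, 0x3fff]; PFN 1 is not. */
        printf("checking PFNs %lu..%lu\n", start_pfn, stop_pfn - 1);
        return 0;
}
```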
+static int __ioremap_check_desc_other(struct resource *res)
+{
+        return (res->desc != IORES_DESC_NONE);
+}
+
+static int __ioremap_res_check(struct resource *res, void *arg)
+{
+        struct ioremap_mem_flags *flags = arg;
+
+        if (!flags->system_ram)
+                flags->system_ram = __ioremap_check_ram(res);
+
+        if (!flags->desc_other)
+                flags->desc_other = __ioremap_check_desc_other(res);
+
+        return flags->system_ram && flags->desc_other;
+}
+
 /*
+ * To avoid multiple resource walks, this function walks resources marked as
+ * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
+ * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ */
+static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
+                                struct ioremap_mem_flags *flags)
+{
+        u64 start, end;
+
+        start = (u64)addr;
+        end = start + size - 1;
+        memset(flags, 0, sizeof(*flags));
+
+        walk_mem_res(start, end, flags, __ioremap_res_check);
+}
+
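One walk over the busy IORESOURCE_MEM resources now answers both questions the rest of the diff cares about, and, as far as I can tell, these resource walkers stop early once the callback returns non-zero, i.e. once both flags are set. The helper below is only a condensed consumption sketch under a hypothetical name; it mirrors the __ioremap_caller() changes shown further down rather than adding anything new.

```c
/*
 * Condensed consumption sketch (mirrors the __ioremap_caller() changes
 * later in this diff). 'example_prot_for()' is a hypothetical name used
 * only for illustration here.
 */
static pgprot_t example_prot_for(resource_size_t phys_addr, unsigned long size,
                                 bool *is_ram)
{
        struct ioremap_mem_flags mem_flags;
        pgprot_t prot = PAGE_KERNEL_IO;

        __ioremap_check_mem(phys_addr, size, &mem_flags);

        *is_ram = mem_flags.system_ram;         /* caller refuses RAM mappings */

        if (sev_active() && mem_flags.desc_other)       /* e.g. ACPI tables under SEV */
                prot = pgprot_encrypted(prot);

        return prot;
}
```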
+/*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. It transparently creates kernel huge I/O mapping when
  * the physical address is aligned by a huge page size (1GB or 2MB) and
  * the requested size is at least the huge page size.
  *
  * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
  * Therefore, the mapping code falls back to use a smaller page toward 4KB
  * when a mapping range is covered by non-WB type of MTRRs.
  *
  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                 unsigned long size, enum page_cache_mode pcm, void *caller)
 {
         unsigned long offset, vaddr;
-        resource_size_t pfn, last_pfn, last_addr;
+        resource_size_t last_addr;
         const resource_size_t unaligned_phys_addr = phys_addr;
         const unsigned long unaligned_size = size;
+        struct ioremap_mem_flags mem_flags;
         struct vm_struct *area;
         enum page_cache_mode new_pcm;
         pgprot_t prot;
         int retval;
         void __iomem *ret_addr;

         /* Don't allow wraparound or zero size */
         last_addr = phys_addr + size - 1;
         if (!size || last_addr < phys_addr)
                 return NULL;

         if (!phys_addr_valid(phys_addr)) {
                 printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                         (unsigned long long)phys_addr);
                 WARN_ON_ONCE(1);
                 return NULL;
         }

+        __ioremap_check_mem(phys_addr, size, &mem_flags);
+
         /*
          * Don't allow anybody to remap normal RAM that we're using..
          */
-        pfn = phys_addr >> PAGE_SHIFT;
-        last_pfn = last_addr >> PAGE_SHIFT;
-        if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-                                  __ioremap_check_ram) == 1) {
+        if (mem_flags.system_ram) {
                 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                           &phys_addr, &last_addr);
                 return NULL;
         }

         /*
          * Mappings have to be page-aligned
          */

[... 15 unchanged lines hidden ...]

                                 (unsigned long long)phys_addr,
                                 (unsigned long long)(phys_addr + size),
                                 pcm, new_pcm);
                         goto err_free_memtype;
                 }
                 pcm = new_pcm;
         }

+        /*
+         * If the page being mapped is in memory and SEV is active then
+         * make sure the memory encryption attribute is enabled in the
+         * resulting mapping.
+         */
         prot = PAGE_KERNEL_IO;
+        if (sev_active() && mem_flags.desc_other)
+                prot = pgprot_encrypted(prot);
+
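For context (an explanatory aside, not part of the diff): with SEV active the guest's RAM is encrypted, so mappings of memory-backed resources such as ACPI tables need the encryption attribute, while ordinary MMIO stays unencrypted. Conceptually, pgprot_encrypted() folds the memory-encryption mask (the AMD C-bit, exposed as sme_me_mask) into the protection value; the sketch below is a simplified illustration of that idea, not the kernel's literal definition.

```c
/*
 * Simplified illustration of the "encrypted" page protection on AMD
 * SME/SEV systems: OR the encryption mask (C-bit) into the protection
 * bits. An approximation, not the kernel's exact macro.
 */
static inline pgprot_t example_pgprot_encrypted(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) | sme_me_mask);
}
```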
         switch (pcm) {
         case _PAGE_CACHE_MODE_UC:
         default:
                 prot = __pgprot(pgprot_val(prot) |
                                 cachemode2protval(_PAGE_CACHE_MODE_UC));
                 break;
         case _PAGE_CACHE_MODE_UC_MINUS:
                 prot = __pgprot(pgprot_val(prot) |

[... 612 unchanged lines hidden ...]
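Finally, none of this changes the driver-facing interface: drivers keep calling ioremap() and friends, which funnel into __ioremap_caller() with the corresponding page_cache_mode. Below is a hedged, self-contained sketch of that usage; the device base address, size and register offset are all made up.

```c
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

#define EXAMPLE_MMIO_BASE       0xfed00000UL    /* made-up device base     */
#define EXAMPLE_MMIO_SIZE       0x1000          /* made-up region size     */
#define EXAMPLE_STATUS_REG      0x04            /* made-up register offset */

static int example_probe(void)
{
        void __iomem *regs;
        u32 status;

        regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);   /* uncached */
        if (!regs)
                return -ENOMEM;

        status = readl(regs + EXAMPLE_STATUS_REG);
        pr_info("example device status: %#x\n", status);

        iounmap(regs);
        return 0;
}
```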