/linux/arch/sparc/mm/init_32.c
   64: unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;  in calc_highpages()
   65: unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;  in calc_highpages()
   82: unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);  in calc_max_low_pfn()
   85: last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;  in calc_max_low_pfn()
   87: curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;  in calc_max_low_pfn()
   95: last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;  in calc_max_low_pfn()
  174: start_pfn >>= PAGE_SHIFT;  in bootmem_init()
  176: max_pfn = end_of_phys_memory >> PAGE_SHIFT;  in bootmem_init()
  181: if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {  in bootmem_init()
  182: highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);  in bootmem_init()
  [all …]
/linux/arch/alpha/include/asm/pgtable.h
   31: #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
   36: #define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
   44: #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
   45: #define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
   46: #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
   50: #define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
  155: ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
  178: #define KSEG_PFN (0xc0000000000UL >> PAGE_SHIFT)
  180: ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  190: #define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
  [all …]
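Every level of the alpha page-table geometry above falls out of PAGE_SHIFT alone: a page of 8-byte entries holds 2^(PAGE_SHIFT-3) pointers, so each level resolves PAGE_SHIFT-3 further address bits. A minimal sketch of that arithmetic, assuming alpha's 8 KB pages (PAGE_SHIFT = 13); the macros are redefined locally for illustration:

    #include <assert.h>
    #include <stdio.h>

    /* Assumption: 8 KB pages and 8-byte (2^3) page-table entries, as on alpha. */
    #define PAGE_SHIFT  13
    #define PMD_SHIFT   (PAGE_SHIFT + (PAGE_SHIFT - 3))      /* 13 + 10 = 23 */
    #define PGDIR_SHIFT (PAGE_SHIFT + 2 * (PAGE_SHIFT - 3))  /* 13 + 20 = 33 */
    #define PTRS_PER_PTE (1UL << (PAGE_SHIFT - 3))           /* 1024 entries per page */

    int main(void)
    {
            assert(PMD_SHIFT == 23 && PGDIR_SHIFT == 33 && PTRS_PER_PTE == 1024);
            /* Three 10-bit levels plus a 13-bit page offset: 43 VA bits total. */
            printf("VA bits: %d\n", PGDIR_SHIFT + (PAGE_SHIFT - 3));
            return 0;
    }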
/linux/arch/x86/mm/init.c
  133: order = get_order((unsigned long)num << PAGE_SHIFT);  in alloc_low_pages()
  143: min_pfn_mapped << PAGE_SHIFT,  in alloc_low_pages()
  144: max_pfn_mapped << PAGE_SHIFT);  in alloc_low_pages()
  152: pfn = ret >> PAGE_SHIFT;  in alloc_low_pages()
  161: adr = __va((pfn + i) << PAGE_SHIFT);  in alloc_low_pages()
  165: return __va(pfn << PAGE_SHIFT);  in alloc_low_pages()
  197: pgt_buf_start = base >> PAGE_SHIFT;  in early_alloc_pgt_buf()
  199: pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);  in early_alloc_pgt_buf()
  336: mr[nr_range].start = start_pfn<<PAGE_SHIFT;  in save_mr()
  337: mr[nr_range].end = end_pfn<<PAGE_SHIFT;  in save_mr()
  [all …]
/linux/arch/x86/mm/init_32.c
  266: start_pfn = start >> PAGE_SHIFT;  in kernel_physical_mapping_init()
  267: end_pfn = end >> PAGE_SHIFT;  in kernel_physical_mapping_init()
  291: pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);  in kernel_physical_mapping_init()
  299: pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);  in kernel_physical_mapping_init()
  323: pfn &= PMD_MASK >> PAGE_SHIFT;  in kernel_physical_mapping_init()
  342: pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);  in kernel_physical_mapping_init()
  359: last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;  in kernel_physical_mapping_init()
  453: for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {  in native_pagetable_init()
  454: va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);  in native_pagetable_init()
  539: highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;  in parse_highmem()
  [all …]
/linux/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
   73: min_page_size = bo->page_alignment << PAGE_SHIFT;  in i915_ttm_buddy_man_alloc()
   78: if (size > lpfn << PAGE_SHIFT) {  in i915_ttm_buddy_man_alloc()
   92: err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,  in i915_ttm_buddy_man_alloc()
   93: (u64)lpfn << PAGE_SHIFT,  in i915_ttm_buddy_man_alloc()
   94: (u64)n_pages << PAGE_SHIFT,  in i915_ttm_buddy_man_alloc()
  108: drm_buddy_block_offset(block) >> PAGE_SHIFT;  in i915_ttm_buddy_man_alloc()
  112: (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);  in i915_ttm_buddy_man_alloc()
  179: drm_buddy_block_offset(block) >> PAGE_SHIFT;  in i915_ttm_buddy_man_intersects()
  181: (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);  in i915_ttm_buddy_man_intersects()
  212: drm_buddy_block_offset(block) >> PAGE_SHIFT;  in i915_ttm_buddy_man_compatible()
  [all …]
/linux/drivers/gpu/drm/i915/i915_mm.c
   46: return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;  in sgt_pfn()
   48: return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);  in sgt_pfn()
  111: zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);  in remap_io_mapping()
  146: while (offset >= r.sgt.max >> PAGE_SHIFT) {  in remap_io_sg()
  147: offset -= r.sgt.max >> PAGE_SHIFT;  in remap_io_sg()
  152: r.sgt.curr = offset << PAGE_SHIFT;  in remap_io_sg()
  159: zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);  in remap_io_sg()
/linux/arch/arc/include/asm/highmem.h
   24: #define FIXADDR_TOP (FIXMAP_BASE + (FIX_KMAP_END << PAGE_SHIFT))
   30: #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
   31: #define __virt_to_fix(x) (((FIXADDR_TOP - ((x) & PAGE_MASK))) >> PAGE_SHIFT)
   35: #define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
   37: #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
   38: #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
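The fixmap and pkmap macros above share one pattern: slot index to virtual address is a left shift by PAGE_SHIFT plus a fixed base, and the inverse is a subtraction and a right shift. A sketch of the round trip, with PKMAP_BASE and PKMAP_SIZE picked arbitrarily rather than taken from the ARC headers:

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PKMAP_BASE 0xfe000000UL        /* illustrative base, not ARC's value */
    #define PKMAP_SIZE (2UL << 20)         /* assumed 2 MB window => 512 slots */
    #define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
    #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))
    #define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)

    int main(void)
    {
            /* The two shifts invert each other for every valid slot. */
            for (unsigned long nr = 0; nr < LAST_PKMAP; nr++)
                    assert(PKMAP_NR(PKMAP_ADDR(nr)) == nr);
            return 0;
    }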
/linux/arch/arm/mm/tlb-v7.S
   39: mov r0, r0, lsr #PAGE_SHIFT  @ align address
   40: mov r1, r1, lsr #PAGE_SHIFT
   46: orr r0, r3, r0, lsl #PAGE_SHIFT  @ Create initial MVA
   47: mov r1, r1, lsl #PAGE_SHIFT
   73: mov r0, r0, lsr #PAGE_SHIFT  @ align address
   74: mov r1, r1, lsr #PAGE_SHIFT
   75: mov r0, r0, lsl #PAGE_SHIFT
   76: mov r1, r1, lsl #PAGE_SHIFT
/linux/arch/arm/mm/tlb-v6.S
   41: mov r0, r0, lsr #PAGE_SHIFT  @ align address
   42: mov r1, r1, lsr #PAGE_SHIFT
   44: orr r0, r3, r0, lsl #PAGE_SHIFT  @ Create initial MVA
   45: mov r1, r1, lsl #PAGE_SHIFT
   73: mov r0, r0, lsr #PAGE_SHIFT  @ align address
   74: mov r1, r1, lsr #PAGE_SHIFT
   75: mov r0, r0, lsl #PAGE_SHIFT
   76: mov r1, r1, lsl #PAGE_SHIFT
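In both TLB flush routines, the lsr/lsl pair is plain page alignment: shifting an address right then left by PAGE_SHIFT clears the offset bits, exactly what masking with PAGE_MASK does in C. A one-function sketch, assuming 4 KB pages:

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* (addr >> PAGE_SHIFT) << PAGE_SHIFT is identical to addr & PAGE_MASK. */
    static unsigned long page_align_down(unsigned long addr)
    {
            return (addr >> PAGE_SHIFT) << PAGE_SHIFT;
    }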
/linux/arch/arm/mm/mmap.c
   18: (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
   52: (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))  in arch_get_unmapped_area()
   76: info.align_offset = pgoff << PAGE_SHIFT;  in arch_get_unmapped_area()
  105: (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))  in arch_get_unmapped_area_topdown()
  127: info.align_offset = pgoff << PAGE_SHIFT;  in arch_get_unmapped_area_topdown()
  166: return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));  in valid_mmap_phys_addr_range()
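The repeated (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1) test rejects a MAP_SHARED address hint whose virtual address would land on a different cache colour than the file offset, which ARM's VIPT caches cannot tolerate for shared mappings. A hedged sketch of the check, with SHMLBA assumed to be four pages:

    #include <stdbool.h>

    #define PAGE_SHIFT 12
    #define SHMLBA     (4UL << PAGE_SHIFT)  /* assumption: 4-page colouring, as on ARM */

    /* A fixed MAP_SHARED address is usable only if the address and the file
     * offset agree modulo SHMLBA, i.e. share a cache colour. */
    static bool colour_ok(unsigned long addr, unsigned long pgoff)
    {
            return ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0;
    }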
/linux/arch/powerpc/mm/book3s64/iommu_api.c
   80: mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));  in mm_iommu_do_alloc()
   91: mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));  in mm_iommu_do_alloc()
  100: chunk = (1UL << (PAGE_SHIFT + MAX_PAGE_ORDER)) /  in mm_iommu_do_alloc()
  106: ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,  in mm_iommu_do_alloc()
  135: if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&  in mm_iommu_do_alloc()
  137: (mem2->entries << PAGE_SHIFT)))) {  in mm_iommu_do_alloc()
  150: pageshift = PAGE_SHIFT;  in mm_iommu_do_alloc()
  154: if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))  in mm_iommu_do_alloc()
  161: mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;  in mm_iommu_do_alloc()
  214: page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);  in mm_iommu_unpin()
  [all …]
/linux/arch/x86/kernel/cpu/mtrr/if.c
   54: base >>= PAGE_SHIFT;  in mtrr_file_add()
   55: size >>= PAGE_SHIFT;  in mtrr_file_add()
   73: base >>= PAGE_SHIFT;  in mtrr_file_del()
   74: size >>= PAGE_SHIFT;  in mtrr_file_del()
  146: base >>= PAGE_SHIFT;  in mtrr_write()
  147: size >>= PAGE_SHIFT;  in mtrr_write()
  257: if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))  in mtrr_ioctl()
  258: || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))  in mtrr_ioctl()
  261: gentry.base = base << PAGE_SHIFT;  in mtrr_ioctl()
  262: gentry.size = size << PAGE_SHIFT;  in mtrr_ioctl()
  [all …]
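The bound at lines 257-258 guards the conversion back to bytes: base and size are kept internally as page counts, so before shifting them left by PAGE_SHIFT for the user-visible gentry the code must check that the resulting byte values still fit the field width. A sketch of the same guard, assuming the size field is 32 bits wide:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Assumption: the user-visible size field is a u32.  limit is the
     * largest page count whose byte equivalent still fits in it. */
    static bool fits_in_u32_bytes(unsigned long base, unsigned long size)
    {
            const unsigned long limit = 1UL << (8 * sizeof(uint32_t) - PAGE_SHIFT);

            return base + size - 1 < limit && size < limit;
    }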
/linux/arch/x86/kernel/cpu/mtrr/centaur.c
   52: *base = centaur_mcr[reg].high >> PAGE_SHIFT;  in centaur_get_mcr()
   53: *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;  in centaur_get_mcr()
   74: high = base << PAGE_SHIFT;  in centaur_set_mcr()
   77: low = -size << PAGE_SHIFT | 0x1f;  in centaur_set_mcr()
   80: low = -size << PAGE_SHIFT | 0x02;  /* NC */  in centaur_set_mcr()
   82: low = -size << PAGE_SHIFT | 0x09;  /* WWO, WC */  in centaur_set_mcr()
/linux/arch/x86/kernel/cpu/mtrr/amd.c
   20: *base = (low & 0xFFFE0000) >> PAGE_SHIFT;  in amd_get_mtrr()
   46: *size = (low + 4) << (15 - PAGE_SHIFT);  in amd_get_mtrr()
   83: regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)  in amd_set_mtrr()
   84: | (base << PAGE_SHIFT) | (type + 1);  in amd_set_mtrr()
  106: if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))  in amd_validate_add_page()
/linux/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
   61: if (!lpfn || lpfn > man->size >> PAGE_SHIFT)  in xe_ttm_vram_mgr_new()
   62: lpfn = man->size >> PAGE_SHIFT;  in xe_ttm_vram_mgr_new()
   64: if (tbo->base.size >> PAGE_SHIFT > (lpfn - place->fpfn))  in xe_ttm_vram_mgr_new()
   84: if (place->fpfn || lpfn != man->size >> PAGE_SHIFT)  in xe_ttm_vram_mgr_new()
   95: min_page_size = (u64)tbo->page_alignment << PAGE_SHIFT;  in xe_ttm_vram_mgr_new()
  108: if (lpfn <= mgr->visible_size >> PAGE_SHIFT && size > mgr->visible_avail) {  in xe_ttm_vram_mgr_new()
  113: if (place->fpfn + (size >> PAGE_SHIFT) != lpfn &&  in xe_ttm_vram_mgr_new()
  118: lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn);  in xe_ttm_vram_mgr_new()
  121: err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,  in xe_ttm_vram_mgr_new()
  122: (u64)lpfn << PAGE_SHIFT, size,  in xe_ttm_vram_mgr_new()
  [all …]
/linux/arch/nios2/mm/tlb.c
   23: << PAGE_SHIFT)
   38: return ((addr | 0xC0000000UL) >> PAGE_SHIFT) << 2;  in pteaddr_invalid()
   53: WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2);  in replace_tlb_one_pid()
   64: if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT))  in replace_tlb_one_pid()
  134: WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2);  in flush_tlb_one()
  144: if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT))  in flush_tlb_one()
  173: line << (PAGE_SHIFT + cpuinfo.tlb_num_ways_log2));  in dump_tlb_line()
  190: if ((tlbacc << PAGE_SHIFT) != 0) {  in dump_tlb_line()
  193: (pteaddr << (PAGE_SHIFT-2)),  in dump_tlb_line()
  194: (tlbacc << PAGE_SHIFT),  in dump_tlb_line()
/linux/arch/x86/include/asm/pgalloc.h
   67: paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);  in pmd_populate_kernel()
   74: paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);  in pmd_populate_kernel_safe()
   84: set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));  in pmd_populate()
  101: paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);  in pud_populate()
  107: paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);  in pud_populate_safe()
  115: paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);  in p4d_populate()
  121: paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);  in p4d_populate_safe()
  138: paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);  in pgd_populate()
  146: paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);  in pgd_populate_safe()
/linux/lib/test_hmm.c
  212: for (pfn = (range->start >> PAGE_SHIFT);  in dmirror_do_fault()
  213: pfn < (range->end >> PAGE_SHIFT);  in dmirror_do_fault()
  252: xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,  in dmirror_do_update()
  253: end >> PAGE_SHIFT)  in dmirror_do_update()
  349: range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);  in dmirror_fault()
  368: for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {  in dmirror_do_read()
  390: unsigned long size = cmd->npages << PAGE_SHIFT;  in dmirror_read()
  409: start = cmd->addr + (bounce.cpages << PAGE_SHIFT);  in dmirror_read()
  434: for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {  in dmirror_do_write()
  456: unsigned long size = cmd->npages << PAGE_SHIFT;  in dmirror_write()
  [all …]
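dmirror_do_read() and dmirror_do_write() both walk a half-open byte range [start, end) as page frame numbers using the same two shifts. A minimal sketch of that loop shape, assuming page-aligned bounds:

    #define PAGE_SHIFT 12

    /* Visit each page frame covering the page-aligned range [start, end). */
    static void for_each_pfn(unsigned long start, unsigned long end,
                             void (*fn)(unsigned long pfn))
    {
            for (unsigned long pfn = start >> PAGE_SHIFT;
                 pfn < (end >> PAGE_SHIFT); pfn++)
                    fn(pfn);
    }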
/linux/include/drm/drm_vma_manager.h
   37: #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
   38: #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256)
   40: #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
   41: #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
  205: return ((__u64)node->vm_node.start) << PAGE_SHIFT;  in drm_vma_node_offset_addr()
  226: drm_vma_node_size(node) << PAGE_SHIFT, 1);  in drm_vma_node_unmap()
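drm_vma_node_offset_addr() turns a node's page-granular start into the byte offset userspace hands to mmap(); the __u64 cast is load-bearing, since on 32-bit a plain unsigned long shift could overflow. A sketch with a stand-in struct carrying only the field the math touches (the real struct drm_vma_offset_node holds more):

    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Stand-in for struct drm_vma_offset_node: just the start field. */
    struct fake_vma_node {
            unsigned long start;   /* node offset, in pages */
    };

    /* Widen before shifting so the byte offset survives on 32-bit. */
    static uint64_t node_offset_addr(const struct fake_vma_node *node)
    {
            return (uint64_t)node->start << PAGE_SHIFT;
    }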
/linux/mm/hmm.c
   45: unsigned long i = (addr - range->start) >> PAGE_SHIFT;  in hmm_pfns_fill()
  156: i = (addr - range->start) >> PAGE_SHIFT;  in hmm_vma_walk_hole()
  157: npages = (end - addr) >> PAGE_SHIFT;  in hmm_vma_walk_hole()
  183: hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);  in pmd_to_hmm_pfn_flags()
  197: npages = (end - addr) >> PAGE_SHIFT;  in hmm_vma_handle_pmd()
  204: pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);  in hmm_vma_handle_pmd()
  328: &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];  in hmm_vma_walk_pmd()
  329: unsigned long npages = (end - start) >> PAGE_SHIFT;  in hmm_vma_walk_pmd()
  408: hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);  in pud_to_hmm_pfn_flags()
  438: i = (addr - range->start) >> PAGE_SHIFT;  in hmm_vma_walk_pud()
  [all …]
/linux/tools/include/linux/pfn.h
    7: #define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    8: #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    9: #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
   10: #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
/linux/arch/openrisc/include/asm/pgtable.h
   56: #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
   66: #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
  198: ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
  332: #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
  336: #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
  349: #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
  350: #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
  360: #define PFN_PTE_SHIFT PAGE_SHIFT
  361: #define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT)
  362: #define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
/linux/include/asm-generic/getorder.h
   33: return BITS_PER_LONG - PAGE_SHIFT;  in get_order()
   35: if (size < (1UL << PAGE_SHIFT))  in get_order()
   38: return ilog2((size) - 1) - PAGE_SHIFT + 1;  in get_order()
   42: size >>= PAGE_SHIFT;  in get_order()
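The runtime branch of get_order() computes ceil(log2(size in pages)): subtract one, shift out the page offset, then take the highest remaining bit position. A userspace sketch of the same arithmetic, assuming 4 KB pages and the kernel's fls() semantics (fls(0) == 0):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* fls(): 1-based index of the highest set bit; fls(0) == 0. */
    static int fls_long(unsigned long x)
    {
            return x ? 8 * (int)sizeof(x) - __builtin_clzl(x) : 0;
    }

    /* Smallest order such that (1 << order) pages cover size bytes. */
    static int my_get_order(unsigned long size)
    {
            size--;
            size >>= PAGE_SHIFT;
            return fls_long(size);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   my_get_order(1),      /* 0: fits in one page */
                   my_get_order(4096),   /* 0: exactly one page */
                   my_get_order(4097));  /* 1: spills into a second page */
            return 0;
    }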
/linux/include/linux/pfn.h
   19: #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
   20: #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
   21: #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
   22: #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
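These four macros (duplicated for tools/ above) are the canonical byte/frame conversions: PFN_UP rounds up so a partial tail page still counts, PFN_DOWN truncates, and PFN_PHYS/PHYS_PFN cast before shifting so high physical-address bits are not lost. A quick self-contained check, assuming 4 KB pages:

    #include <assert.h>

    typedef unsigned long long phys_addr_t;

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
    #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

    int main(void)
    {
            /* Byte range [0x1234, 0x5678) touches frames 1 through 5. */
            assert(PFN_DOWN(0x1234UL) == 1);  /* truncate to containing frame */
            assert(PFN_UP(0x5678UL) == 6);    /* round up past the last byte */
            assert(PFN_PHYS(1UL) == 0x1000);  /* frame 1 starts at 4 KB */
            assert(PHYS_PFN((phys_addr_t)0x1fffULL) == 1);
            return 0;
    }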
/linux/arch/csky/kernel/vdso.c
   21: vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;  in vdso_init()
   32: pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));  in vdso_init()
   51: vdso_len = vdso_pages << PAGE_SHIFT;  in arch_setup_additional_pages()
   69: _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,  in arch_setup_additional_pages()
   79: vdso_base += (vdso_pages << PAGE_SHIFT);  in arch_setup_additional_pages()