/linux/drivers/gpu/drm/v3d/v3d_mmu.c
    32  static bool v3d_mmu_is_aligned(u32 page, u32 page_address, size_t alignment)  in v3d_mmu_is_aligned() argument
    35  IS_ALIGNED(page_address, alignment >> V3D_MMU_PAGE_SHIFT);  in v3d_mmu_is_aligned()
    98  u32 page_address = page_prot | pfn;  in v3d_mmu_insert_ptes() local
   104  v3d_mmu_is_aligned(page, page_address, SZ_1M)) {  in v3d_mmu_insert_ptes()
   106  page_address |= V3D_PTE_SUPERPAGE;  in v3d_mmu_insert_ptes()
   108  v3d_mmu_is_aligned(page, page_address, SZ_64K)) {  in v3d_mmu_insert_ptes()
   110  page_address |= V3D_PTE_BIGPAGE;  in v3d_mmu_insert_ptes()
   116  v3d->pt[page++] = page_address + i;  in v3d_mmu_insert_ptes()
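A note on the v3d matches: here page_address is not the kernel helper but a local u32 PTE value (page_prot | pfn) that shadows it, and the driver only tags a PTE as a 64 KB big page or 1 MB superpage when both the page-table index and that PTE value are aligned to the target size in 4 KB MMU-page units. A minimal sketch of that alignment gate, assuming the driver's 4 KB MMU page (V3D_MMU_PAGE_SHIFT == 12) and an illustrative function name:

    #include <linux/align.h>        /* IS_ALIGNED() */
    #include <linux/types.h>

    #define V3D_MMU_PAGE_SHIFT 12   /* 4 KB MMU pages (driver constant) */

    /* Illustrative re-statement of the check at lines 32-35: a big/super
     * page mapping is only legal when the page-table slot and the PTE
     * value are both aligned to the large-page size, counted in 4 KB pages. */
    static bool mmu_run_is_aligned(u32 page, u32 pte_value, size_t alignment)
    {
            return IS_ALIGNED(page, alignment >> V3D_MMU_PAGE_SHIFT) &&
                   IS_ALIGNED(pte_value, alignment >> V3D_MMU_PAGE_SHIFT);
    }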
|
/linux/crypto/async_tx/raid6test.c
    40  get_random_bytes(page_address(data[i]), PAGE_SIZE);  in makedata()
   131  memset(page_address(recovi), 0xf0, PAGE_SIZE);  in test_disks()
   132  memset(page_address(recovj), 0xba, PAGE_SIZE);  in test_disks()
   139  erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);  in test_disks()
   140  errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);  in test_disks()
   167  memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);  in test()
   168  memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);  in test()
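The raid6 self-test can use page_address() unconditionally because its data pages are ordinary lowmem allocations whose kernel mapping always exists. A minimal sketch of the fill-and-verify pattern above, with illustrative helper names:

    #include <linux/mm.h>
    #include <linux/random.h>
    #include <linux/string.h>

    /* Fill a lowmem test page with random source data (cf. makedata()). */
    static void fill_test_page(struct page *page)
    {
            get_random_bytes(page_address(page), PAGE_SIZE);
    }

    /* Compare recovered data against the original; 0 means identical
     * (cf. the memcmp() calls in test_disks()). */
    static int check_recovered_page(struct page *orig, struct page *recov)
    {
            return memcmp(page_address(orig), page_address(recov), PAGE_SIZE);
    }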
|
/linux/crypto/async_tx/async_raid6_recov.c
    73  a = page_address(srcs[0]) + src_offs[0];  in async_sum_product()
    74  b = page_address(srcs[1]) + src_offs[1];  in async_sum_product()
    75  c = page_address(dest) + d_off;  in async_sum_product()
   143  d = page_address(dest) + d_off;  in async_mult()
   144  s = page_address(src) + s_off;  in async_mult()
   419  ptrs[i] = page_address(blocks[i]) + offs[i];  in async_raid6_2data_recov()
   502  ptrs[i] = page_address(blocks[i]) + offs[i];  in async_raid6_datap_recov()
|
/linux/crypto/async_tx/async_pq.c
   124  srcs[i] = page_address(blocks[i]) + offsets[i];  in do_sync_gen_syndrome()
   406  p = page_address(p_src) + p_off;  in async_syndrome_val()
   407  s = page_address(spare) + s_off;  in async_syndrome_val()
   419  q = page_address(q_src) + q_off;  in async_syndrome_val()
   420  s = page_address(spare) + s_off;  in async_syndrome_val()
|
/linux/include/linux/highmem-internal.h
    46  addr = page_address(page);  in kmap()
    79  return page_address(page);  in kmap_local_page_try_from_panic()
   173  return page_address(page);  in kmap()
   182  kunmap_flush_on_unmap(page_address(page));  in kunmap()
   188  return page_address(page);  in kmap_local_page()
   193  return page_address(page);  in kmap_local_page_try_from_panic()
   225  return page_address(page);  in kmap_atomic()
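These branches are the crux of the highmem API: for lowmem pages, and on kernels built without CONFIG_HIGHMEM at all, kmap(), kmap_local_page(), and kmap_atomic() all collapse to a plain page_address() call. The portable pattern is therefore to go through kmap_local_page() and let it degrade to address arithmetic when no temporary mapping is needed; a minimal sketch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Works for lowmem and highmem pages alike; on !CONFIG_HIGHMEM this
     * is effectively memset(page_address(page), 0, PAGE_SIZE). */
    static void zero_page_contents(struct page *page)
    {
            void *addr = kmap_local_page(page);

            memset(addr, 0, PAGE_SIZE);
            kunmap_local(addr);
    }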
|
/linux/arch/arm64/mm/copypage.c
    19  void *kto = page_address(to);  in copy_highpage()
    20  void *kfrom = page_address(from);  in copy_highpage()
    48  kfrom = page_address(folio_page(src, i));  in copy_highpage()
    49  kto = page_address(folio_page(dst, i));  in copy_highpage()
|
/linux/mm/kmsan/shadow.c
    28  return page_address(shadow_page_for(page));  in shadow_ptr_for()
    33  return page_address(origin_page_for(page));  in origin_ptr_for()
   158  kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,  in kmsan_copy_page_meta()
   184  __memset(page_address(shadow), 0, PAGE_SIZE * pages);  in kmsan_alloc_page()
   185  __memset(page_address(origin), 0, PAGE_SIZE * pages);  in kmsan_alloc_page()
   193  __memset(page_address(shadow), -1, PAGE_SIZE * pages);  in kmsan_alloc_page()
   202  ((depot_stack_handle_t *)page_address(origin))[i] = handle;  in kmsan_alloc_page()
   210  kmsan_internal_poison_memory(page_address(page), page_size(page),  in kmsan_free_page()
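KMSAN pairs every data page with shadow and origin metadata pages, and the helpers above turn those into writable kernel pointers. A hedged sketch of the zero-the-metadata step from kmsan_alloc_page(), reusing the file's own shadow_page_for() lookup (assumed visible here) and plain memset() in place of the internal __memset():

    #include <linux/mm.h>
    #include <linux/string.h>

    /* Zeroed shadow marks the page's contents as fully initialized. */
    static void mark_page_initialized(struct page *page)
    {
            memset(page_address(shadow_page_for(page)), 0, PAGE_SIZE);
    }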
|
/linux/security/selinux/status.c
    52  status = page_address(selinux_state.status_page);  in selinux_kernel_status_page()
    85  status = page_address(selinux_state.status_page);  in selinux_status_update_setenforce()
   110  status = page_address(selinux_state.status_page);  in selinux_status_update_policyload()
|
/linux/net/ceph/cls_lock_client.c
    56  p = page_address(lock_op_page);  in ceph_cls_lock()
   115  p = page_address(unlock_op_page);  in ceph_cls_unlock()
   168  p = page_address(break_op_page);  in ceph_cls_break_lock()
   217  p = page_address(cookie_op_page);  in ceph_cls_set_cookie()
   362  p = page_address(get_info_op_page);  in ceph_cls_lock_info()
   377  p = page_address(reply_page);  in ceph_cls_lock_info()
   415  p = page_address(pages[0]);  in ceph_cls_assert_locked()
|
/linux/arch/x86/kernel/machine_kexec_32.c
   105  control_page = page_address(image->control_code_page);  in machine_kexec_prepare_page_tables()
   139  set_memory_x((unsigned long)page_address(image->control_code_page), 1);  in machine_kexec_prepare()
   153  set_memory_nx((unsigned long)page_address(image->control_code_page), 1);  in machine_kexec_cleanup()
   192  control_page = page_address(image->control_code_page);  in machine_kexec()
|
/linux/arch/x86/kernel/espfix_64.c
   167  pmd_p = (pmd_t *)page_address(page);  in init_espfix_ap()
   179  pte_p = (pte_t *)page_address(page);  in init_espfix_ap()
   187  stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));  in init_espfix_ap()
|
/linux/arch/x86/kernel/machine_kexec_64.c
   232  p = page_address(page);  in alloc_pgt_page()
   345  void *control_page = page_address(image->control_code_page);  in machine_kexec_prepare()
   388  void *control_page = page_address(image->control_code_page);  in machine_kexec_cleanup()
   433  control_page = page_address(image->control_code_page);  in machine_kexec()
|
/linux/mm/highmem.c
   282  if (page_address(page))  in map_new_virtual()
   283  return (unsigned long)page_address(page);  in map_new_virtual()
   316  vaddr = (unsigned long)page_address(page);  in kmap_high()
   342  vaddr = (unsigned long)page_address(page);  in kmap_high_get()
   369  vaddr = (unsigned long)page_address(page);  in kunmap_high()
   585  return page_address(page);  in __kmap_local_page_prot()
   753  void *page_address(const struct page *page)  in page_address() function
   779  EXPORT_SYMBOL(page_address);
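mm/highmem.c holds the out-of-line definition of page_address() used on highmem kernels (line 753); unlike the trivial lowmem version it can return NULL when a highmem page currently has no kernel mapping, which is exactly what map_new_virtual() tests at lines 282-283. A sketch of a caller honoring that contract, with an illustrative function name:

    #include <linux/mm.h>
    #include <linux/string.h>

    /* Copy a page's contents only if a kernel mapping already exists;
     * for an unmapped highmem page the caller must kmap() instead. */
    static bool copy_if_mapped(struct page *page, void *dst)
    {
            void *vaddr = page_address(page); /* NULL if highmem, unmapped */

            if (!vaddr)
                    return false;
            memcpy(dst, vaddr, PAGE_SIZE);
            return true;
    }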
|
/linux/mm/kasan/common.c
   139  kasan_unpoison(set_tag(page_address(page), tag),  in __kasan_unpoison_pages()
   150  kasan_poison(page_address(page), PAGE_SIZE << order,  in __kasan_poison_pages()
   161  kasan_poison(page_address(page), page_size(page),  in __kasan_poison_slab()
   308  if (ptr != page_address(virt_to_head_page(ptr))) {  in check_page_allocation()
   502  ptr = page_address(page);  in __kasan_mempool_poison_pages()
|
/linux/arch/arm64/kernel/mte.c
    50  mte_clear_page_tags(page_address(page));  in mte_sync_tags()
    63  mte_clear_page_tags(page_address(page));  in mte_sync_tags()
    77  addr1 = page_address(page1);  in memcmp_pages()
    78  addr2 = page_address(page2);  in memcmp_pages()
   488  maddr = page_address(page);  in __access_remote_tags()
|
/linux/arch/arm/mm/copypage-v6.c
    80  discard_old_kernel_data(page_address(to));  in v6_copy_user_highpage_aliasing()
   109  discard_old_kernel_data(page_address(page));  in v6_clear_user_highpage_aliasing()
|
/linux/arch/powerpc/mm/dma-noncoherent.c
   102  unsigned long start = (unsigned long)page_address(page) + offset;  in __dma_sync_page()
   121  unsigned long kaddr = (unsigned long)page_address(page);  in arch_dma_prep_coherent()
|
/linux/arch/riscv/kernel/machine_kexec.c
    63  control_code_buffer = page_address(image->control_code_page);  in machine_kexec_prepare()
   157  void *control_code_buffer = page_address(image->control_code_page);  in machine_kexec()
|
/linux/drivers/net/ethernet/google/gve/gve_rx.c
   151  page_info->page_address = page_address(page);  in gve_setup_rx_buffer()
   229  rx->qpl_copy_pool[j].page_address = page_address(page);  in gve_rx_prefill_pages()
   543  void *src = page_info->page_address + page_info->page_offset;  in gve_rx_copy_to_pool()
   577  alloc_page_info.page_address = page_address(page);  in gve_rx_copy_to_pool()
   580  memcpy(alloc_page_info.page_address, src, page_info->pad + len);  in gve_rx_copy_to_pool()
   593  dst = copy_page_info->page_address + copy_page_info->page_offset;  in gve_rx_copy_to_pool()
   843  va = page_info->page_address + page_info->page_offset;  in gve_rx()
   865  xdp_prepare_buff(&xdp, page_info->page_address +  in gve_rx()
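The gve matches show a common NIC idiom: page_address() is resolved once when a receive buffer is posted and cached next to the page in ring metadata (the driver's page_info->page_address field), so the per-packet path does only pointer arithmetic. A simplified sketch with an illustrative slot layout, not the driver's real structures:

    #include <linux/mm.h>
    #include <linux/types.h>

    /* Illustrative ring-slot metadata, not gve's actual struct. */
    struct rx_slot {
            struct page *page;
            void *page_address;     /* cached page_address(page) */
            u32 page_offset;
    };

    static void rx_slot_init(struct rx_slot *slot, struct page *page)
    {
            slot->page = page;
            slot->page_address = page_address(page);   /* resolved once */
            slot->page_offset = 0;
    }

    /* Fast path: no page-to-virtual translation per packet. */
    static void *rx_slot_data(const struct rx_slot *slot)
    {
            return slot->page_address + slot->page_offset;
    }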
|
/linux/kernel/kexec_core.c
   295  arch_kexec_post_alloc_pages(page_address(pages), count,  in kimage_alloc_pages()
   313  arch_kexec_pre_free_pages(page_address(page), count);  in kimage_free_pages()
   458  arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);  in kimage_alloc_crash_control_pages()
   497  ind_page = page_address(page);  in kimage_add_entry()
   569  arch_kexec_pre_free_pages(page_address(cma), nr_pages);  in kimage_free_cma()
   746  char *ptr = page_address(cma);  in kimage_load_cma_segment()
   900  arch_kexec_post_alloc_pages(page_address(page), 1, 0);  in kimage_load_crash_segment()
   923  arch_kexec_pre_free_pages(page_address(page), 1);  in kimage_load_crash_segment()
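Across the kexec code above (x86, riscv, and the core), image->control_code_page is an ordinary struct page, and page_address() yields the kernel virtual address where the relocation stub is staged and where the arch hooks apply or strip protections. A minimal sketch of the staging step; relocate_stub and stub_len stand in for the real per-arch symbols:

    #include <linux/kexec.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Copy the architecture's relocation stub into the kernel mapping of
     * the control page (cf. machine_kexec_prepare() in the arch files). */
    static void stage_control_page(struct kimage *image,
                                   const void *relocate_stub, size_t stub_len)
    {
            void *control_page = page_address(image->control_code_page);

            memcpy(control_page, relocate_stub, stub_len);
    }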
|
/linux/arch/um/kernel/skas/uaccess.c
    78  addr = (unsigned long) page_address(page) +  in do_op_one_page()
   269  addr = (unsigned long) page_address(page) +  in arch_futex_atomic_op_inuser()
   348  uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);  in futex_atomic_cmpxchg_inatomic()
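All three UML matches use the same address-splitting idiom: once a user address has been resolved to its backing struct page, the equivalent kernel pointer is page_address(page) plus the offset within the page, extracted with ~PAGE_MASK. A sketch with an illustrative name:

    #include <linux/mm.h>

    /* Kernel-side alias for a user address whose backing page is known;
     * ~PAGE_MASK keeps just the offset inside the page. */
    static void *kernel_alias(struct page *page, unsigned long uaddr)
    {
            return page_address(page) + (uaddr & ~PAGE_MASK);
    }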
|
/linux/arch/m68k/include/asm/cacheflush_mm.h
   258  #define flush_dcache_page(page) __flush_pages_to_ram(page_address(page), 1)
   264  __flush_pages_to_ram(page_address(page), nr)
|
/linux/fs/isofs/namei.c
   167  page_address(page),  in isofs_lookup()
   168  1024 + page_address(page));  in isofs_lookup()
|
/linux/crypto/scatterwalk.c
   152  src_virt = page_address(src_page) + src_offset;  in memcpy_sglist()
   153  dst_virt = page_address(dst_page) + dst_offset;  in memcpy_sglist()
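memcpy_sglist() resolves both scatterlist pages to linear addresses before a plain copy, which presumes the pages have valid kernel mappings at that point. A hedged sketch of that per-span step under the same assumption, with illustrative names:

    #include <linux/mm.h>
    #include <linux/string.h>

    /* Copy between two pages via their linear mappings; valid only while
     * page_address() is non-NULL for both (i.e. lowmem or premapped). */
    static void copy_page_span(struct page *dst_page, unsigned int dst_off,
                               struct page *src_page, unsigned int src_off,
                               size_t len)
    {
            memcpy(page_address(dst_page) + dst_off,
                   page_address(src_page) + src_off, len);
    }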
|
/linux/arch/sh/kernel/dma-coherent.c
    12  __flush_purge_region(page_address(page), size);  in arch_dma_prep_coherent()
|