Searched refs: PAGE_ALIGN_DOWN (Results 1 – 22 of 22) sorted by relevance
158 void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); in hva_to_pfn_retry()
280 old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); in __kvm_gpc_refresh()
281 old_uhva = PAGE_ALIGN_DOWN(gpc->uhva); in __kvm_gpc_refresh()
288 gpc->uhva = PAGE_ALIGN_DOWN(uhva); in __kvm_gpc_refresh()
47 start = PAGE_ALIGN_DOWN(start); in accept_memory()
176 start = PAGE_ALIGN_DOWN(start); in range_contains_unaccepted_memory()
697 start = PAGE_ALIGN_DOWN(efi.unaccepted); in reserve_unaccepted()
16 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
15 #define PAGE_ALIGN_DOWN( global() macro
51 start = PAGE_ALIGN_DOWN(start); in raw_copy_from_user()
85 phys = PAGE_ALIGN_DOWN(phys); in mmio_guard_ioremap_hook()
256 u64 aligned_iova = PAGE_ALIGN_DOWN(base_iova); in iommufd_hw_queue_destroy_access()
293 u64 aligned_iova = PAGE_ALIGN_DOWN(cmd->nesting_parent_iova); in iommufd_hw_queue_alloc_phys()
254 split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE); in truncate_inode_partial_folio()
267 PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE); in truncate_inode_partial_folio()
2058 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) in fault_in_writeable()
2128 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) in fault_in_safe_writeable()
2160 for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) in fault_in_readable()
443 return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd); in mmap_base()
2136 pgend = PAGE_ALIGN_DOWN(__pa(end_pg)); in free_memmap()
2821 addr = PAGE_ALIGN_DOWN(addr); in make_device_exclusive()
6424 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj); in kvfree_rcu_cb()
6436 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj); in kvfree_rcu_cb()
282 start = (void *)PAGE_ALIGN_DOWN((u64)start); in kmsan_init_alloc_meta_for_range()
349 aligned_end = PAGE_ALIGN_DOWN(zdev->end_dma + 1); in pci_dma_range_setup()
421 shadow_start = PAGE_ALIGN_DOWN(shadow_start); in __kasan_populate_vmalloc()
88 unsigned long sha_start = PAGE_ALIGN_DOWN(__sha(start)); in kasan_populate()
166 range.pos = PAGE_ALIGN_DOWN(*ppos); in fsnotify_pre_content()
643 __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr)); in flush_cache_page_if_present()
671 npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >> in vmalloc_to_dma_addrs()
230 #define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE) macro
10231 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; in btrfs_add_swap_extent()