Searched refs:page_private (Results 1 – 24 of 24) sorted by relevance
205 return page_private(page) & BM_PAGE_IDX_MASK; in bm_page_to_idx()
214 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_lock_io()
221 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_unlock_io()
231 clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); in bm_set_page_unchanged()
232 clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); in bm_set_page_unchanged()
237 set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); in bm_set_page_need_writeout()
265 if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page))) in drbd_bm_mark_for_writeout()
271 volatile const unsigned long *addr = &page_private(page); in bm_test_page_unchanged()
277 set_bit(BM_PAGE_IO_ERROR, &page_private(page)); in bm_set_page_io_err()
282 clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); in bm_clear_page_io_err()
[all …]
1604 return (struct page *)page_private(page); in page_chain_next()
2062 drbd_pp_pool = (struct page *)page_private(page); in drbd_destroy_mempools()
182 *pagepool = (struct page *)page_private(page); in __erofs_allocpage()
200 *pagepool = (struct page *)page_private(page); in erofs_release_pages()
127 return (struct balloon_dev_info *)page_private(page); in balloon_page_device()
183 ((struct buffer_head *)page_private(page)); \
333 return (struct page *)page_private(bounce_page); in fscrypt_pagecache_page()
560 #define page_private(page) ((page)->private) macro
103 address = page_private(buffer->pages[i]); in fw_iso_buffer_destroy()
125 address = page_private(buffer->pages[i]); in fw_iso_buffer_lookup()
733 return page_private(ctx->pages[i]); in ar_buffer_bus()
3402 page_bus = page_private(buffer->pages[page]); in queue_iso_transmit()
3485 page_bus = page_private(buffer->pages[page]); in queue_iso_packet_per_buffer()
3548 page_bus = page_private(buffer->pages[page]); in queue_iso_buffer_fill()
80 if (!page_private(page)) in f2fs_is_compressed_page()
86 *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC); in f2fs_is_compressed_page()
142 return ((struct compress_io_ctx *)page_private(page))->rpages[0]; in f2fs_compress_control_page()
807 (struct decompress_io_ctx *)page_private(page); in f2fs_end_read_compressed_page()
1472 (struct compress_io_ctx *)page_private(page); in f2fs_compress_write_end_io()
1864 (struct decompress_io_ctx *)page_private(page); in f2fs_put_page_dic()
735 for (last = rb->aux_nr_pages + (1 << page_private(page)); in rb_alloc_aux()
750 if (page_private(page) != max_order) in rb_alloc_aux()
34 wr = (struct orangefs_write_range *)page_private(page); in orangefs_writepage_locked()
124 page_private(ow->pages[i]); in orangefs_writepages_work()
137 page_private(ow->pages[i]); in orangefs_writepages_work()
254 return (struct kvm_mmu_page *)page_private(page); in to_shadow_page()
544 return page_private(page); in buddy_order()
558 #define buddy_order_unsafe(page) READ_ONCE(page_private(page))
1772 VM_BUG_ON(page_private(page) != SWP_CONTINUED); in swp_swapcount()
3852 if (!page_private(head)) { in add_swap_count_continuation()
3913 if (page_private(head) != SWP_CONTINUED) { in swap_count_continued()
3995 if (page_private(head)) { in free_swap_count_continuations()
738 struct zspage *zspage = (struct zspage *)page_private(page); in get_zspage()
1361 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
71 return 1 << page_private(page); in buf_nr_pages()
775 order = page_private(p); in topa_insert_pages()
1290 order = page_private(p); in pt_buffer_try_single()
298 order = page_private(page); in kimage_free_pages()
1035 kfree((void *)page_private(pages[i])); in gnttab_pages_clear_private()
1994 struct page *next = (struct page *)page_private(head); in skb_copy_ubufs()
2020 page = (struct page *)page_private(page); in skb_copy_ubufs()
2039 head = (struct page *)page_private(head); in skb_copy_ubufs()