Searched refs:ppage (Results 1 – 4 of 4) sorted by relevance
 67  struct page **ppage = xdr->pages + (xdr->page_base >> PAGE_SHIFT);  in xdr_partial_copy_from_skb() local
 88  if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {  in xdr_partial_copy_from_skb()
 89  *ppage = alloc_page(GFP_NOWAIT);  in xdr_partial_copy_from_skb()
 90  if (unlikely(*ppage == NULL)) {  in xdr_partial_copy_from_skb()
 97  kaddr = kmap_atomic(*ppage);  in xdr_partial_copy_from_skb()
 99  flush_dcache_page(*ppage);  in xdr_partial_copy_from_skb()
105  ppage++;  in xdr_partial_copy_from_skb()
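The xdr_partial_copy_from_skb() hits above show the XDRBUF_SPARSE_PAGES idea: slots in the receive buffer's page array stay NULL until data actually lands in them, and a page is allocated only at that point. Below is a minimal userspace sketch of that lazy-allocation pattern; the names sparse_buf and sparse_copy_in and the plain malloc() backing are illustrative stand-ins, not the kernel API (which uses alloc_page(), kmap_atomic() and flush_dcache_page() on real struct page pointers).

/* Simplified userspace analogue of copying into a sparse page array,
 * allocating backing pages on demand. Illustrative only. */
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct sparse_buf {
	void **pages;     /* slots may stay NULL until data arrives */
	size_t nr_pages;
};

/* Copy len bytes into the buffer at byte offset off, allocating backing
 * pages on demand. Returns bytes copied, or -1 if the first allocation fails. */
long sparse_copy_in(struct sparse_buf *buf, size_t off,
		    const char *src, size_t len)
{
	long copied = 0;

	while (len) {
		size_t idx = off / PAGE_SIZE;
		size_t pgoff = off % PAGE_SIZE;
		size_t chunk = PAGE_SIZE - pgoff;

		if (idx >= buf->nr_pages)
			break;
		if (chunk > len)
			chunk = len;

		/* Lazy allocation: only back the page once data maps onto it. */
		if (!buf->pages[idx]) {
			buf->pages[idx] = malloc(PAGE_SIZE);
			if (!buf->pages[idx])
				return copied ? copied : -1;
		}

		memcpy((char *)buf->pages[idx] + pgoff, src, chunk);
		src += chunk;
		off += chunk;
		len -= chunk;
		copied += chunk;
	}
	return copied;
}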
41  srl %g3, 12, %g1          ! ppage >> 12
58  cmp %g3, %g1              ! ptag == ppage?
94  sub %g3, -PAGE_SIZE, %g3  ! ppage + PAGE_SIZE
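For readers not fluent in sparc assembly, the three hits above amount to a page-tag comparison and a step to the next page. The C rendering below is a sketch under the assumption that the register holds a page address and the tag stores a page number; the shift of 12 (4 KiB pages) is taken from the snippet, and the function names are hypothetical.

/* Illustrative C equivalent of the tag check in the sparc hits above. */
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Does the cached tag refer to the page containing ppage? */
bool tag_matches_page(uintptr_t ptag, uintptr_t ppage)
{
	return ptag == (ppage >> PAGE_SHIFT);   /* ptag == ppage? */
}

/* Advance to the next page, mirroring "sub %g3, -PAGE_SIZE, %g3". */
uintptr_t next_page(uintptr_t ppage)
{
	return ppage + PAGE_SIZE;               /* ppage + PAGE_SIZE */
}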
296  nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage,  in nouveau_dmem_chunk_alloc() argument
367  *ppage = &drm->dmem->free_folios->page;  in nouveau_dmem_chunk_alloc()
368  drm->dmem->free_folios = (*ppage)->zone_device_data;  in nouveau_dmem_chunk_alloc()
370  *ppage = drm->dmem->free_pages;  in nouveau_dmem_chunk_alloc()
371  drm->dmem->free_pages = (*ppage)->zone_device_data;  in nouveau_dmem_chunk_alloc()
497  struct page **ppage, bool is_large)  in dmirror_allocate_chunk() argument
594  if (ppage) {  in dmirror_allocate_chunk()
600  *ppage = folio_page(mdevice->free_folios, 0);  in dmirror_allocate_chunk()
601  mdevice->free_folios = (*ppage)->zone_device_data;  in dmirror_allocate_chunk()
604  *ppage = mdevice->free_pages;  in dmirror_allocate_chunk()
605  mdevice->free_pages = (*ppage)->zone_device_data;  in dmirror_allocate_chunk()
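Both the nouveau_dmem_chunk_alloc() and dmirror_allocate_chunk() hits use the same pattern: the caller passes struct page **ppage as an out-parameter, and the allocator pops the head of a per-device free list that is threaded through each page's zone_device_data field. The sketch below mirrors that shape in a self-contained userspace form; demo_page, demo_device, free_list_next and demo_chunk_alloc are illustrative stand-ins, not the kernel types or functions.

/* Minimal sketch of an out-parameter page allocator backed by a free list
 * linked through the pages themselves. Illustrative only. */
#include <stddef.h>

struct demo_page {
	void *free_list_next;   /* plays the role of page->zone_device_data */
};

struct demo_device {
	struct demo_page *free_pages;   /* head of singly linked free list */
};

/* Pop one page off the device free list and hand it back via *ppage
 * (if the caller asked for one). Returns 0 on success, -1 if empty. */
int demo_chunk_alloc(struct demo_device *dev, struct demo_page **ppage)
{
	if (!dev->free_pages)
		return -1;

	if (ppage) {
		*ppage = dev->free_pages;
		/* Advance the head using the link stored in the page itself. */
		dev->free_pages = (*ppage)->free_list_next;
	}
	return 0;
}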