Occurrences of the identifier dpage across the Linux tree, grouped by file and enclosing function (numbers are source line numbers):

/linux/lib/test_hmm.c
  dmirror_devmem_alloc_page():
    631    struct page *dpage = NULL;            (local)
    650    dpage = folio_page(mdevice->free_folios, 0);
    651    mdevice->free_folios = dpage->zone_device_data;
    655    dpage = mdevice->free_pages;
    656    mdevice->free_pages = dpage->zone_device_data;
    661    if (dmirror_allocate_chunk(mdevice, &dpage, is_large))
    665    zone_device_folio_init(page_folio(dpage), order);
    666    dpage->zone_device_data = rpage;
    667    return dpage;
  dmirror_migrate_alloc_and_copy():
    684    struct page *dpage;                   (local)
  [all …]
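The test_hmm.c hits show a common ZONE_DEVICE trick: threading a free list through each page's zone_device_data pointer, so free device pages need no extra bookkeeping structure. A minimal sketch of that allocator shape, with hypothetical names (my_devmem, my_devmem_alloc_page); this is not the test_hmm.c code itself:

    #include <linux/mm.h>
    #include <linux/spinlock.h>

    struct my_devmem {
            spinlock_t lock;
            struct page *free_pages;    /* singly linked via zone_device_data */
    };

    static struct page *my_devmem_alloc_page(struct my_devmem *d)
    {
            struct page *dpage = NULL;

            spin_lock(&d->lock);
            if (d->free_pages) {
                    dpage = d->free_pages;
                    d->free_pages = dpage->zone_device_data;    /* pop head */
            }
            spin_unlock(&d->lock);
            return dpage;
    }

    static void my_devmem_free_page(struct my_devmem *d, struct page *dpage)
    {
            spin_lock(&d->lock);
            dpage->zone_device_data = d->free_pages;            /* push head */
            d->free_pages = dpage;
            spin_unlock(&d->lock);
    }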
|
/linux/drivers/gpu/drm/nouveau/nouveau_dmem.c
  nouveau_dmem_copy_folio():
    160    struct page *dpage = folio_page(dfolio, 0);   (local)
    165    dma_info->dma_addr = dma_map_page(dev, dpage, 0, page_size(dpage),
    167    dma_info->size = page_size(dpage);
    175    dma_unmap_page(dev, dma_info->dma_addr, page_size(dpage),
  nouveau_dmem_migrate_to_ram():
    189    struct page *dpage;                   (local)
    242    dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER | __GFP_ZERO,
    245    dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma,
    247    if (!dpage) {
    252    args.dst[0] = migrate_pfn(page_to_pfn(dpage));
    255    dfolio = page_folio(dpage);
  [all …]
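The nouveau_dmem_migrate_to_ram() hits show the device-to-RAM fault path: allocate a zeroed system page for the faulting address and publish it as the migrate_vma destination. A hedged sketch of that step (my_migrate_to_ram is hypothetical; alloc_page_vma() is the older interface, and the hit at source line 242 shows the newer vma_alloc_folio() form):

    #include <linux/migrate.h>
    #include <linux/mm.h>

    static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf,
                                        struct migrate_vma *args)
    {
            struct page *dpage;

            dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma,
                                   vmf->address);
            if (!dpage)
                    return VM_FAULT_SIGBUS;

            lock_page(dpage);       /* the migrate core expects it locked */
            args->dst[0] = migrate_pfn(page_to_pfn(dpage));

            /* the driver then copies the device data into dpage and calls
             * migrate_vma_pages()/migrate_vma_finalize() to finish the move */
            return 0;
    }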
|
/linux/arch/powerpc/kvm/book3s_hv_uvmem.c
  __kvmppc_svm_page_out():
    520    struct page *dpage, *spage;           (local)
    550    dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
    551    if (!dpage) {
    556    lock_page(dpage);
    558    pfn = page_to_pfn(dpage);
    574    unlock_page(dpage);
    575    __free_page(dpage);
  kvmppc_uvmem_get_page():
    697    struct page *dpage = NULL;            (local)
    724    dpage = pfn_to_page(uvmem_pfn);
    725    dpage->zone_device_data = pvt;
  [all …]
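__kvmppc_svm_page_out() follows the same allocate-lock-copy shape, with an unwind that unlocks and frees the destination page if the copy out of secure memory fails. A reduced sketch, with my_copy_out() as a hypothetical stand-in for the ultravisor page-out call:

    #include <linux/mm.h>

    static int my_copy_out(unsigned long pfn)
    {
            return 0;       /* hypothetical stand-in for the UV page-out call */
    }

    static int my_svm_page_out(struct vm_area_struct *vma, unsigned long start)
    {
            struct page *dpage;
            int ret;

            dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
            if (!dpage)
                    return -ENOMEM;

            lock_page(dpage);
            ret = my_copy_out(page_to_pfn(dpage));
            if (ret) {
                    unlock_page(dpage);     /* unwind: drop the unused page */
                    __free_page(dpage);
            }
            return ret;     /* on success the locked page joins the migration */
    }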
|
/linux/crypto/scompress.c
  scomp_acomp_comp_decomp():
    178    struct page *spage, *dpage;           (local)
    196    dpage = sg_page(req->dst);
    201    dpage += doff / PAGE_SIZE;
    206    if (PageHighMem(dpage + n) &&
    209    dst = kmap_local_page(dpage) + doff;
    269    flush_dcache_page(dpage);
    273    dpage++;
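The scompress.c hits step dpage forward to whichever page actually contains the destination offset, map it temporarily with kmap_local_page(), and flush the dcache after writing through the kernel alias. A minimal highmem-safe copy in that style (my_copy_to_sg_page is hypothetical, and it assumes len fits within the one page):

    #include <linux/highmem.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static void my_copy_to_sg_page(struct scatterlist *dst_sg,
                                   const void *src, size_t len)
    {
            struct page *dpage = sg_page(dst_sg);
            unsigned int doff = dst_sg->offset;
            void *dst;

            dpage += doff / PAGE_SIZE;      /* page that holds the offset */
            doff %= PAGE_SIZE;

            dst = kmap_local_page(dpage) + doff;
            memcpy(dst, src, len);          /* assumes len <= PAGE_SIZE - doff */
            kunmap_local(dst);
            flush_dcache_page(dpage);       /* written via the kernel mapping */
    }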
|
/linux/drivers/dma/nbpfaxi.c
  nbpf_desc_page_alloc():
    692    struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);   (local)
    701    if (!dpage)
    705    __func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));
    707    for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
    708    i < ARRAY_SIZE(dpage->ldesc);
    722    for (i = 0, desc = dpage->desc;
    723    i < ARRAY_SIZE(dpage->desc);
    739    list_add(&dpage->node, &chan->desc_page);
    742    return ARRAY_SIZE(dpage->desc);
  nbpf_free_chan_resources():
    1078   struct nbpf_desc_page *dpage, *tmp;   (local)
  [all …]
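nbpf_desc_page_alloc() carves one zeroed page into a batch of descriptors and queues the page itself on the channel so it can be freed at teardown. A sketch of that pooling pattern with hypothetical types (my_desc, my_desc_page):

    #include <linux/gfp.h>
    #include <linux/kernel.h>
    #include <linux/list.h>

    #define MY_DESCS_PER_PAGE 32    /* assumption; sized so the struct fits a page */

    struct my_desc {
            struct list_head node;
    };

    struct my_desc_page {
            struct list_head node;                  /* chained on the channel */
            struct my_desc desc[MY_DESCS_PER_PAGE];
    };

    static int my_desc_page_alloc(struct list_head *chan_pages,
                                  struct list_head *free_descs)
    {
            struct my_desc_page *dpage =
                    (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
            int i;

            if (!dpage)
                    return -ENOMEM;

            for (i = 0; i < ARRAY_SIZE(dpage->desc); i++)
                    list_add_tail(&dpage->desc[i].node, free_descs);

            list_add(&dpage->node, chan_pages);     /* found again at teardown */
            return ARRAY_SIZE(dpage->desc);
    }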
|
/linux/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
  svm_migrate_copy_to_ram():
    589    struct page *dpage;                   (local)
    631    dpage = svm_migrate_get_sys_page(migrate->vma, addr);
    632    if (!dpage) {
    639    dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
    647    dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
    649    migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
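The kfd_migrate.c hits DMA-map each freshly obtained system page and record its migrate PFN in the same loop iteration. A small sketch of that per-page step (my_map_dst_page is hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/migrate.h>

    static int my_map_dst_page(struct device *dev, struct page *dpage,
                               dma_addr_t *dma, unsigned long *mpfn)
    {
            *dma = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, *dma))
                    return -EFAULT;

            *mpfn = migrate_pfn(page_to_pfn(dpage));    /* hand to migrate core */
            return 0;
    }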
|
/linux/kernel/trace/ring_buffer.c
  alloc_cpu_data():
    418    struct buffer_data_page *dpage;       (local)
    433    dpage = page_address(page);
    434    rb_init_page(dpage);
    436    return dpage;
  rb_read_data_buffer():
    1845   static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,   (argument)
    1856   ts = dpage->time_stamp;
    1860   event = (struct ring_buffer_event *)(dpage->data + e);
  rb_validate_buffer():
    1897   static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)   (argument)
    1903   tail = local_read(&dpage->commit);
    1904   return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
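struct buffer_data_page is private to ring_buffer.c; the hits show its three fields in use: a time_stamp, a commit offset read with local_read(), and the data[] area the events live in. A sketch of the validation idea against a mirrored layout (my_data_page mirrors those fields by assumption; the real rb_read_data_buffer() additionally decodes every event header up to the tail):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/stddef.h>
    #include <linux/types.h>
    #include <asm/local.h>

    struct my_data_page {                   /* layout assumption, not a public API */
            u64             time_stamp;     /* timestamp of the page's first event */
            local_t         commit;         /* write offset ('tail') into data[] */
            unsigned char   data[];         /* the serialized events */
    };

    static int my_validate_dpage(struct my_data_page *dpage)
    {
            unsigned long tail = local_read(&dpage->commit);

            /* all committed events must end inside the page's data area */
            if (tail > PAGE_SIZE - offsetof(struct my_data_page, data))
                    return -EINVAL;
            return 0;
    }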
|
/linux/drivers/net/ethernet/3com/typhoon.c
  typhoon_download_firmware():
    1333   void *dpage;                          (local)
    1354   dpage = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dpage_dma, GFP_ATOMIC);
    1355   if (!dpage) {
    1419   dpage, len));
    1459   dma_free_coherent(&pdev->dev, PAGE_SIZE, dpage, dpage_dma);
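typhoon_download_firmware() allocates a single DMA-coherent page up front, streams the firmware image through it chunk by chunk, and frees it once the download finishes. A hedged sketch of that bounce-buffer loop (my_download_fw is hypothetical; the step that tells the NIC to fetch each chunk is elided):

    #include <linux/dma-mapping.h>
    #include <linux/minmax.h>
    #include <linux/pci.h>
    #include <linux/string.h>

    static int my_download_fw(struct pci_dev *pdev, const u8 *image, size_t len)
    {
            dma_addr_t dpage_dma;
            void *dpage;

            dpage = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dpage_dma,
                                       GFP_ATOMIC);     /* GFP_ATOMIC as in the hit */
            if (!dpage)
                    return -ENOMEM;

            while (len) {
                    size_t chunk = min_t(size_t, len, PAGE_SIZE);

                    memcpy(dpage, image, chunk);    /* device reads via dpage_dma */
                    /* ...command the NIC to fetch 'chunk' bytes from dpage_dma... */
                    image += chunk;
                    len -= chunk;
            }

            dma_free_coherent(&pdev->dev, PAGE_SIZE, dpage, dpage_dma);
            return 0;
    }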
|
/linux/drivers/crypto/hifn_795x.c
  hifn_setup_dma():
    1308   struct page *spage, *dpage;           (local)
    1329   dpage = sg_page(t);
    1334   dpage = sg_page(dst);
    1340   hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0);
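In hifn_setup_dma() the destination page and offset come straight from the scatterlist entry being filled, with the last-descriptor flag set once the running length is exhausted. A reduced sketch (my_setup_dst_desc stands in for the real descriptor-ring write):

    #include <linux/device.h>
    #include <linux/minmax.h>
    #include <linux/scatterlist.h>

    static void my_setup_dst_desc(struct device *dev, struct page *dpage,
                                  unsigned int doff, unsigned int len, bool last)
    {
            /* hypothetical stand-in: would write one hardware descriptor */
    }

    static void my_fill_dst(struct device *dev, struct scatterlist *dst,
                            unsigned int total)
    {
            struct scatterlist *t;

            for (t = dst; t && total; t = sg_next(t)) {
                    unsigned int len = min(total, t->length);

                    my_setup_dst_desc(dev, sg_page(t), t->offset, len,
                                      total - len == 0);    /* last chunk? */
                    total -= len;
            }
    }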
|
/linux/drivers/scsi/st.c
  move_buffer_data():
    4170   struct page *dpage = st_bp->reserved_pages[dst_seg];   (local)
    4174   memmove(page_address(dpage) + dst_offset,
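The st.c hit moves data between two reserved buffer pages; since these are regular lowmem pages, page_address() yields a direct kernel mapping, and memmove() is used because the source and destination ranges can overlap when both land in the same page. A one-function sketch (my_move_chunk is hypothetical):

    #include <linux/mm.h>
    #include <linux/string.h>

    static void my_move_chunk(struct page *spage, unsigned int src_offset,
                              struct page *dpage, unsigned int dst_offset,
                              size_t count)
    {
            /* memmove, not memcpy: spage and dpage may be the same page */
            memmove(page_address(dpage) + dst_offset,
                    page_address(spage) + src_offset, count);
    }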
|