/freebsd/sys/dev/mthca/
mthca_allocator.c
    122  if (array->page_list[p].page)   in mthca_array_get()
    123  return array->page_list[p].page[index & MTHCA_ARRAY_MASK];   in mthca_array_get()
    133  if (!array->page_list[p].page)   in mthca_array_set()
    134  array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);   in mthca_array_set()
    136  if (!array->page_list[p].page)   in mthca_array_set()
    139  array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;   in mthca_array_set()
    140  ++array->page_list[p].used;   in mthca_array_set()
    149  if (--array->page_list[p].used == 0) {   in mthca_array_clear()
    150  free_page((unsigned long) array->page_list[p].page);   in mthca_array_clear()
    151  array->page_list[p].page = NULL;   in mthca_array_clear()
    [all …]
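
These hits are the core of mthca's mthca_array: a two-level directory in which the upper index bits select a lazily allocated page of pointers and the lower bits, masked by MTHCA_ARRAY_MASK, select a slot; a per-page use count lets mthca_array_clear() free a pointer page once it empties. A minimal userland sketch of the same pattern — calloc/free stand in for get_zeroed_page/free_page, and the names and 512-slot geometry are illustrative, not mthca's:

#include <stdlib.h>

#define ARRAY_SHIFT 9                    /* illustrative: 512 slots per page */
#define ARRAY_MASK  ((1 << ARRAY_SHIFT) - 1)

struct page_dir {
    struct {
        void **page;                     /* lazily allocated pointer page */
        int    used;                     /* live entries on this page */
    } *page_list;
};

void *dir_get(struct page_dir *d, int index)
{
    int p = index >> ARRAY_SHIFT;

    if (d->page_list[p].page)
        return d->page_list[p].page[index & ARRAY_MASK];
    return NULL;
}

int dir_set(struct page_dir *d, int index, void *value)
{
    int p = index >> ARRAY_SHIFT;

    /* Allocate the second-level pointer page on first use. */
    if (!d->page_list[p].page)
        d->page_list[p].page = calloc(ARRAY_MASK + 1, sizeof(void *));
    if (!d->page_list[p].page)
        return -1;

    d->page_list[p].page[index & ARRAY_MASK] = value;
    ++d->page_list[p].used;
    return 0;
}

void dir_clear(struct page_dir *d, int index)
{
    int p = index >> ARRAY_SHIFT;

    d->page_list[p].page[index & ARRAY_MASK] = NULL;

    /* Drop the pointer page once its last live entry is cleared. */
    if (--d->page_list[p].used == 0) {
        free(d->page_list[p].page);
        d->page_list[p].page = NULL;
    }
}
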
mthca_eq.c
    231  return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;   in get_eqe()
    482  eq->page_list = kmalloc(npages * sizeof *eq->page_list,   in mthca_create_eq()
    484  if (!eq->page_list)   in mthca_create_eq()
    488  eq->page_list[i].buf = NULL;   in mthca_create_eq()
    500  eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,   in mthca_create_eq()
    502  if (!eq->page_list[i].buf)   in mthca_create_eq()
    506  dma_unmap_addr_set(&eq->page_list[i], mapping, t);   in mthca_create_eq()
    508  clear_page(eq->page_list[i].buf);   in mthca_create_eq()
    572  if (eq->page_list[i].buf)   in mthca_create_eq()
    574  eq->page_list[i].buf,   in mthca_create_eq()
    [all …]
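
get_eqe() at line 231 shows why the event queue keeps a page_list rather than one contiguous buffer: entries live in individually DMA-mapped pages, so a linear entry index must be split into a page index (off / PAGE_SIZE) and an offset within that page (off % PAGE_SIZE). A hedged model of that lookup; the entry size, the struct names, and the power-of-two nent assumption are illustrative:

#include <stddef.h>
#include <stdint.h>

#define MODEL_PAGE_SIZE 4096
#define MODEL_EQE_SIZE  32               /* illustrative entry size */

struct eq_model {
    struct { uint8_t *buf; } *page_list; /* one mapped page per slot */
    unsigned nent;                       /* assumed to be a power of two */
};

/* Split a linear EQE index into (page, offset), as get_eqe() does. */
void *eq_get_entry(struct eq_model *eq, unsigned entry)
{
    size_t off = (size_t)(entry & (eq->nent - 1)) * MODEL_EQE_SIZE;

    return eq->page_list[off / MODEL_PAGE_SIZE].buf + off % MODEL_PAGE_SIZE;
}
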
mthca_mr.c
    689  static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,   in mthca_check_fmr() argument
    706  if (page_list[i] & ~page_mask)   in mthca_check_fmr()
    716  int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,   in mthca_tavor_map_phys_fmr() argument
    725  err = mthca_check_fmr(fmr, page_list, list_len, iova);   in mthca_tavor_map_phys_fmr()
    738  __be64 mtt_entry = cpu_to_be64(page_list[i] |   in mthca_tavor_map_phys_fmr()
    757  int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,   in mthca_arbel_map_phys_fmr() argument
    765  err = mthca_check_fmr(fmr, page_list, list_len, iova);   in mthca_arbel_map_phys_fmr()
    786  fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |   in mthca_arbel_map_phys_fmr()
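
mthca_check_fmr() (line 706) rejects any address in page_list with bits set below the FMR's page boundary, since the MTT entries built at lines 738 and 786 assume page-aligned addresses. A sketch of that validation, with hypothetical parameter names:

#include <errno.h>
#include <stdint.h>

/* Every address must sit on the FMR's page boundary: no low bits set. */
int check_fmr_pages(const uint64_t *page_list, int list_len,
                    int page_shift, int max_pages)
{
    uint64_t page_mask = ~((1ULL << page_shift) - 1);

    if (list_len > max_pages)
        return -EINVAL;

    for (int i = 0; i < list_len; ++i)
        if (page_list[i] & ~page_mask)
            return -EINVAL;

    return 0;
}
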
mthca_provider.h
     56  struct mthca_buf_list *page_list;   member
    116  struct mthca_buf_list *page_list;   member
mthca_dev.h
    193  } *page_list;   member
    485  int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
    488  int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
/freebsd/sys/ofed/drivers/infiniband/core/
ib_fmr_pool.c
    118  u64 *page_list,   in ib_fmr_cache_lookup() argument
    128  bucket = pool->cache_bucket + ib_fmr_hash(*page_list);   in ib_fmr_cache_lookup()
    133  !memcmp(page_list, fmr->page_list,   in ib_fmr_cache_lookup()
    134  page_list_len * sizeof *page_list))   in ib_fmr_cache_lookup()
    415  u64 *page_list,   in ib_fmr_pool_map_phys() argument
    429  page_list,   in ib_fmr_pool_map_phys()
    454  result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,   in ib_fmr_pool_map_phys()
    473  memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));   in ib_fmr_pool_map_phys()
    477  pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));   in ib_fmr_pool_map_phys()
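
The FMR pool cache hashes only the first page address to pick a bucket (lines 128 and 477), then confirms a hit by comparing the I/O virtual address, the list length, and the full page_list byte-for-byte (lines 133–134), so an identical registration can be reused without a remap. A userland sketch under those assumptions — the hash and bucket count are illustrative, and the trailing array mirrors the zero-length page_list[0] member declared in ib_fmr_pool.h further down this listing:

#include <stdint.h>
#include <string.h>

#define NBUCKETS 1024                    /* illustrative bucket count */

struct cached_fmr {
    struct cached_fmr *next;             /* bucket chain */
    uint64_t io_virtual_address;
    int      page_list_len;
    uint64_t page_list[];                /* trailing array, as in ib_fmr_pool.h */
};

unsigned fmr_hash(uint64_t first_page)
{
    /* Illustrative hash: fold the page frame number into a bucket. */
    return (unsigned)(first_page >> 12) & (NBUCKETS - 1);
}

struct cached_fmr *fmr_cache_lookup(struct cached_fmr **buckets,
                                    const uint64_t *page_list,
                                    int page_list_len, uint64_t iova)
{
    for (struct cached_fmr *f = buckets[fmr_hash(page_list[0])];
         f; f = f->next)
        if (f->io_virtual_address == iova &&
            f->page_list_len == page_list_len &&
            !memcmp(page_list, f->page_list,
                    page_list_len * sizeof(*page_list)))
            return f;                    /* hit: the mapping can be reused */

    return NULL;
}
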
ib_umem_odp.c
    273  umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *   in ib_umem_odp_get()
    274  sizeof(*umem->odp_data->page_list));   in ib_umem_odp_get()
    275  if (!umem->odp_data->page_list) {   in ib_umem_odp_get()
    340  vfree(umem->odp_data->page_list);   in ib_umem_odp_get()
    410  vfree(umem->odp_data->page_list);   in ib_umem_odp_release()
    466  umem->odp_data->page_list[page_index] = page;   in ib_umem_odp_map_dma_single_page()
    468  } else if (umem->odp_data->page_list[page_index] == page) {   in ib_umem_odp_map_dma_single_page()
    472  umem->odp_data->page_list[page_index], page);   in ib_umem_odp_map_dma_single_page()
    639  if (umem->odp_data->page_list[idx]) {   in ib_umem_odp_unmap_dma_pages()
    640  struct page *page = umem->odp_data->page_list[idx];   in ib_umem_odp_unmap_dma_pages()
    [all …]
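
Lines 466–472 show the ODP invariant for mapping one page: writing into an empty slot succeeds, re-mapping the same page is a harmless no-op, and finding a different page means two page faults raced on the same index. A small model of that rule (the function name is hypothetical):

#include <stdio.h>

/* An empty slot takes the page, a matching slot is a no-op, and a
 * mismatch means two faults raced to different pages for one index. */
int map_single_page(void **page_list, long idx, void *page)
{
    if (!page_list[idx]) {
        page_list[idx] = page;           /* first mapping wins */
        return 0;
    }
    if (page_list[idx] == page)
        return 0;                        /* already mapped: idempotent */

    fprintf(stderr, "page conflict at index %ld\n", idx);
    return -1;
}
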
ib_umem.c
     89  struct page **page_list;   in ib_umem_get() local
    149  page_list = (struct page **) __get_free_page(GFP_KERNEL);   in ib_umem_get()
    150  if (!page_list) {   in ib_umem_get()
    184  gup_flags, page_list, vma_list);   in ib_umem_get()
    194  sg_set_page(sg, page_list[i], PAGE_SIZE, 0);   in ib_umem_get()
    226  free_page((unsigned long) page_list);   in ib_umem_get()
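
Here page_list is a single kernel page used as scratch: each pinning call fills at most one page-worth of struct page pointers (line 184), which are fed to the scatterlist (line 194) before the next batch, so memory overhead stays constant regardless of region size. A userland model of that bounded-batch loop; pin_fn and consume_fn are stand-ins for get_user_pages() and sg_set_page():

#include <stdlib.h>

#define BATCH 512                        /* pointers per 4 KiB scratch page */

typedef int  (*pin_fn)(size_t start, size_t n, void **out);
typedef void (*consume_fn)(void *page);

/* Pin npages in bounded batches, handing each batch to the consumer. */
int pin_all(size_t npages, pin_fn pin, consume_fn consume)
{
    void **page_list = malloc(BATCH * sizeof(*page_list));
    size_t done = 0;
    int ret = 0;

    if (!page_list)
        return -1;

    while (done < npages) {
        size_t n = npages - done < BATCH ? npages - done : BATCH;

        ret = pin(done, n, page_list);
        if (ret < 0)
            break;
        for (size_t i = 0; i < n; ++i)
            consume(page_list[i]);       /* sg_set_page() in the original */
        done += n;
    }

    free(page_list);
    return ret;
}
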
/freebsd/sys/dev/mlx4/mlx4_core/
mlx4_mr.c
    698  int start_index, int npages, u64 *page_list)   in mlx4_write_mtt_chunk() argument
    715  mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);   in mlx4_write_mtt_chunk()
    724  int start_index, int npages, u64 *page_list)   in __mlx4_write_mtt() argument
    739  err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);   in __mlx4_write_mtt()
    744  page_list += chunk;   in __mlx4_write_mtt()
    752  int start_index, int npages, u64 *page_list)   in mlx4_write_mtt() argument
    775  inbox[i + 2] = cpu_to_be64(page_list[i] |   in mlx4_write_mtt()
    785  page_list += chunk;   in mlx4_write_mtt()
    791  return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);   in mlx4_write_mtt()
    798  u64 *page_list;   in mlx4_buf_write_mtt() local
    [all …]
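
The pattern at lines 739–744 and 775–785 is chunked table writing: the full page list is pushed into the MTT in bounded pieces, advancing both the table start index and the list pointer each round, with each entry tagged present (line 715). A sketch of that loop; the chunk limit and flag value are illustrative, not mlx4's mailbox geometry:

#include <stdint.h>

#define MTT_FLAG_PRESENT 1ULL
#define MAX_CHUNK        64              /* illustrative per-command limit */

int write_chunk(uint64_t *mtt_table, int start, int n,
                const uint64_t *pages)
{
    for (int i = 0; i < n; ++i)
        mtt_table[start + i] = pages[i] | MTT_FLAG_PRESENT;
    return 0;
}

/* Walk the full list in bounded chunks, advancing index and pointer. */
int write_mtt(uint64_t *mtt_table, int start_index, int npages,
              const uint64_t *page_list)
{
    while (npages > 0) {
        int chunk = npages < MAX_CHUNK ? npages : MAX_CHUNK;
        int err = write_chunk(mtt_table, start_index, chunk, page_list);

        if (err)
            return err;
        npages      -= chunk;
        start_index += chunk;
        page_list   += chunk;
    }
    return 0;
}
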
mlx4_alloc.c
    614  buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),   in mlx4_buf_alloc()
    616  if (!buf->page_list)   in mlx4_buf_alloc()
    620  buf->page_list[i].buf =   in mlx4_buf_alloc()
    624  if (!buf->page_list[i].buf)   in mlx4_buf_alloc()
    627  buf->page_list[i].map = t;   in mlx4_buf_alloc()
    629  memset(buf->page_list[i].buf, 0, PAGE_SIZE);   in mlx4_buf_alloc()
    638  pages[i] = virt_to_page(buf->page_list[i].buf);   in mlx4_buf_alloc()
    668  if (buf->page_list[i].buf)   in mlx4_buf_free()
    671  buf->page_list[i].buf,   in mlx4_buf_free()
    672  buf->page_list[i].map);   in mlx4_buf_free()
    [all …]
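
mlx4_buf_alloc()'s fragmented path allocates the descriptor array zeroed, then one DMA page per slot, and unwinds everything if any page fails — the zeroed array is what makes the cleanup at line 668 safe (NULL slots are skipped). A userland model of that allocate-and-unwind shape, with calloc standing in for kcalloc/dma_alloc_coherent and hypothetical struct names:

#include <stdlib.h>

struct buf_page {
    void *buf;
};

struct buf_model {
    struct buf_page *page_list;
    int nbufs;
};

/* One allocation per page, zeroed, with full unwind on failure. */
int buf_alloc(struct buf_model *buf, int nbufs, size_t page_size)
{
    buf->nbufs = nbufs;
    buf->page_list = calloc(nbufs, sizeof(*buf->page_list));
    if (!buf->page_list)
        return -1;

    for (int i = 0; i < nbufs; ++i) {
        buf->page_list[i].buf = calloc(1, page_size);
        if (!buf->page_list[i].buf)
            goto err_free;
    }
    return 0;

err_free:
    /* calloc zeroed page_list, so untouched slots are NULL and safe. */
    for (int i = 0; i < nbufs; ++i)
        free(buf->page_list[i].buf);
    free(buf->page_list);
    buf->page_list = NULL;
    return -1;
}
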
mlx4_eq.c
    118  return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % …   in get_eqe()
    974  eq->page_list = kmalloc(npages * sizeof *eq->page_list,   in mlx4_create_eq()
    976  if (!eq->page_list)   in mlx4_create_eq()
    980  eq->page_list[i].buf = NULL;   in mlx4_create_eq()
    992  eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->   in mlx4_create_eq()
    996  if (!eq->page_list[i].buf)   in mlx4_create_eq()
   1000  eq->page_list[i].map = t;   in mlx4_create_eq()
   1002  memset(eq->page_list[i].buf, 0, PAGE_SIZE);   in mlx4_create_eq()
   1054  if (eq->page_list[i].buf)   in mlx4_create_eq()
   1056  eq->page_list[i].buf,   in mlx4_create_eq()
    [all …]
/freebsd/sys/dev/mlx5/mlx5_core/
mlx5_alloc.c
     58  buf->page_list[x] = segs[x].ds_addr;   in mlx5_buf_load_mem_cb()
     81  buf->page_list = kcalloc(buf->npages, sizeof(*buf->page_list),   in mlx5_buf_alloc()
    136  kfree(buf->page_list);   in mlx5_buf_alloc()
    146  kfree(buf->page_list);   in mlx5_buf_free()
    252  pas[i] = cpu_to_be64(buf->page_list[i]);   in mlx5_fill_page_array()
/freebsd/sys/dev/drm2/ttm/
ttm_page_alloc_dma.c
    129  struct list_head page_list;   member
    389  list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {   in ttm_dma_pages_put()
    390  list_del(&d_page->page_list);   in ttm_dma_pages_put()
    402  list_del(&d_page->page_list);   in ttm_dma_page_put()
    447  page_list) {   in ttm_dma_page_pool_free()
    452  list_move(&dma_p->page_list, &d_pages);   in ttm_dma_page_pool_free()
    685  list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {   in ttm_dma_handle_caching_state_failure()
    689  list_del(&d_page->page_list);   in ttm_dma_handle_caching_state_failure()
    772  list_add(&dma_p->page_list, d_pages);   in ttm_dma_pool_alloc_new_pages()
    820  list_for_each_entry(d_page, &d_pages, page_list) {   in ttm_dma_page_pool_fill_locked()
    [all …]
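
Unlike the array-style page lists above, ttm's page_list at line 129 is an intrusive list_head embedded in each DMA page, so pages move between pools by relinking, never by copying. A minimal self-contained model of that idiom — node and struct names are illustrative, and container_of is the trick behind list_for_each_entry():

#include <stddef.h>

/* Minimal intrusive doubly linked list, modeling the embedded list_head. */
struct list_node {
    struct list_node *prev, *next;
};

struct dma_page {                        /* model of ttm's per-page bookkeeping */
    struct list_node page_list;          /* links the page into a pool */
    void *vaddr;
};

void list_init(struct list_node *h) { h->prev = h->next = h; }

void list_add(struct list_node *n, struct list_node *head)
{
    n->next = head->next;
    n->prev = head;
    head->next->prev = n;
    head->next = n;
}

void list_del(struct list_node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    n->prev = n->next = n;
}

/* Recover the containing page from its embedded node, which is how
 * list_for_each_entry() walks the pools in the hits above. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
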
/freebsd/sys/ofed/include/rdma/
ib_fmr_pool.h
     78  u64 page_list[0];   member
     89  u64 *page_list,
ib_umem_odp.h
     54  struct page **page_list;   member
/freebsd/sys/contrib/xen/
kexec.h
     98  unsigned long page_list[KEXEC_XEN_NO_PAGES];   member
/freebsd/sys/dev/mlx4/mlx4_en/
mlx4_en_resources.c
     99  pages[i] = virt_to_page(buf->page_list[i].buf);   in mlx4_en_map_buffer()
/freebsd/sys/ofed/drivers/infiniband/ulp/sdp/
sdp_zcopy.c
     88  payload_pg = sg_page(&chunk->page_list[0]);   in sdp_post_srcavail()
    473  &chunk->page_list[j]) >> PAGE_SHIFT;   in sdp_alloc_fmr()
    477  &chunk->page_list[j]) +   in sdp_alloc_fmr()
/freebsd/sys/dev/gve/
gve_adminq.c
    511  __be64 *page_list;   in gve_adminq_register_page_list() local
    520  page_list = dma.cpu_addr;   in gve_adminq_register_page_list()
    523  page_list[i] = htobe64(qpl->dmas[i].bus_addr);   in gve_adminq_register_page_list()
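
Line 523 shows the usual device-facing convention: the adapter expects an array of big-endian bus addresses, so every entry is byte-swapped on the way into the DMA buffer. A portable model of that fill loop; htobe64 comes from <sys/endian.h> on FreeBSD, so the fallback below (using the GCC/Clang __builtin_bswap64) is only for the sketch:

#include <stdint.h>

uint64_t to_be64(uint64_t x)
{
    const union { uint32_t u; uint8_t b; } probe = { 1 };

    if (!probe.b)
        return x;                        /* host is already big-endian */
    return __builtin_bswap64(x);         /* little-endian: byte-swap */
}

/* Fill the device-visible page list with big-endian bus addresses. */
void fill_page_list(uint64_t *page_list, const uint64_t *bus_addrs,
                    int num_entries)
{
    for (int i = 0; i < num_entries; ++i)
        page_list[i] = to_be64(bus_addrs[i]);
}
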
/freebsd/sys/dev/mlx4/mlx4_ib/
mlx4_ib_mr.c
    464  int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,   in mlx4_ib_map_phys_fmr() argument
    470  return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,   in mlx4_ib_map_phys_fmr()
/freebsd/sys/dev/mlx4/
device.h
    637  struct mlx4_buf_list *page_list;   member
   1080  return (u8 *)buf->page_list[offset >> PAGE_SHIFT].buf +   in mlx4_buf_offset()
   1108  int start_index, int npages, u64 *page_list);
   1396  int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
/freebsd/sys/cam/scsi/
scsi_xpt.c
   1439  struct scsi_vpd_supported_page_list *page_list;   in probedone() local
   1442  page_list =   in probedone()
   1451  if (page_list == NULL) {   in probedone()
   1457  path->device->supported_vpds_len = page_list->length +   in probedone()
   1459  path->device->supported_vpds = (uint8_t *)page_list;   in probedone()
   1473  if (page_list)   in probedone()
   1474  free(page_list, M_CAMXPT);   in probedone()
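
Here page_list is not a DMA page array at all but SPC's "Supported VPD Pages" page (page code 0x00): a 4-byte header whose last byte counts the page codes that follow, which is why line 1457 sizes supported_vpds_len as length plus the header. A model of the layout and a lookup over it — field names mirror FreeBSD's scsi_vpd_supported_page_list, the list capacity is illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Model of the SPC Supported VPD Pages page (page code 0x00). */
struct vpd_supported_page_list {
    uint8_t device;
    uint8_t page_code;                   /* 0x00 */
    uint8_t reserved;
    uint8_t length;                      /* entries used in list[] */
    uint8_t list[251];
};

bool vpd_page_supported(const struct vpd_supported_page_list *pl,
                        uint8_t page)
{
    for (int i = 0; i < pl->length; ++i)
        if (pl->list[i] == page)
            return true;
    return false;
}
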
/freebsd/sys/dev/usb/storage/
umass.c
   2482  struct scsi_vpd_supported_page_list *page_list;   in umass_cam_cb() local
   2485  page_list = (struct scsi_vpd_supported_page_list *)csio->data_ptr;   in umass_cam_cb()
   2486  if (page_list->length + 1 < SVPD_SUPPORTED_PAGES_SIZE) {   in umass_cam_cb()
   2487  page_list->list[page_list->length] = SVPD_UNIT_SERIAL_NUMBER;   in umass_cam_cb()
   2488  page_list->length++;   in umass_cam_cb()
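
umass patches the list a device returns, appending the Unit Serial Number page code so CAM will probe it, but only after the bounds check at line 2486. A small sketch of that append; the capacity constant and struct name are illustrative stand-ins for SVPD_SUPPORTED_PAGES_SIZE and scsi_vpd_supported_page_list:

#include <stdint.h>

#define VPD_LIST_SIZE    251             /* illustrative capacity of list[] */
#define VPD_UNIT_SERIAL  0x80            /* Unit Serial Number page code */

struct vpd_page_list {
    uint8_t device, page_code, reserved, length;
    uint8_t list[VPD_LIST_SIZE];
};

/* Append a page code to the device-reported list if there is room. */
void vpd_advertise_serial(struct vpd_page_list *pl)
{
    if (pl->length + 1 < VPD_LIST_SIZE) {
        pl->list[pl->length] = VPD_UNIT_SERIAL;
        pl->length++;
    }
}
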
/freebsd/sys/dev/bnxt/bnxt_re/
qplib_fp.h
    274  u64 *page_list;   member
ib_verbs.h
    229  u64 *page_list;   member