/linux/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
  vmap.c
      32  u32 vmap = 0;                                            in nvbios_vmap_table() (local)
      36  vmap = nvbios_rd32(bios, bit_P.offset + 0x20);           in nvbios_vmap_table()
      37  if (vmap) {                                              in nvbios_vmap_table()
      38  *ver = nvbios_rd08(bios, vmap + 0);                      in nvbios_vmap_table()
      42  *hdr = nvbios_rd08(bios, vmap + 1);                      in nvbios_vmap_table()
      43  *cnt = nvbios_rd08(bios, vmap + 3);                      in nvbios_vmap_table()
      44  *len = nvbios_rd08(bios, vmap + 2);                      in nvbios_vmap_table()
      45  return vmap;                                             in nvbios_vmap_table()
      60  u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);  in nvbios_vmap_parse() (local)
      62  switch (!!vmap * *ver) {                                 in nvbios_vmap_parse()
      [all …]
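
The nouveau hits above show the usual VBIOS table walk: a 32-bit pointer to the
voltage-map table is fetched from the BIT 'P' structure, then one-byte version,
header-size, entry-size, and entry-count fields are decoded from the table
header. A minimal sketch of that decode, with struct bios and the rd08()/rd32()
helpers as hypothetical stand-ins for the nvkm types and nvbios_rd08()/nvbios_rd32():

    struct bios;                               /* assumed handle type */
    extern u8  rd08(struct bios *, u32 off);   /* hypothetical byte read  */
    extern u32 rd32(struct bios *, u32 off);   /* hypothetical dword read */

    /* Hedged sketch, not the nouveau code itself: decode a versioned
     * VBIOS table header found at 'vmap'. */
    static u32 vmap_table(struct bios *bios, u32 bit_P_offset,
                          u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
    {
            u32 vmap = rd32(bios, bit_P_offset + 0x20);  /* table pointer */

            if (vmap) {
                    *ver = rd08(bios, vmap + 0);  /* table version     */
                    *hdr = rd08(bios, vmap + 1);  /* header length     */
                    *len = rd08(bios, vmap + 2);  /* bytes per entry   */
                    *cnt = rd08(bios, vmap + 3);  /* number of entries */
                    return vmap;
            }
            return 0;
    }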
|
/linux/drivers/gpu/drm/xe/display/
  xe_panic.c
      16  struct iosys_map vmap;                                          (member)
      23  if (!panic->vmap.is_iomem && iosys_map_is_set(&panic->vmap)) {  in xe_panic_kunmap()
      24  drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE);           in xe_panic_kunmap()
      25  kunmap_local(panic->vmap.vaddr);                                in xe_panic_kunmap()
      27  iosys_map_clear(&panic->vmap);                                  in xe_panic_kunmap()
      63  iosys_map_set_vaddr_iomem(&panic->vmap,                         in xe_panic_page_set_pixel()
      67  iosys_map_set_vaddr(&panic->vmap,                               in xe_panic_page_set_pixel()
      74  if (iosys_map_is_set(&panic->vmap))                             in xe_panic_page_set_pixel()
      75  iosys_map_wr(&panic->vmap, offse…                               in xe_panic_page_set_pixel()
      [all …]
  xe_hdcp_gsc.c
      81  xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo));                   in intel_hdcp_gsc_initialize_message()
     131  struct iosys_map *map = &gsc_context->hdcp_bo->vmap;                  in xe_gsc_send_sync()
     171  addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,  in intel_hdcp_gsc_msg_send()
     174  xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,     in intel_hdcp_gsc_msg_send()
     197  xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,          in intel_hdcp_gsc_msg_send()
|
  xe_dsb_buffer.c
      24  iosys_map_wr(&dsb_buf->bo->vmap, idx * 4, u32, val);       in intel_dsb_buffer_write()
      29  return iosys_map_rd(&dsb_buf->bo->vmap, idx * 4, u32);     in intel_dsb_buffer_read()
      36  iosys_map_memset(&dsb_buf->bo->vmap, idx * 4, val, size);  in intel_dsb_buffer_memset()
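
All three xe/display hits go through struct iosys_map (<linux/iosys-map.h>),
which hides whether a buffer lives in system memory or behind an __iomem
mapping, so one accessor works for both. A minimal sketch of the read/write
pattern, assuming 'buf' is an ordinary kernel virtual address:

    #include <linux/iosys-map.h>

    /* Hedged sketch: write and read back a u32 through an iosys_map. */
    static u32 iosys_map_demo(void *buf)
    {
            struct iosys_map map;

            iosys_map_set_vaddr(&map, buf);          /* system-memory flavour  */
            iosys_map_wr(&map, 0, u32, 0xdeadbeef);  /* 32-bit write, offset 0 */
            return iosys_map_rd(&map, 0, u32);       /* read it back           */
    }

For I/O memory the setup call becomes iosys_map_set_vaddr_iomem(), and the same
iosys_map_wr()/iosys_map_rd() calls then route through readl()/writel()-style
accessors, which is why xe_panic.c above can switch flavours at runtime.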
|
/linux/drivers/net/
  vrf.c
     110  struct vrf_map vmap;                                                            (member)
     136  return &nn_vrf->vmap;                                                           in netns_vrf_map()
     182  static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,           in vrf_map_lookup_elem() (argument)
     189  hash_for_each_possible(vmap->ht, me, hnode, key) {                              in vrf_map_lookup_elem()
     197  static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)     in vrf_map_add_elem() (argument)
     203  hash_add(vmap->ht, &me->hnode, key);                                            in vrf_map_add_elem()
     211  static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)     in vrf_map_lock() (argument)
     213  spin_lock(&vmap->vmap_lock);                                                    in vrf_map_lock()
     216  static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)   in vrf_map_unlock() (argument)
     218  spin_unlock(&vmap->vmap_lock);                                                  in vrf_map_unlock()
     [all …]
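
vrf.c pairs a fixed-size kernel hashtable with a spinlock: lookups walk one
bucket with hash_for_each_possible() and recheck the key (different keys can
land in the same bucket), while inserts take the lock and call hash_add(). A
minimal sketch of the same pattern, with illustrative names rather than the
vrf.c originals:

    #include <linux/hashtable.h>
    #include <linux/spinlock.h>

    struct demo_elem {
            u32 tb_id;                /* lookup key        */
            struct hlist_node hnode;  /* hashtable linkage */
    };

    static DEFINE_HASHTABLE(demo_ht, 4);  /* 2^4 buckets */
    static DEFINE_SPINLOCK(demo_lock);

    static struct demo_elem *demo_lookup(u32 tb_id)
    {
            struct demo_elem *e;

            hash_for_each_possible(demo_ht, e, hnode, tb_id)
                    if (e->tb_id == tb_id)  /* bucket may hold collisions */
                            return e;
            return NULL;
    }

    static void demo_add(struct demo_elem *e)
    {
            spin_lock(&demo_lock);
            hash_add(demo_ht, &e->hnode, e->tb_id);
            spin_unlock(&demo_lock);
    }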
|
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/volt/
  base.c
      27  #include <subdev/bios/vmap.h>
      87  u32 vmap;                                                      in nvkm_volt_map_min() (local)
      89  vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);   in nvkm_volt_map_min()
      90  if (vmap) {                                                    in nvkm_volt_map_min()
     109  u32 vmap;                                                      in nvkm_volt_map() (local)
     111  vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);   in nvkm_volt_map()
     112  if (vmap) {                                                    in nvkm_volt_map()
     295  struct nvbios_vmap vmap;                                       in nvkm_volt_ctor() (local)
     301  if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {  in nvkm_volt_ctor()
     302  volt->max0_id = vmap…                                          in nvkm_volt_ctor()
     [all …]
/linux/mm/
  vma_exec.c
     107  int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,   in create_init_stack_vma() (argument)
     152  *vmap = vma;                                                                    in create_init_stack_vma()
     161  *vmap = NULL;                                                                   in create_init_stack_vma()
|
/linux/drivers/gpu/drm/xe/
  xe_sriov_vf_ccs.c
     164  xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP,                     in alloc_bb_pool()
     166  xe_map_memset(xe, &sa_manager->shadow->vmap, offset, MI_NOOP,                 in alloc_bb_pool()
     170  xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END);       in alloc_bb_pool()
     171  xe_map_wr(xe, &sa_manager->shadow->vmap, offset, u32, MI_BATCH_BUFFER_END);   in alloc_bb_pool()
     396  xe_map_wr(xe, &lrc->bo->vmap, XE_SRIOV_VF_CCS_RW_BB_ADDR_OFFSET, u32, addr);  in xe_sriov_vf_ccs_rw_update_bb_addr()
|
  xe_lmtt.c
      87  xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo));                 in lmtt_pt_alloc()
     310  lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));  in lmtt_write_pte()
     312  xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);          in lmtt_write_pte()
     315  lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));  in lmtt_write_pte()
     317  xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);          in lmtt_write_pte()
|
  xe_bo.c
     210  return true; /* needs vmap */                                     in force_contiguous()
    1296  if (iosys_map_is_null(&bo->vmap)) {                               in xe_bo_evict_pinned_copy()
    1303  xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,          in xe_bo_evict_pinned_copy()
    1446  if (iosys_map_is_null(&bo->vmap)) {                               in xe_bo_restore_pinned()
    1453  xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,            in xe_bo_restore_pinned()
    1640  struct iosys_map vmap;                                            in xe_ttm_access_memory() (local)
    1669  iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +    in xe_ttm_access_memory()
    1672  xe_map_memcpy_to(xe, &vmap, page_offse…                           in xe_ttm_access_memory()
    [all …]
  xe_migrate.c
     167  xe_map_wr(xe, &bo->vmap, ofs, u64, entry);                      in xe_migrate_program_identity()
     176  xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);                in xe_migrate_program_identity()
     180  xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);                in xe_migrate_program_identity()
     221  xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);          in xe_migrate_prepare_vm()
     230  xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);      in xe_migrate_prepare_vm()
     247  xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,              in xe_migrate_prepare_vm()
     264  xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,              in xe_migrate_prepare_vm()
     289  xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,   in xe_migrate_prepare_vm()
     297  xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +               in xe_migrate_prepare_vm()
     309  xe_map_wr(xe, &bo->vmap, map_of…                                in xe_migrate_prepare_vm()
     [all …]
/linux/kernel/dma/
  remap.c
      29  vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,   in dma_common_pages_remap()
      53  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);    in dma_common_contiguous_remap()
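
Both dma-remap hits are the core vmap() call that recurs throughout this
listing: stitch an array of (possibly discontiguous) struct page pointers into
one contiguous kernel virtual range. A minimal sketch of the map/unmap pair,
assuming the caller already holds references on the pages:

    #include <linux/vmalloc.h>

    /* Hedged sketch: map 'count' pages read-write into vmalloc space. */
    static void *demo_map_pages(struct page **pages, unsigned int count)
    {
            /* VM_MAP marks a plain vmap area; PAGE_KERNEL = normal RW */
            return vmap(pages, count, VM_MAP, PAGE_KERNEL);
    }

    static void demo_unmap_pages(void *vaddr)
    {
            vunmap(vaddr);  /* tears down the alias, not the pages */
    }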
|
/linux/tools/testing/selftests/net/
  test_vxlan_vnifiltering.sh
     221  for vmap in $(echo $vattrs | cut -d "," -f1- --output-delimiter=' ')
     223  local vid=$(echo $vmap | awk -F'-' '{print ($1)}')
     224  local family=$(echo $vmap | awk -F'-' '{print ($2)}')
     225  local localip=$(echo $vmap | awk -F'-' '{print ($3)}')
     226  local group=$(echo $vmap | awk -F'-' '{print ($4)}')
     227  local vtype=$(echo $vmap | awk -F'-' '{print ($5)}')
     228  local port=$(echo $vmap | awk -F'-' '{print ($6)}')
|
/linux/Documentation/translations/zh_CN/mm/
  vmalloced-kernel-stacks.rst
      48  - Stacks in vmalloc space need to work reliably. For example, if vmap page tables are created on demand, when the stack points to…
      91  …work. The architecture-specific vmap stack allocator takes care of this detail.
|
  highmem.rst
     114  * vmap(). This can be used to map multiple physical pages into a contiguous virtual space for a long-lived mapping. It requires global synchronization to tear down…
|
/linux/drivers/gpu/drm/etnaviv/
  etnaviv_gem.c
     337  etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);        in etnaviv_gem_vmap()
     366  return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);  in etnaviv_gem_vmap_impl()
     497  .vmap = etnaviv_gem_vmap_impl,
     566  .vmap = etnaviv_gem_prime_vmap,
     730  .vmap = etnaviv_gem_vmap_impl,
|
  etnaviv_gem.h
      70  void *(*vmap)(struct etnaviv_gem_object *);   (member)
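
etnaviv routes mapping through a per-object-type ops table: etnaviv_gem_vmap()
(hit 337 above) calls the object's ->vmap() hook and caches the result. A
minimal sketch of that callback shape, with illustrative types rather than the
etnaviv ones:

    /* Hedged sketch of the ops-table pattern, not the etnaviv structs. */
    struct demo_obj;

    struct demo_ops {
            void *(*vmap)(struct demo_obj *obj);  /* per-type mapping hook */
    };

    struct demo_obj {
            const struct demo_ops *ops;
            void *vaddr;  /* cached mapping, NULL until first use */
    };

    static void *demo_vmap(struct demo_obj *obj)
    {
            if (!obj->vaddr)
                    obj->vaddr = obj->ops->vmap(obj);
            return obj->vaddr;
    }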
|
/linux/arch/arm/mm/
  fault-armv.c
     253  p1 = vmap(&page, 1, VM_IOREMAP, prot);   in check_writebuffer_bugs()
     254  p2 = vmap(&page, 1, VM_IOREMAP, prot);   in check_writebuffer_bugs()
|
/linux/drivers/gpu/drm/xen/
  xen_drm_front_gem.c
     108  .vmap = xen_drm_front_gem_prime_vmap,
     292  vaddr = vmap(xen_obj->pages, xen_obj->num_pages,   in xen_drm_front_gem_prime_vmap()
|
/linux/Documentation/features/vm/huge-vmap/
  arch-support.txt
       2  # Feature name: huge-vmap
|
/linux/drivers/accel/amdxdna/
  amdxdna_ubuf.c
     112  kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);   in amdxdna_ubuf_vmap()
     130  .vmap = amdxdna_ubuf_vmap,
|
/linux/drivers/gpu/drm/rockchip/
  rockchip_drm_gem.c
     139  rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,   in rockchip_gem_alloc_iommu()
     142  DRM_ERROR("failed to vmap() buffer\n");                           in rockchip_gem_alloc_iommu()
     278  .vmap = rockchip_gem_prime_vmap,
     524  vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,            in rockchip_gem_prime_vmap()
|
/linux/drivers/gpu/drm/v3d/
  v3d_bo.c
      71  .vmap = drm_gem_shmem_object_vmap,
     198  bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,   in v3d_get_bo_vaddr()
|
/linux/Documentation/core-api/
  cachetlb.rst
     376  vmap/vmalloc API. Since kernel I/O goes via physical pages, the I/O
     378  the only aliases. This isn't true for vmap aliases, so anything in
     379  the kernel trying to do I/O to vmap areas must manually manage
     380  coherency. It must do this by flushing the vmap range before doing
     386  the vmap area. This is to make sure that any data the kernel
     387  modified in the vmap range is made visible to the physical
     394  the cache for a given virtual address range in the vmap area
     398  vmap area.
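
cachetlb.rst's point is that I/O happens through the physical (lowmem) alias,
so a driver doing I/O on a vmap()ed buffer must flush or invalidate the virtual
alias by hand. A minimal sketch of that discipline, where do_io_write() and
do_io_read() are hypothetical I/O helpers standing in for the real transfer:

    #include <linux/highmem.h>

    extern void do_io_write(void *buf, int size);  /* hypothetical */
    extern void do_io_read(void *buf, int size);   /* hypothetical */

    static void io_from_vmap(void *vmap_addr, int size)
    {
            flush_kernel_vmap_range(vmap_addr, size);  /* push CPU writes to RAM */
            do_io_write(vmap_addr, size);              /* device reads the data  */
    }

    static void io_to_vmap(void *vmap_addr, int size)
    {
            do_io_read(vmap_addr, size);                    /* device wrote RAM  */
            invalidate_kernel_vmap_range(vmap_addr, size);  /* drop stale lines  */
    }

On architectures without aliasing data caches, both calls compile away to
no-ops, so the discipline costs nothing where it is not needed.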
|
/linux/arch/hexagon/kernel/
  vdso.c
      29  vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);   in vdso_init()
|