Home
last modified time | relevance | path

Searched full:vmap (Results 1 – 25 of 188) sorted by relevance

12345678

/linux/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
H A Dvmap.c26 #include <subdev/bios/vmap.h>
32 u32 vmap = 0; in nvbios_vmap_table() local
36 vmap = nvbios_rd32(bios, bit_P.offset + 0x20); in nvbios_vmap_table()
37 if (vmap) { in nvbios_vmap_table()
38 *ver = nvbios_rd08(bios, vmap + 0); in nvbios_vmap_table()
42 *hdr = nvbios_rd08(bios, vmap + 1); in nvbios_vmap_table()
43 *cnt = nvbios_rd08(bios, vmap + 3); in nvbios_vmap_table()
44 *len = nvbios_rd08(bios, vmap + 2); in nvbios_vmap_table()
45 return vmap; in nvbios_vmap_table()
60 u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len); in nvbios_vmap_parse() local
[all …]
/linux/drivers/gpu/drm/xe/display/
H A Dxe_panic.c16 struct iosys_map vmap; member
23 if (!panic->vmap.is_iomem && iosys_map_is_set(&panic->vmap)) { in xe_panic_kunmap()
24 drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE); in xe_panic_kunmap()
25 kunmap_local(panic->vmap.vaddr); in xe_panic_kunmap()
27 iosys_map_clear(&panic->vmap); in xe_panic_kunmap()
63 iosys_map_set_vaddr_iomem(&panic->vmap, in xe_panic_page_set_pixel()
67 iosys_map_set_vaddr(&panic->vmap, in xe_panic_page_set_pixel()
74 if (iosys_map_is_set(&panic->vmap)) in xe_panic_page_set_pixel()
75 iosys_map_wr(&panic->vmap, offset, u32, color); in xe_panic_page_set_pixel()
H A Dxe_hdcp_gsc.c81 xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo)); in intel_hdcp_gsc_initialize_message()
131 struct iosys_map *map = &gsc_context->hdcp_bo->vmap; in xe_gsc_send_sync()
171 addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap, in intel_hdcp_gsc_msg_send()
174 xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off, in intel_hdcp_gsc_msg_send()
197 xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap, in intel_hdcp_gsc_msg_send()
H A Dxe_dsb_buffer.c24 iosys_map_wr(&dsb_buf->bo->vmap, idx * 4, u32, val); in intel_dsb_buffer_write()
29 return iosys_map_rd(&dsb_buf->bo->vmap, idx * 4, u32); in intel_dsb_buffer_read()
36 iosys_map_memset(&dsb_buf->bo->vmap, idx * 4, val, size); in intel_dsb_buffer_memset()
/linux/drivers/net/
H A Dvrf.c110 struct vrf_map vmap; member
136 return &nn_vrf->vmap; in netns_vrf_map()
182 static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap, in vrf_map_lookup_elem() argument
189 hash_for_each_possible(vmap->ht, me, hnode, key) { in vrf_map_lookup_elem()
197 static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me) in vrf_map_add_elem() argument
203 hash_add(vmap->ht, &me->hnode, key); in vrf_map_add_elem()
211 static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock) in vrf_map_lock() argument
213 spin_lock(&vmap->vmap_lock); in vrf_map_lock()
216 static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock) in vrf_map_unlock() argument
218 spin_unlock(&vmap->vmap_lock); in vrf_map_unlock()
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/volt/
H A Dbase.c27 #include <subdev/bios/vmap.h>
87 u32 vmap; in nvkm_volt_map_min() local
89 vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info); in nvkm_volt_map_min()
90 if (vmap) { in nvkm_volt_map_min()
109 u32 vmap; in nvkm_volt_map() local
111 vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info); in nvkm_volt_map()
112 if (vmap) { in nvkm_volt_map()
295 struct nvbios_vmap vmap; in nvkm_volt_ctor() local
301 if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) { in nvkm_volt_ctor()
302 volt->max0_id = vmap.max0; in nvkm_volt_ctor()
[all …]
/linux/Documentation/mm/
H A Dvmalloced-kernel-stacks.rst48 vmap page tables are created on demand, either this mechanism
100 - On arm64, all VMAP's stacks need to have the same alignment to ensure
101 that VMAP'd stack overflow detection works correctly. Arch specific
102 vmap stack allocator takes care of this detail.
131 Testing VMAP allocation with guard pages
/linux/include/trace/events/
H A Dvmalloc.h11 * alloc_vmap_area - called when a new vmap allocation occurs
54 * purge_vmap_area_lazy - called when vmap areas were lazily freed
57 * @npurged: number of purged vmap areas
87 * free_vmap_area_noflush - called when a vmap area is freed
/linux/mm/
H A Dvma_exec.c103 * On success, returns 0 and sets *vmap to the stack VMA and *top_mem_p to the
107 int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap, in create_init_stack_vma() argument
152 *vmap = vma; in create_init_stack_vma()
161 *vmap = NULL; in create_init_stack_vma()
H A Dvmalloc.c6 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
795 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
797 * matches small vmap mappings.
889 * This augment red-black tree represents the free vmap space.
891 * address. It is used for allocation and merging when a vmap
932 * An effective vmap-node logic. Users make use of nodes instead
956 * is fully disabled. Later on, after vmap is initialized these
963 /* A simple iterator over all vmap-nodes. */
1237 * there is no free vmap space. Normally it does not in get_va_next_sibling()
1764 * Also we can hit this path in case of regular "vmap" in va_clip()
[all …]
/linux/Documentation/core-api/
H A Dcachetlb.rst376 vmap/vmalloc API. Since kernel I/O goes via physical pages, the I/O
378 the only aliases. This isn't true for vmap aliases, so anything in
379 the kernel trying to do I/O to vmap areas must manually manage
380 coherency. It must do this by flushing the vmap range before doing
386 the vmap area. This is to make sure that any data the kernel
387 modified in the vmap range is made visible to the physical
394 the cache for a given virtual address range in the vmap area
398 vmap area.
/linux/drivers/dma-buf/
H A Ddma-buf.c978 * - &dma_buf_ops.vmap()
1357 * vmap interface is introduced. Note that on very old 32-bit architectures
1358 * vmalloc space might be limited and result in vmap calls failing.
1367 * The vmap call can fail if there is no vmap support in the exporter, or if
1369 * count for all vmap access and calls down into the exporter's vmap function
1371 * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1567 * address space. Same restrictions as for vmap and friends apply.
1568 * @dmabuf: [in] buffer to vmap
1569 * @map: [out] returns the vmap pointer
1593 if (!dmabuf->ops->vmap) in dma_buf_vmap()
[all …]
/linux/arch/riscv/include/asm/
H A Dirq_stack.h20 * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
/linux/kernel/dma/
H A Dremap.c29 vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, in dma_common_pages_remap()
53 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); in dma_common_contiguous_remap()
/linux/drivers/gpu/drm/tiny/
H A Dsharp-memory.c129 const struct iosys_map *vmap, in sharp_memory_set_tx_buffer_data() argument
143 drm_fb_xrgb8888_to_mono(&dst, &pitch, vmap, fb, &clip, fmtcnv_state); in sharp_memory_set_tx_buffer_data()
150 const struct iosys_map *vmap, in sharp_memory_update_display() argument
166 sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, vmap, clip, pitch, fmtcnv_state); in sharp_memory_update_display()
209 static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap, in sharp_memory_fb_dirty() argument
222 sharp_memory_update_display(smd, fb, vmap, clip, fmtconv_state); in sharp_memory_fb_dirty()
/linux/drivers/crypto/ccp/
H A Dsev-dev-tio.h44 struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */
45 struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */
/linux/drivers/gpu/drm/xe/
H A Dxe_sriov_vf_ccs.c164 xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP, in alloc_bb_pool()
166 xe_map_memset(xe, &sa_manager->shadow->vmap, offset, MI_NOOP, in alloc_bb_pool()
170 xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END); in alloc_bb_pool()
171 xe_map_wr(xe, &sa_manager->shadow->vmap, offset, u32, MI_BATCH_BUFFER_END); in alloc_bb_pool()
396 xe_map_wr(xe, &lrc->bo->vmap, XE_SRIOV_VF_CCS_RW_BB_ADDR_OFFSET, u32, addr); in xe_sriov_vf_ccs_rw_update_bb_addr()
H A Dxe_lmtt.c87 xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo)); in lmtt_pt_alloc()
310 lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32)); in lmtt_write_pte()
312 xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte); in lmtt_write_pte()
315 lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64)); in lmtt_write_pte()
317 xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte); in lmtt_write_pte()
H A Dxe_bo.c210 return true; /* needs vmap */ in force_contiguous()
1296 if (iosys_map_is_null(&bo->vmap)) { in xe_bo_evict_pinned_copy()
1303 xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0, in xe_bo_evict_pinned_copy()
1446 if (iosys_map_is_null(&bo->vmap)) { in xe_bo_restore_pinned()
1453 xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr, in xe_bo_restore_pinned()
1640 struct iosys_map vmap; in xe_ttm_access_memory() local
1669 iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping + in xe_ttm_access_memory()
1672 xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count); in xe_ttm_access_memory()
1674 xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count); in xe_ttm_access_memory()
2700 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); in xe_managed_bo_create_from_data()
[all …]
/linux/tools/testing/selftests/net/
H A Dtest_vxlan_vnifiltering.sh221 for vmap in $(echo $vattrs | cut -d "," -f1- --output-delimiter=' ')
223 local vid=$(echo $vmap | awk -F'-' '{print ($1)}')
224 local family=$(echo $vmap | awk -F'-' '{print ($2)}')
225 local localip=$(echo $vmap | awk -F'-' '{print ($3)}')
226 local group=$(echo $vmap | awk -F'-' '{print ($4)}')
227 local vtype=$(echo $vmap | awk -F'-' '{print ($5)}')
228 local port=$(echo $vmap | awk -F'-' '{print ($6)}')
/linux/drivers/gpu/drm/tegra/
H A Dfbdev.c123 bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP, in tegra_fbdev_driver_fbdev_probe()
126 dev_err(drm->dev, "failed to vmap() framebuffer\n"); in tegra_fbdev_driver_fbdev_probe()
/linux/drivers/gpu/drm/rockchip/
H A Drockchip_drm_gem.c139 rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, in rockchip_gem_alloc_iommu()
142 DRM_ERROR("failed to vmap() buffer\n"); in rockchip_gem_alloc_iommu()
278 .vmap = rockchip_gem_prime_vmap,
524 vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, in rockchip_gem_prime_vmap()
/linux/drivers/gpu/drm/etnaviv/
H A Detnaviv_gem.c337 etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj); in etnaviv_gem_vmap()
366 return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot); in etnaviv_gem_vmap_impl()
497 .vmap = etnaviv_gem_vmap_impl,
566 .vmap = etnaviv_gem_prime_vmap,
730 .vmap = etnaviv_gem_vmap_impl,
/linux/kernel/module/
H A Dinternal.h176 * decompression was used two vmap() spaces were used. These failures can
186 * they waste twice as much vmap space. With module decompression three
187 * times the module's size vmap space is wasted.
/linux/drivers/gpu/drm/sitronix/
H A Dst7586.c75 struct iosys_map dst_map, vmap; in st7586_xrgb8888_to_gray332() local
82 iosys_map_set_vaddr(&vmap, vaddr); in st7586_xrgb8888_to_gray332()
83 drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip, fmtcnv_state); in st7586_xrgb8888_to_gray332()

12345678