/linux/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
    32  u32 vmap = 0;  [in nvbios_vmap_table(), local]
    36  vmap = nvbios_rd32(bios, bit_P.offset + 0x20);  [in nvbios_vmap_table()]
    37  if (vmap) {  [in nvbios_vmap_table()]
    38  *ver = nvbios_rd08(bios, vmap + 0);  [in nvbios_vmap_table()]
    42  *hdr = nvbios_rd08(bios, vmap + 1);  [in nvbios_vmap_table()]
    43  *cnt = nvbios_rd08(bios, vmap + 3);  [in nvbios_vmap_table()]
    44  *len = nvbios_rd08(bios, vmap + 2);  [in nvbios_vmap_table()]
    45  return vmap;  [in nvbios_vmap_table()]
    60  u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);  [in nvbios_vmap_parse(), local]
    62  switch (!!vmap * *ver) {  [in nvbios_vmap_parse()]
    [all …]
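Taken together, these hits show the usual BIT-table lookup pattern: the BIT 'P' entry yields a pointer to the vmap table, whose first four bytes give the version, header size, entry length, and entry count. A minimal sketch of how an entry would then be located, assuming the header layout read above (the helper name is invented; it is not the driver's exact code):

    u32 nvbios_vmap_entry_sketch(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
    {
            u8  hdr, cnt;
            u32 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);

            /* entries follow the header, each *len bytes long */
            if (vmap && idx < cnt)
                    return vmap + hdr + idx * *len;
            return 0;
    }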
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
    34  nvkm-y += nvkm/subdev/bios/vmap.o
/linux/mm/vma_exec.c
    107  int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,  [in create_init_stack_vma(), argument]
    152  *vmap = vma;  [in create_init_stack_vma()]
    161  *vmap = NULL;  [in create_init_stack_vma()]
/linux/mm/vma.h
    381  struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
    631  int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
/linux/kernel/dma/remap.c
    29  vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,  [in dma_common_pages_remap()]
    53  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  [in dma_common_contiguous_remap()]
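Both hits wrap vmap() to get a contiguous kernel view of an array of pages; the first converts a byte size into the page count vmap() expects. A minimal sketch of that pattern, assuming the caller supplies the pages and pgprot (not the file's exact code):

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    static void *dma_remap_sketch(struct page **pages, size_t size, pgprot_t prot)
    {
            /* vmap() takes a page count, so round the byte size up */
            unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

            return vmap(pages, count, VM_DMA_COHERENT, prot);
    }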
/linux/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
    221  for vmap in $(echo $vattrs | cut -d "," -f1- --output-delimiter=' ')
    223  local vid=$(echo $vmap | awk -F'-' '{print ($1)}')
    224  local family=$(echo $vmap | awk -F'-' '{print ($2)}')
    225  local localip=$(echo $vmap | awk -F'-' '{print ($3)}')
    226  local group=$(echo $vmap | awk -F'-' '{print ($4)}')
    227  local vtype=$(echo $vmap | awk -F'-' '{print ($5)}')
    228  local port=$(echo $vmap | awk -F'-' '{print ($6)}')
/linux/Documentation/translations/zh_CN/mm/vmalloced-kernel-stacks.rst
    48  - Stacks in vmalloc space need to work reliably. For example, if vmap page tables are created on demand, when the stack points to
    91  work. The architecture-specific vmap stack allocator takes care of this detail.
/linux/Documentation/translations/zh_CN/mm/highmem.rst
    114  * vmap(). This can be used to map multiple physical pages into a contiguous virtual space for a long duration. It needs global synchronization to unmap
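As the highmem documentation says, vmap() is meant for long-duration mappings of multiple pages, and the expensive part is teardown. A minimal sketch of the map/use/unmap lifetime, assuming pages[] holds nr_pages already-allocated pages:

    #include <linux/vmalloc.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    static int vmap_lifetime_sketch(struct page **pages, unsigned int nr_pages)
    {
            /* one contiguous kernel virtual mapping over all pages */
            void *addr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

            if (!addr)
                    return -ENOMEM;

            memset(addr, 0, nr_pages * PAGE_SIZE);  /* use the mapping */

            vunmap(addr);  /* teardown needs global TLB synchronization */
            return 0;
    }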
/linux/arch/arm/mm/fault-armv.c
    253  p1 = vmap(&page, 1, VM_IOREMAP, prot);  [in check_writebuffer_bugs()]
    254  p2 = vmap(&page, 1, VM_IOREMAP, prot);  [in check_writebuffer_bugs()]
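check_writebuffer_bugs() maps the same single page twice, giving two kernel virtual aliases of one physical page so a write through one alias can be checked through the other. A minimal sketch of that double-alias probe, assuming the caller has chosen the pgprot; the real check differs in detail:

    #include <linux/gfp.h>
    #include <linux/vmalloc.h>

    static bool aliases_coherent_sketch(pgprot_t prot)
    {
            struct page *page = alloc_page(GFP_KERNEL);
            unsigned long *p1, *p2;
            bool ok = false;

            if (!page)
                    return false;

            /* two virtual aliases of the same physical page */
            p1 = vmap(&page, 1, VM_IOREMAP, prot);
            p2 = vmap(&page, 1, VM_IOREMAP, prot);

            if (p1 && p2) {
                    *p1 = 0xdeadbeef;               /* write via one alias */
                    ok = (*p2 == 0xdeadbeef);       /* read via the other  */
            }

            if (p1)
                    vunmap(p1);
            if (p2)
                    vunmap(p2);
            __free_page(page);
            return ok;
    }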
/linux/Documentation/features/vm/huge-vmap/arch-support.txt
    2  # Feature name: huge-vmap
/linux/Documentation/core-api/cachetlb.rst
    376  vmap/vmalloc API. Since kernel I/O goes via physical pages, the I/O
    378  the only aliases. This isn't true for vmap aliases, so anything in
    379  the kernel trying to do I/O to vmap areas must manually manage
    380  coherency. It must do this by flushing the vmap range before doing
    386  the vmap area. This is to make sure that any data the kernel
    387  modified in the vmap range is made visible to the physical
    394  the cache for a given virtual address range in the vmap area
    398  vmap area.
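These excerpts describe the manual coherency rule for I/O through vmap aliases: flush the range before the I/O, invalidate it afterwards. A minimal sketch of that discipline using the two helpers this document introduces, flush_kernel_vmap_range() and invalidate_kernel_vmap_range() (do_io_to_pages() is a hypothetical stand-in for the actual I/O):

    #include <linux/highmem.h>

    static void vmap_io_sketch(void *vaddr, int size)
    {
            /* push CPU writes made via the alias out to the physical pages */
            flush_kernel_vmap_range(vaddr, size);

            do_io_to_pages(vaddr, size);    /* hypothetical I/O on the pages */

            /* drop stale cache lines before reading data the device wrote */
            invalidate_kernel_vmap_range(vaddr, size);
    }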
/linux/arch/hexagon/kernel/vdso.c
    29  vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);  [in vdso_init()]
/linux/drivers/gpu/drm/xe/xe_migrate.c
    165  xe_map_wr(xe, &bo->vmap, ofs, u64, entry);  [in xe_migrate_program_identity()]
    174  xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);  [in xe_migrate_program_identity()]
    178  xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);  [in xe_migrate_program_identity()]
    219  xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);  [in xe_migrate_prepare_vm()]
    228  xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);  [in xe_migrate_prepare_vm()]
    245  xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,  [in xe_migrate_prepare_vm()]
    262  xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,  [in xe_migrate_prepare_vm()]
    287  xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,  [in xe_migrate_prepare_vm()]
    295  xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +  [in xe_migrate_prepare_vm()]
    307  xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,  [in xe_migrate_prepare_vm()]
    [all …]
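Here "vmap" is the xe buffer object's iosys_map, and xe_map_wr() is xe's wrapper around iosys_map_wr(), which writes a typed value at a byte offset whether the BO is mapped in system or I/O memory. A minimal sketch of the underlying pattern (the helper and its parameters are invented for illustration):

    #include <linux/iosys-map.h>

    static void write_ptes_sketch(struct iosys_map *map, u32 ofs,
                                  const u64 *entries, int count)
    {
            /* typed writes at byte offsets; works for vaddr and I/O maps */
            for (int i = 0; i < count; i++)
                    iosys_map_wr(map, ofs + i * sizeof(u64), u64, entries[i]);
    }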
/linux/drivers/gpu/drm/xe/xe_guc_log.c
    163  xe_map_memcpy_from(xe, snapshot->copy[i], &log->bo->vmap,  [in xe_guc_log_snapshot_capture()]
    268  xe_map_memset(xe, &bo->vmap, 0, 0, guc_log_size());  [in xe_guc_log_init()]
/linux/drivers/gpu/drm/xe/xe_guc_hwconfig.c
    104  xe_map_memcpy_from(xe, dst, &guc->hwconfig.bo->vmap, 0,  [in xe_guc_hwconfig_copy()]
/linux/drivers/gpu/drm/xe/xe_gsc_proxy.c
    430  gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);  [in proxy_channel_alloc()]
    431  gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE);  [in proxy_channel_alloc()]
/linux/drivers/gpu/drm/etnaviv/etnaviv_gem.h
    70  void *(*vmap)(struct etnaviv_gem_object *);  [member]
/linux/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
    108  .vmap = etnaviv_gem_prime_vmap_impl,
/linux/drivers/gpu/drm/tegra/fbdev.c
    123  bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,  [in tegra_fbdev_driver_fbdev_probe()]
/linux/include/linux/dma-buf.h
    278  int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);  [member]
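This is the exporter hook behind dma_buf_vmap(): produce a kernel mapping of the buffer and describe it through the iosys_map. A minimal sketch of one way to implement it, loosely following the system-heap hit later in this listing (struct my_buffer and its fields are invented for illustration):

    #include <linux/dma-buf.h>
    #include <linux/iosys-map.h>
    #include <linux/vmalloc.h>

    struct my_buffer {
            struct page **pages;
            unsigned int npages;
    };

    static int my_dmabuf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
    {
            struct my_buffer *buf = dmabuf->priv;
            void *vaddr = vmap(buf->pages, buf->npages, VM_MAP, PAGE_KERNEL);

            if (!vaddr)
                    return -ENOMEM;

            /* the mapping lives in system memory, not I/O memory */
            iosys_map_set_vaddr(map, vaddr);
            return 0;
    }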
/linux/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
    36  iosys_map_set_vaddr(&bo->vmap, buf);  [in replacement_xe_managed_bo_create_pin_map()]
    242  iosys_map_memcpy_from(d, &cache->sam->bo->vmap, 0, bytes);  [in test_flush()]
/linux/drivers/dma-buf/heaps/system_heap.c
    238  vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);  [in system_heap_do_vmap()]
    313  .vmap = system_heap_vmap,
/linux/Documentation/mm/vmalloced-kernel-stacks.rst
    48  vmap page tables are created on demand, either this mechanism
    102  vmap stack allocator takes care of this detail.
/linux/arch/arm/kernel/sleep.S
    123  @ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
/linux/arch/sh/kernel/io_trapped.c
    73  tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);  [in register_trapped_io()]