
Searched refs:kmap (Results 1 – 25 of 53) sorted by relevance

/linux/Documentation/translations/zh_CN/mm/
highmem.rst 64 It is valid to take pagefaults in a local kmap region, unless the context in which the local mapping is acquired does not allow it for other reasons
76 While it is significantly faster than kmap(), in the highmem case it comes with restrictions on pointer validity. Contrary to kmap() mappings,
81 kmap(), by mapping the page in the same thread that will use it, and prefer kmap_local_page().
98 * kmap(). This should be used for short-duration mappings of a single page, with no restrictions on preemption or migration. It comes with an overhead,
102 mapping changes must be broadcast to all CPUs (cores); kmap() also requires, when the kmap pool wraps around (TLB entries are exhausted and allocation must restart from the first
104 slot becomes available. Therefore, kmap() is only callable from preemptible context.
107 highmem mappings are short-lived and only used in one place. This means that in such cases the cost of kmap() is mostly
108 wasted. kmap() was not intended for long-term mappings, but it has evolved in that direction, and in newer
111 On 64-bit systems, calling kmap_local_page(), kmap_atomic() and kmap() has no real effect, because 64-bit
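
These zh_CN highmem.rst lines (a translation of Documentation/mm/highmem.rst, matched again further down) recommend kmap_local_page() over kmap(). A minimal sketch of the recommended pattern in kernel-style C; copy_from_page_example() is a hypothetical helper for illustration only:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Map a (possibly highmem) page, use it, and unmap it, all in
	 * the same thread of execution, as the documentation advises.
	 */
	static void copy_from_page_example(struct page *page, void *dst,
					   size_t off, size_t len)
	{
		/* The address is valid only in the current context. */
		void *vaddr = kmap_local_page(page);

		memcpy(dst, vaddr + off, len);

		/* Local mappings are released in reverse (stack) order. */
		kunmap_local(vaddr);
	}
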
/linux/tools/perf/util/
map.c 226 map = calloc(1, sizeof(*map) + (dso__kernel(dso) ? sizeof(struct kmap) : 0)); in map__new2()
244 struct kmap *kmap = __map__kmap((struct map *)map); in __map__is_extra_kernel_map() local
246 return kmap && kmap->name[0]; in __map__is_extra_kernel_map()
419 size += sizeof(struct kmap); in map__clone()
516 static const struct kmap *__map__const_kmap(const struct map *map);
531 const struct kmap *kmap = __map__const_kmap(map); in map__rip_2objdump() local
539 if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps) { in map__rip_2objdump()
540 struct machine *machine = maps__machine(kmap->kmaps); in map__rip_2objdump()
614 struct kmap *__map__kmap(struct map *map) in __map__kmap()
620 return (struct kmap *)(&RC_CHK_ACCESS(map)[1]); in __map__kmap()
[all …]
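
The map.c hits show perf co-allocating a struct kmap directly behind a kernel struct map (map__new2(), line 226) and recovering it by indexing one element past the map pointer (__map__kmap(), line 620). A self-contained sketch of this trailing-struct idiom, with simplified stand-in types and without the RC_CHK_ACCESS reference-checking wrapper:

	#include <stdlib.h>

	struct map { long start, end; };
	struct kmap { char name[32]; };

	/* Allocate a map with an optional kmap placed directly behind it. */
	static struct map *map_new(int is_kernel)
	{
		return calloc(1, sizeof(struct map) +
				 (is_kernel ? sizeof(struct kmap) : 0));
	}

	/* The trailing kmap starts one whole struct map past 'map'. */
	static struct kmap *map_kmap(struct map *map)
	{
		return (struct kmap *)&map[1];
	}
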
bpf_lock_contention.c 114 struct map *kmap; in init_numa_data() local
141 &kmap); in init_numa_data()
143 skel->rodata->contig_page_data_addr = map__unmap_ip(kmap, sym->start); in init_numa_data()
144 map__put(kmap); in init_numa_data()
155 &kmap); in init_numa_data()
159 skel->rodata->node_data_addr = map__unmap_ip(kmap, sym->start); in init_numa_data()
160 map__put(kmap); in init_numa_data()
240 struct map *kmap; in lock_contention_prepare() local
246 &kmap); in lock_contention_prepare()
260 addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start); in lock_contention_prepare()
[all …]
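
bpf_lock_contention.c and builtin-lock.c (further down) share one pattern: look up a kernel symbol, convert the map-relative symbol start back into a kernel address with map__unmap_ip(), then drop the map reference with map__put(). A sketch of that sequence; the by-name lookup machine__find_kernel_symbol_by_name() is an assumption, since the calls in these hits are truncated to "&kmap);":

	/* Resolve a kernel symbol name to its runtime address using the
	 * perf util calls seen in these hits (types from perf headers).
	 */
	static u64 ksym_addr(struct machine *machine, const char *name)
	{
		struct map *kmap;
		struct symbol *sym;
		u64 addr = 0;

		sym = machine__find_kernel_symbol_by_name(machine, name, &kmap);
		if (sym) {
			addr = map__unmap_ip(kmap, sym->start);
			map__put(kmap);
		}
		return addr;
	}
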
symbol-elf.c 1314 * @kmap: kernel maps and relocation reference symbol
1320 static bool ref_reloc_sym_not_found(struct kmap *kmap) in ref_reloc_sym_not_found()
1322 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && in ref_reloc_sym_not_found()
1323 !kmap->ref_reloc_sym->unrelocated_addr;
1328 * @kmap: kernel maps and relocation reference symbol
1334 static u64 ref_reloc(struct kmap *kmap) in ref_reloc()
1318 ref_reloc_sym_not_found(struct kmap * kmap) ref_reloc_sym_not_found() argument
1332 ref_reloc(struct kmap * kmap) ref_reloc() argument
1346 dso__process_kernel_symbol(struct dso * dso,struct map * map,GElf_Sym * sym,GElf_Shdr * shdr,struct maps * kmaps,struct kmap * kmap,struct dso ** curr_dsop,const char * section_name,bool adjust_kernel_syms,bool kmodule,bool * remap_kernel,u64 max_text_sh_offset) dso__process_kernel_symbol() argument
1472 struct kmap *kmap = dso__kernel(dso) ? map__kmap(map) : NULL; dso__load_sym_internal() local
[all …]
machine.c 1042 struct kmap *kmap; in machine__create_extra_kernel_map() local
1053 kmap = map__kmap(map); in machine__create_extra_kernel_map()
1055 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); in machine__create_extra_kernel_map()
1061 kmap->name, map__start(map), map__end(map)); in machine__create_extra_kernel_map()
1109 struct kmap *kmap = __map__kmap(map); in machine__map_x86_64_entry_trampolines_cb() local
1111 if (!kmap || !is_entry_trampoline(kmap->name)) in machine__map_x86_64_entry_trampolines_cb()
1194 struct kmap *kmap; in machine__destroy_kernel_maps() local
1200 kmap = map__kmap(map); in machine__destroy_kernel_maps()
1202 if (kmap && kmap->ref_reloc_sym) { in machine__destroy_kernel_maps()
1203 zfree((char **)&kmap->ref_reloc_sym->name); in machine__destroy_kernel_maps()
[all …]
symbol.c 1261 struct kmap *kmap = map__kmap(map); in validate_kcore_addresses() local
1263 if (!kmap) in validate_kcore_addresses()
1266 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) { in validate_kcore_addresses()
1270 kmap->ref_reloc_sym->name, &start)) in validate_kcore_addresses()
1272 if (start != kmap->ref_reloc_sym->addr) in validate_kcore_addresses()
1490 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta) in kallsyms__delta() argument
1494 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) in kallsyms__delta()
1497 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr)) in kallsyms__delta()
1500 *delta = addr - kmap->ref_reloc_sym->addr; in kallsyms__delta()
1507 struct kmap *kmap = map__kmap(map); in __dso__load_kallsyms() local
[all …]
map.h 41 struct kmap;
43 struct kmap *__map__kmap(struct map *map);
44 struct kmap *map__kmap(struct map *map);
maps.h 17 struct kmap { struct
synthetic-events.c 1130 struct kmap *kmap; in __perf_event__synthesize_kernel_mmap() local
1136 kmap = map__kmap(map); in __perf_event__synthesize_kernel_mmap()
1137 if (!kmap->ref_reloc_sym) in __perf_event__synthesize_kernel_mmap()
1164 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; in __perf_event__synthesize_kernel_mmap()
1169 event->mmap2.pgoff = kmap->ref_reloc_sym->addr; in __perf_event__synthesize_kernel_mmap()
1177 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; in __perf_event__synthesize_kernel_mmap()
1182 event->mmap.pgoff = kmap->ref_reloc_sym->addr; in __perf_event__synthesize_kernel_mmap()
/linux/tools/perf/arch/x86/util/
event.c 28 struct kmap *kmap; in perf_event__synthesize_extra_kmaps_cb() local
34 kmap = map__kmap(map); in perf_event__synthesize_extra_kmaps_cb()
37 PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) + in perf_event__synthesize_extra_kmaps_cb()
60 strlcpy(event->mmap.filename, kmap->name, PATH_MAX); in perf_event__synthesize_extra_kmaps_cb()
/linux/Documentation/mm/
highmem.rst 61 These functions should always be used, whereas kmap_atomic() and kmap() have
70 It's valid to take pagefaults in a local kmap region, unless the context
87 While they are significantly faster than kmap(), for the highmem case they
88 come with restrictions about the pointers' validity. Contrary to kmap()
95 therefore try to design their code to avoid the use of kmap() by mapping
131 * kmap(). This function has been deprecated; use kmap_local_page().
146 Mapping changes must be propagated across all the CPUs. kmap() also
147 requires global TLB invalidation when the kmap's pool wraps and it might
149 available. Therefore, kmap() is only callable from preemptible context.
154 kmap() is mostly wasted in such cases. kmap() was not intended for long
[all …]
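
Line 131 of this file marks kmap() as deprecated in favour of kmap_local_page(). For contrast with the earlier kmap_local_page() sketch, this is the legacy pairing that the remaining call sites in this listing (vxfs_subr.c, pagelist.c) still use; the returned address is visible on all CPUs and stays valid until kunmap(), which is exactly why kmap() carries the global-TLB and preemptible-context costs described above:

	#include <linux/highmem.h>

	/* Legacy long-lived mapping: valid everywhere until kunmap(). */
	static void *map_page_globally(struct page *page)
	{
		return kmap(page);	/* may sleep waiting for a pool slot */
	}

	static void unmap_page_globally(struct page *page)
	{
		kunmap(page);		/* takes the page, not the vaddr */
	}
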
/linux/arch/m68k/mm/
Makefile 9 obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o
11 obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
/linux/drivers/gpu/drm/nouveau/include/nvkm/core/
memory.h 41 int (*kmap)(struct nvkm_memory *, struct nvkm_memory **); member
68 #define nvkm_memory_kmap(p,i) ((p)->func->kmap ? (p)->func->kmap((p), (i)) : -ENOSYS)
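
The nvkm_memory_kmap() macro is a common kernel idiom: the operation is optional in the function table, and callers get -ENOSYS when the backend does not implement it. A self-contained sketch of the same dispatch pattern with made-up types:

	#include <errno.h>
	#include <stddef.h>

	struct object;

	struct object_funcs {
		int (*kmap)(struct object *, void **out);	/* may be NULL */
	};

	struct object {
		const struct object_funcs *func;
	};

	/* Call the backend if it implements kmap, else "not implemented". */
	static int object_kmap(struct object *p, void **out)
	{
		return p->func->kmap ? p->func->kmap(p, out) : -ENOSYS;
	}
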
/linux/drivers/gpu/drm/panthor/
panthor_fw.c 447 bool was_mapped = !!section->mem->kmap; in panthor_fw_init_section_mem()
458 memcpy(section->mem->kmap, section->data.buf, section->data.size); in panthor_fw_init_section_mem()
460 memset(section->mem->kmap + section->data.size, 0, in panthor_fw_init_section_mem()
509 memset(mem->kmap, 0, panthor_kernel_bo_size(mem)); in panthor_fw_alloc_queue_iface_mem()
510 *input = mem->kmap; in panthor_fw_alloc_queue_iface_mem()
511 *output = mem->kmap + SZ_4K; in panthor_fw_alloc_queue_iface_mem()
864 return ptdev->fw->shared_section->mem->kmap + (mcu_va - shared_mem_start); in iface_fw_to_cpu_addr()
885 cs_iface->control = ptdev->fw->shared_section->mem->kmap + iface_offset; in panthor_init_cs_iface()
936 csg_iface->control = ptdev->fw->shared_section->mem->kmap + iface_offset; in panthor_init_csg_iface()
984 if (!ptdev->fw->shared_section->mem->kmap) in panthor_fw_init_ifaces()
[all …]
panthor_sched.c 449 void *kmap; member
862 if (queue->syncwait.kmap) { in panthor_queue_put_syncwait_obj()
863 struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap); in panthor_queue_put_syncwait_obj()
866 queue->syncwait.kmap = NULL; in panthor_queue_put_syncwait_obj()
881 if (queue->syncwait.kmap) in panthor_queue_get_syncwait_obj()
882 return queue->syncwait.kmap + queue->syncwait.offset; in panthor_queue_get_syncwait_obj()
895 queue->syncwait.kmap = map.vaddr; in panthor_queue_get_syncwait_obj()
896 if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap)) in panthor_queue_get_syncwait_obj()
899 return queue->syncwait.kmap + queue->syncwait.offset; in panthor_queue_get_syncwait_obj()
2210 syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj)); in group_term_post_processing()
[all …]
/linux/include/linux/
highmem-internal.h 40 static inline void *kmap(struct page *page) in kmap() function
170 static inline void *kmap(struct page *page) in kmap() function
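
kmap() is defined twice in highmem-internal.h, once under CONFIG_HIGHMEM and once without it, which is why the symbol appears at two line numbers (40 and 170). The non-highmem variant is trivial, since every page already has a linear-map address; a rough sketch of its shape, reconstructed from context rather than quoted from the source:

	/* !CONFIG_HIGHMEM: all pages are permanently mapped, so kmap()
	 * reduces to the page's linear-map address.
	 */
	static inline void *kmap(struct page *page)
	{
		might_sleep();		/* keep the API contract uniform */
		return page_address(page);
	}
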
/linux/mm/
highmem.c 577 void *kmap; in __kmap_local_page_prot() local
588 kmap = arch_kmap_local_high_get(page); in __kmap_local_page_prot()
589 if (kmap) in __kmap_local_page_prot()
590 return kmap; in __kmap_local_page_prot()
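
This __kmap_local_page_prot() fragment is a fast path: on architectures that keep certain highmem pages permanently mapped, arch_kmap_local_high_get() returns the existing address and no new fixmap slot is consumed. The generic shape of that pattern, with create_local_mapping() as a hypothetical stand-in for the real slow path:

	/* Reuse an existing mapping before creating one. */
	static void *local_map_page(struct page *page)
	{
		void *kmap = arch_kmap_local_high_get(page);

		if (kmap)
			return kmap;	/* already mapped by the arch */

		return create_local_mapping(page);	/* hypothetical */
	}
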
/linux/fs/freevxfs/
vxfs_subr.c 51 kmap(pp); in vxfs_get_page()
/linux/drivers/gpu/drm/loongson/
lsdc_ttm.h 35 struct ttm_bo_kmap_obj kmap; member
/linux/arch/sh/mm/
Makefile 19 mmu-$(CONFIG_MMU) := extable_32.o fault.o ioremap.o kmap.o \
/linux/tools/perf/
builtin-lock.c 978 struct map *kmap; in report_lock_contention_begin_event() local
996 &kmap); in report_lock_contention_begin_event()
1010 addrs[filters.nr_addrs++] = map__unmap_ip(kmap, sym->start); in report_lock_contention_begin_event()
1022 sym = machine__find_kernel_symbol(machine, key, &kmap); in report_lock_contention_begin_event()
1660 struct map *kmap; in print_lock_stat_stdio() local
1670 sym = machine__find_kernel_symbol(con->machine, ip, &kmap); in print_lock_stat_stdio()
1671 get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf)); in print_lock_stat_stdio()
1713 struct map *kmap; in print_lock_stat_csv() local
1723 sym = machine__find_kernel_symbol(con->machine, ip, &kmap); in print_lock_stat_csv()
1724 get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf)); in print_lock_stat_csv()
/linux/net/ceph/
pagelist.c 70 pl->mapped_tail = kmap(page); in ceph_pagelist_addpage()
/linux/tools/testing/scatterlist/linux/
mm.h 78 static inline void *kmap(struct page *page) in kmap() function
/linux/drivers/gpu/drm/radeon/
radeon_object.c 234 r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap); in radeon_bo_kmap()
238 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); in radeon_bo_kmap()
252 ttm_bo_kunmap(&bo->kmap); in radeon_bo_kunmap()
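
The radeon hits above and the nouveau hits below follow the same TTM sequence: ttm_bo_kmap() fills a struct ttm_bo_kmap_obj, ttm_kmap_obj_virtual() extracts the CPU pointer (and whether it is I/O memory), and ttm_bo_kunmap() releases the mapping. A condensed sketch of that sequence; the header path matches recent kernels, and the driver-specific wrapper types are left out:

	#include <drm/ttm/ttm_bo.h>

	/* Map a whole TTM buffer object and return its CPU address. */
	static void *example_bo_kmap(struct ttm_buffer_object *tbo,
				     struct ttm_bo_kmap_obj *kmap)
	{
		bool is_iomem;
		int r;

		r = ttm_bo_kmap(tbo, 0, PFN_UP(tbo->base.size), kmap);
		if (r)
			return NULL;

		return ttm_kmap_obj_virtual(kmap, &is_iomem);
	}

	static void example_bo_kunmap(struct ttm_bo_kmap_obj *kmap)
	{
		ttm_bo_kunmap(kmap);
	}
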
/linux/drivers/gpu/drm/nouveau/
nouveau_bo.c 676 ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap); in nouveau_bo_map()
688 ttm_bo_kunmap(&nvbo->kmap); in nouveau_bo_unmap()
803 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr16()
817 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_rd32()
831 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); in nouveau_bo_wr32()
