
Searched full:mm (Results 1 – 25 of 2461) sorted by relevance


/linux/arch/powerpc/include/asm/
mmu_context.h
7 #include <linux/mm.h>
18 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
20 extern void destroy_context(struct mm_struct *mm);
24 extern bool mm_iommu_preregistered(struct mm_struct *mm);
25 extern long mm_iommu_new(struct mm_struct *mm,
28 extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
31 extern long mm_iommu_put(struct mm_struct *mm,
33 extern void mm_iommu_init(struct mm_struct *mm);
34 extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
36 extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
[all …]
/linux/include/trace/events/
ksm.h
71 * @mm: address of the mm object of the process
77 TP_PROTO(void *mm),
79 TP_ARGS(mm),
82 __field(void *, mm)
86 __entry->mm = mm;
89 TP_printk("mm %p", __entry->mm)
95 * @mm: address of the mm object of the process
101 TP_PROTO(void *mm),
103 TP_ARGS(mm)
109 * @mm: address of the mm object of the process
[all …]
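
The ksm.h fragments above are pieces of a single tracepoint definition. As a hedged sketch of how they fit together (the event name ksm_example_scan and the surrounding trace-header boilerplate are illustrative, not taken from ksm.h):

/* Normally wrapped in the usual TRACE_SYSTEM / define_trace.h boilerplate, omitted here. */
TRACE_EVENT(ksm_example_scan,
	TP_PROTO(void *mm),                 /* prototype of the trace call */
	TP_ARGS(mm),                        /* arguments forwarded to assignment */
	TP_STRUCT__entry(
		__field(void *, mm)         /* one recorded field: the mm pointer */
	),
	TP_fast_assign(
		__entry->mm = mm;           /* copy the argument into the ring buffer */
	),
	TP_printk("mm %p", __entry->mm)     /* how the record is printed */
);
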
/linux/include/linux/
mmap_lock.h
15 #include <linux/sched/mm.h>
26 void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
27 void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
29 void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
31 static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm, in __mmap_lock_trace_start_locking() argument
35 __mmap_lock_do_trace_start_locking(mm, write); in __mmap_lock_trace_start_locking()
38 static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm, in __mmap_lock_trace_acquire_returned() argument
42 __mmap_lock_do_trace_acquire_returned(mm, write, success); in __mmap_lock_trace_acquire_returned()
45 static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) in __mmap_lock_trace_released() argument
48 __mmap_lock_do_trace_released(mm, write); in __mmap_lock_trace_released()
[all …]
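
The wrappers above only emit tracepoints; the lock itself is taken by callers. A minimal, hedged sketch of the read-side pattern that triggers the start_locking / acquire_returned / released events (vma_start_example is an illustrative name, not a kernel function):

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static unsigned long vma_start_example(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start = 0;

	mmap_read_lock(mm);          /* start_locking / acquire_returned fire here */
	vma = vma_lookup(mm, addr);  /* the VMA tree may only be walked under the lock */
	if (vma)
		start = vma->vm_start;
	mmap_read_unlock(mm);        /* released fires here */

	return start;
}
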
mmu_notifier.h
40 * that the mm refcount is zero and the range is no longer accessible.
66 * Called either by mmu_notifier_unregister or when the mm is
69 * methods (the ones invoked outside the mm context) and it
74 * tsk->mm == mm exits.
81 * last thread of this mm quits, you've also to be sure that
89 struct mm_struct *mm);
101 struct mm_struct *mm,
111 struct mm_struct *mm,
122 struct mm_struct *mm,
199 struct mm_struct *mm,
[all …]
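
The comment fragments above describe the ->release callback, which runs once no thread with tsk->mm == mm remains. A minimal, hedged sketch of wiring it up (my_release, my_ops, my_subscription and my_attach are illustrative names; the caller is assumed to still hold an mm_users reference when registering):

#include <linux/mmu_notifier.h>

static void my_release(struct mmu_notifier *subscription, struct mm_struct *mm)
{
	/* the address space is going away: stop all secondary-MMU access now */
}

static const struct mmu_notifier_ops my_ops = {
	.release = my_release,
};

static struct mmu_notifier my_subscription = {
	.ops = &my_ops,
};

static int my_attach(struct mm_struct *mm)
{
	/* mm->mm_users is assumed to be elevated here (e.g. via mmget()) */
	return mmu_notifier_register(&my_subscription, mm);
}
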
/linux/arch/m68k/include/asm/
mmu_context.h
28 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
32 if (mm->context != NO_CONTEXT) in get_mmu_context()
45 mm->context = ctx; in get_mmu_context()
46 context_mm[ctx] = mm; in get_mmu_context()
52 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) argument
58 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
60 if (mm->context != NO_CONTEXT) { in destroy_context()
61 clear_bit(mm->context, context_map); in destroy_context()
62 mm->context = NO_CONTEXT; in destroy_context()
75 get_mmu_context(tsk->mm); in switch_mm()
[all …]
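
get_mmu_context()/destroy_context() above implement a small bitmap allocator: a context ID is a bit index in context_map, and NO_CONTEXT marks an mm that has not been assigned one yet. A self-contained, hedged user-space sketch of the same pattern (ctx_map, alloc_ctx and free_ctx are illustrative names, not kernel symbols):

#include <stdint.h>

#define NR_CTX     64
#define NO_CONTEXT (-1)

static uint64_t ctx_map;	/* bit i set => context ID i is in use */

static int alloc_ctx(void)
{
	for (int i = 0; i < NR_CTX; i++) {
		if (!(ctx_map & (UINT64_C(1) << i))) {
			ctx_map |= UINT64_C(1) << i;	/* like set_bit(i, context_map) */
			return i;
		}
	}
	return NO_CONTEXT;				/* every context ID is taken */
}

static void free_ctx(int ctx)
{
	if (ctx != NO_CONTEXT)
		ctx_map &= ~(UINT64_C(1) << ctx);	/* like clear_bit() in destroy_context() */
}
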
/linux/include/asm-generic/
pgalloc.h
12 * @mm: the mm_struct of the current context
19 static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm) in __pte_alloc_one_kernel_noprof() argument
25 if (!pagetable_pte_ctor(mm, ptdesc)) { in __pte_alloc_one_kernel_noprof()
39 * @mm: the mm_struct of the current context
43 static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm) in pte_alloc_one_kernel_noprof() argument
45 return __pte_alloc_one_kernel_noprof(mm); in pte_alloc_one_kernel_noprof()
52 * @mm: the mm_struct of the current context
55 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
62 * @mm: the mm_struct of the current context
72 static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp) in __pte_alloc_one_noprof() argument
[all …]
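
These helpers pair allocation and release of single page-table pages. A minimal, hedged sketch of the kernel-PTE variant documented above (pte_roundtrip_example is an illustrative name, not a kernel function):

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>

static int pte_roundtrip_example(struct mm_struct *mm)
{
	pte_t *pte = pte_alloc_one_kernel(mm);   /* one zeroed PTE page, or NULL */

	if (!pte)
		return -ENOMEM;

	/* ... normally the page would now be installed into a pmd ... */

	pte_free_kernel(mm, pte);                /* give the page back */
	return 0;
}
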
mmu_context.h
15 * @mm: the currently active mm context which is becoming lazy
18 * tsk->mm will be NULL
21 static inline void enter_lazy_tlb(struct mm_struct *mm, in enter_lazy_tlb() argument
29 * @tsk: task struct for the mm
30 * @mm: the new mm struct
35 struct mm_struct *mm) in init_new_context() argument
42 * destroy_context - Undo init_new_context when the mm is going away
43 * @mm: old mm struct
46 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
52 * activate_mm - called after exec switches the current task to a new mm, to switch to it
[all …]
/linux/arch/arm/mm/
pgd.c
3 * linux/arch/arm/mm/pgd.c
7 #include <linux/mm.h>
17 #include "mm.h"
20 #define _pgd_alloc(mm) kmalloc_objs(pgd_t, PTRS_PER_PGD, GFP_KERNEL | __GFP_ZERO) argument
21 #define _pgd_free(mm, pgd) kfree(pgd) argument
23 #define _pgd_alloc(mm) __pgd_alloc(mm, 2) argument
24 #define _pgd_free(mm, pgd) __pgd_free(mm, pgd) argument
30 pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
38 new_pgd = _pgd_alloc(mm); in pgd_alloc()
55 new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), in pgd_alloc()
[all …]
/linux/mm/
debug.c
3 * mm/debug.c
5 * mm/ specific debug routines.
10 #include <linux/mm.h>
157 pr_emerg("vma %px start %px end %px mm %px\n" in dump_vma()
175 void dump_mm(const struct mm_struct *mm) in dump_mm() argument
177 pr_emerg("mm %px task_size %lu\n" in dump_mm()
202 mm, mm->task_size, in dump_mm()
203 mm->mmap_base, mm->mmap_legacy_base, in dump_mm()
204 mm->pgd, atomic_read(&mm->mm_users), in dump_mm()
205 atomic_read(&mm->mm_count), in dump_mm()
[all …]
/linux/arch/arm/include/asm/
mmu_context.h
24 void __check_vmalloc_seq(struct mm_struct *mm);
27 static inline void check_vmalloc_seq(struct mm_struct *mm) in check_vmalloc_seq() argument
30 unlikely(atomic_read(&mm->context.vmalloc_seq) != in check_vmalloc_seq()
32 __check_vmalloc_seq(mm); in check_vmalloc_seq()
38 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
42 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
44 atomic64_set(&mm->context.id, 0); in init_new_context()
49 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
52 static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, in a15_erratum_get_cpumask() argument
62 static inline void check_and_switch_context(struct mm_struct *mm, in check_and_switch_context() argument
[all …]
/linux/tools/testing/vma/tests/
merge.c
72 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm, in try_merge_new_vma() argument
92 return alloc_and_link_vma(mm, start, end, pgoff, vma_flags); in try_merge_new_vma()
100 struct mm_struct mm = {}; in test_simple_merge()
101 struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vma_flags); in test_simple_merge()
102 struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vma_flags); in test_simple_merge()
103 VMA_ITERATOR(vmi, &mm, 0x1000); in test_simple_merge()
105 .mm = &mm, in test_simple_merge()
113 ASSERT_FALSE(attach_vma(&mm, vma_left)); in test_simple_merge()
114 ASSERT_FALSE(attach_vma(&mm, vma_right)); in test_simple_merge()
99 struct mm_struct mm = {}; test_simple_merge() local
133 struct mm_struct mm = {}; test_simple_modify() local
193 struct mm_struct mm = {}; test_simple_expand() local
221 struct mm_struct mm = {}; test_simple_shrink() local
242 struct mm_struct mm = {}; __test_merge_new() local
473 struct mm_struct mm = {}; test_vma_merge_special_flags() local
545 struct mm_struct mm = {}; test_vma_merge_with_close() local
754 struct mm_struct mm = {}; test_vma_merge_new_with_close() local
811 struct mm_struct mm = {}; __test_merge_existing() local
1071 struct mm_struct mm = {}; test_anon_vma_non_mergeable() local
1158 struct mm_struct mm = {}; test_dup_anon_vma() local
1318 struct mm_struct mm = {}; test_vmi_prealloc_fail() local
1384 struct mm_struct mm = {}; test_merge_extend() local
1414 struct mm_struct mm = {}; test_expand_only_mode() local
[all …]
/linux/fs/proc/
task_nommu.c
3 #include <linux/mm.h>
11 #include <linux/sched/mm.h>
21 void task_mem(struct seq_file *m, struct mm_struct *mm) in task_mem() argument
23 VMA_ITERATOR(vmi, mm, 0); in task_mem()
28 mmap_read_lock(mm); in task_mem()
40 if (atomic_read(&mm->mm_count) > 1 || in task_mem()
50 if (atomic_read(&mm->mm_count) > 1) in task_mem()
51 sbytes += kobjsize(mm); in task_mem()
53 bytes += kobjsize(mm); in task_mem()
72 mmap_read_unlock(mm); in task_mem()
[all …]
/linux/drivers/net/ethernet/mscc/
ocelot_mm.c
55 struct ocelot_mm_state *mm = &ocelot->mm[port]; in ocelot_port_update_active_preemptible_tcs() local
66 ocelot_port->speed == SPEED_1000) && mm->tx_active) in ocelot_port_update_active_preemptible_tcs()
67 val = mm->preemptible_tcs; in ocelot_port_update_active_preemptible_tcs()
75 mm->active_preemptible_tcs = val; in ocelot_port_update_active_preemptible_tcs()
79 "port %d %s/%s, MM TX %s, preemptible TCs 0x%x, active 0x%x\n", in ocelot_port_update_active_preemptible_tcs()
82 mm->tx_active ? "active" : "inactive", mm->preemptible_tcs, in ocelot_port_update_active_preemptible_tcs()
83 mm->active_preemptible_tcs); in ocelot_port_update_active_preemptible_tcs()
93 struct ocelot_mm_state *mm = &ocelot->mm[port]; in ocelot_port_change_fp() local
97 if (mm->preemptible_tcs == preemptible_tcs) in ocelot_port_change_fp()
100 mm->preemptible_tcs = preemptible_tcs; in ocelot_port_change_fp()
[all …]
/linux/rust/helpers/
mm.c
3 #include <linux/mm.h>
4 #include <linux/sched/mm.h>
6 __rust_helper void rust_helper_mmgrab(struct mm_struct *mm) in rust_helper_mmgrab() argument
8 mmgrab(mm); in rust_helper_mmgrab()
11 __rust_helper void rust_helper_mmdrop(struct mm_struct *mm) in rust_helper_mmdrop() argument
13 mmdrop(mm); in rust_helper_mmdrop()
16 __rust_helper void rust_helper_mmget(struct mm_struct *mm) in rust_helper_mmget() argument
18 mmget(mm); in rust_helper_mmget()
21 __rust_helper bool rust_helper_mmget_not_zero(struct mm_struct *mm) in rust_helper_mmget_not_zero() argument
23 return mmget_not_zero(mm); in rust_helper_mmget_not_zero()
26 rust_helper_mmap_read_lock(struct mm_struct * mm) rust_helper_mmap_read_lock() argument
31 rust_helper_mmap_read_trylock(struct mm_struct * mm) rust_helper_mmap_read_trylock() argument
36 rust_helper_mmap_read_unlock(struct mm_struct * mm) rust_helper_mmap_read_unlock() argument
41 rust_helper_vma_lookup(struct mm_struct * mm,unsigned long addr) rust_helper_vma_lookup() argument
[all …]
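
These helpers simply re-export the C refcounting and locking primitives to Rust. As a hedged reminder of how the two reference counts pair up (pin_examples is an illustrative name): mmgrab()/mmdrop() hold mm_count and keep the struct mm_struct allocated, while mmget_not_zero()/mmput() hold mm_users and keep the address space contents alive:

#include <linux/sched/mm.h>

static void pin_examples(struct mm_struct *mm)
{
	mmgrab(mm);                 /* mm_count reference: struct stays allocated */
	/* ... safe to read mm fields, not safe to touch user mappings ... */
	mmdrop(mm);

	if (mmget_not_zero(mm)) {   /* mm_users reference, unless already torn down */
		/* ... safe to walk VMAs / access the address space ... */
		mmput(mm);
	}
}
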
/linux/arch/x86/kernel/
ldt.c
19 #include <linux/mm.h>
42 void load_mm_ldt(struct mm_struct *mm) in load_mm_ldt() argument
47 ldt = READ_ONCE(mm->context.ldt); in load_mm_ldt()
50 * Any change to mm->context.ldt is followed by an IPI to all in load_mm_ldt()
51 * CPUs with the mm active. The LDT will not be freed until in load_mm_ldt()
93 * Load the LDT if either the old or new mm had an LDT. in switch_ldt()
95 * An mm will never go from having an LDT to not having an LDT. Two in switch_ldt()
138 struct mm_struct *mm = __mm; in flush_ldt() local
140 if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) in flush_ldt()
143 load_mm_ldt(mm); in flush_ldt()
[all …]
/linux/kernel/
fork.c
12 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
18 #include <linux/sched/mm.h>
45 #include <linux/mm.h>
120 #include "../mm/internal.h"
480 /* SLAB cache for mm_struct structures (tsk->mm) */
562 void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm) in dup_mm_exe_file() argument
567 RCU_INIT_POINTER(mm->exe_file, exe_file); in dup_mm_exe_file()
577 static inline int mm_alloc_pgd(struct mm_struct *mm) in mm_alloc_pgd() argument
579 mm->pgd = pgd_alloc(mm); in mm_alloc_pgd()
585 mm_free_pgd(struct mm_struct * mm) mm_free_pgd() argument
590 mm_alloc_pgd(mm) global() argument
591 mm_free_pgd(mm) global() argument
597 mm_alloc_id(struct mm_struct * mm) mm_alloc_id() argument
608 mm_free_id(struct mm_struct * mm) mm_free_id() argument
620 mm_alloc_id(struct mm_struct * mm) mm_alloc_id() argument
621 mm_free_id(struct mm_struct * mm) mm_free_id() argument
624 check_mm(struct mm_struct * mm) check_mm() argument
652 free_mm(mm) global() argument
656 struct mm_struct *mm = arg; do_check_lazy_tlb() local
663 struct mm_struct *mm = arg; do_shoot_lazy_tlb() local
672 cleanup_lazy_tlbs(struct mm_struct * mm) cleanup_lazy_tlbs() argument
720 __mmdrop(struct mm_struct * mm) __mmdrop() argument
745 struct mm_struct *mm; mmdrop_async_fn() local
751 mmdrop_async(struct mm_struct * mm) mmdrop_async() argument
1034 mm_init_aio(struct mm_struct * mm) mm_init_aio() argument
1042 mm_clear_owner(struct mm_struct * mm,struct task_struct * p) mm_clear_owner() argument
1051 mm_init_owner(struct mm_struct * mm,struct task_struct * p) mm_init_owner() argument
1058 mm_init_uprobes_state(struct mm_struct * mm) mm_init_uprobes_state() argument
1066 mmap_init_lock(struct mm_struct * mm) mmap_init_lock() argument
1075 mm_init(struct mm_struct * mm,struct task_struct * p,struct user_namespace * user_ns) mm_init() argument
1159 struct mm_struct *mm; mm_alloc() local
1170 __mmput(struct mm_struct * mm) __mmput() argument
1196 mmput(struct mm_struct * mm) mmput() argument
1208 struct mm_struct *mm = container_of(work, struct mm_struct, mmput_async_fn() local
1214 mmput_async(struct mm_struct * mm) mmput_async() argument
1237 set_mm_exe_file(struct mm_struct * mm,struct file * new_exe_file) set_mm_exe_file() argument
1274 replace_mm_exe_file(struct mm_struct * mm,struct file * new_exe_file) replace_mm_exe_file() argument
1325 get_mm_exe_file(struct mm_struct * mm) get_mm_exe_file() argument
1346 struct mm_struct *mm; get_task_exe_file() local
1371 struct mm_struct *mm; get_task_mm() local
1385 may_access_mm(struct mm_struct * mm,struct task_struct * task,unsigned int mode) may_access_mm() argument
1398 struct mm_struct *mm; mm_access() local
1463 mm_release(struct task_struct * tsk,struct mm_struct * mm) mm_release() argument
1496 exit_mm_release(struct task_struct * tsk,struct mm_struct * mm) exit_mm_release() argument
1502 exec_mm_release(struct task_struct * tsk,struct mm_struct * mm) exec_mm_release() argument
1521 struct mm_struct *mm; dup_mm() local
1561 struct mm_struct *mm, *oldmm; copy_mm() local
[all …]
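
fork.c also owns the lookup helpers listed above (get_task_mm(), mm_access(), mmput()). A minimal, hedged sketch of the usual borrow-and-release pattern built on them (inspect_task is an illustrative name, not a kernel function):

#include <linux/sched.h>
#include <linux/sched/mm.h>

static void inspect_task(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);  /* NULL for kernel threads */

	if (!mm)
		return;

	/* ... e.g. take mmap_read_lock(mm) and walk the VMAs ... */

	mmput(mm);                                 /* balances get_task_mm() */
}
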
/linux/drivers/gpu/drm/nouveau/nvkm/core/
mm.c
24 #include <core/mm.h>
26 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
30 nvkm_mm_dump(struct nvkm_mm *mm, const char *header) in nvkm_mm_dump() argument
36 list_for_each_entry(node, &mm->nodes, nl_entry) { in nvkm_mm_dump()
41 list_for_each_entry(node, &mm->free, fl_entry) { in nvkm_mm_dump()
48 nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis) in nvkm_mm_free() argument
72 list_for_each_entry(prev, &mm->free, fl_entry) { in nvkm_mm_free()
86 region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) in region_head() argument
111 nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, in nvkm_mm_head() argument
121 list_for_each_entry(this, &mm->free, fl_entry) { in nvkm_mm_head()
[all …]
/linux/arch/powerpc/mm/book3s64/
mmu_context.c
13 #include <linux/mm.h>
95 static int hash__init_new_context(struct mm_struct *mm) in hash__init_new_context() argument
99 mm->context.hash_context = kmalloc_obj(struct hash_mm_context); in hash__init_new_context()
100 if (!mm->context.hash_context) in hash__init_new_context()
110 * initialize context slice details for newly allocated mm's (which will in hash__init_new_context()
117 if (mm->context.id == 0) { in hash__init_new_context()
118 memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context)); in hash__init_new_context()
119 slice_init_new_context_exec(mm); in hash__init_new_context()
121 /* This is fork. Copy hash_context details from current->mm */ in hash__init_new_context()
122 …memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)… in hash__init_new_context()
[all …]
/linux/arch/x86/include/asm/
pgalloc.h
6 #include <linux/mm.h> /* for struct page */
15 static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; } in __paravirt_pgd_alloc() argument
20 #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm) argument
21 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {} in paravirt_pgd_free() argument
22 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pte() argument
23 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pmd() argument
26 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_pud() argument
27 static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {} in paravirt_alloc_p4d() argument
50 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
62 static inline void pmd_populate_kernel(struct mm_struct *mm, in pmd_populate_kernel() argument
[all …]
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_mqd_manager_v9.c
37 static void update_mqd(struct mqd_manager *mm, void *mqd,
41 static uint64_t mqd_stride_v9(struct mqd_manager *mm, in mqd_stride_v9() argument
44 if (mm->dev->kfd->cwsr_enabled && in mqd_stride_v9()
56 return mm->mqd_size; in mqd_stride_v9()
69 static void update_cu_mask(struct mqd_manager *mm, void *mqd, in update_cu_mask() argument
78 mqd_symmetrically_map_cu_mask(mm, in update_cu_mask()
87 if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) && in update_cu_mask()
88 KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) && in update_cu_mask()
89 KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0)) { in update_cu_mask()
132 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm, in allocate_mqd() argument
[all …]
kfd_mqd_manager.c
49 struct kfd_mem_obj *allocate_hiq_mqd(struct mqd_manager *mm, struct queue_properties *q) in allocate_hiq_mqd() argument
52 struct kfd_node *dev = mm->dev; in allocate_hiq_mqd()
65 struct kfd_mem_obj *allocate_sdma_mqd(struct mqd_manager *mm, in allocate_sdma_mqd() argument
69 struct kfd_node *dev = mm->dev; in allocate_sdma_mqd()
93 void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd, in free_mqd_hiq_sdma() argument
100 void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, in mqd_symmetrically_map_cu_mask() argument
104 struct amdgpu_cu_info *cu_info = &mm->dev->adev->gfx.cu_info; in mqd_symmetrically_map_cu_mask()
105 struct amdgpu_gfx_config *gfx_info = &mm->dev->adev->gfx.config; in mqd_symmetrically_map_cu_mask()
107 bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0); in mqd_symmetrically_map_cu_mask()
111 int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask); in mqd_symmetrically_map_cu_mask()
[all …]
/linux/arch/sparc/mm/
tsb.c
2 /* arch/sparc64/mm/tsb.c
121 struct mm_struct *mm = tb->mm; in flush_tsb_user() local
124 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user()
127 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user()
128 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user()
140 else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user()
141 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user()
142 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user()
149 spin_unlock_irqrestore(&mm->context.lock, flags); in flush_tsb_user()
152 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, in flush_tsb_user_page() argument
[all …]
/linux/drivers/iommu/
iommu-sva.c
8 #include <linux/sched/mm.h>
17 struct mm_struct *mm);
19 /* Allocate a PASID for the mm within range (inclusive) */
20 static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev) in iommu_alloc_mm_data() argument
27 if (!arch_pgtable_dma_compat(mm)) in iommu_alloc_mm_data()
30 iommu_mm = mm->iommu_mm; in iommu_alloc_mm_data()
31 /* Is a PASID already associated with this mm? */ in iommu_alloc_mm_data()
48 iommu_mm->mm = mm; in iommu_alloc_mm_data()
51 * Make sure the write to mm->iommu_mm is not reordered in front of in iommu_alloc_mm_data()
55 smp_store_release(&mm->iommu_mm, iommu_mm); in iommu_alloc_mm_data()
[all …]
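
iommu-sva.c implements PASID allocation behind the public SVA bind API. A hedged usage sketch (my_enable_sva is an illustrative name; the two-argument iommu_sva_bind_device() signature is the one used by recent kernels): bind the current process's mm to a device so it can issue DMA with CPU virtual addresses:

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

static int my_enable_sva(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, current->mm); /* shares current->mm with dev */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle); /* program this PASID into the device */

	/* ... device DMA tagged with pasid now resolves against current->mm ... */

	iommu_sva_unbind_device(handle);
	return 0;
}
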
/linux/drivers/gpu/drm/i915/gvt/
gtt.c
487 * MM helpers.
489 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, in _ppgtt_get_root_entry() argument
493 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; in _ppgtt_get_root_entry()
495 GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT); in _ppgtt_get_root_entry()
497 entry->type = mm->ppgtt_mm.root_entry_type; in _ppgtt_get_root_entry()
498 pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps : in _ppgtt_get_root_entry()
499 mm->ppgtt_mm.shadow_pdps, in _ppgtt_get_root_entry()
500 entry, index, false, 0, mm->vgpu); in _ppgtt_get_root_entry()
504 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm, in ppgtt_get_guest_root_entry() argument
507 _ppgtt_get_root_entry(mm, entry, index, true); in ppgtt_get_guest_root_entry()
[all …]
/linux/Documentation/translations/zh_CN/core-api/
mm-api.rst
3 :Original: Documentation/core-api/mm-api.rst
31 mm/gup.c
51 mm/slab.c
53 mm/slab_common.c
55 mm/util.c
62 mm/vmalloc.c
73 mm/filemap.c
78 mm/readahead.c
83 mm/page-writeback.c
88 mm/truncate.c
[all …]
