| /linux/include/linux/ |
| mmap_lock.h |
     26  void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
     27  void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
     29  void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
     31  static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
     35          __mmap_lock_do_trace_start_locking(mm, write);
     38  static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
     42          __mmap_lock_do_trace_acquire_returned(mm, write, success);
     45  static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
     48          __mmap_lock_do_trace_released(mm, write);
     53  static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
    [all …]
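The split visible above is deliberate: the __mmap_lock_do_trace_*() functions live out of line in mm/mmap_lock.c, while the inline __mmap_lock_trace_*() wrappers keep lock acquisition cheap when tracing is off. A minimal sketch of the pattern, assuming the CONFIG_TRACING guard and tracepoint_enabled() static-key test used in mainline:

    #ifdef CONFIG_TRACING
    DECLARE_TRACEPOINT(mmap_lock_start_locking);

    /* Out of line: only reached when the tracepoint is enabled. */
    void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);

    static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                       bool write)
    {
            /* Static-key test: near-zero cost while tracing is disabled. */
            if (tracepoint_enabled(mmap_lock_start_locking))
                    __mmap_lock_do_trace_start_locking(mm, write);
    }
    #else
    static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                       bool write) {}
    #endif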
|
| ksm.h |
     20  vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
     22  int ksm_enable_merge_any(struct mm_struct *mm);
     23  int ksm_disable_merge_any(struct mm_struct *mm);
     24  int ksm_disable(struct mm_struct *mm);
     26  int __ksm_enter(struct mm_struct *mm);
     27  void __ksm_exit(struct mm_struct *mm);
     37  static inline void ksm_map_zero_page(struct mm_struct *mm)
     40          atomic_long_inc(&mm->ksm_zero_pages);
     43  static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
     47          atomic_long_dec(&mm->ksm_zero_pages);
    [all …]
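ksm_enable_merge_any() backs the PR_SET_MEMORY_MERGE prctl, which opts a whole process into KSM without per-region madvise(MADV_MERGEABLE) calls. A minimal userspace sketch (requires a KSM-enabled kernel, 6.4+, and CAP_SYS_RESOURCE):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_MEMORY_MERGE
    #define PR_SET_MEMORY_MERGE 67  /* from <linux/prctl.h>, Linux 6.4+ */
    #endif

    int main(void)
    {
            /* Mark all current and future VMAs of this process mergeable. */
            if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0)) {
                    perror("PR_SET_MEMORY_MERGE");
                    return 1;
            }
            return 0;
    }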
|
| /linux/arch/powerpc/include/asm/ |
| mmu_context.h |
     18  extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
     20  extern void destroy_context(struct mm_struct *mm);
     24  extern bool mm_iommu_preregistered(struct mm_struct *mm);
     25  extern long mm_iommu_new(struct mm_struct *mm,
     28  extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
     31  extern long mm_iommu_put(struct mm_struct *mm,
     33  extern void mm_iommu_init(struct mm_struct *mm);
     34  extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
     36  extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
     40  extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
    [all …]
|
| /linux/arch/s390/include/asm/ |
| pgalloc.h |
     30  struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm);
     39  int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
     41  static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
     46          if (addr + len > mm->context.asce_limit &&
     48                  rc = crst_table_upgrade(mm, addr + len);
     55  static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long address)
     57          unsigned long *table = crst_table_alloc_noprof(mm);
     68  static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
     70          if (mm_p4d_folded(mm))
     74          crst_table_free(mm, (unsigned long *) p4d);
    [all …]
|
| /linux/mm/ |
| mmu_notifier.c |
    191          interval_sub->mm->notifier_subscriptions;   in mmu_interval_read_begin()
    264          struct mm_struct *mm)   in mn_itree_release()
    269          .mm = mm,
    302          struct mm_struct *mm)   in mn_hlist_release()
    321          subscription->ops->release(subscription, mm);
    350  void __mmu_notifier_release(struct mm_struct *mm)
    353          mm->notifier_subscriptions;
    356          mn_itree_release(subscriptions, mm);
    359          mn_hlist_release(subscriptions, mm);
    367  int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
    [all …]
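__mmu_notifier_release() fans out to the ops->release() callback of every subscription, which is how drivers learn an address space is being torn down. A hedged sketch of the consumer side, using the upstream mmu_notifier_register() API (the driver context struct is hypothetical):

    #include <linux/mmu_notifier.h>

    struct my_ctx {                         /* hypothetical driver state */
            struct mmu_notifier mn;
    };

    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            /* exit_mmap() is running: invalidate driver mappings of this mm. */
    }

    static const struct mmu_notifier_ops my_ops = {
            .release = my_release,
    };

    static int my_attach(struct my_ctx *ctx, struct mm_struct *mm)
    {
            ctx->mn.ops = &my_ops;
            /* mmu_notifier_register() takes mmap_lock internally;
             * call it without holding that lock. */
            return mmu_notifier_register(&ctx->mn, mm);
    }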
|
| debug.c |
    175  void dump_mm(const struct mm_struct *mm)
    202          mm, mm->task_size,
    203          mm->mmap_base, mm->mmap_legacy_base,
    204          mm->pgd, atomic_read(&mm->mm_users),
    205          atomic_read(&mm->mm_count),
    206          mm_pgtables_bytes(mm),
    207          mm->map_count,
    208          mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
    209          (u64)atomic64_read(&mm->pinned_vm),
    210          mm->data_vm, mm->exec_vm, mm->stack_vm,
    [all …]
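dump_mm() is rarely called directly; the usual entry point is the VM_BUG_ON_MM() assertion, which prints this dump before panicking under CONFIG_DEBUG_VM. A small illustrative use (the check itself is hypothetical):

    #include <linux/mmdebug.h>

    static void check_locked_vm(struct mm_struct *mm)
    {
            /* On failure this dumps the whole mm_struct via dump_mm(). */
            VM_BUG_ON_MM((long)mm->locked_vm < 0, mm);
    }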
|
| mmap_lock.c |
     26  void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
     28          trace_mmap_lock_start_locking(mm, write);
     32  void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
     35          trace_mmap_lock_acquire_returned(mm, write, success);
     39  void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
     41          trace_mmap_lock_released(mm, write);
    163  static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
    177          if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) {
    197          if (unlikely(vma->vm_mm != mm))
    211          if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
    [all …]
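vma_start_read() is the heart of the per-VMA-lock fast path: take the VMA read lock speculatively, then recheck against mm->mm_lock_seq to detect a racing writer. Callers normally reach it through lock_vma_under_rcu(); a hedged sketch of that usage (fault handling elided, details vary by kernel version):

    #include <linux/mm.h>

    static vm_fault_t fault_fast_path(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;

            vma = lock_vma_under_rcu(mm, addr); /* RCU lookup + vma_start_read() */
            if (!vma)
                    return VM_FAULT_RETRY;      /* fall back to mmap_read_lock() */

            /* ... handle the fault against a stable, read-locked VMA ... */

            vma_end_read(vma);
            return 0;
    }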
|
| mmap.c |
    111          return mlock_future_ok(current->mm, current->mm->def_flags, len)   in check_brk_limits()
    118          struct mm_struct *mm = current->mm;   in SYSCALL_DEFINE1()
    125          if (mmap_write_lock_killable(mm))
    128          origbrk = mm->brk;
    130          min_brk = mm->start_brk;
    138          min_brk = mm->end_data;
    149          if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
    150                                mm->end_data, mm->start_data))
    154          oldbrk = PAGE_ALIGN(mm->brk);
    156          mm->brk = brk;
    [all …]
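These are the bounds checks of the brk(2) syscall: the new break must stay above start_brk (or end_data, depending on randomization) and within RLIMIT_DATA. From userspace the same path is exercised through sbrk(); a quick demonstration:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            void *before = sbrk(0);         /* current program break */

            /* Grow the heap by one page; the kernel updates mm->brk. */
            if (sbrk(4096) == (void *) -1) {
                    perror("sbrk");
                    return 1;
            }
            printf("break: %p -> %p\n", before, sbrk(0));
            return 0;
    }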
|
| /linux/arch/m68k/include/asm/ |
| mmu_context.h |
     28  static inline void get_mmu_context(struct mm_struct *mm)
     32          if (mm->context != NO_CONTEXT)
     45          mm->context = ctx;
     46          context_mm[ctx] = mm;
     52  #define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
     58  static inline void destroy_context(struct mm_struct *mm)
     60          if (mm->context != NO_CONTEXT) {
     61                  clear_bit(mm->context, context_map);
     62                  mm->context = NO_CONTEXT;
     75          get_mmu_context(tsk->mm);   in switch_mm()
    [all …]
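The excerpt implies a small bitmap allocator: context_map tracks which MMU context numbers are in use, and context_mm remembers the owning mm. An illustrative reconstruction of just the allocation step (the real get_mmu_context() also handles a full map by stealing a context):

    /* Illustrative only: the names context_map, context_mm and the bound
     * LAST_CONTEXT are assumptions based on the fragments above; the
     * exhaustion path is omitted. */
    static unsigned long alloc_context_slot(struct mm_struct *mm)
    {
            unsigned long ctx = find_first_zero_bit(context_map, LAST_CONTEXT + 1);

            set_bit(ctx, context_map);      /* claim the slot */
            mm->context = ctx;
            context_mm[ctx] = mm;           /* remember the owner */
            return ctx;
    }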
|
| /linux/arch/s390/mm/ |
| pgtable.c |
     40  static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
     47          asce = READ_ONCE(mm->context.gmap_asce);
     51          asce = asce ? : mm->context.asce;
     60  static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
     67          asce = READ_ONCE(mm->context.gmap_asce);
     71          asce = asce ? : mm->context.asce;
     80  static inline pte_t ptep_flush_direct(struct mm_struct *mm,
     89          atomic_inc(&mm->context.flush_count);
     91          cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
     92                  ptep_ipte_local(mm, addr, ptep, nodat);
    [all …]
|
| /linux/arch/powerpc/mm/book3s64/ |
| mmu_context.c |
     95  static int hash__init_new_context(struct mm_struct *mm)
     99          mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
    101          if (!mm->context.hash_context)
    118          if (mm->context.id == 0) {
    119                  memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
    120                  slice_init_new_context_exec(mm);
    123                  memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)…
    126          if (current->mm->context.hash_context->spt) {
    127                  mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
    129                  if (!mm->context.hash_context->spt) {
    [all …]
|
| /linux/tools/testing/vma/ |
| vma.c |
     66  static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
     72          struct vm_area_struct *vma = vm_area_alloc(mm);
     87  static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
     91          res = vma_link(mm, vma);
    104  static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
    110          struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags);
    115          if (attach_vma(mm, vma)) {
    213  static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
    234          return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
    251  static int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
    [all …]
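This harness lets VMA-merging logic run as a plain userspace program. A hypothetical test in the same style, using only the helpers shown above (addresses, pgoffs, and flags are illustrative):

    static bool test_two_vmas(void)
    {
            struct mm_struct mm = {};
            VMA_ITERATOR(vmi, &mm, 0);

            /* Two non-adjacent VMAs that must not merge. */
            alloc_and_link_vma(&mm, 0x1000, 0x2000, 1, VM_READ | VM_WRITE);
            alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, VM_READ | VM_WRITE);

            /* cleanup_mm() tears everything down and returns the VMA count. */
            return cleanup_mm(&mm, &vmi) == 2;
    }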
|
| /linux/Documentation/core-api/ |
| mm-api.rst |
     14  .. kernel-doc:: mm/gup.c
     40  .. kernel-doc:: mm/slub.c
     43  .. kernel-doc:: mm/slab_common.c
     46  .. kernel-doc:: mm/util.c
     52  .. kernel-doc:: mm/vmalloc.c
     61  .. kernel-doc:: mm/filemap.c
     67  .. kernel-doc:: mm/readahead.c
     70  .. kernel-doc:: mm/readahead.c
     76  .. kernel-doc:: mm/page-writeback.c
     82  .. kernel-doc:: mm/truncate.c
    [all …]
|
| /linux/include/asm-generic/ |
| pgalloc.h |
     19  static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
     25          if (!pagetable_pte_ctor(mm, ptdesc)) {
     43  static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
     45          return __pte_alloc_one_kernel_noprof(mm);
     55  static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
     72  static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
     79          if (!pagetable_pte_ctor(mm, ptdesc)) {
     97  static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
     99          return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
    114  static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
    [all …]
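The elided bodies all follow one shape: allocate a ptdesc, run the page-table constructor, and unwind on failure. A sketch of __pte_alloc_one_noprof() consistent with the fragments above (mainline details may differ slightly):

    static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
    {
            struct ptdesc *ptdesc = pagetable_alloc_noprof(gfp, 0);

            if (!ptdesc)
                    return NULL;
            if (!pagetable_pte_ctor(mm, ptdesc)) { /* init lock, accounting */
                    pagetable_free(ptdesc);
                    return NULL;
            }
            return ptdesc_page(ptdesc);
    }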
|
| /linux/arch/arm/include/asm/ |
| mmu_context.h |
     24  void __check_vmalloc_seq(struct mm_struct *mm);
     27  static inline void check_vmalloc_seq(struct mm_struct *mm)
     30          unlikely(atomic_read(&mm->context.vmalloc_seq) !=
     32                  __check_vmalloc_seq(mm);
     38  void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
     42  init_new_context(struct task_struct *tsk, struct mm_struct *mm)
     44          atomic64_set(&mm->context.id, 0);
     49  void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
     52  static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
     62  static inline void check_and_switch_context(struct mm_struct *mm,
    [all …]
|
| /linux/arch/x86/kernel/ |
| ldt.c |
     42  void load_mm_ldt(struct mm_struct *mm)
     47          ldt = READ_ONCE(mm->context.ldt);
    138          struct mm_struct *mm = __mm;   in flush_ldt()
    140          if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
    143          load_mm_ldt(mm);
    189  static void do_sanity_check(struct mm_struct *mm,
    193          if (mm->context.ldt) {
    234  static void map_ldt_struct_to_user(struct mm_struct *mm)
    236          pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
    243          if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
    [all …]
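mm->context.ldt is populated from userspace through modify_ldt(2); load_mm_ldt() then installs it on context switch. A minimal userspace sketch that creates one LDT entry (x86 only; descriptor values illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/ldt.h>            /* struct user_desc */

    int main(void)
    {
            struct user_desc desc;

            memset(&desc, 0, sizeof(desc));
            desc.entry_number = 0;
            desc.limit        = 0xfff;      /* 4 KiB data segment */
            desc.seg_32bit    = 1;

            /* func == 1: write an LDT entry; it lands in mm->context.ldt. */
            if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0) {
                    perror("modify_ldt");
                    return 1;
            }
            return 0;
    }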
|
| /linux/arch/sparc/mm/ |
| tsb.c |
    121          struct mm_struct *mm = tb->mm;   in flush_tsb_user()
    124          spin_lock_irqsave(&mm->context.lock, flags);
    127          base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
    128          nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
    140          else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
    141                  base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
    142                  nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
    149          spin_unlock_irqrestore(&mm->context.lock, flags);
    152  void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
    157          spin_lock_irqsave(&mm->context.lock, flags);
    [all …]
|
| /linux/include/linux/sched/ |
| mm.h |
     35  static inline void mmgrab(struct mm_struct *mm)
     37          atomic_inc(&mm->mm_count);
     45  extern void __mmdrop(struct mm_struct *mm);
     47  static inline void mmdrop(struct mm_struct *mm)
     54          if (unlikely(atomic_dec_and_test(&mm->mm_count)))
     55                  __mmdrop(mm);
     65          struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);   in __mmdrop_delayed()
     67          __mmdrop(mm);
     74  static inline void mmdrop_sched(struct mm_struct *mm)
     77          if (atomic_dec_and_test(&mm->mm_count))
    [all …]
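The two reference counts serve different lifetimes: mm_count (mmgrab()/mmdrop()) keeps the mm_struct itself allocated, while mm_users (mmget()/mmput()) keeps the address space and page tables alive. A sketch of the standard pairing when touching another task's mm:

    #include <linux/sched/mm.h>

    static void inspect_mm(struct mm_struct *mm)
    {
            mmgrab(mm);                     /* pin the struct: mm_count++ */

            if (mmget_not_zero(mm)) {       /* pin the address space, if live */
                    /* ... walk page tables, access user memory ... */
                    mmput(mm);
            }

            mmdrop(mm);                     /* last reference may free it here */
    }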
|
| /linux/fs/proc/ |
| task_nommu.c |
     21  void task_mem(struct seq_file *m, struct mm_struct *mm)
     23          VMA_ITERATOR(vmi, mm, 0);
     28          mmap_read_lock(mm);
     40          if (atomic_read(&mm->mm_count) > 1 ||
     50          if (atomic_read(&mm->mm_count) > 1)
     51                  sbytes += kobjsize(mm);
     53                  bytes += kobjsize(mm);
     72          mmap_read_unlock(mm);
     81  unsigned long task_vsize(struct mm_struct *mm)
     83          VMA_ITERATOR(vmi, mm, 0);
    [all …]
|
| /linux/drivers/net/ethernet/mscc/ |
| ocelot_mm.c |
     55          struct ocelot_mm_state *mm = &ocelot->mm[port];   in ocelot_port_update_active_preemptible_tcs()
     66              ocelot_port->speed == SPEED_1000) && mm->tx_active)
     67                  val = mm->preemptible_tcs;
     75          mm->active_preemptible_tcs = val;
     82          mm->tx_active ? "active" : "inactive", mm->preemptible_tcs,
     83          mm->active_preemptible_tcs);
     93          struct ocelot_mm_state *mm = &ocelot->mm[port];   in ocelot_port_change_fp()
     97          if (mm->preemptible_tcs == preemptible_tcs)
    100          mm->preemptible_tcs = preemptible_tcs;
    108          struct ocelot_mm_state *mm = &ocelot->mm[port];   in ocelot_mm_update_port_status()
    [all …]
|
| /linux/include/trace/events/ |
| ksm.h |
     77          TP_PROTO(void *mm),
     79          TP_ARGS(mm),
     82                  __field(void *, mm)
     86                  __entry->mm = mm;
     89          TP_printk("mm %p", __entry->mm)
    101          TP_PROTO(void *mm),
    103          TP_ARGS(mm)
    115          TP_PROTO(void *mm),
    117          TP_ARGS(mm)
    132          TP_PROTO(unsigned long pfn, void *rmap_item, void *mm, int err),
    [all …]
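These fragments are the standard tracepoint anatomy: TP_PROTO gives the C prototype, TP_ARGS the argument names, __field/__entry the recorded layout, and TP_printk the rendered format. A minimal event class in the same shape (the name ksm_example_class is illustrative, not part of the real header):

    DECLARE_EVENT_CLASS(ksm_example_class,

            TP_PROTO(void *mm),

            TP_ARGS(mm),

            TP_STRUCT__entry(
                    __field(void *, mm)
            ),

            TP_fast_assign(
                    __entry->mm = mm;
            ),

            TP_printk("mm %p", __entry->mm)
    );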
|
| /linux/kernel/ |
| fork.c |
    559  void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
    564          RCU_INIT_POINTER(mm->exe_file, exe_file);
    574  static inline int mm_alloc_pgd(struct mm_struct *mm)
    576          mm->pgd = pgd_alloc(mm);
    577          if (unlikely(!mm->pgd))
    582  static inline void mm_free_pgd(struct mm_struct *mm)
    584          pgd_free(mm, mm->pgd);
    587  #define mm_alloc_pgd(mm)	(0)
    588  #define mm_free_pgd(mm)
    594  static inline int mm_alloc_id(struct mm_struct *mm)
    [all …]
|
| /linux/arch/mips/include/asm/ |
| mmu_context.h |
    106  static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
    109          return atomic64_read(&mm->context.mmid);
    111          return mm->context.asid[cpu];
    115          struct mm_struct *mm, u64 ctx)   in set_cpu_context()
    118          atomic64_set(&mm->context.mmid, ctx);
    120          mm->context.asid[cpu] = ctx;
    124  #define cpu_asid(cpu, mm) \
    125          (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
    127  extern void get_new_mmu_context(struct mm_struct *mm);
    128  extern void check_mmu_context(struct mm_struct *mm);
    [all …]
|
| /linux/rust/helpers/ |
| mm.c |
      6  void rust_helper_mmgrab(struct mm_struct *mm)
      8          mmgrab(mm);
     11  void rust_helper_mmdrop(struct mm_struct *mm)
     13          mmdrop(mm);
     16  void rust_helper_mmget(struct mm_struct *mm)
     18          mmget(mm);
     21  bool rust_helper_mmget_not_zero(struct mm_struct *mm)
     23          return mmget_not_zero(mm);
     26  void rust_helper_mmap_read_lock(struct mm_struct *mm)
     28          mmap_read_lock(mm);
    [all …]
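Each helper exists because bindgen cannot call a static inline C function: the shim re-exports it as a real, linkable symbol for the Rust side. A hypothetical continuation in the same pattern (mmap_read_unlock() is a real kernel inline; whether this exact shim appears in the elided part of the file is not shown above):

    void rust_helper_mmap_read_unlock(struct mm_struct *mm)
    {
            mmap_read_unlock(mm);   /* plain symbol callable from Rust bindings */
    }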
|
| /linux/arch/x86/include/asm/ |
| pgalloc.h |
     15  static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
     20  #define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
     21  static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
     22  static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
     23  static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
     26  static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
     27  static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
     50  extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
     62  static inline void pmd_populate_kernel(struct mm_struct *mm,
     65          paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
    [all …]
|