xref: /linux/include/linux/mmu_context.h (revision 9ae606bc74dd0e58d4de894e3c5cbb9d45599267)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
23d2d827fSMichael S. Tsirkin #ifndef _LINUX_MMU_CONTEXT_H
33d2d827fSMichael S. Tsirkin #define _LINUX_MMU_CONTEXT_H
43d2d827fSMichael S. Tsirkin 
5f98db601SAndy Lutomirski #include <asm/mmu_context.h>
6bf9282dcSPeter Zijlstra #include <asm/mmu.h>
7f98db601SAndy Lutomirski 
8f98db601SAndy Lutomirski /* Architectures that care about IRQ state in switch_mm can override this. */
9f98db601SAndy Lutomirski #ifndef switch_mm_irqs_off
/*
 * Default: fall back to plain switch_mm().  An architecture that needs to
 * know interrupts are already disabled at this point (NOTE(review): e.g. to
 * skip redundant local_irq_save/restore — confirm against arch code) defines
 * its own switch_mm_irqs_off before this header is reached.
 */
10f98db601SAndy Lutomirski # define switch_mm_irqs_off switch_mm
11f98db601SAndy Lutomirski #endif
12f98db601SAndy Lutomirski 
13bf9282dcSPeter Zijlstra #ifndef leave_mm
/*
 * Default no-op for architectures that do not provide their own leave_mm().
 * The @cpu argument is deliberately unused here; an overriding architecture
 * defines the leave_mm macro/function before this header applies the stub.
 */
14bf9282dcSPeter Zijlstra static inline void leave_mm(int cpu) { }
15bf9282dcSPeter Zijlstra #endif
16bf9282dcSPeter Zijlstra 
17*9ae606bcSWill Deacon /*
18*9ae606bcSWill Deacon  * CPUs that are capable of running user task @p. Must contain at least one
19*9ae606bcSWill Deacon  * active CPU. It is assumed that the kernel can run on all CPUs, so calling
20*9ae606bcSWill Deacon  * this for a kernel thread is pointless.
21*9ae606bcSWill Deacon  *
22*9ae606bcSWill Deacon  * By default, we assume a sane, homogeneous system.
23*9ae606bcSWill Deacon  */
24*9ae606bcSWill Deacon #ifndef task_cpu_possible_mask
/* Homogeneous default: every task can run on every possible CPU, so the
 * per-task mask is just cpu_possible_mask and the membership test is
 * trivially true. */
25*9ae606bcSWill Deacon # define task_cpu_possible_mask(p)	cpu_possible_mask
26*9ae606bcSWill Deacon # define task_cpu_possible(cpu, p)	true
27*9ae606bcSWill Deacon #else
/* Arch override in effect: defer to the arch-provided per-task mask and
 * test @cpu for membership in it. */
28*9ae606bcSWill Deacon # define task_cpu_possible(cpu, p)	cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
29*9ae606bcSWill Deacon #endif
30*9ae606bcSWill Deacon 
313d2d827fSMichael S. Tsirkin #endif
32