/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_MMU_CONTEXT_H
#define _ASM_RISCV_MMU_CONTEXT_H

#include <linux/mm_types.h>
#include <asm-generic/mm_hooks.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/* Nothing to do: no lazy-TLB bookkeeping is kept for an mm on RISC-V. */
static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *task)
{
}

/* Initialize context-related info for a new mm_struct */
static inline int init_new_context(struct task_struct *task,
	struct mm_struct *mm)
{
	return 0;
}

/* Nothing to tear down: no per-mm context state is allocated. */
static inline void destroy_context(struct mm_struct *mm)
{
}

/*
 * When necessary, performs a deferred icache flush for the given MM
 * context, on the local CPU.  RISC-V has no direct mechanism for
 * instruction cache shootdowns, so instead we send an IPI that informs
 * the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid
 * the IPIs for harts that are not currently executing an MM context and
 * instead schedule a deferred local instruction cache flush to be
 * performed before execution resumes on each hart.  This function
 * performs that local instruction cache flush, which implicitly applies
 * only to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}
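
/*
 * For context, a simplified sketch of the sender side that pairs with
 * flush_icache_deferred() above.  It is modeled on flush_icache_mm() in
 * arch/riscv/mm/cacheflush.c, but the _sketch name and the reduced logic
 * are illustrative assumptions, not the kernel's actual implementation.
 */
static inline void flush_icache_mm_sketch(struct mm_struct *mm)
{
	unsigned int cpu;
	cpumask_t others;

	preempt_disable();

	/*
	 * Mark every hart's icache as stale for this MM, then flush and
	 * un-mark the local hart.  Harts not currently running this MM
	 * will pick the flush up later in flush_icache_deferred().
	 */
	cpumask_setall(&mm->context.icache_stale_mask);
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);
	local_flush_icache_all();

	/*
	 * Publish the cpumask updates before any remote flush; this is
	 * the barrier that the smp_mb() in flush_icache_deferred() pairs
	 * with.  Only harts concurrently executing this MM still need an
	 * immediate flush (the real code issues a remote fence.i via SBI).
	 */
	smp_mb();
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	if (!cpumask_empty(&others))
		flush_icache_all();

	preempt_enable();
}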

static inline void switch_mm(struct mm_struct *prev,
	struct mm_struct *next, struct task_struct *task)
{
	if (likely(prev != next)) {
		/*
		 * Mark the current MM context as inactive, and the next as
		 * active.  This is at least used by the icache flushing
		 * routines in order to determine who should be flushed.
		 */
		unsigned int cpu = smp_processor_id();

		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Use the old sptbr name instead of the current satp name to
		 * support binutils 2.29, which doesn't know about the
		 * privileged ISA 1.10 yet.
		 */
		csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
		local_flush_tlb_all();

		flush_icache_deferred(next);
	}
}
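
/*
 * For reference, a sketch of how the value written to sptbr/satp above
 * is laid out under Sv39 on rv64.  The SKETCH_* constants restate the
 * privileged-spec field layout as assumptions for illustration; the
 * kernel's authoritative definitions are the SATP_* macros in asm/csr.h.
 */
#define SKETCH_SATP_MODE_SV39	(8UL << 60)		/* MODE field = 8: Sv39 */
#define SKETCH_SATP_PPN_MASK	((1UL << 44) - 1)	/* PPN field, bits 43:0 */

static inline unsigned long sketch_make_satp(pgd_t *pgd)
{
	/* Physical page number of the root page table, plus the mode. */
	return (virt_to_pfn(pgd) & SKETCH_SATP_PPN_MASK) | SKETCH_SATP_MODE_SV39;
}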

static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, NULL);
}

/* Nothing to do: dropping an mm requires no arch-specific action here. */
static inline void deactivate_mm(struct task_struct *task,
	struct mm_struct *mm)
{
}

#endif /* _ASM_RISCV_MMU_CONTEXT_H */