xref: /linux/mm/mmap_lock.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
1 // SPDX-License-Identifier: GPL-2.0
2 #define CREATE_TRACE_POINTS
3 #include <trace/events/mmap_lock.h>
4 
5 #include <linux/mm.h>
6 #include <linux/cgroup.h>
7 #include <linux/memcontrol.h>
8 #include <linux/mmap_lock.h>
9 #include <linux/mutex.h>
10 #include <linux/percpu.h>
11 #include <linux/rcupdate.h>
12 #include <linux/smp.h>
13 #include <linux/trace_events.h>
14 #include <linux/local_lock.h>
15 
/*
 * Export the three mmap_lock tracepoints so that out-of-line probe code
 * (e.g. loadable modules attaching via the tracepoint API) can hook them.
 */
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
19 
#ifdef CONFIG_MEMCG

/*
 * Count of currently-registered mmap_lock trace events. Bumped in
 * trace_mmap_lock_reg()/trace_mmap_lock_unreg() and read in
 * get_mm_memcg_path() so the memcg path lookup is skipped entirely
 * while nothing is listening.
 */
static atomic_t reg_refcount;

/*
 * Size of the buffer for memcg path names. Ignoring stack trace support,
 * trace_events_hist.c uses MAX_FILTER_STR_VAL for this, so we also use it.
 */
#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
29 
/*
 * Tracepoint registration callback: record that one more mmap_lock trace
 * event is live. Registration cannot fail, so always return 0.
 */
int trace_mmap_lock_reg(void)
{
	atomic_inc(&reg_refcount);
	return 0;
}
35 
/* Tracepoint unregistration callback: drop the live-event count. */
void trace_mmap_lock_unreg(void)
{
	atomic_dec(&reg_refcount);
}
40 
/*
 * Emit trace_mmap_lock_<type> with the current mm's memcg path rendered
 * into an on-stack buffer. get_mm_memcg_path() writes an empty string
 * when no event is registered or the path cannot be determined, so the
 * event always receives a valid NUL-terminated string.
 */
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                    \
	do {                                                    \
		char buf[MEMCG_PATH_BUF_SIZE];                  \
		get_mm_memcg_path(mm, buf, sizeof(buf));        \
		trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \
	} while (0)
47 
#else /* !CONFIG_MEMCG */

/* Without memcg there is no per-registration state; always succeed. */
int trace_mmap_lock_reg(void)
{
	return 0;
}

void trace_mmap_lock_unreg(void)
{
}

/* Without memcg the event is emitted with an empty path string. */
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
	trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)

#endif /* CONFIG_MEMCG */
63 
64 #ifdef CONFIG_TRACING
65 #ifdef CONFIG_MEMCG
66 /*
67  * Write the given mm_struct's memcg path to a buffer. If the path cannot be
68  * determined or the trace event is being unregistered, empty string is written.
69  */
70 static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
71 {
72 	struct mem_cgroup *memcg;
73 
74 	buf[0] = '\0';
75 	/* No need to get path if no trace event is registered. */
76 	if (!atomic_read(&reg_refcount))
77 		return;
78 	memcg = get_mem_cgroup_from_mm(mm);
79 	if (memcg == NULL)
80 		return;
81 	if (memcg->css.cgroup)
82 		cgroup_path(memcg->css.cgroup, buf, buflen);
83 	css_put(&memcg->css);
84 }
85 
86 #endif /* CONFIG_MEMCG */
87 
88 /*
89  * Trace calls must be in a separate file, as otherwise there's a circular
90  * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
91  */
92 
/* Out-of-line emission of the start_locking event (see header comment above). */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(start_locking, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);
98 
/*
 * Out-of-line emission of the acquire_returned event; @success tells
 * whether the lock attempt actually got the mmap_lock.
 */
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success)
{
	TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);
105 
/* Out-of-line emission of the released event. */
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(released, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
111 #endif /* CONFIG_TRACING */
112