xref: /linux/kernel/trace/trace_events_user.c (revision c5dbf04160005e07e8ca7232a7faa77ab1547ae0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021, Microsoft Corporation.
4  *
5  * Authors:
6  *   Beau Belgrave <beaub@linux.microsoft.com>
7  */
8 
9 #include <linux/bitmap.h>
10 #include <linux/cdev.h>
11 #include <linux/hashtable.h>
12 #include <linux/list.h>
13 #include <linux/io.h>
14 #include <linux/uio.h>
15 #include <linux/ioctl.h>
16 #include <linux/jhash.h>
17 #include <linux/refcount.h>
18 #include <linux/trace_events.h>
19 #include <linux/tracefs.h>
20 #include <linux/types.h>
21 #include <linux/uaccess.h>
22 #include <linux/highmem.h>
23 #include <linux/init.h>
24 #include <linux/user_events.h>
25 #include "trace_dynevent.h"
26 #include "trace_output.h"
27 #include "trace.h"
28 
29 #define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)
30 
31 #define FIELD_DEPTH_TYPE 0
32 #define FIELD_DEPTH_NAME 1
33 #define FIELD_DEPTH_SIZE 2
34 
35 /* Limit how long an event name plus args can be within the subsystem. */
36 #define MAX_EVENT_DESC 512
37 #define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
38 #define MAX_FIELD_ARRAY_SIZE 1024
39 
40 /*
41  * Internal bits (kernel side only) to keep track of connected probes:
42  * These are used when the status of an event is requested in text form. These
43  * bits are compared against an internal byte on the event to determine which
44  * probes to print out to the user.
45  *
46  * These do not reflect the mapped bytes between the user and kernel space.
47  */
48 #define EVENT_STATUS_FTRACE BIT(0)
49 #define EVENT_STATUS_PERF BIT(1)
50 #define EVENT_STATUS_OTHER BIT(7)
51 
52 /*
53  * Stores the system name, tables, and locks for a group of events. This
54  * allows events to be isolated by various means.
55  */
56 struct user_event_group {
57 	char		*system_name;
58 	struct		hlist_node node;
59 	struct		mutex reg_mutex;
60 	DECLARE_HASHTABLE(register_table, 8);
61 };
62 
63 /* Group for init_user_ns mapping, top-most group */
64 static struct user_event_group *init_group;
65 
66 /* Max allowed events for the whole system */
67 static unsigned int max_user_events = 32768;
68 
69 /* Current number of events on the whole system */
70 static unsigned int current_user_events;
71 
72 /*
73  * Stores per-event properties. As users register events
74  * within a file, a user_event might be created if it does not
75  * already exist. These are globally used and their lifetime
76  * is tied to the refcnt member. These cannot go away until the
77  * refcnt reaches one.
78  */
79 struct user_event {
80 	struct user_event_group		*group;
81 	struct tracepoint		tracepoint;
82 	struct trace_event_call		call;
83 	struct trace_event_class	class;
84 	struct dyn_event		devent;
85 	struct hlist_node		node;
86 	struct list_head		fields;
87 	struct list_head		validators;
88 	struct work_struct		put_work;
89 	refcount_t			refcnt;
90 	int				min_size;
91 	int				reg_flags;
92 	char				status;
93 };
94 
95 /*
96  * Stores per-mm/event properties that enable an address to be
97  * updated properly for each task. As tasks are forked, we use
98  * these to track enablement sites that are tied to an event.
99  */
100 struct user_event_enabler {
101 	struct list_head	mm_enablers_link;
102 	struct user_event	*event;
103 	unsigned long		addr;
104 
105 	/* Track enable bit, flags, etc. Aligned for bitops. */
106 	unsigned long		values;
107 };
108 
109 /* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
110 #define ENABLE_VAL_BIT_MASK 0x3F
111 
112 /* Bit 6 is for faulting status of enablement */
113 #define ENABLE_VAL_FAULTING_BIT 6
114 
115 /* Bit 7 is for freeing status of enablement */
116 #define ENABLE_VAL_FREEING_BIT 7
117 
118 /* Bit 8 is for marking a 32-bit value on a 64-bit kernel */
119 #define ENABLE_VAL_32_ON_64_BIT 8
120 
121 #define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)
122 
123 /* Only duplicate the bit and compat values */
124 #define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)
125 
126 #define ENABLE_BITOPS(e) (&(e)->values)
127 
128 #define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
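
/*
 * As an illustration (hypothetical values): an enabler watching bit 3
 * of a 32-bit enable word on a 64-bit kernel would carry:
 *
 *	enabler->values = 3 | ENABLE_VAL_COMPAT_MASK;
 *
 *	ENABLE_BIT(enabler)					-> 3
 *	test_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler))	-> true
 */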
129 
130 /* Used for asynchronous faulting in of pages */
131 struct user_event_enabler_fault {
132 	struct work_struct		work;
133 	struct user_event_mm		*mm;
134 	struct user_event_enabler	*enabler;
135 	int				attempt;
136 };
137 
138 static struct kmem_cache *fault_cache;
139 
140 /* Global list of memory descriptors using user_events */
141 static LIST_HEAD(user_event_mms);
142 static DEFINE_SPINLOCK(user_event_mms_lock);
143 
144 /*
145  * Stores per-file event references. As users register events
146  * within a file, this structure is modified and freed via RCU.
147  * The lifetime of this struct is tied to the lifetime of the file.
148  * These are not shared and are only accessible by the file that created them.
149  */
150 struct user_event_refs {
151 	struct rcu_head		rcu;
152 	int			count;
153 	struct user_event	*events[];
154 };
155 
156 struct user_event_file_info {
157 	struct user_event_group	*group;
158 	struct user_event_refs	*refs;
159 };
160 
161 #define VALIDATOR_ENSURE_NULL (1 << 0)
162 #define VALIDATOR_REL (1 << 1)
163 
164 struct user_event_validator {
165 	struct list_head	user_event_link;
166 	int			offset;
167 	int			flags;
168 };
169 
170 static inline void align_addr_bit(unsigned long *addr, int *bit,
171 				  unsigned long *flags)
172 {
173 	if (IS_ALIGNED(*addr, sizeof(long))) {
174 #ifdef __BIG_ENDIAN
175 		/* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
176 		if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
177 			*bit += 32;
178 #endif
179 		return;
180 	}
181 
182 	*addr = ALIGN_DOWN(*addr, sizeof(long));
183 
184 	/*
185 	 * We only support 32 and 64 bit values. The only time we need
186 	 * to align is a 32 bit value on a 64 bit kernel, which on LE
187 	 * is always 32 bits, and on BE requires no change when unaligned.
188 	 */
189 #ifdef __LITTLE_ENDIAN
190 	*bit += 32;
191 #endif
192 }
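
/*
 * For example (illustrative, little-endian 64-bit kernel): a 32-bit
 * enable word at uaddr 0x1004 with bit 5 is rewritten by the above to
 * uaddr 0x1000 with bit 37, so the word-sized bitops in
 * user_event_enabler_write() still flip the same bit the user sees.
 */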
193 
194 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
195 				   void *tpdata, bool *faulted);
196 
197 static int user_event_parse(struct user_event_group *group, char *name,
198 			    char *args, char *flags,
199 			    struct user_event **newuser, int reg_flags);
200 
201 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
202 static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
203 static void user_event_mm_put(struct user_event_mm *mm);
204 static int destroy_user_event(struct user_event *user);
205 
206 static u32 user_event_key(char *name)
207 {
208 	return jhash(name, strlen(name), 0);
209 }
210 
211 static bool user_event_capable(u16 reg_flags)
212 {
213 	/* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
214 	if (reg_flags & USER_EVENT_REG_PERSIST) {
215 		if (!perfmon_capable())
216 			return false;
217 	}
218 
219 	return true;
220 }
221 
222 static struct user_event *user_event_get(struct user_event *user)
223 {
224 	refcount_inc(&user->refcnt);
225 
226 	return user;
227 }
228 
229 static void delayed_destroy_user_event(struct work_struct *work)
230 {
231 	struct user_event *user = container_of(
232 		work, struct user_event, put_work);
233 
234 	mutex_lock(&event_mutex);
235 
236 	if (!refcount_dec_and_test(&user->refcnt))
237 		goto out;
238 
239 	if (destroy_user_event(user)) {
240 		/*
241 		 * The only reason this would fail here is if we cannot
242 		 * update the visibility of the event. In this case the
243 		 * event stays in the hashtable, waiting for someone to
244 		 * attempt to delete it later.
245 		 */
246 		pr_warn("user_events: Unable to delete event\n");
247 		refcount_set(&user->refcnt, 1);
248 	}
249 out:
250 	mutex_unlock(&event_mutex);
251 }
252 
253 static void user_event_put(struct user_event *user, bool locked)
254 {
255 	bool delete;
256 
257 	if (unlikely(!user))
258 		return;
259 
260 	/*
261 	 * When the event is not enabled for auto-delete there will always
262 	 * be at least 1 reference to the event. During the event creation
263 	 * we initially set the refcnt to 2 to achieve this. In those cases
264 	 * the caller must acquire event_mutex and after decrement check if
265 	 * the refcnt is 1, meaning this is the last reference. When auto
266 	 * delete is enabled, there will only be 1 ref, IE: refcnt will be
267 	 * delete is enabled, there will only be 1 ref, i.e. refcnt is
268 	 * only set to 1 during creation to allow the below checks to go
269 	 * the event mutex held.
270 	 */
271 	if (!locked) {
272 		lockdep_assert_not_held(&event_mutex);
273 		delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
274 	} else {
275 		lockdep_assert_held(&event_mutex);
276 		delete = refcount_dec_and_test(&user->refcnt);
277 	}
278 
279 	if (!delete)
280 		return;
281 
282 	/*
283 	 * We now have the event_mutex in all cases, which ensures that
284 	 * no new references will be taken until event_mutex is released.
285 	 * New references come through find_user_event(), which requires
286 	 * the event_mutex to be held.
287 	 */
288 
289 	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
290 		/* We should not get here when persist flag is set */
291 		pr_alert("BUG: Auto-delete engaged on persistent event\n");
292 		goto out;
293 	}
294 
295 	/*
296 	 * Unfortunately we have to attempt the actual destroy in a work
297 	 * queue. This is because not all cases handle a trace_event_call
298 	 * being removed within the class->reg() operation for unregister.
299 	 */
300 	INIT_WORK(&user->put_work, delayed_destroy_user_event);
301 
302 	/*
303 	 * Since the event is still in the hashtable, we have to re-inc
304 	 * the ref count to 1. This count will be decremented and checked
305 	 * in the work queue to ensure it's still the last ref. This is
306 	 * needed because a user-process could register the same event in
307 	 * between the time of event_mutex release and the work queue
308 	 * running the delayed destroy. If we removed the item now from
309 	 * the hashtable, this would result in a timing window where a
310 	 * user process would fail a register because the trace_event_call
311 	 * register would fail in the tracing layers.
312 	 */
313 	refcount_set(&user->refcnt, 1);
314 
315 	if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
316 		/*
317 		 * If we fail we must wait for an admin to attempt delete or
318 		 * another register/close of the event, whichever is first.
319 		 */
320 		pr_warn("user_events: Unable to queue delayed destroy\n");
321 	}
322 out:
323 	/* If we didn't hold event_mutex on entry, unlock it now */
324 	if (!locked)
325 		mutex_unlock(&event_mutex);
326 }
327 
328 static void user_event_group_destroy(struct user_event_group *group)
329 {
330 	kfree(group->system_name);
331 	kfree(group);
332 }
333 
334 static char *user_event_group_system_name(void)
335 {
336 	char *system_name;
337 	int len = sizeof(USER_EVENTS_SYSTEM) + 1;
338 
339 	system_name = kmalloc(len, GFP_KERNEL);
340 
341 	if (!system_name)
342 		return NULL;
343 
344 	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);
345 
346 	return system_name;
347 }
348 
349 static struct user_event_group *current_user_event_group(void)
350 {
351 	return init_group;
352 }
353 
354 static struct user_event_group *user_event_group_create(void)
355 {
356 	struct user_event_group *group;
357 
358 	group = kzalloc(sizeof(*group), GFP_KERNEL);
359 
360 	if (!group)
361 		return NULL;
362 
363 	group->system_name = user_event_group_system_name();
364 
365 	if (!group->system_name)
366 		goto error;
367 
368 	mutex_init(&group->reg_mutex);
369 	hash_init(group->register_table);
370 
371 	return group;
372 error:
373 	if (group)
374 		user_event_group_destroy(group);
375 
376 	return NULL;
377 }
378 
379 static void user_event_enabler_destroy(struct user_event_enabler *enabler,
380 				       bool locked)
381 {
382 	list_del_rcu(&enabler->mm_enablers_link);
383 
384 	/* No longer tracking the event via the enabler */
385 	user_event_put(enabler->event, locked);
386 
387 	kfree(enabler);
388 }
389 
390 static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
391 				  int attempt)
392 {
393 	bool unlocked;
394 	int ret;
395 
396 	/*
397 	 * Normally the attempt count is low; cap it so that it cannot be
398 	 * taken advantage of by bad user processes to cause excessive looping.
399 	 */
400 	if (attempt > 10)
401 		return -EFAULT;
402 
403 	mmap_read_lock(mm->mm);
404 
405 	/* Ensure MM has tasks, cannot use after exit_mm() */
406 	if (refcount_read(&mm->tasks) == 0) {
407 		ret = -ENOENT;
408 		goto out;
409 	}
410 
411 	ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
412 			       &unlocked);
413 out:
414 	mmap_read_unlock(mm->mm);
415 
416 	return ret;
417 }
418 
419 static int user_event_enabler_write(struct user_event_mm *mm,
420 				    struct user_event_enabler *enabler,
421 				    bool fixup_fault, int *attempt);
422 
423 static void user_event_enabler_fault_fixup(struct work_struct *work)
424 {
425 	struct user_event_enabler_fault *fault = container_of(
426 		work, struct user_event_enabler_fault, work);
427 	struct user_event_enabler *enabler = fault->enabler;
428 	struct user_event_mm *mm = fault->mm;
429 	unsigned long uaddr = enabler->addr;
430 	int attempt = fault->attempt;
431 	int ret;
432 
433 	ret = user_event_mm_fault_in(mm, uaddr, attempt);
434 
435 	if (ret && ret != -ENOENT) {
436 		struct user_event *user = enabler->event;
437 
438 		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
439 			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
440 	}
441 
442 	/* Prevent state changes from racing */
443 	mutex_lock(&event_mutex);
444 
445 	/* User asked for enabler to be removed during fault */
446 	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
447 		user_event_enabler_destroy(enabler, true);
448 		goto out;
449 	}
450 
451 	/*
452 	 * If we managed to get the page, re-issue the write. We do not
453 	 * want to get into a possible infinite loop, which is why we only
454 	 * attempt again directly if the page came in. If we couldn't get
455 	 * the page here, then we will try again the next time the event is
456 	 * enabled/disabled.
457 	 */
458 	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
459 
460 	if (!ret) {
461 		mmap_read_lock(mm->mm);
462 		user_event_enabler_write(mm, enabler, true, &attempt);
463 		mmap_read_unlock(mm->mm);
464 	}
465 out:
466 	mutex_unlock(&event_mutex);
467 
468 	/* In all cases we no longer need the mm or fault */
469 	user_event_mm_put(mm);
470 	kmem_cache_free(fault_cache, fault);
471 }
472 
473 static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
474 					   struct user_event_enabler *enabler,
475 					   int attempt)
476 {
477 	struct user_event_enabler_fault *fault;
478 
479 	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
480 
481 	if (!fault)
482 		return false;
483 
484 	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
485 	fault->mm = user_event_mm_get(mm);
486 	fault->enabler = enabler;
487 	fault->attempt = attempt;
488 
489 	/* Don't try to queue this in again while we have a pending fault */
490 	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
491 
492 	if (!schedule_work(&fault->work)) {
493 		/* Allow another attempt later */
494 		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
495 
496 		user_event_mm_put(mm);
497 		kmem_cache_free(fault_cache, fault);
498 
499 		return false;
500 	}
501 
502 	return true;
503 }
504 
505 static int user_event_enabler_write(struct user_event_mm *mm,
506 				    struct user_event_enabler *enabler,
507 				    bool fixup_fault, int *attempt)
508 {
509 	unsigned long uaddr = enabler->addr;
510 	unsigned long *ptr;
511 	struct page *page;
512 	void *kaddr;
513 	int bit = ENABLE_BIT(enabler);
514 	int ret;
515 
516 	lockdep_assert_held(&event_mutex);
517 	mmap_assert_locked(mm->mm);
518 
519 	*attempt += 1;
520 
521 	/* Ensure MM has tasks, cannot use after exit_mm() */
522 	if (refcount_read(&mm->tasks) == 0)
523 		return -ENOENT;
524 
525 	if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
526 		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
527 		return -EBUSY;
528 
529 	align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));
530 
531 	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
532 				    &page, NULL);
533 
534 	if (unlikely(ret <= 0)) {
535 		if (!fixup_fault)
536 			return -EFAULT;
537 
538 		if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
539 			pr_warn("user_events: Unable to queue fault handler\n");
540 
541 		return -EFAULT;
542 	}
543 
544 	kaddr = kmap_local_page(page);
545 	ptr = kaddr + (uaddr & ~PAGE_MASK);
546 
547 	/* Update bit atomically, user tracers must be atomic as well */
548 	if (enabler->event && enabler->event->status)
549 		set_bit(bit, ptr);
550 	else
551 		clear_bit(bit, ptr);
552 
553 	kunmap_local(kaddr);
554 	unpin_user_pages_dirty_lock(&page, 1, true);
555 
556 	return 0;
557 }
558 
559 static bool user_event_enabler_exists(struct user_event_mm *mm,
560 				      unsigned long uaddr, unsigned char bit)
561 {
562 	struct user_event_enabler *enabler;
563 
564 	list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
565 		if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
566 			return true;
567 	}
568 
569 	return false;
570 }
571 
572 static void user_event_enabler_update(struct user_event *user)
573 {
574 	struct user_event_enabler *enabler;
575 	struct user_event_mm *next;
576 	struct user_event_mm *mm;
577 	int attempt;
578 
579 	lockdep_assert_held(&event_mutex);
580 
581 	/*
582 	 * We need to build a one-shot list of all the mms that have an
583 	 * enabler for the user_event passed in. This list is only valid
584 	 * while holding the event_mutex. The only reason for this is that
585 	 * the global mm list is RCU protected and we use methods which
586 	 * can wait (mmap_read_lock and pin_user_pages_remote).
587 	 *
588 	 * NOTE: user_event_mm_get_all() increments the ref count of each
589 	 * mm that is added to the list to prevent removal timing windows.
590 	 * We must always put each mm after they are used, which may wait.
591 	 */
592 	mm = user_event_mm_get_all(user);
593 
594 	while (mm) {
595 		next = mm->next;
596 		mmap_read_lock(mm->mm);
597 
598 		list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
599 			if (enabler->event == user) {
600 				attempt = 0;
601 				user_event_enabler_write(mm, enabler, true, &attempt);
602 			}
603 		}
604 
605 		mmap_read_unlock(mm->mm);
606 		user_event_mm_put(mm);
607 		mm = next;
608 	}
609 }
610 
611 static bool user_event_enabler_dup(struct user_event_enabler *orig,
612 				   struct user_event_mm *mm)
613 {
614 	struct user_event_enabler *enabler;
615 
616 	/* Skip pending frees */
617 	if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
618 		return true;
619 
620 	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);
621 
622 	if (!enabler)
623 		return false;
624 
625 	enabler->event = user_event_get(orig->event);
626 	enabler->addr = orig->addr;
627 
628 	/* Only dup part of value (ignore future flags, etc) */
629 	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
630 
631 	/* Enablers not exposed yet, RCU not required */
632 	list_add(&enabler->mm_enablers_link, &mm->enablers);
633 
634 	return true;
635 }
636 
637 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
638 {
639 	refcount_inc(&mm->refcnt);
640 
641 	return mm;
642 }
643 
644 static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
645 {
646 	struct user_event_mm *found = NULL;
647 	struct user_event_enabler *enabler;
648 	struct user_event_mm *mm;
649 
650 	/*
651 	 * We use the mm->next field to build a one-shot list from the global
652 	 * RCU protected list. To build this list the event_mutex must be held.
653 	 * This lets us build a list without requiring allocs that could fail
654 	 * when user based events are most wanted for diagnostics.
655 	 */
656 	lockdep_assert_held(&event_mutex);
657 
658 	/*
659 	 * We do not want to block fork/exec while enablements are being
660 	 * updated, so we use RCU to walk the current tasks that have used
661 	 * user_events ABI for 1 or more events. Each enabler found in each
662 	 * task that matches the event being updated has a write to reflect
663 	 * the kernel state back into the process. Waits/faults must not occur
664 	 * during this. So we scan the list under RCU for all the mm that have
665 	 * the event within it. This is needed because mmap_read_lock() can wait.
666 	 * Each user mm returned has a ref inc to handle remove RCU races.
667 	 */
668 	rcu_read_lock();
669 
670 	list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
671 		list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
672 			if (enabler->event == user) {
673 				mm->next = found;
674 				found = user_event_mm_get(mm);
675 				break;
676 			}
677 		}
678 	}
679 
680 	rcu_read_unlock();
681 
682 	return found;
683 }
684 
685 static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
686 {
687 	struct user_event_mm *user_mm;
688 
689 	user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
690 
691 	if (!user_mm)
692 		return NULL;
693 
694 	user_mm->mm = t->mm;
695 	INIT_LIST_HEAD(&user_mm->enablers);
696 	refcount_set(&user_mm->refcnt, 1);
697 	refcount_set(&user_mm->tasks, 1);
698 
699 	/*
700 	 * The lifetime of the memory descriptor can slightly outlast
701 	 * the task lifetime if a ref to the user_event_mm is taken
702 	 * between list_del_rcu() and call_rcu(). Therefore we need
703 	 * to take a reference to it to ensure it can live this long
704 	 * under this corner case. This can also occur in clones that
705 	 * outlast the parent.
706 	 */
707 	mmgrab(user_mm->mm);
708 
709 	return user_mm;
710 }
711 
712 static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
713 {
714 	unsigned long flags;
715 
716 	spin_lock_irqsave(&user_event_mms_lock, flags);
717 	list_add_rcu(&user_mm->mms_link, &user_event_mms);
718 	spin_unlock_irqrestore(&user_event_mms_lock, flags);
719 
720 	t->user_event_mm = user_mm;
721 }
722 
723 static struct user_event_mm *current_user_event_mm(void)
724 {
725 	struct user_event_mm *user_mm = current->user_event_mm;
726 
727 	if (user_mm)
728 		goto inc;
729 
730 	user_mm = user_event_mm_alloc(current);
731 
732 	if (!user_mm)
733 		goto error;
734 
735 	user_event_mm_attach(user_mm, current);
736 inc:
737 	refcount_inc(&user_mm->refcnt);
738 error:
739 	return user_mm;
740 }
741 
742 static void user_event_mm_destroy(struct user_event_mm *mm)
743 {
744 	struct user_event_enabler *enabler, *next;
745 
746 	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
747 		user_event_enabler_destroy(enabler, false);
748 
749 	mmdrop(mm->mm);
750 	kfree(mm);
751 }
752 
753 static void user_event_mm_put(struct user_event_mm *mm)
754 {
755 	if (mm && refcount_dec_and_test(&mm->refcnt))
756 		user_event_mm_destroy(mm);
757 }
758 
759 static void delayed_user_event_mm_put(struct work_struct *work)
760 {
761 	struct user_event_mm *mm;
762 
763 	mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
764 	user_event_mm_put(mm);
765 }
766 
767 void user_event_mm_remove(struct task_struct *t)
768 {
769 	struct user_event_mm *mm;
770 	unsigned long flags;
771 
772 	might_sleep();
773 
774 	mm = t->user_event_mm;
775 	t->user_event_mm = NULL;
776 
777 	/* Clone will increment the tasks, only remove if last clone */
778 	if (!refcount_dec_and_test(&mm->tasks))
779 		return;
780 
781 	/* Remove the mm from the list, so it can no longer be enabled */
782 	spin_lock_irqsave(&user_event_mms_lock, flags);
783 	list_del_rcu(&mm->mms_link);
784 	spin_unlock_irqrestore(&user_event_mms_lock, flags);
785 
786 	/*
787 	 * We need to wait for currently occurring writes to stop within
788 	 * the mm. This is required since exit_mm() snaps the current rss
789 	 * stats and clears them. On the final mmdrop(), check_mm() will
790 	 * report a bug if these increment.
791 	 *
792 	 * All writes/pins are done under mmap_read lock, take the write
793 	 * lock to ensure in-progress faults have completed. Faults that
794 	 * are pending but yet to run will check the task count and skip
795 	 * the fault since the mm is going away.
796 	 */
797 	mmap_write_lock(mm->mm);
798 	mmap_write_unlock(mm->mm);
799 
800 	/*
801 	 * Put for mm must be done after RCU delay to handle new refs in
802 	 * between the list_del_rcu() and now. This ensures any get refs
803 	 * during rcu_read_lock() are accounted for during list removal.
804 	 *
805 	 * CPU A			|	CPU B
806 	 * ---------------------------------------------------------------
807 	 * user_event_mm_remove()	|	rcu_read_lock();
808 	 * list_del_rcu()		|	list_for_each_entry_rcu();
809 	 * call_rcu()			|	refcount_inc();
810 	 * .				|	rcu_read_unlock();
811 	 * schedule_work()		|	.
812 	 * user_event_mm_put()		|	.
813 	 *
814 	 * mmdrop() cannot be called in the softirq context of call_rcu()
815 	 * so we use a work queue after call_rcu() to run within.
816 	 */
817 	INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
818 	queue_rcu_work(system_wq, &mm->put_rwork);
819 }
820 
821 void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
822 {
823 	struct user_event_mm *mm = user_event_mm_alloc(t);
824 	struct user_event_enabler *enabler;
825 
826 	if (!mm)
827 		return;
828 
829 	rcu_read_lock();
830 
831 	list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
832 		if (!user_event_enabler_dup(enabler, mm))
833 			goto error;
834 	}
835 
836 	rcu_read_unlock();
837 
838 	user_event_mm_attach(mm, t);
839 	return;
840 error:
841 	rcu_read_unlock();
842 	user_event_mm_destroy(mm);
843 }
844 
845 static bool current_user_event_enabler_exists(unsigned long uaddr,
846 					      unsigned char bit)
847 {
848 	struct user_event_mm *user_mm = current_user_event_mm();
849 	bool exists;
850 
851 	if (!user_mm)
852 		return false;
853 
854 	exists = user_event_enabler_exists(user_mm, uaddr, bit);
855 
856 	user_event_mm_put(user_mm);
857 
858 	return exists;
859 }
860 
861 static struct user_event_enabler
862 *user_event_enabler_create(struct user_reg *reg, struct user_event *user,
863 			   int *write_result)
864 {
865 	struct user_event_enabler *enabler;
866 	struct user_event_mm *user_mm;
867 	unsigned long uaddr = (unsigned long)reg->enable_addr;
868 	int attempt = 0;
869 
870 	user_mm = current_user_event_mm();
871 
872 	if (!user_mm)
873 		return NULL;
874 
875 	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);
876 
877 	if (!enabler)
878 		goto out;
879 
880 	enabler->event = user;
881 	enabler->addr = uaddr;
882 	enabler->values = reg->enable_bit;
883 
884 #if BITS_PER_LONG >= 64
885 	if (reg->enable_size == 4)
886 		set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
887 #endif
888 
889 retry:
890 	/* Prevents state changes from racing with new enablers */
891 	mutex_lock(&event_mutex);
892 
893 	/* Attempt to reflect the current state within the process */
894 	mmap_read_lock(user_mm->mm);
895 	*write_result = user_event_enabler_write(user_mm, enabler, false,
896 						 &attempt);
897 	mmap_read_unlock(user_mm->mm);
898 
899 	/*
900 	 * If the write works, then we will track the enabler. A ref to the
901 	 * underlying user_event is held by the enabler to prevent it going
902 	 * away while the enabler is still in use by a process. The ref is
903 	 * removed when the enabler is destroyed. This means an event cannot
904 	 * be forcefully deleted from the system until all tasks using it
905 	 * exit or run exec(), which includes forks and clones.
906 	 */
907 	if (!*write_result) {
908 		user_event_get(user);
909 		list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
910 	}
911 
912 	mutex_unlock(&event_mutex);
913 
914 	if (*write_result) {
915 		/* Attempt to fault-in and retry if it worked */
916 		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
917 			goto retry;
918 
919 		kfree(enabler);
920 		enabler = NULL;
921 	}
922 out:
923 	user_event_mm_put(user_mm);
924 
925 	return enabler;
926 }
927 
928 static __always_inline __must_check
929 bool user_event_last_ref(struct user_event *user)
930 {
931 	int last = 0;
932 
933 	if (user->reg_flags & USER_EVENT_REG_PERSIST)
934 		last = 1;
935 
936 	return refcount_read(&user->refcnt) == last;
937 }
938 
939 static __always_inline __must_check
940 size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
941 {
942 	size_t ret;
943 
944 	pagefault_disable();
945 
946 	ret = copy_from_iter_nocache(addr, bytes, i);
947 
948 	pagefault_enable();
949 
950 	return ret;
951 }
952 
953 static struct list_head *user_event_get_fields(struct trace_event_call *call)
954 {
955 	struct user_event *user = (struct user_event *)call->data;
956 
957 	return &user->fields;
958 }
959 
960 /*
961  * Parses a register command for user_events
962  * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
963  *
964  * Example event named 'test' with a 20 char 'msg' field with an unsigned int
965  * 'id' field after:
966  * test char[20] msg;unsigned int id
967  *
968  * NOTE: Offsets are from the user data perspective; they are not from the
969  * trace_entry/buffer perspective. We automatically add the common properties
970  * sizes to the offset for the user.
971  *
972  * Upon success user_event has its ref count increased by 1.
973  */
974 static int user_event_parse_cmd(struct user_event_group *group,
975 				char *raw_command, struct user_event **newuser,
976 				int reg_flags)
977 {
978 	char *name = raw_command;
979 	char *args = strpbrk(name, " ");
980 	char *flags;
981 
982 	if (args)
983 		*args++ = '\0';
984 
985 	flags = strpbrk(name, ":");
986 
987 	if (flags)
988 		*flags++ = '\0';
989 
990 	return user_event_parse(group, name, args, flags, newuser, reg_flags);
991 }
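
/*
 * From user space the same command string is typically passed via the
 * DIAG_IOCSREG ioctl on user_events_data. A minimal sketch (variable
 * names are illustrative):
 *
 *	int enabled;	// the watched enable word, see user_event_enabler
 *	struct user_reg reg = {
 *		.size = sizeof(reg),
 *		.enable_bit = 31,
 *		.enable_size = sizeof(enabled),
 *		.enable_addr = (__u64)(unsigned long)&enabled,
 *		.name_args = (__u64)(unsigned long)
 *			"test char[20] msg;unsigned int id",
 *	};
 *
 *	ioctl(data_fd, DIAG_IOCSREG, &reg);
 */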
992 
993 static int user_field_array_size(const char *type)
994 {
995 	const char *start = strchr(type, '[');
996 	char val[8];
997 	char *bracket;
998 	int size = 0;
999 
1000 	if (start == NULL)
1001 		return -EINVAL;
1002 
1003 	if (strscpy(val, start + 1, sizeof(val)) <= 0)
1004 		return -EINVAL;
1005 
1006 	bracket = strchr(val, ']');
1007 
1008 	if (!bracket)
1009 		return -EINVAL;
1010 
1011 	*bracket = '\0';
1012 
1013 	if (kstrtouint(val, 0, &size))
1014 		return -EINVAL;
1015 
1016 	if (size > MAX_FIELD_ARRAY_SIZE)
1017 		return -EINVAL;
1018 
1019 	return size;
1020 }
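
/*
 * E.g. "char[20]" yields 20 here; anything above MAX_FIELD_ARRAY_SIZE
 * (1024) or with a malformed bracket is rejected with -EINVAL.
 */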
1021 
1022 static int user_field_size(const char *type)
1023 {
1024 	/* long is not allowed from a user, since it's ambiguous in size */
1025 	if (strcmp(type, "s64") == 0)
1026 		return sizeof(s64);
1027 	if (strcmp(type, "u64") == 0)
1028 		return sizeof(u64);
1029 	if (strcmp(type, "s32") == 0)
1030 		return sizeof(s32);
1031 	if (strcmp(type, "u32") == 0)
1032 		return sizeof(u32);
1033 	if (strcmp(type, "int") == 0)
1034 		return sizeof(int);
1035 	if (strcmp(type, "unsigned int") == 0)
1036 		return sizeof(unsigned int);
1037 	if (strcmp(type, "s16") == 0)
1038 		return sizeof(s16);
1039 	if (strcmp(type, "u16") == 0)
1040 		return sizeof(u16);
1041 	if (strcmp(type, "short") == 0)
1042 		return sizeof(short);
1043 	if (strcmp(type, "unsigned short") == 0)
1044 		return sizeof(unsigned short);
1045 	if (strcmp(type, "s8") == 0)
1046 		return sizeof(s8);
1047 	if (strcmp(type, "u8") == 0)
1048 		return sizeof(u8);
1049 	if (strcmp(type, "char") == 0)
1050 		return sizeof(char);
1051 	if (strcmp(type, "unsigned char") == 0)
1052 		return sizeof(unsigned char);
1053 	if (str_has_prefix(type, "char["))
1054 		return user_field_array_size(type);
1055 	if (str_has_prefix(type, "unsigned char["))
1056 		return user_field_array_size(type);
1057 	if (str_has_prefix(type, "__data_loc "))
1058 		return sizeof(u32);
1059 	if (str_has_prefix(type, "__rel_loc "))
1060 		return sizeof(u32);
1061 
1062 	/* Unknown basic type, error */
1063 	return -EINVAL;
1064 }
1065 
1066 static void user_event_destroy_validators(struct user_event *user)
1067 {
1068 	struct user_event_validator *validator, *next;
1069 	struct list_head *head = &user->validators;
1070 
1071 	list_for_each_entry_safe(validator, next, head, user_event_link) {
1072 		list_del(&validator->user_event_link);
1073 		kfree(validator);
1074 	}
1075 }
1076 
1077 static void user_event_destroy_fields(struct user_event *user)
1078 {
1079 	struct ftrace_event_field *field, *next;
1080 	struct list_head *head = &user->fields;
1081 
1082 	list_for_each_entry_safe(field, next, head, link) {
1083 		list_del(&field->link);
1084 		kfree(field);
1085 	}
1086 }
1087 
1088 static int user_event_add_field(struct user_event *user, const char *type,
1089 				const char *name, int offset, int size,
1090 				int is_signed, int filter_type)
1091 {
1092 	struct user_event_validator *validator;
1093 	struct ftrace_event_field *field;
1094 	int validator_flags = 0;
1095 
1096 	field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);
1097 
1098 	if (!field)
1099 		return -ENOMEM;
1100 
1101 	if (str_has_prefix(type, "__data_loc "))
1102 		goto add_validator;
1103 
1104 	if (str_has_prefix(type, "__rel_loc ")) {
1105 		validator_flags |= VALIDATOR_REL;
1106 		goto add_validator;
1107 	}
1108 
1109 	goto add_field;
1110 
1111 add_validator:
1112 	if (strstr(type, "char") != NULL)
1113 		validator_flags |= VALIDATOR_ENSURE_NULL;
1114 
1115 	validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);
1116 
1117 	if (!validator) {
1118 		kfree(field);
1119 		return -ENOMEM;
1120 	}
1121 
1122 	validator->flags = validator_flags;
1123 	validator->offset = offset;
1124 
1125 	/* Want sequential access when validating */
1126 	list_add_tail(&validator->user_event_link, &user->validators);
1127 
1128 add_field:
1129 	field->type = type;
1130 	field->name = name;
1131 	field->offset = offset;
1132 	field->size = size;
1133 	field->is_signed = is_signed;
1134 	field->filter_type = filter_type;
1135 
1136 	if (filter_type == FILTER_OTHER)
1137 		field->filter_type = filter_assign_type(type);
1138 
1139 	list_add(&field->link, &user->fields);
1140 
1141 	/*
1142 	 * Min size required from user writes; this does not include
1143 	 * the size of trace_entry (common fields).
1144 	 */
1145 	user->min_size = (offset + size) - sizeof(struct trace_entry);
1146 
1147 	return 0;
1148 }
1149 
1150 /*
1151  * Parses the values of a field within the description
1152  * Format: type name [size]
1153  */
1154 static int user_event_parse_field(char *field, struct user_event *user,
1155 				  u32 *offset)
1156 {
1157 	char *part, *type, *name;
1158 	u32 depth = 0, saved_offset = *offset;
1159 	int len, size = -EINVAL;
1160 	bool is_struct = false;
1161 
1162 	field = skip_spaces(field);
1163 
1164 	if (*field == '\0')
1165 		return 0;
1166 
1167 	/* Handle types that have a space within */
1168 	len = str_has_prefix(field, "unsigned ");
1169 	if (len)
1170 		goto skip_next;
1171 
1172 	len = str_has_prefix(field, "struct ");
1173 	if (len) {
1174 		is_struct = true;
1175 		goto skip_next;
1176 	}
1177 
1178 	len = str_has_prefix(field, "__data_loc unsigned ");
1179 	if (len)
1180 		goto skip_next;
1181 
1182 	len = str_has_prefix(field, "__data_loc ");
1183 	if (len)
1184 		goto skip_next;
1185 
1186 	len = str_has_prefix(field, "__rel_loc unsigned ");
1187 	if (len)
1188 		goto skip_next;
1189 
1190 	len = str_has_prefix(field, "__rel_loc ");
1191 	if (len)
1192 		goto skip_next;
1193 
1194 	goto parse;
1195 skip_next:
1196 	type = field;
1197 	field = strpbrk(field + len, " ");
1198 
1199 	if (field == NULL)
1200 		return -EINVAL;
1201 
1202 	*field++ = '\0';
1203 	depth++;
1204 parse:
1205 	name = NULL;
1206 
1207 	while ((part = strsep(&field, " ")) != NULL) {
1208 		switch (depth++) {
1209 		case FIELD_DEPTH_TYPE:
1210 			type = part;
1211 			break;
1212 		case FIELD_DEPTH_NAME:
1213 			name = part;
1214 			break;
1215 		case FIELD_DEPTH_SIZE:
1216 			if (!is_struct)
1217 				return -EINVAL;
1218 
1219 			if (kstrtou32(part, 10, &size))
1220 				return -EINVAL;
1221 			break;
1222 		default:
1223 			return -EINVAL;
1224 		}
1225 	}
1226 
1227 	if (depth < FIELD_DEPTH_SIZE || !name)
1228 		return -EINVAL;
1229 
1230 	if (depth == FIELD_DEPTH_SIZE)
1231 		size = user_field_size(type);
1232 
1233 	if (size == 0)
1234 		return -EINVAL;
1235 
1236 	if (size < 0)
1237 		return size;
1238 
1239 	*offset = saved_offset + size;
1240 
1241 	return user_event_add_field(user, type, name, saved_offset, size,
1242 				    type[0] != 'u', FILTER_OTHER);
1243 }
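
/*
 * Illustrative parses (not exhaustive):
 *
 *	"u32 id"		-> type "u32", name "id", size 4
 *	"struct my_st data 32"	-> type "struct my_st", name "data",
 *				   explicit size 32 (structs require one)
 *	"__rel_loc char[] msg"	-> 4 byte loc word plus a validator
 */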
1244 
1245 static int user_event_parse_fields(struct user_event *user, char *args)
1246 {
1247 	char *field;
1248 	u32 offset = sizeof(struct trace_entry);
1249 	int ret = -EINVAL;
1250 
1251 	if (args == NULL)
1252 		return 0;
1253 
1254 	while ((field = strsep(&args, ";")) != NULL) {
1255 		ret = user_event_parse_field(field, user, &offset);
1256 
1257 		if (ret)
1258 			break;
1259 	}
1260 
1261 	return ret;
1262 }
1263 
1264 static struct trace_event_fields user_event_fields_array[1];
1265 
1266 static const char *user_field_format(const char *type)
1267 {
1268 	if (strcmp(type, "s64") == 0)
1269 		return "%lld";
1270 	if (strcmp(type, "u64") == 0)
1271 		return "%llu";
1272 	if (strcmp(type, "s32") == 0)
1273 		return "%d";
1274 	if (strcmp(type, "u32") == 0)
1275 		return "%u";
1276 	if (strcmp(type, "int") == 0)
1277 		return "%d";
1278 	if (strcmp(type, "unsigned int") == 0)
1279 		return "%u";
1280 	if (strcmp(type, "s16") == 0)
1281 		return "%d";
1282 	if (strcmp(type, "u16") == 0)
1283 		return "%u";
1284 	if (strcmp(type, "short") == 0)
1285 		return "%d";
1286 	if (strcmp(type, "unsigned short") == 0)
1287 		return "%u";
1288 	if (strcmp(type, "s8") == 0)
1289 		return "%d";
1290 	if (strcmp(type, "u8") == 0)
1291 		return "%u";
1292 	if (strcmp(type, "char") == 0)
1293 		return "%d";
1294 	if (strcmp(type, "unsigned char") == 0)
1295 		return "%u";
1296 	if (strstr(type, "char[") != NULL)
1297 		return "%s";
1298 
1299 	/* Unknown, likely a struct; allowed, treated as 64-bit */
1300 	return "%llu";
1301 }
1302 
1303 static bool user_field_is_dyn_string(const char *type, const char **str_func)
1304 {
1305 	if (str_has_prefix(type, "__data_loc ")) {
1306 		*str_func = "__get_str";
1307 		goto check;
1308 	}
1309 
1310 	if (str_has_prefix(type, "__rel_loc ")) {
1311 		*str_func = "__get_rel_str";
1312 		goto check;
1313 	}
1314 
1315 	return false;
1316 check:
1317 	return strstr(type, "char") != NULL;
1318 }
1319 
1320 #define LEN_OR_ZERO (len ? len - pos : 0)
1321 static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
1322 				     char *buf, int len, bool *colon)
1323 {
1324 	int pos = 0, i = *iout;
1325 
1326 	*colon = false;
1327 
1328 	for (; i < argc; ++i) {
1329 		if (i != *iout)
1330 			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1331 
1332 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);
1333 
1334 		if (strchr(argv[i], ';')) {
1335 			++i;
1336 			*colon = true;
1337 			break;
1338 		}
1339 	}
1340 
1341 	/* Actual set, advance i */
1342 	if (len != 0)
1343 		*iout = i;
1344 
1345 	return pos + 1;
1346 }
1347 
1348 static int user_field_set_string(struct ftrace_event_field *field,
1349 				 char *buf, int len, bool colon)
1350 {
1351 	int pos = 0;
1352 
1353 	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
1354 	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1355 	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
1356 
1357 	if (str_has_prefix(field->type, "struct "))
1358 		pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);
1359 
1360 	if (colon)
1361 		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
1362 
1363 	return pos + 1;
1364 }
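
/*
 * Both set_string() helpers above use the common two-pass snprintf
 * pattern: a first call with len == 0 only measures (LEN_OR_ZERO
 * clamps all writes to zero); the caller then allocates the returned
 * size and calls again to fill the buffer, as user_field_match()
 * does below.
 */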
1365 
1366 static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
1367 {
1368 	struct ftrace_event_field *field;
1369 	struct list_head *head = &user->fields;
1370 	int pos = 0, depth = 0;
1371 	const char *str_func;
1372 
1373 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1374 
1375 	list_for_each_entry_reverse(field, head, link) {
1376 		if (depth != 0)
1377 			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1378 
1379 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
1380 				field->name, user_field_format(field->type));
1381 
1382 		depth++;
1383 	}
1384 
1385 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1386 
1387 	list_for_each_entry_reverse(field, head, link) {
1388 		if (user_field_is_dyn_string(field->type, &str_func))
1389 			pos += snprintf(buf + pos, LEN_OR_ZERO,
1390 					", %s(%s)", str_func, field->name);
1391 		else
1392 			pos += snprintf(buf + pos, LEN_OR_ZERO,
1393 					", REC->%s", field->name);
1394 	}
1395 
1396 	return pos + 1;
1397 }
1398 #undef LEN_OR_ZERO
1399 
1400 static int user_event_create_print_fmt(struct user_event *user)
1401 {
1402 	char *print_fmt;
1403 	int len;
1404 
1405 	len = user_event_set_print_fmt(user, NULL, 0);
1406 
1407 	print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);
1408 
1409 	if (!print_fmt)
1410 		return -ENOMEM;
1411 
1412 	user_event_set_print_fmt(user, print_fmt, len);
1413 
1414 	user->call.print_fmt = print_fmt;
1415 
1416 	return 0;
1417 }
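
/*
 * For the "test char[20] msg;unsigned int id" example above, the
 * resulting print_fmt would be (illustrative):
 *
 *	"msg=%s id=%u", REC->msg, REC->id
 */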
1418 
1419 static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
1420 						int flags,
1421 						struct trace_event *event)
1422 {
1423 	return print_event_fields(iter, event);
1424 }
1425 
1426 static struct trace_event_functions user_event_funcs = {
1427 	.trace = user_event_print_trace,
1428 };
1429 
1430 static int user_event_set_call_visible(struct user_event *user, bool visible)
1431 {
1432 	int ret;
1433 	const struct cred *old_cred;
1434 	struct cred *cred;
1435 
1436 	cred = prepare_creds();
1437 
1438 	if (!cred)
1439 		return -ENOMEM;
1440 
1441 	/*
1442 	 * While by default tracefs is locked down, systems can be configured
1443 	 * to allow user_event files to be less locked down. The extreme case
1444 	 * is that "other" has read/write access to user_events_data/status.
1445 	 *
1446 	 * When not locked down, processes may not have permissions to
1447 	 * add/remove calls themselves to tracefs. We need to temporarily
1448 	 * switch to root file permission to allow for this scenario.
1449 	 */
1450 	cred->fsuid = GLOBAL_ROOT_UID;
1451 
1452 	old_cred = override_creds(cred);
1453 
1454 	if (visible)
1455 		ret = trace_add_event_call(&user->call);
1456 	else
1457 		ret = trace_remove_event_call(&user->call);
1458 
1459 	revert_creds(old_cred);
1460 	put_cred(cred);
1461 
1462 	return ret;
1463 }
1464 
1465 static int destroy_user_event(struct user_event *user)
1466 {
1467 	int ret = 0;
1468 
1469 	lockdep_assert_held(&event_mutex);
1470 
1471 	/* Must destroy fields before call removal */
1472 	user_event_destroy_fields(user);
1473 
1474 	ret = user_event_set_call_visible(user, false);
1475 
1476 	if (ret)
1477 		return ret;
1478 
1479 	dyn_event_remove(&user->devent);
1480 	hash_del(&user->node);
1481 
1482 	user_event_destroy_validators(user);
1483 	kfree(user->call.print_fmt);
1484 	kfree(EVENT_NAME(user));
1485 	kfree(user);
1486 
1487 	if (current_user_events > 0)
1488 		current_user_events--;
1489 	else
1490 		pr_alert("BUG: Bad current_user_events\n");
1491 
1492 	return ret;
1493 }
1494 
1495 static struct user_event *find_user_event(struct user_event_group *group,
1496 					  char *name, u32 *outkey)
1497 {
1498 	struct user_event *user;
1499 	u32 key = user_event_key(name);
1500 
1501 	*outkey = key;
1502 
1503 	hash_for_each_possible(group->register_table, user, node, key)
1504 		if (!strcmp(EVENT_NAME(user), name))
1505 			return user_event_get(user);
1506 
1507 	return NULL;
1508 }
1509 
1510 static int user_event_validate(struct user_event *user, void *data, int len)
1511 {
1512 	struct list_head *head = &user->validators;
1513 	struct user_event_validator *validator;
1514 	void *pos, *end = data + len;
1515 	u32 loc, offset, size;
1516 
1517 	list_for_each_entry(validator, head, user_event_link) {
1518 		pos = data + validator->offset;
1519 
1520 		/* Already done min_size check, no bounds check here */
1521 		loc = *(u32 *)pos;
1522 		offset = loc & 0xffff;
1523 		size = loc >> 16;
1524 
1525 		if (likely(validator->flags & VALIDATOR_REL))
1526 			pos += offset + sizeof(loc);
1527 		else
1528 			pos = data + offset;
1529 
1530 		pos += size;
1531 
1532 		if (unlikely(pos > end))
1533 			return -EFAULT;
1534 
1535 		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
1536 			if (unlikely(*(char *)(pos - 1) != '\0'))
1537 				return -EFAULT;
1538 	}
1539 
1540 	return 0;
1541 }
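
/*
 * Illustrative check: a "__rel_loc char[] msg" field whose loc word is
 * (5 << 16) | 0 claims 5 bytes starting right after the loc word
 * itself; the walk above verifies those 5 bytes fit within the payload
 * and that the last one is the terminating NUL.
 */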
1542 
1543 /*
1544  * Writes the user supplied payload out to a trace file.
1545  */
1546 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
1547 			      void *tpdata, bool *faulted)
1548 {
1549 	struct trace_event_file *file;
1550 	struct trace_entry *entry;
1551 	struct trace_event_buffer event_buffer;
1552 	size_t size = sizeof(*entry) + i->count;
1553 
1554 	file = (struct trace_event_file *)tpdata;
1555 
1556 	if (!file ||
1557 	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
1558 	    trace_trigger_soft_disabled(file))
1559 		return;
1560 
1561 	/* Allocates and fills trace_entry; entry + 1 is the data payload */
1562 	entry = trace_event_buffer_reserve(&event_buffer, file, size);
1563 
1564 	if (unlikely(!entry))
1565 		return;
1566 
1567 	if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
1568 		goto discard;
1569 
1570 	if (!list_empty(&user->validators) &&
1571 	    unlikely(user_event_validate(user, entry, size)))
1572 		goto discard;
1573 
1574 	trace_event_buffer_commit(&event_buffer);
1575 
1576 	return;
1577 discard:
1578 	*faulted = true;
1579 	__trace_event_discard_commit(event_buffer.buffer,
1580 				     event_buffer.event);
1581 }
1582 
1583 #ifdef CONFIG_PERF_EVENTS
1584 /*
1585  * Writes the user supplied payload out to perf ring buffer.
1586  */
1587 static void user_event_perf(struct user_event *user, struct iov_iter *i,
1588 			    void *tpdata, bool *faulted)
1589 {
1590 	struct hlist_head *perf_head;
1591 
1592 	perf_head = this_cpu_ptr(user->call.perf_events);
1593 
1594 	if (perf_head && !hlist_empty(perf_head)) {
1595 		struct trace_entry *perf_entry;
1596 		struct pt_regs *regs;
1597 		size_t size = sizeof(*perf_entry) + i->count;
1598 		int context;
1599 
1600 		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
1601 						  &regs, &context);
1602 
1603 		if (unlikely(!perf_entry))
1604 			return;
1605 
1606 		perf_fetch_caller_regs(regs);
1607 
1608 		if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
1609 			goto discard;
1610 
1611 		if (!list_empty(&user->validators) &&
1612 		    unlikely(user_event_validate(user, perf_entry, size)))
1613 			goto discard;
1614 
1615 		perf_trace_buf_submit(perf_entry, size, context,
1616 				      user->call.event.type, 1, regs,
1617 				      perf_head, NULL);
1618 
1619 		return;
1620 discard:
1621 		*faulted = true;
1622 		perf_swevent_put_recursion_context(context);
1623 	}
1624 }
1625 #endif
1626 
1627 /*
1628  * Update the enabled bit among all user processes.
1629  */
1630 static void update_enable_bit_for(struct user_event *user)
1631 {
1632 	struct tracepoint *tp = &user->tracepoint;
1633 	char status = 0;
1634 
1635 	if (atomic_read(&tp->key.enabled) > 0) {
1636 		struct tracepoint_func *probe_func_ptr;
1637 		user_event_func_t probe_func;
1638 
1639 		rcu_read_lock_sched();
1640 
1641 		probe_func_ptr = rcu_dereference_sched(tp->funcs);
1642 
1643 		if (probe_func_ptr) {
1644 			do {
1645 				probe_func = probe_func_ptr->func;
1646 
1647 				if (probe_func == user_event_ftrace)
1648 					status |= EVENT_STATUS_FTRACE;
1649 #ifdef CONFIG_PERF_EVENTS
1650 				else if (probe_func == user_event_perf)
1651 					status |= EVENT_STATUS_PERF;
1652 #endif
1653 				else
1654 					status |= EVENT_STATUS_OTHER;
1655 			} while ((++probe_func_ptr)->func);
1656 		}
1657 
1658 		rcu_read_unlock_sched();
1659 	}
1660 
1661 	user->status = status;
1662 
1663 	user_event_enabler_update(user);
1664 }
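
/*
 * E.g. with both an ftrace and a perf probe attached, the status byte
 * becomes EVENT_STATUS_FTRACE | EVENT_STATUS_PERF, and each registered
 * enable address then has its bit rewritten by
 * user_event_enabler_update().
 */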
1665 
1666 /*
1667  * Register callback for our events from tracing sub-systems.
1668  */
1669 static int user_event_reg(struct trace_event_call *call,
1670 			  enum trace_reg type,
1671 			  void *data)
1672 {
1673 	struct user_event *user = (struct user_event *)call->data;
1674 	int ret = 0;
1675 
1676 	if (!user)
1677 		return -ENOENT;
1678 
1679 	switch (type) {
1680 	case TRACE_REG_REGISTER:
1681 		ret = tracepoint_probe_register(call->tp,
1682 						call->class->probe,
1683 						data);
1684 		if (!ret)
1685 			goto inc;
1686 		break;
1687 
1688 	case TRACE_REG_UNREGISTER:
1689 		tracepoint_probe_unregister(call->tp,
1690 					    call->class->probe,
1691 					    data);
1692 		goto dec;
1693 
1694 #ifdef CONFIG_PERF_EVENTS
1695 	case TRACE_REG_PERF_REGISTER:
1696 		ret = tracepoint_probe_register(call->tp,
1697 						call->class->perf_probe,
1698 						data);
1699 		if (!ret)
1700 			goto inc;
1701 		break;
1702 
1703 	case TRACE_REG_PERF_UNREGISTER:
1704 		tracepoint_probe_unregister(call->tp,
1705 					    call->class->perf_probe,
1706 					    data);
1707 		goto dec;
1708 
1709 	case TRACE_REG_PERF_OPEN:
1710 	case TRACE_REG_PERF_CLOSE:
1711 	case TRACE_REG_PERF_ADD:
1712 	case TRACE_REG_PERF_DEL:
1713 		break;
1714 #endif
1715 	}
1716 
1717 	return ret;
1718 inc:
1719 	user_event_get(user);
1720 	update_enable_bit_for(user);
1721 	return 0;
1722 dec:
1723 	update_enable_bit_for(user);
1724 	user_event_put(user, true);
1725 	return 0;
1726 }
1727 
1728 static int user_event_create(const char *raw_command)
1729 {
1730 	struct user_event_group *group;
1731 	struct user_event *user;
1732 	char *name;
1733 	int ret;
1734 
1735 	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
1736 		return -ECANCELED;
1737 
1738 	raw_command += USER_EVENTS_PREFIX_LEN;
1739 	raw_command = skip_spaces(raw_command);
1740 
1741 	name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
1742 
1743 	if (!name)
1744 		return -ENOMEM;
1745 
1746 	group = current_user_event_group();
1747 
1748 	if (!group) {
1749 		kfree(name);
1750 		return -ENOENT;
1751 	}
1752 
1753 	mutex_lock(&group->reg_mutex);
1754 
1755 	/* Dyn events persist, otherwise they would be cleaned up immediately */
1756 	ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
1757 
1758 	if (!ret)
1759 		user_event_put(user, false);
1760 
1761 	mutex_unlock(&group->reg_mutex);
1762 
1763 	if (ret)
1764 		kfree(name);
1765 
1766 	return ret;
1767 }
1768 
1769 static int user_event_show(struct seq_file *m, struct dyn_event *ev)
1770 {
1771 	struct user_event *user = container_of(ev, struct user_event, devent);
1772 	struct ftrace_event_field *field;
1773 	struct list_head *head;
1774 	int depth = 0;
1775 
1776 	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1777 
1778 	head = trace_get_fields(&user->call);
1779 
1780 	list_for_each_entry_reverse(field, head, link) {
1781 		if (depth == 0)
1782 			seq_puts(m, " ");
1783 		else
1784 			seq_puts(m, "; ");
1785 
1786 		seq_printf(m, "%s %s", field->type, field->name);
1787 
1788 		if (str_has_prefix(field->type, "struct "))
1789 			seq_printf(m, " %d", field->size);
1790 
1791 		depth++;
1792 	}
1793 
1794 	seq_puts(m, "\n");
1795 
1796 	return 0;
1797 }
1798 
1799 static bool user_event_is_busy(struct dyn_event *ev)
1800 {
1801 	struct user_event *user = container_of(ev, struct user_event, devent);
1802 
1803 	return !user_event_last_ref(user);
1804 }
1805 
1806 static int user_event_free(struct dyn_event *ev)
1807 {
1808 	struct user_event *user = container_of(ev, struct user_event, devent);
1809 
1810 	if (!user_event_last_ref(user))
1811 		return -EBUSY;
1812 
1813 	if (!user_event_capable(user->reg_flags))
1814 		return -EPERM;
1815 
1816 	return destroy_user_event(user);
1817 }
1818 
1819 static bool user_field_match(struct ftrace_event_field *field, int argc,
1820 			     const char **argv, int *iout)
1821 {
1822 	char *field_name = NULL, *dyn_field_name = NULL;
1823 	bool colon = false, match = false;
1824 	int dyn_len, len;
1825 
1826 	if (*iout >= argc)
1827 		return false;
1828 
1829 	dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1830 					    0, &colon);
1831 
1832 	len = user_field_set_string(field, field_name, 0, colon);
1833 
1834 	if (dyn_len != len)
1835 		return false;
1836 
1837 	dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
1838 	field_name = kmalloc(len, GFP_KERNEL);
1839 
1840 	if (!dyn_field_name || !field_name)
1841 		goto out;
1842 
1843 	user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1844 				  dyn_len, &colon);
1845 
1846 	user_field_set_string(field, field_name, len, colon);
1847 
1848 	match = strcmp(dyn_field_name, field_name) == 0;
1849 out:
1850 	kfree(dyn_field_name);
1851 	kfree(field_name);
1852 
1853 	return match;
1854 }
1855 
1856 static bool user_fields_match(struct user_event *user, int argc,
1857 			      const char **argv)
1858 {
1859 	struct ftrace_event_field *field;
1860 	struct list_head *head = &user->fields;
1861 	int i = 0;
1862 
1863 	list_for_each_entry_reverse(field, head, link) {
1864 		if (!user_field_match(field, argc, argv, &i))
1865 			return false;
1866 	}
1867 
1868 	if (i != argc)
1869 		return false;
1870 
1871 	return true;
1872 }
1873 
1874 static bool user_event_match(const char *system, const char *event,
1875 			     int argc, const char **argv, struct dyn_event *ev)
1876 {
1877 	struct user_event *user = container_of(ev, struct user_event, devent);
1878 	bool match;
1879 
1880 	match = strcmp(EVENT_NAME(user), event) == 0 &&
1881 		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
1882 
1883 	if (match && argc > 0)
1884 		match = user_fields_match(user, argc, argv);
1885 	else if (match && argc == 0)
1886 		match = list_empty(&user->fields);
1887 
1888 	return match;
1889 }
1890 
1891 static struct dyn_event_operations user_event_dops = {
1892 	.create = user_event_create,
1893 	.show = user_event_show,
1894 	.is_busy = user_event_is_busy,
1895 	.free = user_event_free,
1896 	.match = user_event_match,
1897 };
1898 
1899 static int user_event_trace_register(struct user_event *user)
1900 {
1901 	int ret;
1902 
1903 	ret = register_trace_event(&user->call.event);
1904 
1905 	if (!ret)
1906 		return -ENODEV;
1907 
1908 	ret = user_event_set_call_visible(user, true);
1909 
1910 	if (ret)
1911 		unregister_trace_event(&user->call.event);
1912 
1913 	return ret;
1914 }
1915 
1916 /*
1917  * Parses the event name, arguments and flags, then registers if successful.
1918  * The name buffer lifetime is owned by this method for success cases only.
1919  * Upon success the returned user_event has its ref count increased by 1.
1920  */
1921 static int user_event_parse(struct user_event_group *group, char *name,
1922 			    char *args, char *flags,
1923 			    struct user_event **newuser, int reg_flags)
1924 {
1925 	int ret;
1926 	u32 key;
1927 	struct user_event *user;
1928 	int argc = 0;
1929 	char **argv;
1930 
1931 	/* Currently don't support any text-based flags */
1932 	if (flags != NULL)
1933 		return -EINVAL;
1934 
1935 	if (!user_event_capable(reg_flags))
1936 		return -EPERM;
1937 
1938 	/* Prevent dyn_event from racing */
1939 	mutex_lock(&event_mutex);
1940 	user = find_user_event(group, name, &key);
1941 	mutex_unlock(&event_mutex);
1942 
1943 	if (user) {
1944 		if (args) {
1945 			argv = argv_split(GFP_KERNEL, args, &argc);
1946 			if (!argv) {
1947 				ret = -ENOMEM;
1948 				goto error;
1949 			}
1950 
1951 			ret = user_fields_match(user, argc, (const char **)argv);
1952 			argv_free(argv);
1953 
1954 		} else
1955 			ret = list_empty(&user->fields);
1956 
1957 		if (ret) {
1958 			*newuser = user;
1959 			/*
1960 			 * Name is allocated by the caller; free it since the event already exists.
1961 			 * The caller only worries about freeing in failure cases.
1962 			 */
1963 			kfree(name);
1964 		} else {
1965 			ret = -EADDRINUSE;
1966 			goto error;
1967 		}
1968 
1969 		return 0;
1970 error:
1971 		user_event_put(user, false);
1972 		return ret;
1973 	}
1974 
1975 	user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
1976 
1977 	if (!user)
1978 		return -ENOMEM;
1979 
1980 	INIT_LIST_HEAD(&user->class.fields);
1981 	INIT_LIST_HEAD(&user->fields);
1982 	INIT_LIST_HEAD(&user->validators);
1983 
1984 	user->group = group;
1985 	user->tracepoint.name = name;
1986 
1987 	ret = user_event_parse_fields(user, args);
1988 
1989 	if (ret)
1990 		goto put_user;
1991 
1992 	ret = user_event_create_print_fmt(user);
1993 
1994 	if (ret)
1995 		goto put_user;
1996 
1997 	user->call.data = user;
1998 	user->call.class = &user->class;
1999 	user->call.name = name;
2000 	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
2001 	user->call.tp = &user->tracepoint;
2002 	user->call.event.funcs = &user_event_funcs;
2003 	user->class.system = group->system_name;
2004 
2005 	user->class.fields_array = user_event_fields_array;
2006 	user->class.get_fields = user_event_get_fields;
2007 	user->class.reg = user_event_reg;
2008 	user->class.probe = user_event_ftrace;
2009 #ifdef CONFIG_PERF_EVENTS
2010 	user->class.perf_probe = user_event_perf;
2011 #endif
2012 
2013 	mutex_lock(&event_mutex);
2014 
2015 	if (current_user_events >= max_user_events) {
2016 		ret = -EMFILE;
2017 		goto put_user_lock;
2018 	}
2019 
2020 	ret = user_event_trace_register(user);
2021 
2022 	if (ret)
2023 		goto put_user_lock;
2024 
2025 	user->reg_flags = reg_flags;
2026 
2027 	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
2028 		/* Ensure we track self ref and caller ref (2) */
2029 		refcount_set(&user->refcnt, 2);
2030 	} else {
2031 		/* Ensure we track only caller ref (1) */
2032 		refcount_set(&user->refcnt, 1);
2033 	}
2034 
2035 	dyn_event_init(&user->devent, &user_event_dops);
2036 	dyn_event_add(&user->devent, &user->call);
2037 	hash_add(group->register_table, &user->node, key);
2038 	current_user_events++;
2039 
2040 	mutex_unlock(&event_mutex);
2041 
2042 	*newuser = user;
2043 	return 0;
2044 put_user_lock:
2045 	mutex_unlock(&event_mutex);
2046 put_user:
2047 	user_event_destroy_fields(user);
2048 	user_event_destroy_validators(user);
2049 	kfree(user->call.print_fmt);
2050 	kfree(user);
2051 	return ret;
2052 }
2053 
2054 /*
2055  * Deletes a previously created event if it is no longer being used.
2056  */
2057 static int delete_user_event(struct user_event_group *group, char *name)
2058 {
2059 	u32 key;
2060 	struct user_event *user = find_user_event(group, name, &key);
2061 
2062 	if (!user)
2063 		return -ENOENT;
2064 
2065 	user_event_put(user, true);
2066 
2067 	if (!user_event_last_ref(user))
2068 		return -EBUSY;
2069 
2070 	if (!user_event_capable(user->reg_flags))
2071 		return -EPERM;
2072 
2073 	return destroy_user_event(user);
2074 }
2075 
2076 /*
2077  * Validates the user payload and writes via iterator.
2078  */
2079 static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
2080 {
2081 	struct user_event_file_info *info = file->private_data;
2082 	struct user_event_refs *refs;
2083 	struct user_event *user = NULL;
2084 	struct tracepoint *tp;
2085 	ssize_t ret = i->count;
2086 	int idx;
2087 
2088 	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
2089 		return -EFAULT;
2090 
2091 	if (idx < 0)
2092 		return -EINVAL;
2093 
2094 	rcu_read_lock_sched();
2095 
2096 	refs = rcu_dereference_sched(info->refs);
2097 
2098 	/*
2099 	 * The refs->events array is protected by RCU, and new items may be
2100 	 * added. But an entry retrieved by indexing into the events array
2101 	 * remains immutable while the file is open.
2102 	 */
2103 	if (likely(refs && idx < refs->count))
2104 		user = refs->events[idx];
2105 
2106 	rcu_read_unlock_sched();
2107 
2108 	if (unlikely(user == NULL))
2109 		return -ENOENT;
2110 
2111 	if (unlikely(i->count < user->min_size))
2112 		return -EINVAL;
2113 
2114 	tp = &user->tracepoint;
2115 
2116 	/*
2117 	 * It's possible key.enabled becomes disabled after this check; however,
2118 	 * we don't mind if a few extra events slip through in that window.
2119 	 */
2120 	if (likely(atomic_read(&tp->key.enabled) > 0)) {
2121 		struct tracepoint_func *probe_func_ptr;
2122 		user_event_func_t probe_func;
2123 		struct iov_iter copy;
2124 		void *tpdata;
2125 		bool faulted;
2126 
2127 		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
2128 			return -EFAULT;
2129 
2130 		faulted = false;
2131 
2132 		rcu_read_lock_sched();
2133 
2134 		probe_func_ptr = rcu_dereference_sched(tp->funcs);
2135 
2136 		if (probe_func_ptr) {
2137 			do {
2138 				copy = *i;
2139 				probe_func = probe_func_ptr->func;
2140 				tpdata = probe_func_ptr->data;
2141 				probe_func(user, &copy, tpdata, &faulted);
2142 			} while ((++probe_func_ptr)->func);
2143 		}
2144 
2145 		rcu_read_unlock_sched();
2146 
2147 		if (unlikely(faulted))
2148 			return -EFAULT;
2149 	} else
2150 		return -EBADF;
2151 
2152 	return ret;
2153 }
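
/*
 * Illustrative sketch (assumes userspace already holds the write_index
 * returned by DIAG_IOCSREG): a write begins with the 32-bit index read
 * above, followed by the event payload the probes consume, e.g.:
 *
 *   struct iovec io[2];
 *   __u32 count = 1;                   // hypothetical u32 payload field
 *
 *   io[0].iov_base = &write_index;     // consumed by this function
 *   io[0].iov_len  = sizeof(write_index);
 *   io[1].iov_base = &count;           // checked against user->min_size
 *   io[1].iov_len  = sizeof(count);
 *
 *   writev(data_fd, io, 2);
 */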
2154 
2155 static int user_events_open(struct inode *node, struct file *file)
2156 {
2157 	struct user_event_group *group;
2158 	struct user_event_file_info *info;
2159 
2160 	group = current_user_event_group();
2161 
2162 	if (!group)
2163 		return -ENOENT;
2164 
2165 	info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
2166 
2167 	if (!info)
2168 		return -ENOMEM;
2169 
2170 	info->group = group;
2171 
2172 	file->private_data = info;
2173 
2174 	return 0;
2175 }
2176 
2177 static ssize_t user_events_write(struct file *file, const char __user *ubuf,
2178 				 size_t count, loff_t *ppos)
2179 {
2180 	struct iovec iov;
2181 	struct iov_iter i;
2182 
2183 	if (unlikely(*ppos != 0))
2184 		return -EFAULT;
2185 
2186 	if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
2187 					 count, &iov, &i)))
2188 		return -EFAULT;
2189 
2190 	return user_events_write_core(file, &i);
2191 }
2192 
2193 static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
2194 {
2195 	return user_events_write_core(kp->ki_filp, i);
2196 }
2197 
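/*
 * Associates the event with this file and returns its index for writes.
 * The refs array is grown copy-on-write: a larger copy is allocated, the
 * new entry appended, the copy published with rcu_assign_pointer(), and
 * the old array reclaimed with kfree_rcu(), so lockless readers in
 * user_events_write_core() always observe a consistent array.
 */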
2198 static int user_events_ref_add(struct user_event_file_info *info,
2199 			       struct user_event *user)
2200 {
2201 	struct user_event_group *group = info->group;
2202 	struct user_event_refs *refs, *new_refs;
2203 	int i, size, count = 0;
2204 
2205 	refs = rcu_dereference_protected(info->refs,
2206 					 lockdep_is_held(&group->reg_mutex));
2207 
2208 	if (refs) {
2209 		count = refs->count;
2210 
2211 		for (i = 0; i < count; ++i)
2212 			if (refs->events[i] == user)
2213 				return i;
2214 	}
2215 
2216 	size = struct_size(refs, events, count + 1);
2217 
2218 	new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
2219 
2220 	if (!new_refs)
2221 		return -ENOMEM;
2222 
2223 	new_refs->count = count + 1;
2224 
2225 	for (i = 0; i < count; ++i)
2226 		new_refs->events[i] = refs->events[i];
2227 
2228 	new_refs->events[i] = user_event_get(user);
2229 
2230 	rcu_assign_pointer(info->refs, new_refs);
2231 
2232 	if (refs)
2233 		kfree_rcu(refs, rcu);
2234 
2235 	return i;
2236 }
2237 
2238 static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
2239 {
2240 	u32 size;
2241 	long ret;
2242 
2243 	ret = get_user(size, &ureg->size);
2244 
2245 	if (ret)
2246 		return ret;
2247 
2248 	if (size > PAGE_SIZE)
2249 		return -E2BIG;
2250 
2251 	if (size < offsetofend(struct user_reg, write_index))
2252 		return -EINVAL;
2253 
2254 	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2255 
2256 	if (ret)
2257 		return ret;
2258 
2259 	/* Ensure only valid flags */
2260 	if (kreg->flags & ~(USER_EVENT_REG_MAX-1))
2261 		return -EINVAL;
2262 
2263 	/* Ensure supported size */
2264 	switch (kreg->enable_size) {
2265 	case 4:
2266 		/* 32-bit */
2267 		break;
2268 #if BITS_PER_LONG >= 64
2269 	case 8:
2270 		/* 64-bit */
2271 		break;
2272 #endif
2273 	default:
2274 		return -EINVAL;
2275 	}
2276 
2277 	/* Ensure natural alignment */
2278 	if (kreg->enable_addr % kreg->enable_size)
2279 		return -EINVAL;
2280 
2281 	/* Ensure bit range for size */
2282 	if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
2283 		return -EINVAL;
2284 
2285 	/* Ensure accessible */
2286 	if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
2287 		       kreg->enable_size))
2288 		return -EFAULT;
2289 
2290 	kreg->size = size;
2291 
2292 	return 0;
2293 }
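
/*
 * Illustrative sketch of a registration that passes the checks above
 * (assumes the uapi struct user_reg layout from <linux/user_events.h>):
 * a 4-byte enable word at a naturally aligned address, flipping bit 0:
 *
 *   __u32 enables;                         // hypothetical userspace word
 *   struct user_reg reg = {0};
 *
 *   reg.size = sizeof(reg);
 *   reg.enable_bit = 0;                    // must be < enable_size * 8
 *   reg.enable_size = sizeof(enables);     // 4, or 8 on 64-bit kernels
 *   reg.enable_addr = (__u64)(uintptr_t)&enables; // naturally aligned
 *   reg.name_args = (__u64)(uintptr_t)"test u32 count";
 */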
2294 
2295 /*
2296  * Registers a user_event on behalf of a user process.
2297  */
2298 static long user_events_ioctl_reg(struct user_event_file_info *info,
2299 				  unsigned long uarg)
2300 {
2301 	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
2302 	struct user_reg reg;
2303 	struct user_event *user;
2304 	struct user_event_enabler *enabler;
2305 	char *name;
2306 	long ret;
2307 	int write_result;
2308 
2309 	ret = user_reg_get(ureg, &reg);
2310 
2311 	if (ret)
2312 		return ret;
2313 
2314 	/*
2315 	 * Prevent users from using the same address and bit multiple times
2316 	 * within the same mm address space. This can cause unexpected behavior
2317 	 * for user processes that is far easier to debug if this is explicitly
2318 	 * an error upon registering.
2319 	 */
2320 	if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
2321 					      reg.enable_bit))
2322 		return -EADDRINUSE;
2323 
2324 	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
2325 			    MAX_EVENT_DESC);
2326 
2327 	if (IS_ERR(name)) {
2328 		ret = PTR_ERR(name);
2329 		return ret;
2330 	}
2331 
2332 	ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
2333 
2334 	if (ret) {
2335 		kfree(name);
2336 		return ret;
2337 	}
2338 
2339 	ret = user_events_ref_add(info, user);
2340 
2341 	/* No longer need the parse ref; ref_add either took its own ref or failed */
2342 	user_event_put(user, false);
2343 
2344 	/* A non-negative return is a valid write index */
2345 	if (ret < 0)
2346 		return ret;
2347 
2348 	/*
2349 	 * user_events_ref_add succeeded:
2350 	 * At this point we have a user_event; its lifetime is bound by the
2351 	 * reference count, not this file. If anything fails, the user_event
2352 	 * still has a reference until the file is released. During release
2353 	 * any remaining references (from user_events_ref_add) are decremented.
2354 	 *
2355 	 * Attempt to create an enabler, whose lifetime is tied to the event
2356 	 * in the same way. Once the task that caused the enabler to be
2357 	 * created exits or issues exec() then the enablers it has created
2358 	 * will be destroyed and the ref to the event will be decremented.
2359 	 */
2360 	enabler = user_event_enabler_create(&reg, user, &write_result);
2361 
2362 	if (!enabler)
2363 		return -ENOMEM;
2364 
2365 	/* Write failed/faulted, give error back to caller */
2366 	if (write_result)
2367 		return write_result;
2368 
2369 	put_user((u32)ret, &ureg->write_index);
2370 
2371 	return 0;
2372 }
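
/*
 * Illustrative end-to-end registration flow (the tracefs path assumes the
 * default mount point; reg is filled in as sketched after user_reg_get()):
 *
 *   int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *   if (ioctl(fd, DIAG_IOCSREG, &reg) == 0) {
 *           // reg.write_index now prefixes every write for this event;
 *           // the kernel sets/clears reg.enable_bit at reg.enable_addr
 *           // as tracers attach and detach.
 *   }
 */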
2373 
2374 /*
2375  * Deletes a user_event on behalf of a user process.
2376  */
2377 static long user_events_ioctl_del(struct user_event_file_info *info,
2378 				  unsigned long uarg)
2379 {
2380 	void __user *ubuf = (void __user *)uarg;
2381 	char *name;
2382 	long ret;
2383 
2384 	name = strndup_user(ubuf, MAX_EVENT_DESC);
2385 
2386 	if (IS_ERR(name))
2387 		return PTR_ERR(name);
2388 
2389 	/* event_mutex prevents dyn_event from racing */
2390 	mutex_lock(&event_mutex);
2391 	ret = delete_user_event(info->group, name);
2392 	mutex_unlock(&event_mutex);
2393 
2394 	kfree(name);
2395 
2396 	return ret;
2397 }
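
/*
 * Illustrative sketch: DIAG_IOCSDEL takes just the event name and fails
 * with -EBUSY while any references to the event remain (see
 * delete_user_event() above):
 *
 *   ioctl(data_fd, DIAG_IOCSDEL, "test");
 */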
2398 
2399 static long user_unreg_get(struct user_unreg __user *ureg,
2400 			   struct user_unreg *kreg)
2401 {
2402 	u32 size;
2403 	long ret;
2404 
2405 	ret = get_user(size, &ureg->size);
2406 
2407 	if (ret)
2408 		return ret;
2409 
2410 	if (size > PAGE_SIZE)
2411 		return -E2BIG;
2412 
2413 	if (size < offsetofend(struct user_unreg, disable_addr))
2414 		return -EINVAL;
2415 
2416 	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2417 
2418 	/* Ensure no reserved values, since we don't support any yet */
2419 	if (!ret && (kreg->__reserved || kreg->__reserved2))
2420 		return -EINVAL;
2421 
2422 	return ret;
2423 }
2424 
2425 static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
2426 				   unsigned long uaddr, unsigned char bit,
2427 				   unsigned long flags)
2428 {
2429 	struct user_event_enabler enabler;
2430 	int result;
2431 	int attempt = 0;
2432 
2433 	memset(&enabler, 0, sizeof(enabler));
2434 	enabler.addr = uaddr;
2435 	enabler.values = bit | flags;
2436 retry:
2437 	/* Prevents state changes from racing with new enablers */
2438 	mutex_lock(&event_mutex);
2439 
2440 	/* Force the bit to be cleared, since no event is attached */
2441 	mmap_read_lock(user_mm->mm);
2442 	result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
2443 	mmap_read_unlock(user_mm->mm);
2444 
2445 	mutex_unlock(&event_mutex);
2446 
2447 	if (result) {
2448 		/* Attempt to fault-in and retry if it worked */
2449 		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
2450 			goto retry;
2451 	}
2452 
2453 	return result;
2454 }
2455 
2456 /*
2457  * Unregisters an enablement address/bit within a task/user mm.
2458  */
2459 static long user_events_ioctl_unreg(unsigned long uarg)
2460 {
2461 	struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
2462 	struct user_event_mm *mm = current->user_event_mm;
2463 	struct user_event_enabler *enabler, *next;
2464 	struct user_unreg reg;
2465 	unsigned long flags;
2466 	long ret;
2467 
2468 	ret = user_unreg_get(ureg, &reg);
2469 
2470 	if (ret)
2471 		return ret;
2472 
2473 	if (!mm)
2474 		return -ENOENT;
2475 
2476 	flags = 0;
2477 	ret = -ENOENT;
2478 
2479 	/*
2480 	 * The freeing and faulting flags indicate whether the enabler is in
2481 	 * use at all. When faulting is set, a page-fault is being handled
2482 	 * asynchronously; if freeing is set when that fault completes, the
2483 	 * enabler will be destroyed then. If no async fault is in flight, we
2484 	 * can destroy it now, since we hold the event_mutex during these checks.
2485 	 */
2486 	mutex_lock(&event_mutex);
2487 
2488 	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
2489 		if (enabler->addr == reg.disable_addr &&
2490 		    ENABLE_BIT(enabler) == reg.disable_bit) {
2491 			set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
2492 
2493 			/* We must keep compat flags for the clear */
2494 			flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;
2495 
2496 			if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
2497 				user_event_enabler_destroy(enabler, true);
2498 
2499 			/* Removed at least one */
2500 			ret = 0;
2501 		}
2502 	}
2503 
2504 	mutex_unlock(&event_mutex);
2505 
2506 	/* Ensure bit is now cleared for user, regardless of event status */
2507 	if (!ret)
2508 		ret = user_event_mm_clear_bit(mm, reg.disable_addr,
2509 					      reg.disable_bit, flags);
2510 
2511 	return ret;
2512 }
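
/*
 * Illustrative sketch (assumes the uapi struct user_unreg layout from
 * <linux/user_events.h>): tear down the enabler registered earlier so the
 * kernel stops updating the enable word:
 *
 *   struct user_unreg unreg = {0};
 *
 *   unreg.size = sizeof(unreg);
 *   unreg.disable_bit = 0;                 // bit used at registration
 *   unreg.disable_addr = (__u64)(uintptr_t)&enables;
 *
 *   ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
 */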
2513 
2514 /*
2515  * Handles the ioctl from user mode to register or alter operations.
2516  */
2517 static long user_events_ioctl(struct file *file, unsigned int cmd,
2518 			      unsigned long uarg)
2519 {
2520 	struct user_event_file_info *info = file->private_data;
2521 	struct user_event_group *group = info->group;
2522 	long ret = -ENOTTY;
2523 
2524 	switch (cmd) {
2525 	case DIAG_IOCSREG:
2526 		mutex_lock(&group->reg_mutex);
2527 		ret = user_events_ioctl_reg(info, uarg);
2528 		mutex_unlock(&group->reg_mutex);
2529 		break;
2530 
2531 	case DIAG_IOCSDEL:
2532 		mutex_lock(&group->reg_mutex);
2533 		ret = user_events_ioctl_del(info, uarg);
2534 		mutex_unlock(&group->reg_mutex);
2535 		break;
2536 
2537 	case DIAG_IOCSUNREG:
2538 		mutex_lock(&group->reg_mutex);
2539 		ret = user_events_ioctl_unreg(uarg);
2540 		mutex_unlock(&group->reg_mutex);
2541 		break;
2542 	}
2543 
2544 	return ret;
2545 }
2546 
2547 /*
2548  * Handles the final close of the file from user mode.
2549  */
2550 static int user_events_release(struct inode *node, struct file *file)
2551 {
2552 	struct user_event_file_info *info = file->private_data;
2553 	struct user_event_group *group;
2554 	struct user_event_refs *refs;
2555 	int i;
2556 
2557 	if (!info)
2558 		return -EINVAL;
2559 
2560 	group = info->group;
2561 
2562 	/*
2563 	 * Ensure refs cannot change underneath us by taking the register
2564 	 * mutex during the final freeing of the references.
2565 	 */
2566 	mutex_lock(&group->reg_mutex);
2567 
2568 	refs = info->refs;
2569 
2570 	if (!refs)
2571 		goto out;
2572 
2573 	/*
2574 	 * The lifetime of refs has reached an end; it is tied to this file.
2575 	 * The underlying user_events are ref counted and cannot be freed while
2576 	 * we hold references. After this decrement, they may be freed elsewhere.
2577 	 */
2578 	for (i = 0; i < refs->count; ++i)
2579 		user_event_put(refs->events[i], false);
2580 
2581 out:
2582 	file->private_data = NULL;
2583 
2584 	mutex_unlock(&group->reg_mutex);
2585 
2586 	kfree(refs);
2587 	kfree(info);
2588 
2589 	return 0;
2590 }
2591 
2592 static const struct file_operations user_data_fops = {
2593 	.open		= user_events_open,
2594 	.write		= user_events_write,
2595 	.write_iter	= user_events_write_iter,
2596 	.unlocked_ioctl	= user_events_ioctl,
2597 	.release	= user_events_release,
2598 };
2599 
2600 static void *user_seq_start(struct seq_file *m, loff_t *pos)
2601 {
2602 	if (*pos)
2603 		return NULL;
2604 
2605 	return (void *)1;
2606 }
2607 
2608 static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
2609 {
2610 	++*pos;
2611 	return NULL;
2612 }
2613 
2614 static void user_seq_stop(struct seq_file *m, void *p)
2615 {
2616 }
2617 
2618 static int user_seq_show(struct seq_file *m, void *p)
2619 {
2620 	struct user_event_group *group = m->private;
2621 	struct user_event *user;
2622 	char status;
2623 	int i, active = 0, busy = 0;
2624 
2625 	if (!group)
2626 		return -EINVAL;
2627 
2628 	mutex_lock(&group->reg_mutex);
2629 
2630 	hash_for_each(group->register_table, i, user, node) {
2631 		status = user->status;
2632 
2633 		seq_printf(m, "%s", EVENT_NAME(user));
2634 
2635 		/* Mark events with attached probes and list their users */
2636 		if (status != 0) {
2637 			seq_puts(m, " # Used by");
2640 			if (status & EVENT_STATUS_FTRACE)
2641 				seq_puts(m, " ftrace");
2642 			if (status & EVENT_STATUS_PERF)
2643 				seq_puts(m, " perf");
2644 			if (status & EVENT_STATUS_OTHER)
2645 				seq_puts(m, " other");
2646 			busy++;
2647 		}
2648 
2649 		seq_puts(m, "\n");
2650 		active++;
2651 	}
2652 
2653 	mutex_unlock(&group->reg_mutex);
2654 
2655 	seq_puts(m, "\n");
2656 	seq_printf(m, "Active: %d\n", active);
2657 	seq_printf(m, "Busy: %d\n", busy);
2658 
2659 	return 0;
2660 }
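
/*
 * Example of the resulting status output (event name and counts are
 * illustrative): each busy event is suffixed with the probes using it,
 * followed by summary counts:
 *
 *   test # Used by ftrace
 *
 *   Active: 1
 *   Busy: 1
 */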
2661 
2662 static const struct seq_operations user_seq_ops = {
2663 	.start	= user_seq_start,
2664 	.next	= user_seq_next,
2665 	.stop	= user_seq_stop,
2666 	.show	= user_seq_show,
2667 };
2668 
2669 static int user_status_open(struct inode *node, struct file *file)
2670 {
2671 	struct user_event_group *group;
2672 	int ret;
2673 
2674 	group = current_user_event_group();
2675 
2676 	if (!group)
2677 		return -ENOENT;
2678 
2679 	ret = seq_open(file, &user_seq_ops);
2680 
2681 	if (!ret) {
2682 		/* Chain group to seq_file */
2683 		struct seq_file *m = file->private_data;
2684 
2685 		m->private = group;
2686 	}
2687 
2688 	return ret;
2689 }
2690 
2691 static const struct file_operations user_status_fops = {
2692 	.open		= user_status_open,
2693 	.read		= seq_read,
2694 	.llseek		= seq_lseek,
2695 	.release	= seq_release,
2696 };
2697 
2698 /*
2699  * Creates a set of tracefs files to allow user mode interactions.
2700  */
2701 static int create_user_tracefs(void)
2702 {
2703 	struct dentry *edata, *estatus;
2704 
2705 	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
2706 				    NULL, NULL, &user_data_fops);
2707 
2708 	if (!edata) {
2709 		pr_warn("Could not create tracefs 'user_events_data' entry\n");
2710 		goto err;
2711 	}
2712 
2713 	estatus = tracefs_create_file("user_events_status", TRACE_MODE_READ,
2714 				      NULL, NULL, &user_status_fops);
2715 
2716 	if (!estatus) {
2717 		tracefs_remove(edata);
2718 		pr_warn("Could not create tracefs 'user_events_status' entry\n");
2719 		goto err;
2720 	}
2721 
2722 	return 0;
2723 err:
2724 	return -ENODEV;
2725 }
2726 
2727 static int set_max_user_events_sysctl(struct ctl_table *table, int write,
2728 				      void *buffer, size_t *lenp, loff_t *ppos)
2729 {
2730 	int ret;
2731 
2732 	mutex_lock(&event_mutex);
2733 
2734 	ret = proc_douintvec(table, write, buffer, lenp, ppos);
2735 
2736 	mutex_unlock(&event_mutex);
2737 
2738 	return ret;
2739 }
2740 
2741 static struct ctl_table user_event_sysctls[] = {
2742 	{
2743 		.procname	= "user_events_max",
2744 		.data		= &max_user_events,
2745 		.maxlen		= sizeof(unsigned int),
2746 		.mode		= 0644,
2747 		.proc_handler	= set_max_user_events_sysctl,
2748 	},
2749 	{}
2750 };
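
/*
 * The limit above is exposed as kernel.user_events_max; an administrator
 * can raise it at runtime, e.g.:
 *
 *   sysctl -w kernel.user_events_max=65536
 *
 * or by writing to /proc/sys/kernel/user_events_max.
 */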
2751 
2752 static int __init trace_events_user_init(void)
2753 {
2754 	int ret;
2755 
2756 	fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);
2757 
2758 	if (!fault_cache)
2759 		return -ENOMEM;
2760 
2761 	init_group = user_event_group_create();
2762 
2763 	if (!init_group) {
2764 		kmem_cache_destroy(fault_cache);
2765 		return -ENOMEM;
2766 	}
2767 
2768 	ret = create_user_tracefs();
2769 
2770 	if (ret) {
2771 		pr_warn("user_events could not register with tracefs\n");
2772 		user_event_group_destroy(init_group);
2773 		kmem_cache_destroy(fault_cache);
2774 		init_group = NULL;
2775 		return ret;
2776 	}
2777 
2778 	if (dyn_event_register(&user_event_dops))
2779 		pr_warn("user_events could not register with dyn_events\n");
2780 
2781 	register_sysctl_init("kernel", user_event_sysctls);
2782 
2783 	return 0;
2784 }
2785 
2786 fs_initcall(trace_events_user_init);
2787