xref: /linux/include/kunit/run-in-irq-context.h (revision f990ad67f0febc51274adb604d5bdeab0d06d024)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Helper function for testing code in interrupt contexts
4  *
5  * Copyright 2025 Google LLC
6  */
7 #ifndef _KUNIT_RUN_IN_IRQ_CONTEXT_H
8 #define _KUNIT_RUN_IN_IRQ_CONTEXT_H
9 
10 #include <kunit/test.h>
11 #include <linux/timekeeping.h>
12 #include <linux/hrtimer.h>
13 #include <linux/workqueue.h>
14 
/*
 * Shared state for one kunit_run_irq_test() run.  Lives on the caller's
 * stack; the embedded timer and work item are set up with the *_ONSTACK
 * variants and must be cancelled/flushed before the frame is torn down.
 */
struct kunit_irq_test_state {
	/* Test body to invoke from each context; returns false on failure. */
	bool (*func)(void *test_specific_state);
	/* Opaque argument passed through to @func on every invocation. */
	void *test_specific_state;
	/* Per-context failure flags; each is written only from its own context. */
	bool task_func_reported_failure;
	bool hardirq_func_reported_failure;
	bool softirq_func_reported_failure;
	/* Per-context invocation counters, read across contexts (hence atomic). */
	atomic_t task_func_calls;
	atomic_t hardirq_func_calls;
	atomic_t softirq_func_calls;
	/* Current timer period; grown by the timer handler on slow systems. */
	ktime_t interval;
	/* Hardirq-context entry point (HRTIMER_MODE_REL_HARD). */
	struct hrtimer timer;
	/* Softirq-context entry point, queued on system_bh_wq by the timer. */
	struct work_struct bh_work;
};
28 
/*
 * hrtimer callback: runs @func in hardirq context, then re-arms itself and
 * queues the BH work item so the softirq-context invocation follows.  Also
 * adaptively throttles the timer rate so the task and softirq contexts are
 * not starved on very slow systems.
 */
static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
{
	struct kunit_irq_test_state *state =
		container_of(timer, typeof(*state), timer);
	int task_calls, hardirq_calls, softirq_calls;

	/* HRTIMER_MODE_REL_HARD guarantees we are in hardirq context. */
	WARN_ON_ONCE(!in_hardirq());
	task_calls = atomic_read(&state->task_func_calls);
	hardirq_calls = atomic_inc_return(&state->hardirq_func_calls);
	softirq_calls = atomic_read(&state->softirq_func_calls);

	/*
	 * If the timer is firing too often for the softirq or task to ever have
	 * a chance to run, increase the timer interval.  This is needed on very
	 * slow systems.
	 */
	if (hardirq_calls >= 20 && (softirq_calls == 0 || task_calls == 0))
		state->interval = ktime_add_ns(state->interval, 250);

	if (!state->func(state->test_specific_state))
		state->hardirq_func_reported_failure = true;

	/* Re-arm relative to now (not the missed expiry) at the new interval. */
	hrtimer_forward_now(&state->timer, state->interval);
	/* system_bh_wq executes work in softirq context. */
	queue_work(system_bh_wq, &state->bh_work);
	return HRTIMER_RESTART;
}
55 
56 static void kunit_irq_test_bh_work_func(struct work_struct *work)
57 {
58 	struct kunit_irq_test_state *state =
59 		container_of(work, typeof(*state), bh_work);
60 
61 	WARN_ON_ONCE(!in_serving_softirq());
62 	atomic_inc(&state->softirq_func_calls);
63 
64 	if (!state->func(state->test_specific_state))
65 		state->softirq_func_reported_failure = true;
66 }
67 
68 /*
69  * Helper function which repeatedly runs the given @func in task, softirq, and
70  * hardirq context concurrently, and reports a failure to KUnit if any
71  * invocation of @func in any context returns false.  @func is passed
72  * @test_specific_state as its argument.  At most 3 invocations of @func will
73  * run concurrently: one in each of task, softirq, and hardirq context.  @func
74  * will continue running until either @max_iterations calls have been made (so
75  * long as at least one each runs in task, softirq, and hardirq contexts), or
76  * one second has passed.
77  *
78  * The main purpose of this interrupt context testing is to validate fallback
79  * code paths that run in contexts where the normal code path cannot be used,
80  * typically due to the FPU or vector registers already being in-use in kernel
81  * mode.  These code paths aren't covered when the test code is executed only by
82  * the KUnit test runner thread in task context.  The reason for the concurrency
83  * is because merely using hardirq context is not sufficient to reach a fallback
84  * code path on some architectures; the hardirq actually has to occur while the
85  * FPU or vector unit was already in-use in kernel mode.
86  *
87  * Another purpose of this testing is to detect issues with the architecture's
88  * irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
89  * especially in softirq context when the softirq may have interrupted a task
90  * already using kernel-mode FPU or vector (if the arch didn't prevent that).
91  * Crypto functions are often executed in softirqs, so this is important.
92  */
/*
 * @test: the KUnit test context, used to report failures
 * @func: test body; must return false to signal a failure
 * @max_iterations: total call budget across all three contexts
 * @test_specific_state: opaque pointer passed to every @func invocation
 */
static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
				      int max_iterations,
				      void *test_specific_state)
{
	struct kunit_irq_test_state state = {
		.func = func,
		.test_specific_state = test_specific_state,
		/*
		 * Start with a 5us timer interval.  If the system can't keep
		 * up, kunit_irq_test_timer_func() will increase it.
		 */
		.interval = us_to_ktime(5),
	};
	unsigned long end_jiffies;
	int task_calls, hardirq_calls, softirq_calls;

	/*
	 * Set up a hrtimer (the way we access hardirq context) and a work
	 * struct for the BH workqueue (the way we access softirq context).
	 * Both live on this stack frame, hence the *_ONSTACK variants and the
	 * mandatory cancel/flush below before returning.
	 */
	hrtimer_setup_on_stack(&state.timer, kunit_irq_test_timer_func,
			       CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	INIT_WORK_ONSTACK(&state.bh_work, kunit_irq_test_bh_work_func);

	/*
	 * Run for up to max_iterations (including at least one task, softirq,
	 * and hardirq), or 1 second, whichever comes first.
	 */
	end_jiffies = jiffies + HZ;
	hrtimer_start(&state.timer, state.interval, HRTIMER_MODE_REL_HARD);
	do {
		/* Task-context invocation; hardirq/softirq run concurrently. */
		if (!func(test_specific_state))
			state.task_func_reported_failure = true;

		task_calls = atomic_inc_return(&state.task_func_calls);
		hardirq_calls = atomic_read(&state.hardirq_func_calls);
		softirq_calls = atomic_read(&state.softirq_func_calls);
	} while ((task_calls + hardirq_calls + softirq_calls < max_iterations ||
		  (task_calls == 0 || hardirq_calls == 0 ||
		   softirq_calls == 0)) &&
		 !time_after(jiffies, end_jiffies));

	/*
	 * Cancel the timer and work.  Both objects are on this stack frame, so
	 * this must complete before the function returns.
	 */
	hrtimer_cancel(&state.timer);
	flush_work(&state.bh_work);

	/* Sanity check: the timer and BH functions should have been run. */
	KUNIT_EXPECT_GT_MSG(test, atomic_read(&state.hardirq_func_calls), 0,
			    "Timer function was not called");
	KUNIT_EXPECT_GT_MSG(test, atomic_read(&state.softirq_func_calls), 0,
			    "BH work function was not called");

	/* Check for failure reported from any context. */
	KUNIT_EXPECT_FALSE_MSG(test, state.task_func_reported_failure,
			       "Failure reported from task context");
	KUNIT_EXPECT_FALSE_MSG(test, state.hardirq_func_reported_failure,
			       "Failure reported from hardirq context");
	KUNIT_EXPECT_FALSE_MSG(test, state.softirq_func_reported_failure,
			       "Failure reported from softirq context");
}
153 
154 #endif /* _KUNIT_RUN_IN_IRQ_CONTEXT_H */
155