/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Helper function for testing code in interrupt contexts
 *
 * Copyright 2025 Google LLC
 */
#ifndef _KUNIT_RUN_IN_IRQ_CONTEXT_H
#define _KUNIT_RUN_IN_IRQ_CONTEXT_H

#include <kunit/test.h>
#include <linux/timekeeping.h>
#include <linux/hrtimer.h>
#include <linux/workqueue.h>

#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)

struct kunit_irq_test_state {
	bool (*func)(void *test_specific_state);
	void *test_specific_state;
	bool task_func_reported_failure;
	bool hardirq_func_reported_failure;
	bool softirq_func_reported_failure;
	atomic_t hardirq_func_calls;
	atomic_t softirq_func_calls;
	struct hrtimer timer;
	struct work_struct bh_work;
};

static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
{
	struct kunit_irq_test_state *state =
		container_of(timer, typeof(*state), timer);

	WARN_ON_ONCE(!in_hardirq());
	atomic_inc(&state->hardirq_func_calls);

	if (!state->func(state->test_specific_state))
		state->hardirq_func_reported_failure = true;
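
	/*
	 * Re-arm the timer for the next hardirq-context call, and queue the BH
	 * work so that a softirq-context call is made as well.
	 */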
	hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
	queue_work(system_bh_wq, &state->bh_work);
	return HRTIMER_RESTART;
}

static void kunit_irq_test_bh_work_func(struct work_struct *work)
{
	struct kunit_irq_test_state *state =
		container_of(work, typeof(*state), bh_work);

	WARN_ON_ONCE(!in_serving_softirq());
	atomic_inc(&state->softirq_func_calls);

	if (!state->func(state->test_specific_state))
		state->softirq_func_reported_failure = true;
}

/*
 * Helper function which repeatedly runs the given @func in task, softirq, and
 * hardirq context concurrently, and reports a failure to KUnit if any
 * invocation of @func in any context returns false.  @func is passed
 * @test_specific_state as its argument.  At most 3 invocations of @func will
 * run concurrently: one in each of task, softirq, and hardirq context.  @func
 * continues running until either @max_iterations calls have been made
 * (provided at least one call has run in each of task, softirq, and hardirq
 * context), or one second has passed.  An example usage sketch appears at the
 * end of this file.
 *
 * The main purpose of this interrupt-context testing is to validate fallback
 * code paths that run in contexts where the normal code path cannot be used,
 * typically because the FPU or vector registers are already in use in kernel
 * mode.  These code paths aren't covered when the test code is executed only by
 * the KUnit test runner thread in task context.  The reason for the concurrency
 * is that merely using hardirq context is not sufficient to reach a fallback
 * code path on some architectures; the hardirq actually has to occur while the
 * FPU or vector unit is already in use in kernel mode.
 *
 * Another purpose of this testing is to detect issues with the architecture's
 * irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
 * especially in softirq context when the softirq may have interrupted a task
 * already using kernel-mode FPU or vector (if the arch didn't prevent that).
 * Crypto functions are often executed in softirqs, so this is important.
 */
static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
				      int max_iterations,
				      void *test_specific_state)
{
	struct kunit_irq_test_state state = {
		.func = func,
		.test_specific_state = test_specific_state,
	};
	unsigned long end_jiffies;
	int hardirq_calls, softirq_calls;
	bool allctx = false;

	/*
	 * Set up an hrtimer (the way we access hardirq context) and a work
	 * struct for the BH workqueue (the way we access softirq context).
	 */
	hrtimer_setup_on_stack(&state.timer, kunit_irq_test_timer_func,
			       CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	INIT_WORK_ONSTACK(&state.bh_work, kunit_irq_test_bh_work_func);

	/*
	 * Run until max_iterations total calls have been made and at least one
	 * call has run in each of task, softirq, and hardirq context, or until
	 * 1 second has passed, whichever comes first.
	 */
	end_jiffies = jiffies + HZ;
	hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
		      HRTIMER_MODE_REL_HARD);
	for (int task_calls = 0, calls = 0;
	     ((calls < max_iterations) || !allctx) &&
	     !time_after(jiffies, end_jiffies);
	     task_calls++) {
		if (!func(test_specific_state))
			state.task_func_reported_failure = true;

		hardirq_calls = atomic_read(&state.hardirq_func_calls);
		softirq_calls = atomic_read(&state.softirq_func_calls);
		calls = task_calls + hardirq_calls + softirq_calls;
		allctx = (task_calls > 0) && (hardirq_calls > 0) &&
			 (softirq_calls > 0);
	}

	/* Cancel the timer and work. */
	hrtimer_cancel(&state.timer);
	flush_work(&state.bh_work);

	/* Sanity check: the timer and BH functions should have been run. */
	KUNIT_EXPECT_GT_MSG(test, atomic_read(&state.hardirq_func_calls), 0,
			    "Timer function was not called");
	KUNIT_EXPECT_GT_MSG(test, atomic_read(&state.softirq_func_calls), 0,
			    "BH work function was not called");

	/* Check for failure reported from any context. */
	KUNIT_EXPECT_FALSE_MSG(test, state.task_func_reported_failure,
			       "Failure reported from task context");
	KUNIT_EXPECT_FALSE_MSG(test, state.hardirq_func_reported_failure,
			       "Failure reported from hardirq context");
	KUNIT_EXPECT_FALSE_MSG(test, state.softirq_func_reported_failure,
			       "Failure reported from softirq context");
}
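
/*
 * Example usage (an illustrative sketch, not part of this header): a KUnit
 * test .c file that includes this header can wrap the operation under test in
 * a bool-returning function and pass it to kunit_run_irq_test().  The names
 * example_op_func() and example_irq_test(), the hypothetical helper
 * example_compute(), the expected-output buffer, and the iteration count of
 * 10000 are all made up for illustration.
 *
 *	static bool example_op_func(void *test_specific_state)
 *	{
 *		u8 out[16];
 *
 *		example_compute(out);
 *		return memcmp(out, example_expected_out, sizeof(out)) == 0;
 *	}
 *
 *	static void example_irq_test(struct kunit *test)
 *	{
 *		kunit_run_irq_test(test, example_op_func, 10000, NULL);
 *	}
 */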

#endif /* _KUNIT_RUN_IN_IRQ_CONTEXT_H */