1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
4 */
5
6 #define pr_fmt(fmt) "tlmm-test: " fmt
7
8 #include <kunit/test.h>
9 #include <linux/delay.h>
10 #include <linux/gpio/consumer.h>
11 #include <linux/interrupt.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_irq.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/platform_device.h>
19
20 /*
21 * This TLMM test module serves the purpose of validating that the TLMM driver
22 * (pinctrl-msm) delivers expected number of interrupts in response to changing
23 * GPIO state.
24 *
25 * To achieve this without external equipment the test takes a module parameter
26 * "gpio", which the tester is expected to specify an unused and non-connected
27 * pin. The GPIO state is then driven by adjusting the bias of the pin, at
28 * suitable times through the different test cases.
29 *
30 * Upon execution, the test initialization will find the TLMM node (subject to
31 * tlmm_of_match[] allow listing) and create the necessary references
 * dynamically, rather than relying on e.g. Devicetree and phandles.
33 */
34
/* Pull-configuration field of the per-pin TLMM control register */
#define MSM_PULL_MASK GENMASK(2, 0)
#define MSM_PULL_DOWN 1
#define MSM_PULL_UP 3
/* Stride between successive per-pin register blocks */
#define TLMM_REG_SIZE 0x1000

/* GPIO under test; must be given via the "gpio" module parameter (-1 = unset) */
static int tlmm_test_gpio = -1;
module_param_named(gpio, tlmm_test_gpio, int, 0600);
42
/* Suite-wide state, populated by tlmm_test_init_suite() */
static struct {
	void __iomem *base;	/* mapped TLMM register space */
	void __iomem *reg;	/* control register of the pin under test */
	int irq;		/* Linux irq number mapped for the pin */

	u32 low_val;		/* register value that drives the line low (pull-down) */
	u32 high_val;		/* register value that drives the line high (pull-up) */
} tlmm_suite;
51
52 /**
53 * struct tlmm_test_priv - Per-test context
54 * @intr_count: number of times hard handler was hit with TLMM_TEST_COUNT op set
55 * @thread_count: number of times thread handler was hit with TLMM_TEST_COUNT op set
56 * @intr_op: operations to be performed by the hard IRQ handler
57 * @intr_op_remain: number of times the TLMM_TEST_THEN_* operations should be
58 * performed by the hard IRQ handler
59 * @thread_op: operations to be performed by the threaded IRQ handler
60 * @thread_op_remain: number of times the TLMM_TEST_THEN_* operations should
61 * be performed by the threaded IRQ handler
62 */
struct tlmm_test_priv {
	atomic_t intr_count;	/* hits in the hard handler (TLMM_TEST_COUNT) */
	atomic_t thread_count;	/* hits in the threaded handler (TLMM_TEST_COUNT) */

	unsigned int intr_op;
	atomic_t intr_op_remain;

	unsigned int thread_op;
	atomic_t thread_op_remain;
};
73
74 /* Operation masks for @intr_op and @thread_op */
75 #define TLMM_TEST_COUNT BIT(0)
76 #define TLMM_TEST_OUTPUT_LOW BIT(1)
77 #define TLMM_TEST_OUTPUT_HIGH BIT(2)
78 #define TLMM_TEST_THEN_HIGH BIT(3)
79 #define TLMM_TEST_THEN_LOW BIT(4)
80 #define TLMM_TEST_WAKE_THREAD BIT(5)
81
/* Drive the test GPIO low by selecting its pull-down configuration */
static void tlmm_output_low(void)
{
	writel(tlmm_suite.low_val, tlmm_suite.reg);
	readl(tlmm_suite.reg);	/* read back, presumably to flush the posted write -- TODO confirm */
}
87
/* Drive the test GPIO high by selecting its pull-up configuration */
static void tlmm_output_high(void)
{
	writel(tlmm_suite.high_val, tlmm_suite.reg);
	readl(tlmm_suite.reg);	/* read back, presumably to flush the posted write -- TODO confirm */
}
93
tlmm_test_intr_fn(int irq,void * dev_id)94 static irqreturn_t tlmm_test_intr_fn(int irq, void *dev_id)
95 {
96 struct tlmm_test_priv *priv = dev_id;
97
98 if (priv->intr_op & TLMM_TEST_COUNT)
99 atomic_inc(&priv->intr_count);
100
101 if (priv->intr_op & TLMM_TEST_OUTPUT_LOW)
102 tlmm_output_low();
103 if (priv->intr_op & TLMM_TEST_OUTPUT_HIGH)
104 tlmm_output_high();
105
106 if (atomic_dec_if_positive(&priv->intr_op_remain) > 0) {
107 udelay(1);
108
109 if (priv->intr_op & TLMM_TEST_THEN_LOW)
110 tlmm_output_low();
111 if (priv->intr_op & TLMM_TEST_THEN_HIGH)
112 tlmm_output_high();
113 }
114
115 return priv->intr_op & TLMM_TEST_WAKE_THREAD ? IRQ_WAKE_THREAD : IRQ_HANDLED;
116 }
117
tlmm_test_intr_thread_fn(int irq,void * dev_id)118 static irqreturn_t tlmm_test_intr_thread_fn(int irq, void *dev_id)
119 {
120 struct tlmm_test_priv *priv = dev_id;
121
122 if (priv->thread_op & TLMM_TEST_COUNT)
123 atomic_inc(&priv->thread_count);
124
125 if (priv->thread_op & TLMM_TEST_OUTPUT_LOW)
126 tlmm_output_low();
127 if (priv->thread_op & TLMM_TEST_OUTPUT_HIGH)
128 tlmm_output_high();
129
130 if (atomic_dec_if_positive(&priv->thread_op_remain) > 0) {
131 udelay(1);
132 if (priv->thread_op & TLMM_TEST_THEN_LOW)
133 tlmm_output_low();
134 if (priv->thread_op & TLMM_TEST_THEN_HIGH)
135 tlmm_output_high();
136 }
137
138 return IRQ_HANDLED;
139 }
140
tlmm_test_request_hard_irq(struct kunit * test,unsigned long irqflags)141 static void tlmm_test_request_hard_irq(struct kunit *test, unsigned long irqflags)
142 {
143 struct tlmm_test_priv *priv = test->priv;
144 int ret;
145
146 ret = request_irq(tlmm_suite.irq, tlmm_test_intr_fn, irqflags, test->name, priv);
147 KUNIT_EXPECT_EQ(test, ret, 0);
148 }
149
tlmm_test_request_threaded_irq(struct kunit * test,unsigned long irqflags)150 static void tlmm_test_request_threaded_irq(struct kunit *test, unsigned long irqflags)
151 {
152 struct tlmm_test_priv *priv = test->priv;
153 int ret;
154
155 ret = request_threaded_irq(tlmm_suite.irq,
156 tlmm_test_intr_fn, tlmm_test_intr_thread_fn,
157 irqflags, test->name, priv);
158
159 KUNIT_EXPECT_EQ(test, ret, 0);
160 }
161
tlmm_test_silent(struct kunit * test,unsigned long irqflags)162 static void tlmm_test_silent(struct kunit *test, unsigned long irqflags)
163 {
164 struct tlmm_test_priv *priv = test->priv;
165
166 priv->intr_op = TLMM_TEST_COUNT;
167
168 /* GPIO line at non-triggering level */
169 if (irqflags == IRQF_TRIGGER_LOW || irqflags == IRQF_TRIGGER_FALLING)
170 tlmm_output_high();
171 else
172 tlmm_output_low();
173
174 tlmm_test_request_hard_irq(test, irqflags);
175 msleep(100);
176 free_irq(tlmm_suite.irq, priv);
177
178 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 0);
179 }
180
181 /*
182 * Test that no RISING interrupts are triggered on a silent pin
183 */
/* Wrapper: silent-pin check with a rising-edge trigger */
static void tlmm_test_silent_rising(struct kunit *test)
{
	tlmm_test_silent(test, IRQF_TRIGGER_RISING);
}
188
189 /*
190 * Test that no FALLING interrupts are triggered on a silent pin
191 */
/* Wrapper: silent-pin check with a falling-edge trigger */
static void tlmm_test_silent_falling(struct kunit *test)
{
	tlmm_test_silent(test, IRQF_TRIGGER_FALLING);
}
196
197 /*
198 * Test that no LOW interrupts are triggered on a silent pin
199 */
/* Wrapper: silent-pin check with a level-low trigger */
static void tlmm_test_silent_low(struct kunit *test)
{
	tlmm_test_silent(test, IRQF_TRIGGER_LOW);
}
204
205 /*
206 * Test that no HIGH interrupts are triggered on a silent pin
207 */
/* Wrapper: silent-pin check with a level-high trigger */
static void tlmm_test_silent_high(struct kunit *test)
{
	tlmm_test_silent(test, IRQF_TRIGGER_HIGH);
}
212
213 /*
214 * Square wave with 10 high pulses, assert that we get 10 rising interrupts
215 */
tlmm_test_rising(struct kunit * test)216 static void tlmm_test_rising(struct kunit *test)
217 {
218 struct tlmm_test_priv *priv = test->priv;
219 int i;
220
221 priv->intr_op = TLMM_TEST_COUNT;
222
223 tlmm_output_low();
224
225 tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
226 for (i = 0; i < 10; i++) {
227 tlmm_output_low();
228 msleep(20);
229 tlmm_output_high();
230 msleep(20);
231 }
232
233 free_irq(tlmm_suite.irq, priv);
234
235 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
236 }
237
238 /*
239 * Square wave with 10 low pulses, assert that we get 10 falling interrupts
240 */
tlmm_test_falling(struct kunit * test)241 static void tlmm_test_falling(struct kunit *test)
242 {
243 struct tlmm_test_priv *priv = test->priv;
244 int i;
245
246 priv->intr_op = TLMM_TEST_COUNT;
247
248 tlmm_output_high();
249
250 tlmm_test_request_hard_irq(test, IRQF_TRIGGER_FALLING);
251 for (i = 0; i < 10; i++) {
252 tlmm_output_high();
253 msleep(20);
254 tlmm_output_low();
255 msleep(20);
256 }
257 free_irq(tlmm_suite.irq, priv);
258
259 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
260 }
261
262 /*
263 * Drive line low 10 times, handler drives it high to "clear the interrupt
264 * source", assert we get 10 interrupts
265 */
tlmm_test_low(struct kunit * test)266 static void tlmm_test_low(struct kunit *test)
267 {
268 struct tlmm_test_priv *priv = test->priv;
269 int i;
270
271 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH;
272 atomic_set(&priv->intr_op_remain, 9);
273
274 tlmm_output_high();
275
276 tlmm_test_request_hard_irq(test, IRQF_TRIGGER_LOW);
277 for (i = 0; i < 10; i++) {
278 msleep(20);
279 tlmm_output_low();
280 }
281 msleep(100);
282 free_irq(tlmm_suite.irq, priv);
283
284 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
285 }
286
287 /*
288 * Drive line high 10 times, handler drives it low to "clear the interrupt
289 * source", assert we get 10 interrupts
290 */
tlmm_test_high(struct kunit * test)291 static void tlmm_test_high(struct kunit *test)
292 {
293 struct tlmm_test_priv *priv = test->priv;
294 int i;
295
296 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW;
297 atomic_set(&priv->intr_op_remain, 9);
298
299 tlmm_output_low();
300
301 tlmm_test_request_hard_irq(test, IRQF_TRIGGER_HIGH);
302 for (i = 0; i < 10; i++) {
303 msleep(20);
304 tlmm_output_high();
305 }
306 msleep(100);
307 free_irq(tlmm_suite.irq, priv);
308
309 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
310 }
311
312 /*
313 * Handler drives GPIO high to "clear the interrupt source", then low to
314 * simulate a new interrupt, repeated 10 times, assert we get 10 interrupts
315 */
tlmm_test_falling_in_handler(struct kunit * test)316 static void tlmm_test_falling_in_handler(struct kunit *test)
317 {
318 struct tlmm_test_priv *priv = test->priv;
319
320 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH | TLMM_TEST_THEN_LOW;
321 atomic_set(&priv->intr_op_remain, 10);
322
323 tlmm_output_high();
324
325 tlmm_test_request_hard_irq(test, IRQF_TRIGGER_FALLING);
326 msleep(20);
327 tlmm_output_low();
328 msleep(100);
329 free_irq(tlmm_suite.irq, priv);
330
331 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
332 }
333
334 /*
335 * Handler drives GPIO low to "clear the interrupt source", then high to
336 * simulate a new interrupt, repeated 10 times, assert we get 10 interrupts
337 */
tlmm_test_rising_in_handler(struct kunit * test)338 static void tlmm_test_rising_in_handler(struct kunit *test)
339 {
340 struct tlmm_test_priv *priv = test->priv;
341
342 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW | TLMM_TEST_THEN_HIGH;
343 atomic_set(&priv->intr_op_remain, 10);
344
345 tlmm_output_low();
346
347 tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
348 msleep(20);
349 tlmm_output_high();
350 msleep(100);
351 free_irq(tlmm_suite.irq, priv);
352
353 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
354 }
355
356 /*
357 * Square wave with 10 high pulses, assert that we get 10 rising hard and
358 * 10 threaded interrupts
359 */
tlmm_test_thread_rising(struct kunit * test)360 static void tlmm_test_thread_rising(struct kunit *test)
361 {
362 struct tlmm_test_priv *priv = test->priv;
363 int i;
364
365 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
366 priv->thread_op = TLMM_TEST_COUNT;
367
368 tlmm_output_low();
369
370 tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_RISING);
371 for (i = 0; i < 10; i++) {
372 tlmm_output_low();
373 msleep(20);
374 tlmm_output_high();
375 msleep(20);
376 }
377 free_irq(tlmm_suite.irq, priv);
378
379 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
380 KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
381 }
382
/*
 * Square wave with 10 low pulses, assert that we get 10 falling hard and
 * 10 threaded interrupts
 */
tlmm_test_thread_falling(struct kunit * test)386 static void tlmm_test_thread_falling(struct kunit *test)
387 {
388 struct tlmm_test_priv *priv = test->priv;
389 int i;
390
391 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
392 priv->thread_op = TLMM_TEST_COUNT;
393
394 tlmm_output_high();
395
396 tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_FALLING);
397 for (i = 0; i < 10; i++) {
398 tlmm_output_high();
399 msleep(20);
400 tlmm_output_low();
401 msleep(20);
402 }
403 free_irq(tlmm_suite.irq, priv);
404
405 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
406 KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
407 }
408
409 /*
410 * Drive line high 10 times, threaded handler drives it low to "clear the
411 * interrupt source", assert we get 10 interrupts
412 */
tlmm_test_thread_high(struct kunit * test)413 static void tlmm_test_thread_high(struct kunit *test)
414 {
415 struct tlmm_test_priv *priv = test->priv;
416 int i;
417
418 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
419 priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW;
420
421 tlmm_output_low();
422
423 tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
424 for (i = 0; i < 10; i++) {
425 tlmm_output_high();
426 msleep(20);
427 }
428 free_irq(tlmm_suite.irq, priv);
429
430 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
431 KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
432 }
433
434 /*
435 * Drive line low 10 times, threaded handler drives it high to "clear the
436 * interrupt source", assert we get 10 interrupts
437 */
tlmm_test_thread_low(struct kunit * test)438 static void tlmm_test_thread_low(struct kunit *test)
439 {
440 struct tlmm_test_priv *priv = test->priv;
441 int i;
442
443 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
444 priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH;
445
446 tlmm_output_high();
447
448 tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
449 for (i = 0; i < 10; i++) {
450 tlmm_output_low();
451 msleep(20);
452 }
453 free_irq(tlmm_suite.irq, priv);
454
455 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
456 KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
457 }
458
459 /*
460 * Handler drives GPIO low to "clear the interrupt source", then high in the
461 * threaded handler to simulate a new interrupt, repeated 10 times, assert we
462 * get 10 interrupts
463 */
tlmm_test_thread_rising_in_handler(struct kunit * test)464 static void tlmm_test_thread_rising_in_handler(struct kunit *test)
465 {
466 struct tlmm_test_priv *priv = test->priv;
467
468 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW | TLMM_TEST_WAKE_THREAD;
469 priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_THEN_HIGH;
470 atomic_set(&priv->thread_op_remain, 10);
471
472 tlmm_output_low();
473
474 tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_RISING);
475 msleep(20);
476 tlmm_output_high();
477 msleep(100);
478 free_irq(tlmm_suite.irq, priv);
479
480 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
481 KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
482 }
483
484 /*
485 * Handler drives GPIO high to "clear the interrupt source", then low in the
486 * threaded handler to simulate a new interrupt, repeated 10 times, assert we
487 * get 10 interrupts
488 */
tlmm_test_thread_falling_in_handler(struct kunit * test)489 static void tlmm_test_thread_falling_in_handler(struct kunit *test)
490 {
491 struct tlmm_test_priv *priv = test->priv;
492
493 priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH | TLMM_TEST_WAKE_THREAD;
494 priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_THEN_LOW;
495 atomic_set(&priv->thread_op_remain, 10);
496
497 tlmm_output_high();
498
499 tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_FALLING);
500 msleep(20);
501 tlmm_output_low();
502 msleep(100);
503 free_irq(tlmm_suite.irq, priv);
504
505 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
506 KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
507 }
508
509 /*
510 * Validate that edge interrupts occurring while irq is disabled is delivered
511 * once the interrupt is reenabled.
512 */
tlmm_test_rising_while_disabled(struct kunit * test)513 static void tlmm_test_rising_while_disabled(struct kunit *test)
514 {
515 struct tlmm_test_priv *priv = test->priv;
516 unsigned int after_edge;
517 unsigned int before_edge;
518
519 priv->intr_op = TLMM_TEST_COUNT;
520 atomic_set(&priv->thread_op_remain, 10);
521
522 tlmm_output_low();
523
524 tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
525 msleep(20);
526
527 disable_irq(tlmm_suite.irq);
528 before_edge = atomic_read(&priv->intr_count);
529
530 tlmm_output_high();
531 msleep(20);
532 after_edge = atomic_read(&priv->intr_count);
533
534 msleep(20);
535 enable_irq(tlmm_suite.irq);
536 msleep(20);
537
538 free_irq(tlmm_suite.irq, priv);
539
540 KUNIT_ASSERT_EQ(test, before_edge, 0);
541 KUNIT_ASSERT_EQ(test, after_edge, 0);
542 KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 1);
543 }
544
tlmm_test_init(struct kunit * test)545 static int tlmm_test_init(struct kunit *test)
546 {
547 struct tlmm_test_priv *priv;
548
549 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
550 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
551
552 atomic_set(&priv->intr_count, 0);
553 atomic_set(&priv->thread_count, 0);
554
555 atomic_set(&priv->intr_op_remain, 0);
556 atomic_set(&priv->thread_op_remain, 0);
557
558 test->priv = priv;
559
560 return 0;
561 }
562
563 /*
564 * NOTE: When adding compatibles to this list, ensure that TLMM_REG_SIZE and
565 * pull configuration values are supported and correct.
566 */
static const struct of_device_id tlmm_of_match[] = {
	{ .compatible = "qcom,sc8280xp-tlmm" },
	{ .compatible = "qcom,x1e80100-tlmm" },
	{}	/* sentinel */
};
572
tlmm_test_init_suite(struct kunit_suite * suite)573 static int tlmm_test_init_suite(struct kunit_suite *suite)
574 {
575 struct of_phandle_args args = {};
576 struct resource res;
577 int ret;
578 u32 val;
579
580 if (tlmm_test_gpio < 0) {
581 pr_err("use the tlmm-test.gpio module parameter to specify which GPIO to use\n");
582 return -EINVAL;
583 }
584
585 struct device_node *tlmm __free(device_node) = of_find_matching_node(NULL, tlmm_of_match);
586 if (!tlmm) {
587 pr_err("failed to find tlmm node\n");
588 return -EINVAL;
589 }
590
591 ret = of_address_to_resource(tlmm, 0, &res);
592 if (ret < 0)
593 return ret;
594
595 tlmm_suite.base = ioremap(res.start, resource_size(&res));
596 if (!tlmm_suite.base)
597 return -ENOMEM;
598
599 args.np = tlmm;
600 args.args_count = 2;
601 args.args[0] = tlmm_test_gpio;
602 args.args[1] = 0;
603
604 tlmm_suite.irq = irq_create_of_mapping(&args);
605 if (!tlmm_suite.irq) {
606 pr_err("failed to map TLMM irq %d\n", args.args[0]);
607 goto err_unmap;
608 }
609
610 tlmm_suite.reg = tlmm_suite.base + tlmm_test_gpio * TLMM_REG_SIZE;
611 val = readl(tlmm_suite.reg) & ~MSM_PULL_MASK;
612 tlmm_suite.low_val = val | MSM_PULL_DOWN;
613 tlmm_suite.high_val = val | MSM_PULL_UP;
614
615 return 0;
616
617 err_unmap:
618 iounmap(tlmm_suite.base);
619 tlmm_suite.base = NULL;
620 return -EINVAL;
621 }
622
/* Suite teardown: release the irq mapping and unmap the register space */
static void tlmm_test_exit_suite(struct kunit_suite *suite)
{
	irq_dispose_mapping(tlmm_suite.irq);
	iounmap(tlmm_suite.base);

	tlmm_suite.base = NULL;
	tlmm_suite.irq = -1;
}
631
/* All interrupt-delivery test cases, run in array order */
static struct kunit_case tlmm_test_cases[] = {
	KUNIT_CASE(tlmm_test_silent_rising),
	KUNIT_CASE(tlmm_test_silent_falling),
	KUNIT_CASE(tlmm_test_silent_low),
	KUNIT_CASE(tlmm_test_silent_high),
	KUNIT_CASE(tlmm_test_rising),
	KUNIT_CASE(tlmm_test_falling),
	KUNIT_CASE(tlmm_test_high),
	KUNIT_CASE(tlmm_test_low),
	KUNIT_CASE(tlmm_test_rising_in_handler),
	KUNIT_CASE(tlmm_test_falling_in_handler),
	KUNIT_CASE(tlmm_test_thread_rising),
	KUNIT_CASE(tlmm_test_thread_falling),
	KUNIT_CASE(tlmm_test_thread_high),
	KUNIT_CASE(tlmm_test_thread_low),
	KUNIT_CASE(tlmm_test_thread_rising_in_handler),
	KUNIT_CASE(tlmm_test_thread_falling_in_handler),
	KUNIT_CASE(tlmm_test_rising_while_disabled),
	{}	/* sentinel */
};
652
/* KUnit suite wiring: per-test init plus suite-wide setup/teardown */
static struct kunit_suite tlmm_test_suite = {
	.name = "tlmm-test",
	.init = tlmm_test_init,
	.suite_init = tlmm_test_init_suite,
	.suite_exit = tlmm_test_exit_suite,
	.test_cases = tlmm_test_cases,
};
660
661 kunit_test_suites(&tlmm_test_suite);
662
663 MODULE_DESCRIPTION("Qualcomm TLMM test");
664 MODULE_LICENSE("GPL");
665