// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "tlmm-test: " fmt

#include <kunit/test.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/*
 * This TLMM test module validates that the TLMM driver (pinctrl-msm)
 * delivers the expected number of interrupts in response to changing GPIO
 * state.
 *
 * To achieve this without external equipment, the test takes a module
 * parameter "gpio", for which the tester is expected to specify an unused,
 * non-connected pin. The GPIO state is then driven by adjusting the bias of
 * the pin at suitable times through the different test cases.
 *
 * Upon execution, the test initialization will find the TLMM node (subject to
 * tlmm_of_match[] allow listing) and create the necessary references
 * dynamically, rather than relying on e.g. Devicetree and phandles.
 */

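/*
 * Example invocation (a sketch only; the pin number below is hypothetical and
 * must be replaced with an unused, non-connected GPIO on the target board):
 *
 *   modprobe tlmm-test gpio=100
 *
 * or, when a non-default register region should be used, with "some_region"
 * standing in for an entry of the TLMM node's "reg-names":
 *
 *   modprobe tlmm-test gpio=100 name=some_region
 */
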
#define MSM_PULL_MASK		GENMASK(2, 0)
#define MSM_PULL_DOWN		1
#define MSM_PULL_UP		3
#define TLMM_REG_SIZE		0x1000

static int tlmm_test_gpio = -1;
static char *tlmm_reg_name = "default_region";

module_param_named(gpio, tlmm_test_gpio, int, 0600);
module_param_named(name, tlmm_reg_name, charp, 0600);

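/*
 * Suite-wide state, set up once in tlmm_test_init_suite(): the ioremapped
 * TLMM register space, the config register of the selected GPIO, its Linux
 * IRQ number, and the precomputed register values used to bias the pin
 * low/high.
 */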
static struct {
	void __iomem *base;
	void __iomem *reg;
	int irq;

	u32 low_val;
	u32 high_val;
} tlmm_suite;

/**
 * struct tlmm_test_priv - Per-test context
 * @intr_count:		number of times hard handler was hit with TLMM_TEST_COUNT op set
 * @thread_count:	number of times thread handler was hit with TLMM_TEST_COUNT op set
 * @intr_op:		operations to be performed by the hard IRQ handler
 * @intr_op_remain:	number of times the TLMM_TEST_THEN_* operations should be
 *			performed by the hard IRQ handler
 * @thread_op:		operations to be performed by the threaded IRQ handler
 * @thread_op_remain:	number of times the TLMM_TEST_THEN_* operations should
 *			be performed by the threaded IRQ handler
 */
struct tlmm_test_priv {
	atomic_t intr_count;
	atomic_t thread_count;

	unsigned int intr_op;
	atomic_t intr_op_remain;

	unsigned int thread_op;
	atomic_t thread_op_remain;
};

/* Operation masks for @intr_op and @thread_op */
#define TLMM_TEST_COUNT		BIT(0)
#define TLMM_TEST_OUTPUT_LOW	BIT(1)
#define TLMM_TEST_OUTPUT_HIGH	BIT(2)
#define TLMM_TEST_THEN_HIGH	BIT(3)
#define TLMM_TEST_THEN_LOW	BIT(4)
#define TLMM_TEST_WAKE_THREAD	BIT(5)

static void tlmm_output_low(void)
{
	writel(tlmm_suite.low_val, tlmm_suite.reg);
	readl(tlmm_suite.reg);
}

static void tlmm_output_high(void)
{
	writel(tlmm_suite.high_val, tlmm_suite.reg);
	readl(tlmm_suite.reg);
}

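/*
 * Hard IRQ handler: interprets the TLMM_TEST_* bits in @intr_op. It counts
 * the invocation, optionally drives the line low/high to "clear" the
 * simulated interrupt source and, until @intr_op_remain is exhausted,
 * retriggers the line (TLMM_TEST_THEN_*) after a short delay. Returns
 * IRQ_WAKE_THREAD when the threaded handler should run as well.
 */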
static irqreturn_t tlmm_test_intr_fn(int irq, void *dev_id)
{
	struct tlmm_test_priv *priv = dev_id;

	if (priv->intr_op & TLMM_TEST_COUNT)
		atomic_inc(&priv->intr_count);

	if (priv->intr_op & TLMM_TEST_OUTPUT_LOW)
		tlmm_output_low();
	if (priv->intr_op & TLMM_TEST_OUTPUT_HIGH)
		tlmm_output_high();

	if (atomic_dec_if_positive(&priv->intr_op_remain) > 0) {
		udelay(1);

		if (priv->intr_op & TLMM_TEST_THEN_LOW)
			tlmm_output_low();
		if (priv->intr_op & TLMM_TEST_THEN_HIGH)
			tlmm_output_high();
	}

	return priv->intr_op & TLMM_TEST_WAKE_THREAD ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

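/*
 * Threaded counterpart of tlmm_test_intr_fn(), driven by @thread_op and
 * @thread_op_remain instead of the hard-IRQ fields.
 */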
static irqreturn_t tlmm_test_intr_thread_fn(int irq, void *dev_id)
{
	struct tlmm_test_priv *priv = dev_id;

	if (priv->thread_op & TLMM_TEST_COUNT)
		atomic_inc(&priv->thread_count);

	if (priv->thread_op & TLMM_TEST_OUTPUT_LOW)
		tlmm_output_low();
	if (priv->thread_op & TLMM_TEST_OUTPUT_HIGH)
		tlmm_output_high();

	if (atomic_dec_if_positive(&priv->thread_op_remain) > 0) {
		udelay(1);
		if (priv->thread_op & TLMM_TEST_THEN_LOW)
			tlmm_output_low();
		if (priv->thread_op & TLMM_TEST_THEN_HIGH)
			tlmm_output_high();
	}

	return IRQ_HANDLED;
}

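/*
 * Helpers that hook the handlers above up to the test IRQ with the requested
 * trigger flags; a request failure is reported as a KUnit expectation.
 */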
static void tlmm_test_request_hard_irq(struct kunit *test, unsigned long irqflags)
{
	struct tlmm_test_priv *priv = test->priv;
	int ret;

	ret = request_irq(tlmm_suite.irq, tlmm_test_intr_fn, irqflags, test->name, priv);
	KUNIT_EXPECT_EQ(test, ret, 0);
}

static void tlmm_test_request_threaded_irq(struct kunit *test, unsigned long irqflags)
{
	struct tlmm_test_priv *priv = test->priv;
	int ret;

	ret = request_threaded_irq(tlmm_suite.irq,
				   tlmm_test_intr_fn, tlmm_test_intr_thread_fn,
				   irqflags, test->name, priv);

	KUNIT_EXPECT_EQ(test, ret, 0);
}

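/*
 * Common helper for the tlmm_test_silent_*() cases: park the line at the
 * non-triggering level for the given trigger type, wait 100ms with the
 * interrupt requested, and assert that no interrupt was counted.
 */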
static void tlmm_test_silent(struct kunit *test, unsigned long irqflags)
{
	struct tlmm_test_priv *priv = test->priv;

	priv->intr_op = TLMM_TEST_COUNT;

	/* GPIO line at non-triggering level */
	if (irqflags == IRQF_TRIGGER_LOW || irqflags == IRQF_TRIGGER_FALLING)
		tlmm_output_high();
	else
		tlmm_output_low();

	tlmm_test_request_hard_irq(test, irqflags);
	msleep(100);
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 0);
}

/*
 * Test that no RISING interrupts are triggered on a silent pin
 */
static void tlmm_test_silent_rising(struct kunit *test)
{
	tlmm_test_silent(test, IRQF_TRIGGER_RISING);
}

/*
 * Test that no FALLING interrupts are triggered on a silent pin
 */
static void tlmm_test_silent_falling(struct kunit *test)
{
	tlmm_test_silent(test, IRQF_TRIGGER_FALLING);
}

/*
 * Test that no LOW interrupts are triggered on a silent pin
 */
static void tlmm_test_silent_low(struct kunit *test)
{
	tlmm_test_silent(test, IRQF_TRIGGER_LOW);
}

/*
 * Test that no HIGH interrupts are triggered on a silent pin
 */
static void tlmm_test_silent_high(struct kunit *test)
{
	tlmm_test_silent(test, IRQF_TRIGGER_HIGH);
}

/*
 * Square wave with 10 high pulses, assert that we get 10 rising interrupts
 */
static void tlmm_test_rising(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	int i;

	priv->intr_op = TLMM_TEST_COUNT;

	tlmm_output_low();

	tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
	for (i = 0; i < 10; i++) {
		tlmm_output_low();
		msleep(20);
		tlmm_output_high();
		msleep(20);
	}

	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
}

/*
 * Square wave with 10 low pulses, assert that we get 10 falling interrupts
 */
static void tlmm_test_falling(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	int i;

	priv->intr_op = TLMM_TEST_COUNT;

	tlmm_output_high();

	tlmm_test_request_hard_irq(test, IRQF_TRIGGER_FALLING);
	for (i = 0; i < 10; i++) {
		tlmm_output_high();
		msleep(20);
		tlmm_output_low();
		msleep(20);
	}
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
}

/*
 * Drive line low 10 times, handler drives it high to "clear the interrupt
 * source", assert we get 10 interrupts
 */
static void tlmm_test_low(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	int i;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH;
	atomic_set(&priv->intr_op_remain, 9);

	tlmm_output_high();

	tlmm_test_request_hard_irq(test, IRQF_TRIGGER_LOW);
	for (i = 0; i < 10; i++) {
		msleep(20);
		tlmm_output_low();
	}
	msleep(100);
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
}

/*
 * Drive line high 10 times, handler drives it low to "clear the interrupt
 * source", assert we get 10 interrupts
 */
static void tlmm_test_high(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	int i;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW;
	atomic_set(&priv->intr_op_remain, 9);

	tlmm_output_low();

	tlmm_test_request_hard_irq(test, IRQF_TRIGGER_HIGH);
	for (i = 0; i < 10; i++) {
		msleep(20);
		tlmm_output_high();
	}
	msleep(100);
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
}

/*
 * Handler drives GPIO high to "clear the interrupt source", then low to
 * simulate a new interrupt, repeated 10 times, assert we get 10 interrupts
 */
static void tlmm_test_falling_in_handler(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH | TLMM_TEST_THEN_LOW;
	atomic_set(&priv->intr_op_remain, 10);

	tlmm_output_high();

	tlmm_test_request_hard_irq(test, IRQF_TRIGGER_FALLING);
	msleep(20);
	tlmm_output_low();
	msleep(100);
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
}

/*
 * Handler drives GPIO low to "clear the interrupt source", then high to
 * simulate a new interrupt, repeated 10 times, assert we get 10 interrupts
 */
static void tlmm_test_rising_in_handler(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW | TLMM_TEST_THEN_HIGH;
	atomic_set(&priv->intr_op_remain, 10);

	tlmm_output_low();

	tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
	msleep(20);
	tlmm_output_high();
	msleep(100);
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
}

/*
 * Square wave with 10 high pulses, assert that we get 10 rising hard and
 * 10 threaded interrupts
 */
static void tlmm_test_thread_rising(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	int i;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
	priv->thread_op = TLMM_TEST_COUNT;

	tlmm_output_low();

	tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_RISING);
	for (i = 0; i < 10; i++) {
		tlmm_output_low();
		msleep(20);
		tlmm_output_high();
		msleep(20);
	}
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
	KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
}

/*
 * Square wave with 10 low pulses, assert that we get 10 falling hard and
 * 10 threaded interrupts
 */
static void tlmm_test_thread_falling(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	int i;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
	priv->thread_op = TLMM_TEST_COUNT;

	tlmm_output_high();

	tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_FALLING);
	for (i = 0; i < 10; i++) {
		tlmm_output_high();
		msleep(20);
		tlmm_output_low();
		msleep(20);
	}
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
	KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
}

/*
 * Drive line high 10 times, threaded handler drives it low to "clear the
 * interrupt source", assert we get 10 interrupts
 */
static void tlmm_test_thread_high(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	int i;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
	priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW;

	tlmm_output_low();

	tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
	for (i = 0; i < 10; i++) {
		tlmm_output_high();
		msleep(20);
	}
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
	KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
}

/*
 * Drive line low 10 times, threaded handler drives it high to "clear the
 * interrupt source", assert we get 10 interrupts
 */
static void tlmm_test_thread_low(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	int i;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_WAKE_THREAD;
	priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH;

	tlmm_output_high();

	tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
	for (i = 0; i < 10; i++) {
		tlmm_output_low();
		msleep(20);
	}
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
	KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
}

/*
 * Handler drives GPIO low to "clear the interrupt source", then high in the
 * threaded handler to simulate a new interrupt, repeated 10 times, assert we
 * get 10 interrupts
 */
static void tlmm_test_thread_rising_in_handler(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_LOW | TLMM_TEST_WAKE_THREAD;
	priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_THEN_HIGH;
	atomic_set(&priv->thread_op_remain, 10);

	tlmm_output_low();

	tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_RISING);
	msleep(20);
	tlmm_output_high();
	msleep(100);
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
	KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
}

/*
 * Handler drives GPIO high to "clear the interrupt source", then low in the
 * threaded handler to simulate a new interrupt, repeated 10 times, assert we
 * get 10 interrupts
 */
static void tlmm_test_thread_falling_in_handler(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;

	priv->intr_op = TLMM_TEST_COUNT | TLMM_TEST_OUTPUT_HIGH | TLMM_TEST_WAKE_THREAD;
	priv->thread_op = TLMM_TEST_COUNT | TLMM_TEST_THEN_LOW;
	atomic_set(&priv->thread_op_remain, 10);

	tlmm_output_high();

	tlmm_test_request_threaded_irq(test, IRQF_TRIGGER_FALLING);
	msleep(20);
	tlmm_output_low();
	msleep(100);
	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 10);
	KUNIT_ASSERT_EQ(test, atomic_read(&priv->thread_count), 10);
}

/*
 * Validate that an edge interrupt occurring while the IRQ is disabled is
 * delivered once the interrupt is re-enabled.
 */
static void tlmm_test_rising_while_disabled(struct kunit *test)
{
	struct tlmm_test_priv *priv = test->priv;
	unsigned int after_edge;
	unsigned int before_edge;

	priv->intr_op = TLMM_TEST_COUNT;
	atomic_set(&priv->thread_op_remain, 10);

	tlmm_output_low();

	tlmm_test_request_hard_irq(test, IRQF_TRIGGER_RISING);
	msleep(20);

	disable_irq(tlmm_suite.irq);
	before_edge = atomic_read(&priv->intr_count);

	tlmm_output_high();
	msleep(20);
	after_edge = atomic_read(&priv->intr_count);

	msleep(20);
	enable_irq(tlmm_suite.irq);
	msleep(20);

	free_irq(tlmm_suite.irq, priv);

	KUNIT_ASSERT_EQ(test, before_edge, 0);
	KUNIT_ASSERT_EQ(test, after_edge, 0);
	KUNIT_ASSERT_EQ(test, atomic_read(&priv->intr_count), 1);
}

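/*
 * Per-test setup: allocate a zeroed tlmm_test_priv and reset all counters
 * before each test case runs.
 */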
static int tlmm_test_init(struct kunit *test)
{
	struct tlmm_test_priv *priv;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);

	atomic_set(&priv->intr_count, 0);
	atomic_set(&priv->thread_count, 0);

	atomic_set(&priv->intr_op_remain, 0);
	atomic_set(&priv->thread_op_remain, 0);

	test->priv = priv;

	return 0;
}

/*
 * NOTE: When adding compatibles to this list, ensure that TLMM_REG_SIZE and
 * pull configuration values are supported and correct.
 */
static const struct of_device_id tlmm_of_match[] = {
	{ .compatible = "qcom,sc8280xp-tlmm" },
	{ .compatible = "qcom,x1e80100-tlmm" },
	{}
};

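/*
 * Resolve the TLMM register region to use: index 0 when the "name" module
 * parameter is left at "default_region", otherwise the "reg-names" entry
 * matching the parameter.
 */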
static int tlmm_reg_base(struct device_node *tlmm, struct resource *res)
{
	const char **reg_names;
	int count;
	int ret;
	int i;

	count = of_property_count_strings(tlmm, "reg-names");
	if (count <= 0) {
		pr_err("failed to find tlmm reg name\n");
		return count;
	}

	reg_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
	if (!reg_names)
		return -ENOMEM;

	ret = of_property_read_string_array(tlmm, "reg-names", reg_names, count);
	if (ret != count) {
		kfree(reg_names);
		return -EINVAL;
	}

	if (!strcmp(tlmm_reg_name, "default_region")) {
		ret = of_address_to_resource(tlmm, 0, res);
	} else {
		for (i = 0; i < count; i++) {
			if (!strcmp(reg_names[i], tlmm_reg_name)) {
				ret = of_address_to_resource(tlmm, i, res);
				break;
			}
		}
		if (i == count)
			ret = -EINVAL;
	}

	kfree(reg_names);

	return ret;
}

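/*
 * Suite setup: locate an allow-listed TLMM node, map its register space,
 * create an IRQ mapping for the selected GPIO (two-cell specifier: GPIO
 * number and trigger flags, the latter left as 0 here) and precompute the
 * pull-down/pull-up register values used to drive the line.
 */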
static int tlmm_test_init_suite(struct kunit_suite *suite)
{
	struct of_phandle_args args = {};
	struct resource res;
	int ret;
	u32 val;

	if (tlmm_test_gpio < 0) {
		pr_err("use the tlmm-test.gpio module parameter to specify which GPIO to use\n");
		return -EINVAL;
	}

	struct device_node *tlmm __free(device_node) = of_find_matching_node(NULL, tlmm_of_match);
	if (!tlmm) {
		pr_err("failed to find tlmm node\n");
		return -EINVAL;
	}

	ret = tlmm_reg_base(tlmm, &res);
	if (ret < 0)
		return ret;

	tlmm_suite.base = ioremap(res.start, resource_size(&res));
	if (!tlmm_suite.base)
		return -ENOMEM;

	args.np = tlmm;
	args.args_count = 2;
	args.args[0] = tlmm_test_gpio;
	args.args[1] = 0;

	tlmm_suite.irq = irq_create_of_mapping(&args);
	if (!tlmm_suite.irq) {
		pr_err("failed to map TLMM irq %d\n", args.args[0]);
		goto err_unmap;
	}

	tlmm_suite.reg = tlmm_suite.base + tlmm_test_gpio * TLMM_REG_SIZE;
	val = readl(tlmm_suite.reg) & ~MSM_PULL_MASK;
	tlmm_suite.low_val = val | MSM_PULL_DOWN;
	tlmm_suite.high_val = val | MSM_PULL_UP;

	return 0;

err_unmap:
	iounmap(tlmm_suite.base);
	tlmm_suite.base = NULL;
	return -EINVAL;
}

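/*
 * Suite teardown: dispose of the IRQ mapping and unmap the TLMM registers.
 */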
static void tlmm_test_exit_suite(struct kunit_suite *suite)
{
	irq_dispose_mapping(tlmm_suite.irq);
	iounmap(tlmm_suite.base);

	tlmm_suite.base = NULL;
	tlmm_suite.irq = -1;
}

static struct kunit_case tlmm_test_cases[] = {
	KUNIT_CASE(tlmm_test_silent_rising),
	KUNIT_CASE(tlmm_test_silent_falling),
	KUNIT_CASE(tlmm_test_silent_low),
	KUNIT_CASE(tlmm_test_silent_high),
	KUNIT_CASE(tlmm_test_rising),
	KUNIT_CASE(tlmm_test_falling),
	KUNIT_CASE(tlmm_test_high),
	KUNIT_CASE(tlmm_test_low),
	KUNIT_CASE(tlmm_test_rising_in_handler),
	KUNIT_CASE(tlmm_test_falling_in_handler),
	KUNIT_CASE(tlmm_test_thread_rising),
	KUNIT_CASE(tlmm_test_thread_falling),
	KUNIT_CASE(tlmm_test_thread_high),
	KUNIT_CASE(tlmm_test_thread_low),
	KUNIT_CASE(tlmm_test_thread_rising_in_handler),
	KUNIT_CASE(tlmm_test_thread_falling_in_handler),
	KUNIT_CASE(tlmm_test_rising_while_disabled),
	{}
};

static struct kunit_suite tlmm_test_suite = {
	.name = "tlmm-test",
	.init = tlmm_test_init,
	.suite_init = tlmm_test_init_suite,
	.suite_exit = tlmm_test_exit_suite,
	.test_cases = tlmm_test_cases,
};

kunit_test_suites(&tlmm_test_suite);

MODULE_DESCRIPTION("Qualcomm TLMM test");
MODULE_LICENSE("GPL");