xref: /linux/drivers/clk/clk_test.c (revision 566ab427f827b0256d3e8ce0235d088e6a9c28bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KUnit tests for the clk framework
4  */
5 #include <linux/clk.h>
6 #include <linux/clk-provider.h>
7 #include <linux/of.h>
8 #include <linux/platform_device.h>
9 
10 /* Needed for clk_hw_get_clk() */
11 #include "clk.h"
12 
13 #include <kunit/clk.h>
14 #include <kunit/of.h>
15 #include <kunit/platform_device.h>
16 #include <kunit/test.h>
17 
18 #include "clk_parent_data_test.h"
19 
20 static const struct clk_ops empty_clk_ops = { };
21 
22 #define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
23 #define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
24 #define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
25 
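/*
 * A dummy clock: a clk_hw plus the rate it claims to run at. All of
 * the clk_dummy_*_ops below operate on this context.
 */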
26 struct clk_dummy_context {
27 	struct clk_hw hw;
28 	unsigned long rate;
29 };
30 
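/* Report whatever rate is currently stored in the dummy context. */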
31 static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
32 					   unsigned long parent_rate)
33 {
34 	struct clk_dummy_context *ctx =
35 		container_of(hw, struct clk_dummy_context, hw);
36 
37 	return ctx->rate;
38 }
39 
40 static int clk_dummy_determine_rate(struct clk_hw *hw,
41 				    struct clk_rate_request *req)
42 {
43 	/* Just return the same rate without modifying it */
44 	return 0;
45 }
46 
47 static int clk_dummy_maximize_rate(struct clk_hw *hw,
48 				   struct clk_rate_request *req)
49 {
50 	/*
51 	 * If there's a maximum set, always run the clock at the maximum
52 	 * allowed.
53 	 */
54 	if (req->max_rate < ULONG_MAX)
55 		req->rate = req->max_rate;
56 
57 	return 0;
58 }
59 
60 static int clk_dummy_minimize_rate(struct clk_hw *hw,
61 				   struct clk_rate_request *req)
62 {
63 	/*
64 	 * If there's a minimum set, always run the clock at the minimum
65 	 * allowed.
66 	 */
67 	if (req->min_rate > 0)
68 		req->rate = req->min_rate;
69 
70 	return 0;
71 }
72 
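/* Accept any rate and store it as the new dummy rate. */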
73 static int clk_dummy_set_rate(struct clk_hw *hw,
74 			      unsigned long rate,
75 			      unsigned long parent_rate)
76 {
77 	struct clk_dummy_context *ctx =
78 		container_of(hw, struct clk_dummy_context, hw);
79 
80 	ctx->rate = rate;
81 	return 0;
82 }
83 
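/*
 * Single-parent mux stubs: set_parent() only validates the index, and
 * get_parent() always reports parent 0.
 */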
84 static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
85 {
86 	if (index >= clk_hw_get_num_parents(hw))
87 		return -EINVAL;
88 
89 	return 0;
90 }
91 
92 static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
93 {
94 	return 0;
95 }
96 
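/*
 * Three flavours of rate ops for the dummy clock: one that accepts the
 * requested rate as-is, one that always runs at the maximum of the
 * allowed range, and one that always runs at the minimum.
 */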
97 static const struct clk_ops clk_dummy_rate_ops = {
98 	.recalc_rate = clk_dummy_recalc_rate,
99 	.determine_rate = clk_dummy_determine_rate,
100 	.set_rate = clk_dummy_set_rate,
101 };
102 
103 static const struct clk_ops clk_dummy_maximize_rate_ops = {
104 	.recalc_rate = clk_dummy_recalc_rate,
105 	.determine_rate = clk_dummy_maximize_rate,
106 	.set_rate = clk_dummy_set_rate,
107 };
108 
109 static const struct clk_ops clk_dummy_minimize_rate_ops = {
110 	.recalc_rate = clk_dummy_recalc_rate,
111 	.determine_rate = clk_dummy_minimize_rate,
112 	.set_rate = clk_dummy_set_rate,
113 };
114 
115 static const struct clk_ops clk_dummy_single_parent_ops = {
116 	/*
117 	 * FIXME: Even though we should probably be able to use
118 	 * __clk_mux_determine_rate() here, if we use it and call
119 	 * clk_round_rate() or clk_set_rate() with a rate lower than
120 	 * what all the parents can provide, it will return -EINVAL.
121 	 *
122 	 * This is due to its undocumented behaviour of always picking
123 	 * the closest rate higher than the requested one. If it can
124 	 * only get something lower, it considers that unacceptable
125 	 * and returns an error.
126 	 *
127 	 * This is somewhat inconsistent and creates a weird threshold:
128 	 * rates above the parent rate get rounded to what the parent
129 	 * can provide, while rates below it will simply return an
130 	 * error.
131 	 */
132 	.determine_rate = __clk_mux_determine_rate_closest,
133 	.set_parent = clk_dummy_single_set_parent,
134 	.get_parent = clk_dummy_single_get_parent,
135 };
136 
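/*
 * A mux with two dummy parents, tracking which one is currently
 * selected.
 */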
137 struct clk_multiple_parent_ctx {
138 	struct clk_dummy_context parents_ctx[2];
139 	struct clk_hw hw;
140 	u8 current_parent;
141 };
142 
143 static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
144 {
145 	struct clk_multiple_parent_ctx *ctx =
146 		container_of(hw, struct clk_multiple_parent_ctx, hw);
147 
148 	if (index >= clk_hw_get_num_parents(hw))
149 		return -EINVAL;
150 
151 	ctx->current_parent = index;
152 
153 	return 0;
154 }
155 
156 static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
157 {
158 	struct clk_multiple_parent_ctx *ctx =
159 		container_of(hw, struct clk_multiple_parent_ctx, hw);
160 
161 	return ctx->current_parent;
162 }
163 
164 static const struct clk_ops clk_multiple_parents_mux_ops = {
165 	.get_parent = clk_multiple_parents_mux_get_parent,
166 	.set_parent = clk_multiple_parents_mux_set_parent,
167 	.determine_rate = __clk_mux_determine_rate_closest,
168 };
169 
170 static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
171 	.determine_rate = clk_hw_determine_rate_no_reparent,
172 	.get_parent = clk_multiple_parents_mux_get_parent,
173 	.set_parent = clk_multiple_parents_mux_set_parent,
174 };
175 
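/*
 * Register a single parentless dummy clock with the given ops, running
 * at DUMMY_CLOCK_INIT_RATE, and stash its context in test->priv.
 */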
176 static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
177 {
178 	struct clk_dummy_context *ctx;
179 	struct clk_init_data init = { };
180 	int ret;
181 
182 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
183 	if (!ctx)
184 		return -ENOMEM;
185 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
186 	test->priv = ctx;
187 
188 	init.name = "test_dummy_rate";
189 	init.ops = ops;
190 	ctx->hw.init = &init;
191 
192 	ret = clk_hw_register(NULL, &ctx->hw);
193 	if (ret)
194 		return ret;
195 
196 	return 0;
197 }
198 
199 static int clk_test_init(struct kunit *test)
200 {
201 	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
202 }
203 
204 static int clk_maximize_test_init(struct kunit *test)
205 {
206 	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
207 }
208 
209 static int clk_minimize_test_init(struct kunit *test)
210 {
211 	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
212 }
213 
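/* Common exit path: unregister the dummy clock set up at init time. */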
214 static void clk_test_exit(struct kunit *test)
215 {
216 	struct clk_dummy_context *ctx = test->priv;
217 
218 	clk_hw_unregister(&ctx->hw);
219 }
220 
221 /*
222  * Test that the actual rate matches what is returned by clk_get_rate()
223  */
224 static void clk_test_get_rate(struct kunit *test)
225 {
226 	struct clk_dummy_context *ctx = test->priv;
227 	struct clk_hw *hw = &ctx->hw;
228 	struct clk *clk = clk_hw_get_clk(hw, NULL);
229 	unsigned long rate;
230 
231 	rate = clk_get_rate(clk);
232 	KUNIT_ASSERT_GT(test, rate, 0);
233 	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
234 
235 	clk_put(clk);
236 }
237 
238 /*
239  * Test that, after a call to clk_set_rate(), the rate returned by
240  * clk_get_rate() matches.
241  *
242  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
243  * modify the requested rate, which is the case with clk_dummy_rate_ops.
244  */
245 static void clk_test_set_get_rate(struct kunit *test)
246 {
247 	struct clk_dummy_context *ctx = test->priv;
248 	struct clk_hw *hw = &ctx->hw;
249 	struct clk *clk = clk_hw_get_clk(hw, NULL);
250 	unsigned long rate;
251 
252 	KUNIT_ASSERT_EQ(test,
253 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
254 			0);
255 
256 	rate = clk_get_rate(clk);
257 	KUNIT_ASSERT_GT(test, rate, 0);
258 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
259 
260 	clk_put(clk);
261 }
262 
263 /*
264  * Test that, after several calls to clk_set_rate(), the rate returned
265  * by clk_get_rate() matches the last one.
266  *
267  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
268  * modify the requested rate, which is the case with clk_dummy_rate_ops.
269  */
270 static void clk_test_set_set_get_rate(struct kunit *test)
271 {
272 	struct clk_dummy_context *ctx = test->priv;
273 	struct clk_hw *hw = &ctx->hw;
274 	struct clk *clk = clk_hw_get_clk(hw, NULL);
275 	unsigned long rate;
276 
277 	KUNIT_ASSERT_EQ(test,
278 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
279 			0);
280 
281 	KUNIT_ASSERT_EQ(test,
282 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
283 			0);
284 
285 	rate = clk_get_rate(clk);
286 	KUNIT_ASSERT_GT(test, rate, 0);
287 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
288 
289 	clk_put(clk);
290 }
291 
292 /*
293  * Test that clk_round_rate() and clk_set_rate() are consistent and will
294  * return the same frequency.
295  */
296 static void clk_test_round_set_get_rate(struct kunit *test)
297 {
298 	struct clk_dummy_context *ctx = test->priv;
299 	struct clk_hw *hw = &ctx->hw;
300 	struct clk *clk = clk_hw_get_clk(hw, NULL);
301 	unsigned long set_rate;
302 	long rounded_rate;
303 
304 	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
305 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
306 	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
307 
308 	KUNIT_ASSERT_EQ(test,
309 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
310 			0);
311 
312 	set_rate = clk_get_rate(clk);
313 	KUNIT_ASSERT_GT(test, set_rate, 0);
314 	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
315 
316 	clk_put(clk);
317 }
318 
319 static struct kunit_case clk_test_cases[] = {
320 	KUNIT_CASE(clk_test_get_rate),
321 	KUNIT_CASE(clk_test_set_get_rate),
322 	KUNIT_CASE(clk_test_set_set_get_rate),
323 	KUNIT_CASE(clk_test_round_set_get_rate),
324 	{}
325 };
326 
327 /*
328  * Test suite for a basic rate clock, without any parent.
329  *
330  * These tests exercise the rate API with simple scenarios.
331  */
332 static struct kunit_suite clk_test_suite = {
333 	.name = "clk-test",
334 	.init = clk_test_init,
335 	.exit = clk_test_exit,
336 	.test_cases = clk_test_cases,
337 };
338 
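/*
 * Register a parentless dummy clock with CLK_GET_RATE_NOCACHE, so that
 * clk_get_rate() always recomputes the rate from ctx->rate instead of
 * using the cached value.
 */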
339 static int clk_uncached_test_init(struct kunit *test)
340 {
341 	struct clk_dummy_context *ctx;
342 	int ret;
343 
344 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
345 	if (!ctx)
346 		return -ENOMEM;
347 	test->priv = ctx;
348 
349 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
350 	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
351 					     &clk_dummy_rate_ops,
352 					     CLK_GET_RATE_NOCACHE);
353 
354 	ret = clk_hw_register(NULL, &ctx->hw);
355 	if (ret)
356 		return ret;
357 
358 	return 0;
359 }
360 
361 /*
362  * Test that for an uncached clock, the clock framework doesn't cache
363  * the rate and clk_get_rate() will return the underlying clock rate
364  * even if it changed.
365  */
366 static void clk_test_uncached_get_rate(struct kunit *test)
367 {
368 	struct clk_dummy_context *ctx = test->priv;
369 	struct clk_hw *hw = &ctx->hw;
370 	struct clk *clk = clk_hw_get_clk(hw, NULL);
371 	unsigned long rate;
372 
373 	rate = clk_get_rate(clk);
374 	KUNIT_ASSERT_GT(test, rate, 0);
375 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
376 
377 	/* We change the rate behind the clock framework's back */
378 	ctx->rate = DUMMY_CLOCK_RATE_1;
379 	rate = clk_get_rate(clk);
380 	KUNIT_ASSERT_GT(test, rate, 0);
381 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
382 
383 	clk_put(clk);
384 }
385 
386 /*
387  * Test that for an uncached clock, clk_set_rate_range() will work
388  * properly if the rate hasn't changed.
389  */
390 static void clk_test_uncached_set_range(struct kunit *test)
391 {
392 	struct clk_dummy_context *ctx = test->priv;
393 	struct clk_hw *hw = &ctx->hw;
394 	struct clk *clk = clk_hw_get_clk(hw, NULL);
395 	unsigned long rate;
396 
397 	KUNIT_ASSERT_EQ(test,
398 			clk_set_rate_range(clk,
399 					   DUMMY_CLOCK_RATE_1,
400 					   DUMMY_CLOCK_RATE_2),
401 			0);
402 
403 	rate = clk_get_rate(clk);
404 	KUNIT_ASSERT_GT(test, rate, 0);
405 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
406 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
407 
408 	clk_put(clk);
409 }
410 
411 /*
412  * Test that for an uncached clock, clk_set_rate_range() will work
413  * properly if the rate has changed in hardware.
414  *
415  * In this case, it means that if the rate wasn't initially in the range
416  * we're trying to set, but got changed at some point into the range
417  * without the kernel knowing about it, its rate shouldn't be affected.
418  */
419 static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
420 {
421 	struct clk_dummy_context *ctx = test->priv;
422 	struct clk_hw *hw = &ctx->hw;
423 	struct clk *clk = clk_hw_get_clk(hw, NULL);
424 	unsigned long rate;
425 
426 	/* We change the rate behind the clock framework's back */
427 	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
428 	KUNIT_ASSERT_EQ(test,
429 			clk_set_rate_range(clk,
430 					   DUMMY_CLOCK_RATE_1,
431 					   DUMMY_CLOCK_RATE_2),
432 			0);
433 
434 	rate = clk_get_rate(clk);
435 	KUNIT_ASSERT_GT(test, rate, 0);
436 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
437 
438 	clk_put(clk);
439 }
440 
441 static struct kunit_case clk_uncached_test_cases[] = {
442 	KUNIT_CASE(clk_test_uncached_get_rate),
443 	KUNIT_CASE(clk_test_uncached_set_range),
444 	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
445 	{}
446 };
447 
448 /*
449  * Test suite for a basic, uncached, rate clock, without any parent.
450  *
451  * These tests exercise the rate API with simple scenarios.
452  */
453 static struct kunit_suite clk_uncached_test_suite = {
454 	.name = "clk-uncached-test",
455 	.init = clk_uncached_test_init,
456 	.exit = clk_test_exit,
457 	.test_cases = clk_uncached_test_cases,
458 };
459 
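/*
 * Register two dummy parents (parent-0 at DUMMY_CLOCK_RATE_1, parent-1
 * at DUMMY_CLOCK_RATE_2) and a mux on top of them with
 * CLK_SET_RATE_PARENT, initially fed by parent-0.
 */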
460 static int
461 clk_multiple_parents_mux_test_init(struct kunit *test)
462 {
463 	struct clk_multiple_parent_ctx *ctx;
464 	const char *parents[2] = { "parent-0", "parent-1"};
465 	int ret;
466 
467 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
468 	if (!ctx)
469 		return -ENOMEM;
470 	test->priv = ctx;
471 
472 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
473 							    &clk_dummy_rate_ops,
474 							    0);
475 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
476 	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
477 	if (ret)
478 		return ret;
479 
480 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
481 							    &clk_dummy_rate_ops,
482 							    0);
483 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
484 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
485 	if (ret)
486 		return ret;
487 
488 	ctx->current_parent = 0;
489 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
490 					   &clk_multiple_parents_mux_ops,
491 					   CLK_SET_RATE_PARENT);
492 	ret = clk_hw_register(NULL, &ctx->hw);
493 	if (ret)
494 		return ret;
495 
496 	return 0;
497 }
498 
499 static void
500 clk_multiple_parents_mux_test_exit(struct kunit *test)
501 {
502 	struct clk_multiple_parent_ctx *ctx = test->priv;
503 
504 	clk_hw_unregister(&ctx->hw);
505 	clk_hw_unregister(&ctx->parents_ctx[0].hw);
506 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
507 }
508 
509 /*
510  * Test that for a clock with multiple parents, clk_get_parent()
511  * actually returns the current one.
512  */
513 static void
514 clk_test_multiple_parents_mux_get_parent(struct kunit *test)
515 {
516 	struct clk_multiple_parent_ctx *ctx = test->priv;
517 	struct clk_hw *hw = &ctx->hw;
518 	struct clk *clk = clk_hw_get_clk(hw, NULL);
519 	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
520 
521 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
522 
523 	clk_put(parent);
524 	clk_put(clk);
525 }
526 
527 /*
528  * Test that for a clock with multiple parents, clk_has_parent()
529  * actually reports all of them as parents.
530  */
531 static void
532 clk_test_multiple_parents_mux_has_parent(struct kunit *test)
533 {
534 	struct clk_multiple_parent_ctx *ctx = test->priv;
535 	struct clk_hw *hw = &ctx->hw;
536 	struct clk *clk = clk_hw_get_clk(hw, NULL);
537 	struct clk *parent;
538 
539 	parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
540 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
541 	clk_put(parent);
542 
543 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
544 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
545 	clk_put(parent);
546 
547 	clk_put(clk);
548 }
549 
550 /*
551  * Test that for a clock with multiple parents, if we set a range on
552  * that clock and the parent is changed, its rate after the reparenting
553  * is still within the range we asked for.
554  *
555  * FIXME: clk_set_parent() only does the reparenting but doesn't
556  * reevaluate whether the new clock rate is within its boundaries or
557  * not.
558  */
559 static void
560 clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
561 {
562 	struct clk_multiple_parent_ctx *ctx = test->priv;
563 	struct clk_hw *hw = &ctx->hw;
564 	struct clk *clk = clk_hw_get_clk(hw, NULL);
565 	struct clk *parent1, *parent2;
566 	unsigned long rate;
567 	int ret;
568 
569 	kunit_skip(test, "This needs to be fixed in the core.");
570 
571 	parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
572 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
573 	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
574 
575 	parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
576 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
577 
578 	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
579 	KUNIT_ASSERT_EQ(test, ret, 0);
580 
581 	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
582 	KUNIT_ASSERT_EQ(test, ret, 0);
583 
584 	ret = clk_set_rate_range(clk,
585 				 DUMMY_CLOCK_RATE_1 - 1000,
586 				 DUMMY_CLOCK_RATE_1 + 1000);
587 	KUNIT_ASSERT_EQ(test, ret, 0);
588 
589 	ret = clk_set_parent(clk, parent2);
590 	KUNIT_ASSERT_EQ(test, ret, 0);
591 
592 	rate = clk_get_rate(clk);
593 	KUNIT_ASSERT_GT(test, rate, 0);
594 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
595 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
596 
597 	clk_put(parent2);
598 	clk_put(parent1);
599 	clk_put(clk);
600 }
601 
602 static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
603 	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
604 	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
605 	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
606 	{}
607 };
608 
609 /*
610  * Test suite for a basic mux clock with two parents, with
611  * CLK_SET_RATE_PARENT on the child.
612  *
613  * These tests exercise the consumer API and check that the state of the
614  * child and parents are sane and consistent.
615  */
616 static struct kunit_suite
617 clk_multiple_parents_mux_test_suite = {
618 	.name = "clk-multiple-parents-mux-test",
619 	.init = clk_multiple_parents_mux_test_init,
620 	.exit = clk_multiple_parents_mux_test_exit,
621 	.test_cases = clk_multiple_parents_mux_test_cases,
622 };
623 
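/*
 * Register a two-parent mux whose default parent ("missing-parent") is
 * never registered, so the mux starts orphan; only "proper-parent" is
 * actually registered.
 */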
624 static int
625 clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
626 {
627 	struct clk_multiple_parent_ctx *ctx;
628 	const char *parents[2] = { "missing-parent", "proper-parent"};
629 	int ret;
630 
631 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
632 	if (!ctx)
633 		return -ENOMEM;
634 	test->priv = ctx;
635 
636 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
637 							    &clk_dummy_rate_ops,
638 							    0);
639 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
640 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
641 	if (ret)
642 		return ret;
643 
644 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
645 					   &clk_multiple_parents_mux_ops,
646 					   CLK_SET_RATE_PARENT);
647 	ret = clk_hw_register(NULL, &ctx->hw);
648 	if (ret)
649 		return ret;
650 
651 	return 0;
652 }
653 
654 static void
655 clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
656 {
657 	struct clk_multiple_parent_ctx *ctx = test->priv;
658 
659 	clk_hw_unregister(&ctx->hw);
660 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
661 }
662 
663 /*
664  * Test that, for a mux whose current parent hasn't been registered yet and is
665  * thus orphan, clk_get_parent() will return NULL.
666  */
667 static void
668 clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
669 {
670 	struct clk_multiple_parent_ctx *ctx = test->priv;
671 	struct clk_hw *hw = &ctx->hw;
672 	struct clk *clk = clk_hw_get_clk(hw, NULL);
673 
674 	KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
675 
676 	clk_put(clk);
677 }
678 
679 /*
680  * Test that, for a mux whose current parent hasn't been registered yet,
681  * calling clk_set_parent() to a valid parent will properly update the
682  * mux parent and its orphan status.
683  */
684 static void
685 clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
686 {
687 	struct clk_multiple_parent_ctx *ctx = test->priv;
688 	struct clk_hw *hw = &ctx->hw;
689 	struct clk *clk = clk_hw_get_clk(hw, NULL);
690 	struct clk *parent, *new_parent;
691 	int ret;
692 
693 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
694 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
695 
696 	ret = clk_set_parent(clk, parent);
697 	KUNIT_ASSERT_EQ(test, ret, 0);
698 
699 	new_parent = clk_get_parent(clk);
700 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_parent);
701 	KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
702 
703 	clk_put(parent);
704 	clk_put(clk);
705 }
706 
707 /*
708  * Test that, for a mux that started orphan but got switched to a valid
709  * parent, calling clk_drop_range() on the mux won't affect the parent
710  * rate.
711  */
712 static void
713 clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
714 {
715 	struct clk_multiple_parent_ctx *ctx = test->priv;
716 	struct clk_hw *hw = &ctx->hw;
717 	struct clk *clk = clk_hw_get_clk(hw, NULL);
718 	struct clk *parent;
719 	unsigned long parent_rate, new_parent_rate;
720 	int ret;
721 
722 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
723 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
724 
725 	parent_rate = clk_get_rate(parent);
726 	KUNIT_ASSERT_GT(test, parent_rate, 0);
727 
728 	ret = clk_set_parent(clk, parent);
729 	KUNIT_ASSERT_EQ(test, ret, 0);
730 
731 	ret = clk_drop_range(clk);
732 	KUNIT_ASSERT_EQ(test, ret, 0);
733 
734 	new_parent_rate = clk_get_rate(clk);
735 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
736 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
737 
738 	clk_put(parent);
739 	clk_put(clk);
740 }
741 
742 /*
743  * Test that, for a mux that started orphan but got switched to a valid
744  * parent, the rate of the mux and its new parent are consistent.
745  */
746 static void
747 clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
748 {
749 	struct clk_multiple_parent_ctx *ctx = test->priv;
750 	struct clk_hw *hw = &ctx->hw;
751 	struct clk *clk = clk_hw_get_clk(hw, NULL);
752 	struct clk *parent;
753 	unsigned long parent_rate, rate;
754 	int ret;
755 
756 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
757 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
758 
759 	parent_rate = clk_get_rate(parent);
760 	KUNIT_ASSERT_GT(test, parent_rate, 0);
761 
762 	ret = clk_set_parent(clk, parent);
763 	KUNIT_ASSERT_EQ(test, ret, 0);
764 
765 	rate = clk_get_rate(clk);
766 	KUNIT_ASSERT_GT(test, rate, 0);
767 	KUNIT_EXPECT_EQ(test, parent_rate, rate);
768 
769 	clk_put(parent);
770 	clk_put(clk);
771 }
772 
773 /*
774  * Test that, for a mux that started orphan but got switched to a valid
775  * parent, calling clk_put() on the mux won't affect the parent rate.
776  */
777 static void
778 clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
779 {
780 	struct clk_multiple_parent_ctx *ctx = test->priv;
781 	struct clk *clk, *parent;
782 	unsigned long parent_rate, new_parent_rate;
783 	int ret;
784 
785 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
786 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
787 
788 	clk = clk_hw_get_clk(&ctx->hw, NULL);
789 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
790 
791 	parent_rate = clk_get_rate(parent);
792 	KUNIT_ASSERT_GT(test, parent_rate, 0);
793 
794 	ret = clk_set_parent(clk, parent);
795 	KUNIT_ASSERT_EQ(test, ret, 0);
796 
797 	clk_put(clk);
798 
799 	new_parent_rate = clk_get_rate(parent);
800 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
801 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
802 
803 	clk_put(parent);
804 }
805 
806 /*
807  * Test that, for a mux that started orphan but got switched to a valid
808  * parent, calling clk_set_rate_range() will affect the parent state if
809  * its rate is out of range.
810  */
811 static void
812 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
813 {
814 	struct clk_multiple_parent_ctx *ctx = test->priv;
815 	struct clk_hw *hw = &ctx->hw;
816 	struct clk *clk = clk_hw_get_clk(hw, NULL);
817 	struct clk *parent;
818 	unsigned long rate;
819 	int ret;
820 
821 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
822 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
823 
824 	ret = clk_set_parent(clk, parent);
825 	KUNIT_ASSERT_EQ(test, ret, 0);
826 
827 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
828 	KUNIT_ASSERT_EQ(test, ret, 0);
829 
830 	rate = clk_get_rate(clk);
831 	KUNIT_ASSERT_GT(test, rate, 0);
832 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
833 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
834 
835 	clk_put(parent);
836 	clk_put(clk);
837 }
838 
839 /*
840  * Test that, for a mux that started orphan but got switched to a valid
841  * parent, calling clk_set_rate_range() won't affect the parent state if
842  * its rate is within range.
843  */
844 static void
845 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
846 {
847 	struct clk_multiple_parent_ctx *ctx = test->priv;
848 	struct clk_hw *hw = &ctx->hw;
849 	struct clk *clk = clk_hw_get_clk(hw, NULL);
850 	struct clk *parent;
851 	unsigned long parent_rate, new_parent_rate;
852 	int ret;
853 
854 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
855 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
856 
857 	parent_rate = clk_get_rate(parent);
858 	KUNIT_ASSERT_GT(test, parent_rate, 0);
859 
860 	ret = clk_set_parent(clk, parent);
861 	KUNIT_ASSERT_EQ(test, ret, 0);
862 
863 	ret = clk_set_rate_range(clk,
864 				 DUMMY_CLOCK_INIT_RATE - 1000,
865 				 DUMMY_CLOCK_INIT_RATE + 1000);
866 	KUNIT_ASSERT_EQ(test, ret, 0);
867 
868 	new_parent_rate = clk_get_rate(parent);
869 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
870 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
871 
872 	clk_put(parent);
873 	clk_put(clk);
874 }
875 
876 /*
877  * Test that, for a mux whose current parent hasn't been registered yet,
878  * calling clk_set_rate_range() will succeed, and will be taken into
879  * account when rounding a rate.
880  */
881 static void
882 clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
883 {
884 	struct clk_multiple_parent_ctx *ctx = test->priv;
885 	struct clk_hw *hw = &ctx->hw;
886 	struct clk *clk = clk_hw_get_clk(hw, NULL);
887 	long rate;
888 	int ret;
889 
890 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
891 	KUNIT_ASSERT_EQ(test, ret, 0);
892 
893 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
894 	KUNIT_ASSERT_GT(test, rate, 0);
895 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
896 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
897 
898 	clk_put(clk);
899 }
900 
901 /*
902  * Test that, for a mux that started orphan, was assigned a rate and
903  * then got switched to a valid parent, its rate is eventually within
904  * range.
905  *
906  * FIXME: Even though we update the rate as part of clk_set_parent(), we
907  * don't evaluate whether that new rate is within range and needs to be
908  * adjusted.
909  */
910 static void
911 clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
912 {
913 	struct clk_multiple_parent_ctx *ctx = test->priv;
914 	struct clk_hw *hw = &ctx->hw;
915 	struct clk *clk = clk_hw_get_clk(hw, NULL);
916 	struct clk *parent;
917 	unsigned long rate;
918 	int ret;
919 
920 	kunit_skip(test, "This needs to be fixed in the core.");
921 
922 	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
923 
924 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
925 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
926 
927 	ret = clk_set_parent(clk, parent);
928 	KUNIT_ASSERT_EQ(test, ret, 0);
929 
930 	rate = clk_get_rate(clk);
931 	KUNIT_ASSERT_GT(test, rate, 0);
932 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
933 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
934 
935 	clk_put(parent);
936 	clk_put(clk);
937 }
938 
939 static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
940 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
941 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
942 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
943 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
944 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
945 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
946 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
947 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
948 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
949 	{}
950 };
951 
952 /*
953  * Test suite for a basic mux clock with two parents. The default parent
954  * isn't registered, only the second parent is. By default, the clock
955  * will thus be orphan.
956  *
957  * These tests exercise the behaviour of the consumer API when dealing
958  * with an orphan clock, and how we deal with the transition to a valid
959  * parent.
960  */
961 static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
962 	.name = "clk-orphan-transparent-multiple-parent-mux-test",
963 	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
964 	.exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
965 	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
966 };
967 
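/* A clock with a single dummy parent. */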
968 struct clk_single_parent_ctx {
969 	struct clk_dummy_context parent_ctx;
970 	struct clk_hw hw;
971 };
972 
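/*
 * Register a dummy parent and then a single-parent child with
 * CLK_SET_RATE_PARENT; the parent comes first, so the child is never
 * orphan.
 */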
973 static int clk_single_parent_mux_test_init(struct kunit *test)
974 {
975 	struct clk_single_parent_ctx *ctx;
976 	int ret;
977 
978 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
979 	if (!ctx)
980 		return -ENOMEM;
981 	test->priv = ctx;
982 
983 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
984 	ctx->parent_ctx.hw.init =
985 		CLK_HW_INIT_NO_PARENT("parent-clk",
986 				      &clk_dummy_rate_ops,
987 				      0);
988 
989 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
990 	if (ret)
991 		return ret;
992 
993 	ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
994 				   &clk_dummy_single_parent_ops,
995 				   CLK_SET_RATE_PARENT);
996 
997 	ret = clk_hw_register(NULL, &ctx->hw);
998 	if (ret)
999 		return ret;
1000 
1001 	return 0;
1002 }
1003 
1004 static void
1005 clk_single_parent_mux_test_exit(struct kunit *test)
1006 {
1007 	struct clk_single_parent_ctx *ctx = test->priv;
1008 
1009 	clk_hw_unregister(&ctx->hw);
1010 	clk_hw_unregister(&ctx->parent_ctx.hw);
1011 }
1012 
1013 /*
1014  * Test that for a clock with a single parent, clk_get_parent() actually
1015  * returns the parent.
1016  */
1017 static void
1018 clk_test_single_parent_mux_get_parent(struct kunit *test)
1019 {
1020 	struct clk_single_parent_ctx *ctx = test->priv;
1021 	struct clk_hw *hw = &ctx->hw;
1022 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1023 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1024 
1025 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
1026 
1027 	clk_put(parent);
1028 	clk_put(clk);
1029 }
1030 
1031 /*
1032  * Test that for a clock with a single parent, clk_has_parent() actually
1033  * reports it as a parent.
1034  */
1035 static void
1036 clk_test_single_parent_mux_has_parent(struct kunit *test)
1037 {
1038 	struct clk_single_parent_ctx *ctx = test->priv;
1039 	struct clk_hw *hw = &ctx->hw;
1040 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1041 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1042 
1043 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1044 
1045 	clk_put(parent);
1046 	clk_put(clk);
1047 }
1048 
1049 /*
1050  * Test that for a clock that can't modify its rate and with a single
1051  * parent, if we set disjoint ranges on the parent and then the child,
1052  * the second call will return an error.
1053  *
1054  * FIXME: clk_set_rate_range() only considers the current clock when
1055  * evaluating whether ranges are disjoint, and not the upstream clocks'
1056  * ranges.
1057  */
1058 static void
1059 clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1060 {
1061 	struct clk_single_parent_ctx *ctx = test->priv;
1062 	struct clk_hw *hw = &ctx->hw;
1063 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1064 	struct clk *parent;
1065 	int ret;
1066 
1067 	kunit_skip(test, "This needs to be fixed in the core.");
1068 
1069 	parent = clk_get_parent(clk);
1070 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1071 
1072 	ret = clk_set_rate_range(parent, 1000, 2000);
1073 	KUNIT_ASSERT_EQ(test, ret, 0);
1074 
1075 	ret = clk_set_rate_range(clk, 3000, 4000);
1076 	KUNIT_EXPECT_LT(test, ret, 0);
1077 
1078 	clk_put(clk);
1079 }
1080 
1081 /*
1082  * Test that for a clock that can't modify its rate and with a single
1083  * parent, if we set disjoint ranges on the child and then the parent,
1084  * the second call will return an error.
1085  *
1086  * FIXME: clk_set_rate_range() only considers the current clock when
1087  * evaluating whether ranges are disjoint, and not the downstream clocks'
1088  * ranges.
1089  */
1090 static void
1091 clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1092 {
1093 	struct clk_single_parent_ctx *ctx = test->priv;
1094 	struct clk_hw *hw = &ctx->hw;
1095 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1096 	struct clk *parent;
1097 	int ret;
1098 
1099 	kunit_skip(test, "This needs to be fixed in the core.");
1100 
1101 	parent = clk_get_parent(clk);
1102 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1103 
1104 	ret = clk_set_rate_range(clk, 1000, 2000);
1105 	KUNIT_ASSERT_EQ(test, ret, 0);
1106 
1107 	ret = clk_set_rate_range(parent, 3000, 4000);
1108 	KUNIT_EXPECT_LT(test, ret, 0);
1109 
1110 	clk_put(clk);
1111 }
1112 
1113 /*
1114  * Test that for a clock that can't modify its rate and with a single
1115  * parent, if we set a range on the parent and then call
1116  * clk_round_rate(), the boundaries of the parent are taken into
1117  * account.
1118  */
1119 static void
1120 clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1121 {
1122 	struct clk_single_parent_ctx *ctx = test->priv;
1123 	struct clk_hw *hw = &ctx->hw;
1124 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1125 	struct clk *parent;
1126 	long rate;
1127 	int ret;
1128 
1129 	parent = clk_get_parent(clk);
1130 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1131 
1132 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1133 	KUNIT_ASSERT_EQ(test, ret, 0);
1134 
1135 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1136 	KUNIT_ASSERT_GT(test, rate, 0);
1137 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1138 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1139 
1140 	clk_put(clk);
1141 }
1142 
1143 /*
1144  * Test that for a clock that can't modify its rate and with a single
1145  * parent, if we set a range on the parent and a more restrictive one on
1146  * the child, and then call clk_round_rate(), the boundaries of the
1147  * two clocks are taken into account.
1148  */
1149 static void
1150 clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1151 {
1152 	struct clk_single_parent_ctx *ctx = test->priv;
1153 	struct clk_hw *hw = &ctx->hw;
1154 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1155 	struct clk *parent;
1156 	long rate;
1157 	int ret;
1158 
1159 	parent = clk_get_parent(clk);
1160 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1161 
1162 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1163 	KUNIT_ASSERT_EQ(test, ret, 0);
1164 
1165 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1166 	KUNIT_ASSERT_EQ(test, ret, 0);
1167 
1168 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1169 	KUNIT_ASSERT_GT(test, rate, 0);
1170 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1171 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1172 
1173 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1174 	KUNIT_ASSERT_GT(test, rate, 0);
1175 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1176 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1177 
1178 	clk_put(clk);
1179 }
1180 
1181 /*
1182  * Test that for a clock that can't modify its rate and with a single
1183  * parent, if we set a range on the child and a more restrictive one on
1184  * the parent, and then call clk_round_rate(), the boundaries of the
1185  * two clocks are taken into account.
1186  */
1187 static void
1188 clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1189 {
1190 	struct clk_single_parent_ctx *ctx = test->priv;
1191 	struct clk_hw *hw = &ctx->hw;
1192 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1193 	struct clk *parent;
1194 	long rate;
1195 	int ret;
1196 
1197 	parent = clk_get_parent(clk);
1198 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1199 
1200 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1201 	KUNIT_ASSERT_EQ(test, ret, 0);
1202 
1203 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1204 	KUNIT_ASSERT_EQ(test, ret, 0);
1205 
1206 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1207 	KUNIT_ASSERT_GT(test, rate, 0);
1208 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1209 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1210 
1211 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1212 	KUNIT_ASSERT_GT(test, rate, 0);
1213 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1214 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1215 
1216 	clk_put(clk);
1217 }
1218 
1219 static struct kunit_case clk_single_parent_mux_test_cases[] = {
1220 	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
1221 	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
1222 	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
1223 	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
1224 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
1225 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
1226 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
1227 	{}
1228 };
1229 
1230 /*
1231  * Test suite for a basic mux clock with one parent, with
1232  * CLK_SET_RATE_PARENT on the child.
1233  *
1234  * These tests exercise the consumer API and check that the state of the
1235  * child and parent are sane and consistent.
1236  */
1237 static struct kunit_suite
1238 clk_single_parent_mux_test_suite = {
1239 	.name = "clk-single-parent-mux-test",
1240 	.init = clk_single_parent_mux_test_init,
1241 	.exit = clk_single_parent_mux_test_exit,
1242 	.test_cases = clk_single_parent_mux_test_cases,
1243 };
1244 
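/*
 * Register the child before its only parent, so the child starts out
 * orphan and gets adopted once "orphan_parent" is registered.
 */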
1245 static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1246 {
1247 	struct clk_single_parent_ctx *ctx;
1248 	struct clk_init_data init = { };
1249 	const char * const parents[] = { "orphan_parent" };
1250 	int ret;
1251 
1252 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1253 	if (!ctx)
1254 		return -ENOMEM;
1255 	test->priv = ctx;
1256 
1257 	init.name = "test_orphan_dummy_parent";
1258 	init.ops = &clk_dummy_single_parent_ops;
1259 	init.parent_names = parents;
1260 	init.num_parents = ARRAY_SIZE(parents);
1261 	init.flags = CLK_SET_RATE_PARENT;
1262 	ctx->hw.init = &init;
1263 
1264 	ret = clk_hw_register(NULL, &ctx->hw);
1265 	if (ret)
1266 		return ret;
1267 
1268 	memset(&init, 0, sizeof(init));
1269 	init.name = "orphan_parent";
1270 	init.ops = &clk_dummy_rate_ops;
1271 	ctx->parent_ctx.hw.init = &init;
1272 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1273 
1274 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1275 	if (ret)
1276 		return ret;
1277 
1278 	return 0;
1279 }
1280 
1281 /*
1282  * Test that a mux-only clock, with an initial rate within a range,
1283  * will still have the same rate after the range has been enforced.
1284  *
1285  * See:
1286  * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1287  */
1288 static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1289 {
1290 	struct clk_single_parent_ctx *ctx = test->priv;
1291 	struct clk_hw *hw = &ctx->hw;
1292 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1293 	unsigned long rate, new_rate;
1294 
1295 	rate = clk_get_rate(clk);
1296 	KUNIT_ASSERT_GT(test, rate, 0);
1297 
1298 	KUNIT_ASSERT_EQ(test,
1299 			clk_set_rate_range(clk,
1300 					   ctx->parent_ctx.rate - 1000,
1301 					   ctx->parent_ctx.rate + 1000),
1302 			0);
1303 
1304 	new_rate = clk_get_rate(clk);
1305 	KUNIT_ASSERT_GT(test, new_rate, 0);
1306 	KUNIT_EXPECT_EQ(test, rate, new_rate);
1307 
1308 	clk_put(clk);
1309 }
1310 
1311 static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
1312 	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
1313 	{}
1314 };
1315 
1316 /*
1317  * Test suite for a basic mux clock with one parent. The parent is
1318  * registered after its child. The clock will thus be an orphan when
1319  * registered, but will no longer be when the tests run.
1320  *
1321  * These tests make sure a clock that used to be orphan has a sane,
1322  * consistent, behaviour.
1323  */
1324 static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1325 	.name = "clk-orphan-transparent-single-parent-test",
1326 	.init = clk_orphan_transparent_single_parent_mux_test_init,
1327 	.exit = clk_single_parent_mux_test_exit,
1328 	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1329 };
1330 
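/* A two-level chain: root parent -> intermediate parent -> test clock. */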
1331 struct clk_single_parent_two_lvl_ctx {
1332 	struct clk_dummy_context parent_parent_ctx;
1333 	struct clk_dummy_context parent_ctx;
1334 	struct clk_hw hw;
1335 };
1336 
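/*
 * Register the intermediate parent and the leaf clock first, and the
 * root parent last, so the whole chain starts orphan until
 * "root-parent" shows up.
 */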
1337 static int
1338 clk_orphan_two_level_root_last_test_init(struct kunit *test)
1339 {
1340 	struct clk_single_parent_two_lvl_ctx *ctx;
1341 	int ret;
1342 
1343 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1344 	if (!ctx)
1345 		return -ENOMEM;
1346 	test->priv = ctx;
1347 
1348 	ctx->parent_ctx.hw.init =
1349 		CLK_HW_INIT("intermediate-parent",
1350 			    "root-parent",
1351 			    &clk_dummy_single_parent_ops,
1352 			    CLK_SET_RATE_PARENT);
1353 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1354 	if (ret)
1355 		return ret;
1356 
1357 	ctx->hw.init =
1358 		CLK_HW_INIT("test-clk", "intermediate-parent",
1359 			    &clk_dummy_single_parent_ops,
1360 			    CLK_SET_RATE_PARENT);
1361 	ret = clk_hw_register(NULL, &ctx->hw);
1362 	if (ret)
1363 		return ret;
1364 
1365 	ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1366 	ctx->parent_parent_ctx.hw.init =
1367 		CLK_HW_INIT_NO_PARENT("root-parent",
1368 				      &clk_dummy_rate_ops,
1369 				      0);
1370 	ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1371 	if (ret)
1372 		return ret;
1373 
1374 	return 0;
1375 }
1376 
1377 static void
1378 clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1379 {
1380 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1381 
1382 	clk_hw_unregister(&ctx->hw);
1383 	clk_hw_unregister(&ctx->parent_ctx.hw);
1384 	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1385 }
1386 
1387 /*
1388  * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1389  * will return the proper rate.
1390  */
1391 static void
1392 clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1393 {
1394 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1395 	struct clk_hw *hw = &ctx->hw;
1396 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1397 	unsigned long rate;
1398 
1399 	rate = clk_get_rate(clk);
1400 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1401 
1402 	clk_put(clk);
1403 }
1404 
1405 /*
1406  * Test that, for a clock whose parent used to be orphan,
1407  * clk_set_rate_range() won't affect its rate if it is already within
1408  * range.
1409  *
1410  * See (for Exynos 4210):
1411  * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1412  */
1413 static void
1414 clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1415 {
1416 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1417 	struct clk_hw *hw = &ctx->hw;
1418 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1419 	unsigned long rate;
1420 	int ret;
1421 
1422 	ret = clk_set_rate_range(clk,
1423 				 DUMMY_CLOCK_INIT_RATE - 1000,
1424 				 DUMMY_CLOCK_INIT_RATE + 1000);
1425 	KUNIT_ASSERT_EQ(test, ret, 0);
1426 
1427 	rate = clk_get_rate(clk);
1428 	KUNIT_ASSERT_GT(test, rate, 0);
1429 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1430 
1431 	clk_put(clk);
1432 }
1433 
1434 static struct kunit_case
1435 clk_orphan_two_level_root_last_test_cases[] = {
1436 	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
1437 	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
1438 	{}
1439 };
1440 
1441 /*
1442  * Test suite for a basic, transparent, clock with a parent that is also
1443  * such a clock. The parent's parent is registered last, while the
1444  * parent and its child are registered in that order. The intermediate
1445  * and leaf clocks will thus be orphan when registered, but the leaf
1446  * clock itself will always have its parent and will never be
1447  * reparented. Indeed, it's only orphan because its parent is.
1448  *
1449  * These tests exercise the behaviour of the consumer API when dealing
1450  * with an orphan clock, and how we deal with the transition to a valid
1451  * parent.
1452  */
1453 static struct kunit_suite
1454 clk_orphan_two_level_root_last_test_suite = {
1455 	.name = "clk-orphan-two-level-root-last-test",
1456 	.init = clk_orphan_two_level_root_last_test_init,
1457 	.exit = clk_orphan_two_level_root_last_test_exit,
1458 	.test_cases = clk_orphan_two_level_root_last_test_cases,
1459 };
1460 
1461 /*
1462  * Test that clk_set_rate_range won't return an error for a valid range
1463  * and that it will make sure the rate of the clock is within the
1464  * boundaries.
1465  */
1466 static void clk_range_test_set_range(struct kunit *test)
1467 {
1468 	struct clk_dummy_context *ctx = test->priv;
1469 	struct clk_hw *hw = &ctx->hw;
1470 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1471 	unsigned long rate;
1472 
1473 	KUNIT_ASSERT_EQ(test,
1474 			clk_set_rate_range(clk,
1475 					   DUMMY_CLOCK_RATE_1,
1476 					   DUMMY_CLOCK_RATE_2),
1477 			0);
1478 
1479 	rate = clk_get_rate(clk);
1480 	KUNIT_ASSERT_GT(test, rate, 0);
1481 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1482 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1483 
1484 	clk_put(clk);
1485 }
1486 
1487 /*
1488  * Test that calling clk_set_rate_range with a minimum rate higher than
1489  * the maximum rate returns an error.
1490  */
1491 static void clk_range_test_set_range_invalid(struct kunit *test)
1492 {
1493 	struct clk_dummy_context *ctx = test->priv;
1494 	struct clk_hw *hw = &ctx->hw;
1495 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1496 
1497 	KUNIT_EXPECT_LT(test,
1498 			clk_set_rate_range(clk,
1499 					   DUMMY_CLOCK_RATE_1 + 1000,
1500 					   DUMMY_CLOCK_RATE_1),
1501 			0);
1502 
1503 	clk_put(clk);
1504 }
1505 
1506 /*
1507  * Test that users can't set multiple disjoint ranges that would be
1508  * impossible to meet.
1509  */
1510 static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1511 {
1512 	struct clk_dummy_context *ctx = test->priv;
1513 	struct clk_hw *hw = &ctx->hw;
1514 	struct clk *user1, *user2;
1515 
1516 	user1 = clk_hw_get_clk(hw, NULL);
1517 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1518 
1519 	user2 = clk_hw_get_clk(hw, NULL);
1520 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1521 
1522 	KUNIT_ASSERT_EQ(test,
1523 			clk_set_rate_range(user1, 1000, 2000),
1524 			0);
1525 
1526 	KUNIT_EXPECT_LT(test,
1527 			clk_set_rate_range(user2, 3000, 4000),
1528 			0);
1529 
1530 	clk_put(user2);
1531 	clk_put(user1);
1532 }
1533 
1534 /*
1535  * Test that if our clock has some boundaries and we try to round a rate
1536  * lower than the minimum, the returned rate will be within range.
1537  */
1538 static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1539 {
1540 	struct clk_dummy_context *ctx = test->priv;
1541 	struct clk_hw *hw = &ctx->hw;
1542 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1543 	long rate;
1544 
1545 	KUNIT_ASSERT_EQ(test,
1546 			clk_set_rate_range(clk,
1547 					   DUMMY_CLOCK_RATE_1,
1548 					   DUMMY_CLOCK_RATE_2),
1549 			0);
1550 
1551 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1552 	KUNIT_ASSERT_GT(test, rate, 0);
1553 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1554 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1555 
1556 	clk_put(clk);
1557 }
1558 
1559 /*
1560  * Test that if our clock has some boundaries and we try to set a rate
1561  * lower than the minimum, the new rate will be within range.
1562  */
1563 static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1564 {
1565 	struct clk_dummy_context *ctx = test->priv;
1566 	struct clk_hw *hw = &ctx->hw;
1567 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1568 	unsigned long rate;
1569 
1570 	KUNIT_ASSERT_EQ(test,
1571 			clk_set_rate_range(clk,
1572 					   DUMMY_CLOCK_RATE_1,
1573 					   DUMMY_CLOCK_RATE_2),
1574 			0);
1575 
1576 	KUNIT_ASSERT_EQ(test,
1577 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1578 			0);
1579 
1580 	rate = clk_get_rate(clk);
1581 	KUNIT_ASSERT_GT(test, rate, 0);
1582 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1583 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1584 
1585 	clk_put(clk);
1586 }
1587 
1588 /*
1589  * Test that if our clock has some boundaries and we try to round and
1590  * set a rate lower than the minimum, the rate returned by
1591  * clk_round_rate() will be consistent with the new rate set by
1592  * clk_set_rate().
1593  */
1594 static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1595 {
1596 	struct clk_dummy_context *ctx = test->priv;
1597 	struct clk_hw *hw = &ctx->hw;
1598 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1599 	long rounded;
1600 
1601 	KUNIT_ASSERT_EQ(test,
1602 			clk_set_rate_range(clk,
1603 					   DUMMY_CLOCK_RATE_1,
1604 					   DUMMY_CLOCK_RATE_2),
1605 			0);
1606 
1607 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1608 	KUNIT_ASSERT_GT(test, rounded, 0);
1609 
1610 	KUNIT_ASSERT_EQ(test,
1611 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1612 			0);
1613 
1614 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1615 
1616 	clk_put(clk);
1617 }
1618 
1619 /*
1620  * Test that if our clock has some boundaries and we try to round a rate
1621  * higher than the maximum, the returned rate will be within range.
1622  */
1623 static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1624 {
1625 	struct clk_dummy_context *ctx = test->priv;
1626 	struct clk_hw *hw = &ctx->hw;
1627 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1628 	long rate;
1629 
1630 	KUNIT_ASSERT_EQ(test,
1631 			clk_set_rate_range(clk,
1632 					   DUMMY_CLOCK_RATE_1,
1633 					   DUMMY_CLOCK_RATE_2),
1634 			0);
1635 
1636 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1637 	KUNIT_ASSERT_GT(test, rate, 0);
1638 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1639 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1640 
1641 	clk_put(clk);
1642 }
1643 
1644 /*
1645  * Test that if our clock has some boundaries and we try to set a rate
1646  * higher than the maximum, the new rate will be within range.
1647  */
1648 static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1649 {
1650 	struct clk_dummy_context *ctx = test->priv;
1651 	struct clk_hw *hw = &ctx->hw;
1652 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1653 	unsigned long rate;
1654 
1655 	KUNIT_ASSERT_EQ(test,
1656 			clk_set_rate_range(clk,
1657 					   DUMMY_CLOCK_RATE_1,
1658 					   DUMMY_CLOCK_RATE_2),
1659 			0);
1660 
1661 	KUNIT_ASSERT_EQ(test,
1662 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1663 			0);
1664 
1665 	rate = clk_get_rate(clk);
1666 	KUNIT_ASSERT_GT(test, rate, 0);
1667 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1668 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1669 
1670 	clk_put(clk);
1671 }
1672 
1673 /*
1674  * Test that if our clock has some boundaries and we try to round and
1675  * set a rate higher than the maximum, the rate returned by
1676  * clk_round_rate() will be consistent with the new rate set by
1677  * clk_set_rate().
1678  */
1679 static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1680 {
1681 	struct clk_dummy_context *ctx = test->priv;
1682 	struct clk_hw *hw = &ctx->hw;
1683 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1684 	long rounded;
1685 
1686 	KUNIT_ASSERT_EQ(test,
1687 			clk_set_rate_range(clk,
1688 					   DUMMY_CLOCK_RATE_1,
1689 					   DUMMY_CLOCK_RATE_2),
1690 			0);
1691 
1692 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1693 	KUNIT_ASSERT_GT(test, rounded, 0);
1694 
1695 	KUNIT_ASSERT_EQ(test,
1696 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1697 			0);
1698 
1699 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1700 
1701 	clk_put(clk);
1702 }
1703 
1704 /*
1705  * Test that if our clock has a rate lower than the minimum set by a
1706  * call to clk_set_rate_range(), the rate will be raised to match the
1707  * new minimum.
1708  *
1709  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1710  * modify the requested rate, which is the case with clk_dummy_rate_ops.
1711  */
1712 static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1713 {
1714 	struct clk_dummy_context *ctx = test->priv;
1715 	struct clk_hw *hw = &ctx->hw;
1716 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1717 	unsigned long rate;
1718 
1719 	KUNIT_ASSERT_EQ(test,
1720 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1721 			0);
1722 
1723 	KUNIT_ASSERT_EQ(test,
1724 			clk_set_rate_range(clk,
1725 					   DUMMY_CLOCK_RATE_1,
1726 					   DUMMY_CLOCK_RATE_2),
1727 			0);
1728 
1729 	rate = clk_get_rate(clk);
1730 	KUNIT_ASSERT_GT(test, rate, 0);
1731 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1732 
1733 	clk_put(clk);
1734 }
1735 
1736 /*
1737  * Test that if our clock has a rate higher than the maximum set by a
1738  * call to clk_set_rate_range(), the rate will be lowered to match the
1739  * new maximum.
1740  *
1741  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1742  * modify the requested rate, which is the case with clk_dummy_rate_ops.
1743  */
1744 static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1745 {
1746 	struct clk_dummy_context *ctx = test->priv;
1747 	struct clk_hw *hw = &ctx->hw;
1748 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1749 	unsigned long rate;
1750 
1751 	KUNIT_ASSERT_EQ(test,
1752 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1753 			0);
1754 
1755 	KUNIT_ASSERT_EQ(test,
1756 			clk_set_rate_range(clk,
1757 					   DUMMY_CLOCK_RATE_1,
1758 					   DUMMY_CLOCK_RATE_2),
1759 			0);
1760 
1761 	rate = clk_get_rate(clk);
1762 	KUNIT_ASSERT_GT(test, rate, 0);
1763 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1764 
1765 	clk_put(clk);
1766 }
1767 
1768 static struct kunit_case clk_range_test_cases[] = {
1769 	KUNIT_CASE(clk_range_test_set_range),
1770 	KUNIT_CASE(clk_range_test_set_range_invalid),
1771 	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
1772 	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
1773 	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
1774 	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
1775 	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
1776 	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
1777 	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
1778 	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
1779 	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
1780 	{}
1781 };
1782 
1783 /*
1784  * Test suite for a basic rate clock, without any parent.
1785  *
1786  * These tests exercise the rate range API: clk_set_rate_range(),
1787  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1788  */
1789 static struct kunit_suite clk_range_test_suite = {
1790 	.name = "clk-range-test",
1791 	.init = clk_test_init,
1792 	.exit = clk_test_exit,
1793 	.test_cases = clk_range_test_cases,
1794 };
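
/*
 * These range suites can be run on their own with the KUnit wrapper, for
 * instance (assuming this file is built in with CONFIG_CLK_KUNIT_TEST=y in
 * the chosen kunitconfig):
 *
 *	./tools/testing/kunit/kunit.py run 'clk-range*'
 */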
1795 
1796 /*
1797  * Test that if we have several subsequent calls to
1798  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1799  * needed each and every time.
1800  *
1801  * With clk_dummy_maximize_rate_ops, this means that the rate will
1802  * trail along the maximum as it evolves.
1803  */
1804 static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1805 {
1806 	struct clk_dummy_context *ctx = test->priv;
1807 	struct clk_hw *hw = &ctx->hw;
1808 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1809 	unsigned long rate;
1810 
1811 	KUNIT_ASSERT_EQ(test,
1812 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1813 			0);
1814 
1815 	KUNIT_ASSERT_EQ(test,
1816 			clk_set_rate_range(clk,
1817 					   DUMMY_CLOCK_RATE_1,
1818 					   DUMMY_CLOCK_RATE_2),
1819 			0);
1820 
1821 	rate = clk_get_rate(clk);
1822 	KUNIT_ASSERT_GT(test, rate, 0);
1823 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1824 
1825 	KUNIT_ASSERT_EQ(test,
1826 			clk_set_rate_range(clk,
1827 					   DUMMY_CLOCK_RATE_1,
1828 					   DUMMY_CLOCK_RATE_2 - 1000),
1829 			0);
1830 
1831 	rate = clk_get_rate(clk);
1832 	KUNIT_ASSERT_GT(test, rate, 0);
1833 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1834 
1835 	KUNIT_ASSERT_EQ(test,
1836 			clk_set_rate_range(clk,
1837 					   DUMMY_CLOCK_RATE_1,
1838 					   DUMMY_CLOCK_RATE_2),
1839 			0);
1840 
1841 	rate = clk_get_rate(clk);
1842 	KUNIT_ASSERT_GT(test, rate, 0);
1843 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1844 
1845 	clk_put(clk);
1846 }
1847 
1848 /*
1849  * Test that if we have several subsequent calls to
1850  * clk_set_rate_range(), across multiple users, the core will reevaluate
1851  * whether a new rate is needed each and every time.
1852  *
1853  * With clk_dummy_maximize_rate_ops, this means that the rate will
1854  * trail along the maximum as it evolves.
1855  */
1856 static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1857 {
1858 	struct clk_dummy_context *ctx = test->priv;
1859 	struct clk_hw *hw = &ctx->hw;
1860 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1861 	struct clk *user1, *user2;
1862 	unsigned long rate;
1863 
1864 	user1 = clk_hw_get_clk(hw, NULL);
1865 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1866 
1867 	user2 = clk_hw_get_clk(hw, NULL);
1868 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1869 
1870 	KUNIT_ASSERT_EQ(test,
1871 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1872 			0);
1873 
1874 	KUNIT_ASSERT_EQ(test,
1875 			clk_set_rate_range(user1,
1876 					   0,
1877 					   DUMMY_CLOCK_RATE_2),
1878 			0);
1879 
1880 	rate = clk_get_rate(clk);
1881 	KUNIT_ASSERT_GT(test, rate, 0);
1882 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1883 
1884 	KUNIT_ASSERT_EQ(test,
1885 			clk_set_rate_range(user2,
1886 					   0,
1887 					   DUMMY_CLOCK_RATE_1),
1888 			0);
1889 
1890 	rate = clk_get_rate(clk);
1891 	KUNIT_ASSERT_GT(test, rate, 0);
1892 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1893 
1894 	KUNIT_ASSERT_EQ(test,
1895 			clk_drop_range(user2),
1896 			0);
1897 
1898 	rate = clk_get_rate(clk);
1899 	KUNIT_ASSERT_GT(test, rate, 0);
1900 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1901 
1902 	clk_put(user2);
1903 	clk_put(user1);
1904 	clk_put(clk);
1905 }
1906 
1907 /*
1908  * Test that if we have several subsequent calls to
1909  * clk_set_rate_range(), across multiple users, the core will reevaluate
1910  * whether a new rate is needed, including when a user drops its clock.
1911  *
1912  * With clk_dummy_maximize_rate_ops, this means that the rate will
1913  * trail along the maximum as it evolves.
1914  */
1915 static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1916 {
1917 	struct clk_dummy_context *ctx = test->priv;
1918 	struct clk_hw *hw = &ctx->hw;
1919 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1920 	struct clk *user1, *user2;
1921 	unsigned long rate;
1922 
1923 	user1 = clk_hw_get_clk(hw, NULL);
1924 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1925 
1926 	user2 = clk_hw_get_clk(hw, NULL);
1927 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1928 
1929 	KUNIT_ASSERT_EQ(test,
1930 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1931 			0);
1932 
1933 	KUNIT_ASSERT_EQ(test,
1934 			clk_set_rate_range(user1,
1935 					   0,
1936 					   DUMMY_CLOCK_RATE_2),
1937 			0);
1938 
1939 	rate = clk_get_rate(clk);
1940 	KUNIT_ASSERT_GT(test, rate, 0);
1941 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1942 
1943 	KUNIT_ASSERT_EQ(test,
1944 			clk_set_rate_range(user2,
1945 					   0,
1946 					   DUMMY_CLOCK_RATE_1),
1947 			0);
1948 
1949 	rate = clk_get_rate(clk);
1950 	KUNIT_ASSERT_GT(test, rate, 0);
1951 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1952 
1953 	clk_put(user2);
1954 
1955 	rate = clk_get_rate(clk);
1956 	KUNIT_ASSERT_GT(test, rate, 0);
1957 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1958 
1959 	clk_put(user1);
1960 	clk_put(clk);
1961 }
1962 
1963 static struct kunit_case clk_range_maximize_test_cases[] = {
1964 	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
1965 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
1966 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
1967 	{}
1968 };
1969 
1970 /*
1971  * Test suite for a basic rate clock, without any parent.
1972  *
1973  * These tests exercise the rate range API: clk_set_rate_range(),
1974  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1975  * driver that will always try to run at the highest possible rate.
1976  */
1977 static struct kunit_suite clk_range_maximize_test_suite = {
1978 	.name = "clk-range-maximize-test",
1979 	.init = clk_maximize_test_init,
1980 	.exit = clk_test_exit,
1981 	.test_cases = clk_range_maximize_test_cases,
1982 };
1983 
1984 /*
1985  * Test that if we have several subsequent calls to
1986  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1987  * needed each and every time.
1988  *
1989  * With clk_dummy_minimize_rate_ops, this means that the rate will
1990  * trail along the minimum as it evolves.
1991  */
1992 static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1993 {
1994 	struct clk_dummy_context *ctx = test->priv;
1995 	struct clk_hw *hw = &ctx->hw;
1996 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1997 	unsigned long rate;
1998 
1999 	KUNIT_ASSERT_EQ(test,
2000 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
2001 			0);
2002 
2003 	KUNIT_ASSERT_EQ(test,
2004 			clk_set_rate_range(clk,
2005 					   DUMMY_CLOCK_RATE_1,
2006 					   DUMMY_CLOCK_RATE_2),
2007 			0);
2008 
2009 	rate = clk_get_rate(clk);
2010 	KUNIT_ASSERT_GT(test, rate, 0);
2011 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2012 
2013 	KUNIT_ASSERT_EQ(test,
2014 			clk_set_rate_range(clk,
2015 					   DUMMY_CLOCK_RATE_1 + 1000,
2016 					   DUMMY_CLOCK_RATE_2),
2017 			0);
2018 
2019 	rate = clk_get_rate(clk);
2020 	KUNIT_ASSERT_GT(test, rate, 0);
2021 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
2022 
2023 	KUNIT_ASSERT_EQ(test,
2024 			clk_set_rate_range(clk,
2025 					   DUMMY_CLOCK_RATE_1,
2026 					   DUMMY_CLOCK_RATE_2),
2027 			0);
2028 
2029 	rate = clk_get_rate(clk);
2030 	KUNIT_ASSERT_GT(test, rate, 0);
2031 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2032 
2033 	clk_put(clk);
2034 }
2035 
2036 /*
2037  * Test that if we have several subsequent calls to
2038  * clk_set_rate_range(), across multiple users, the core will reevaluate
2039  * whether a new rate is needed each and every time.
2040  *
2041  * With clk_dummy_minimize_rate_ops, this means that the rate will
2042  * trail along the minimum as it evolves.
2043  */
2044 static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2045 {
2046 	struct clk_dummy_context *ctx = test->priv;
2047 	struct clk_hw *hw = &ctx->hw;
2048 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2049 	struct clk *user1, *user2;
2050 	unsigned long rate;
2051 
2052 	user1 = clk_hw_get_clk(hw, NULL);
2053 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2054 
2055 	user2 = clk_hw_get_clk(hw, NULL);
2056 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2057 
2058 	KUNIT_ASSERT_EQ(test,
2059 			clk_set_rate_range(user1,
2060 					   DUMMY_CLOCK_RATE_1,
2061 					   ULONG_MAX),
2062 			0);
2063 
2064 	rate = clk_get_rate(clk);
2065 	KUNIT_ASSERT_GT(test, rate, 0);
2066 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2067 
2068 	KUNIT_ASSERT_EQ(test,
2069 			clk_set_rate_range(user2,
2070 					   DUMMY_CLOCK_RATE_2,
2071 					   ULONG_MAX),
2072 			0);
2073 
2074 	rate = clk_get_rate(clk);
2075 	KUNIT_ASSERT_GT(test, rate, 0);
2076 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2077 
2078 	KUNIT_ASSERT_EQ(test,
2079 			clk_drop_range(user2),
2080 			0);
2081 
2082 	rate = clk_get_rate(clk);
2083 	KUNIT_ASSERT_GT(test, rate, 0);
2084 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2085 
2086 	clk_put(user2);
2087 	clk_put(user1);
2088 	clk_put(clk);
2089 }
2090 
2091 /*
2092  * Test that if we have several subsequent calls to
2093  * clk_set_rate_range(), across multiple users, the core will reevaluate
2094  * whether a new rate is needed, including when a user drops its clock.
2095  *
2096  * With clk_dummy_minimize_rate_ops, this means that the rate will
2097  * trail along the minimum as it evolves.
2098  */
2099 static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2100 {
2101 	struct clk_dummy_context *ctx = test->priv;
2102 	struct clk_hw *hw = &ctx->hw;
2103 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2104 	struct clk *user1, *user2;
2105 	unsigned long rate;
2106 
2107 	user1 = clk_hw_get_clk(hw, NULL);
2108 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2109 
2110 	user2 = clk_hw_get_clk(hw, NULL);
2111 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2112 
2113 	KUNIT_ASSERT_EQ(test,
2114 			clk_set_rate_range(user1,
2115 					   DUMMY_CLOCK_RATE_1,
2116 					   ULONG_MAX),
2117 			0);
2118 
2119 	rate = clk_get_rate(clk);
2120 	KUNIT_ASSERT_GT(test, rate, 0);
2121 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2122 
2123 	KUNIT_ASSERT_EQ(test,
2124 			clk_set_rate_range(user2,
2125 					   DUMMY_CLOCK_RATE_2,
2126 					   ULONG_MAX),
2127 			0);
2128 
2129 	rate = clk_get_rate(clk);
2130 	KUNIT_ASSERT_GT(test, rate, 0);
2131 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2132 
2133 	clk_put(user2);
2134 
2135 	rate = clk_get_rate(clk);
2136 	KUNIT_ASSERT_GT(test, rate, 0);
2137 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2138 
2139 	clk_put(user1);
2140 	clk_put(clk);
2141 }
2142 
2143 static struct kunit_case clk_range_minimize_test_cases[] = {
2144 	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
2145 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
2146 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
2147 	{}
2148 };
2149 
2150 /*
2151  * Test suite for a basic rate clock, without any parent.
2152  *
2153  * These tests exercise the rate range API: clk_set_rate_range(),
2154  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2155  * driver that will always try to run at the lowest possible rate.
2156  */
2157 static struct kunit_suite clk_range_minimize_test_suite = {
2158 	.name = "clk-range-minimize-test",
2159 	.init = clk_minimize_test_init,
2160 	.exit = clk_test_exit,
2161 	.test_cases = clk_range_minimize_test_cases,
2162 };
2163 
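/**
 * struct clk_leaf_mux_ctx - Context for the leaf mux determine_rate tests
 * @mux_ctx: mux at the top of the tree, along with its two parents
 * @hw: leaf clk under test
 * @parent: pass-through clk between @hw and the mux
 * @req: caller-provided rate request handed to @determine_rate_func, so the
 *       test can inspect the result afterwards
 * @determine_rate_func: determine_rate helper under test, run on the leaf's
 *                       parent
 */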
2164 struct clk_leaf_mux_ctx {
2165 	struct clk_multiple_parent_ctx mux_ctx;
2166 	struct clk_hw hw;
2167 	struct clk_hw parent;
2168 	struct clk_rate_request *req;
2169 	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2170 };
2171 
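/*
 * Forward the rate request to the leaf's parent and run the determine_rate
 * helper selected by the test case on it, saving the resulting request in
 * ctx->req so the test can check which best_parent_hw the helper picked.
 */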
2172 static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
2173 {
2174 	struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
2175 	int ret;
2176 	struct clk_rate_request *parent_req = ctx->req;
2177 
2178 	clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
2179 	ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
2180 	if (ret)
2181 		return ret;
2182 
2183 	req->rate = parent_req->rate;
2184 
2185 	return 0;
2186 }
2187 
2188 static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
2189 	.determine_rate = clk_leaf_mux_determine_rate,
2190 	.set_parent = clk_dummy_single_set_parent,
2191 	.get_parent = clk_dummy_single_get_parent,
2192 };
2193 
2194 static int
2195 clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2196 {
2197 	struct clk_leaf_mux_ctx *ctx;
2198 	const char *top_parents[2] = { "parent-0", "parent-1" };
2199 	int ret;
2200 
2201 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2202 	if (!ctx)
2203 		return -ENOMEM;
2204 	test->priv = ctx;
2205 
2206 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2207 								    &clk_dummy_rate_ops,
2208 								    0);
2209 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2210 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2211 	if (ret)
2212 		return ret;
2213 
2214 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2215 								    &clk_dummy_rate_ops,
2216 								    0);
2217 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2218 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2219 	if (ret)
2220 		return ret;
2221 
2222 	ctx->mux_ctx.current_parent = 0;
2223 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2224 						   &clk_multiple_parents_mux_ops,
2225 						   0);
2226 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2227 	if (ret)
2228 		return ret;
2229 
2230 	ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
2231 					  &empty_clk_ops, CLK_SET_RATE_PARENT);
2232 	ret = clk_hw_register(NULL, &ctx->parent);
2233 	if (ret)
2234 		return ret;
2235 
2236 	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
2237 				      &clk_leaf_mux_set_rate_parent_ops,
2238 				      CLK_SET_RATE_PARENT);
2239 	ret = clk_hw_register(NULL, &ctx->hw);
2240 	if (ret)
2241 		return ret;
2242 
2243 	return 0;
2244 }
2245 
2246 static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
2247 {
2248 	struct clk_leaf_mux_ctx *ctx = test->priv;
2249 
2250 	clk_hw_unregister(&ctx->hw);
2251 	clk_hw_unregister(&ctx->parent);
2252 	clk_hw_unregister(&ctx->mux_ctx.hw);
2253 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2254 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2255 }
2256 
2257 struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
2258 	const char *desc;
2259 	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2260 };
2261 
2262 static void
2263 clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
2264 		const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
2265 {
2266 	strcpy(desc, t->desc);
2267 }
2268 
2269 static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
2270 clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
2271 	{
2272 		/*
2273 		 * Test that __clk_determine_rate() on the parent that can't
2274 		 * change rate doesn't return a clk_rate_request structure with
2275 		 * the best_parent_hw pointer pointing to the parent.
2276 		 */
2277 		.desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
2278 		.determine_rate_func = __clk_determine_rate,
2279 	},
2280 	{
2281 		/*
2282 		 * Test that __clk_mux_determine_rate() on the parent that
2283 		 * can't change rate doesn't return a clk_rate_request
2284 		 * structure with the best_parent_hw pointer pointing to
2285 		 * the parent.
2286 		 */
2287 		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
2288 		.determine_rate_func = __clk_mux_determine_rate,
2289 	},
2290 	{
2291 		/*
2292 		 * Test that __clk_mux_determine_rate_closest() on the parent
2293 		 * that can't change rate doesn't return a clk_rate_request
2294 		 * structure with the best_parent_hw pointer pointing to
2295 		 * the parent.
2296 		 */
2297 		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
2298 		.determine_rate_func = __clk_mux_determine_rate_closest,
2299 	},
2300 	{
2301 		/*
2302 		 * Test that clk_hw_determine_rate_no_reparent() on the parent
2303 		 * that can't change rate doesn't return a clk_rate_request
2304 		 * structure with the best_parent_hw pointer pointing to
2305 		 * the parent.
2306 		 */
2307 		.desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
2308 		.determine_rate_func = clk_hw_determine_rate_no_reparent,
2309 	},
2310 };
2311 
2312 KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2313 		  clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
2314 		  clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)
2315 
2316 /*
2317  * Test that when a clk that can't change rate itself calls a function like
2318  * __clk_determine_rate() on its parent it doesn't get back a clk_rate_request
2319  * structure with its best_parent_hw pointer pointing to the clk_hw passed
2320  * into the determine_rate function. See commit 262ca38f4b6e ("clk: Stop
2321  * forwarding clk_rate_requests to the parent") for more background.
2322  */
2323 static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
2324 {
2325 	struct clk_leaf_mux_ctx *ctx = test->priv;
2326 	struct clk_hw *hw = &ctx->hw;
2327 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2328 	struct clk_rate_request req;
2329 	unsigned long rate;
2330 	const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;
2331 
2332 	test_param = test->param_value;
2333 	ctx->determine_rate_func = test_param->determine_rate_func;
2334 
2335 	ctx->req = &req;
2336 	rate = clk_get_rate(clk);
2337 	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2338 	KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));
2339 
2340 	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
2341 	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
2342 	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
2343 
2344 	clk_put(clk);
2345 }
2346 
2347 static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
2348 	KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2349 			 clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
2350 	{}
2351 };
2352 
2353 /*
2354  * Test suite for a clock whose parent is a pass-through clk whose parent is a
2355  * mux with multiple parents. The leaf and pass-through clocks have the
2356  * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
2357  * will then select which parent is the best fit for a given rate.
2358  *
2359  * These tests exercise the behaviour of muxes, and the proper selection
2360  * of parents.
2361  */
2362 static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
2363 	.name = "clk-leaf-mux-set-rate-parent",
2364 	.init = clk_leaf_mux_set_rate_parent_test_init,
2365 	.exit = clk_leaf_mux_set_rate_parent_test_exit,
2366 	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
2367 };
2368 
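/**
 * struct clk_mux_notifier_rate_change - Rate change recorded by the notifier
 * @done: true once the notification has been received
 * @old_rate: old rate reported by the notifier data
 * @new_rate: new rate reported by the notifier data
 * @wq: waitqueue woken up when the notification is recorded
 */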
2369 struct clk_mux_notifier_rate_change {
2370 	bool done;
2371 	unsigned long old_rate;
2372 	unsigned long new_rate;
2373 	wait_queue_head_t wq;
2374 };
2375 
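/**
 * struct clk_mux_notifier_ctx - Context for the mux notifier tests
 * @mux_ctx: mux with two parents
 * @clk: clk handle on the mux, used to register @clk_nb
 * @clk_nb: notifier block whose callback fills in the two records below
 * @pre_rate_change: PRE_RATE_CHANGE notification recorded by the callback
 * @post_rate_change: POST_RATE_CHANGE notification recorded by the callback
 */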
2376 struct clk_mux_notifier_ctx {
2377 	struct clk_multiple_parent_ctx mux_ctx;
2378 	struct clk *clk;
2379 	struct notifier_block clk_nb;
2380 	struct clk_mux_notifier_rate_change pre_rate_change;
2381 	struct clk_mux_notifier_rate_change post_rate_change;
2382 };
2383 
2384 #define NOTIFIER_TIMEOUT_MS 100
2385 
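/*
 * Record the old and new rates carried by PRE_RATE_CHANGE and
 * POST_RATE_CHANGE notifications and wake up the test waiting on the
 * corresponding waitqueue.
 */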
2386 static int clk_mux_notifier_callback(struct notifier_block *nb,
2387 				     unsigned long action, void *data)
2388 {
2389 	struct clk_notifier_data *clk_data = data;
2390 	struct clk_mux_notifier_ctx *ctx = container_of(nb,
2391 							struct clk_mux_notifier_ctx,
2392 							clk_nb);
2393 
2394 	if (action & PRE_RATE_CHANGE) {
2395 		ctx->pre_rate_change.old_rate = clk_data->old_rate;
2396 		ctx->pre_rate_change.new_rate = clk_data->new_rate;
2397 		ctx->pre_rate_change.done = true;
2398 		wake_up_interruptible(&ctx->pre_rate_change.wq);
2399 	}
2400 
2401 	if (action & POST_RATE_CHANGE) {
2402 		ctx->post_rate_change.old_rate = clk_data->old_rate;
2403 		ctx->post_rate_change.new_rate = clk_data->new_rate;
2404 		ctx->post_rate_change.done = true;
2405 		wake_up_interruptible(&ctx->post_rate_change.wq);
2406 	}
2407 
2408 	return 0;
2409 }
2410 
2411 static int clk_mux_notifier_test_init(struct kunit *test)
2412 {
2413 	struct clk_mux_notifier_ctx *ctx;
2414 	const char *top_parents[2] = { "parent-0", "parent-1" };
2415 	int ret;
2416 
2417 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2418 	if (!ctx)
2419 		return -ENOMEM;
2420 	test->priv = ctx;
2421 	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2422 	init_waitqueue_head(&ctx->pre_rate_change.wq);
2423 	init_waitqueue_head(&ctx->post_rate_change.wq);
2424 
2425 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2426 								    &clk_dummy_rate_ops,
2427 								    0);
2428 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2429 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2430 	if (ret)
2431 		return ret;
2432 
2433 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2434 								    &clk_dummy_rate_ops,
2435 								    0);
2436 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2437 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2438 	if (ret)
2439 		return ret;
2440 
2441 	ctx->mux_ctx.current_parent = 0;
2442 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2443 						   &clk_multiple_parents_mux_ops,
2444 						   0);
2445 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2446 	if (ret)
2447 		return ret;
2448 
2449 	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2450 	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2451 	if (ret)
2452 		return ret;
2453 
2454 	return 0;
2455 }
2456 
2457 static void clk_mux_notifier_test_exit(struct kunit *test)
2458 {
2459 	struct clk_mux_notifier_ctx *ctx = test->priv;
2460 	struct clk *clk = ctx->clk;
2461 
2462 	clk_notifier_unregister(clk, &ctx->clk_nb);
2463 	clk_put(clk);
2464 
2465 	clk_hw_unregister(&ctx->mux_ctx.hw);
2466 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2467 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2468 }
2469 
2470 /*
2471  * Test that if we have a notifier registered on a mux, the core
2472  * will notify us when we switch to another parent, and with the proper
2473  * old and new rates.
2474  */
2475 static void clk_mux_notifier_set_parent_test(struct kunit *test)
2476 {
2477 	struct clk_mux_notifier_ctx *ctx = test->priv;
2478 	struct clk_hw *hw = &ctx->mux_ctx.hw;
2479 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2480 	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2481 	int ret;
2482 
2483 	ret = clk_set_parent(clk, new_parent);
2484 	KUNIT_ASSERT_EQ(test, ret, 0);
2485 
2486 	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2487 					       ctx->pre_rate_change.done,
2488 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2489 	KUNIT_ASSERT_GT(test, ret, 0);
2490 
2491 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2492 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2493 
2494 	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2495 					       ctx->post_rate_change.done,
2496 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2497 	KUNIT_ASSERT_GT(test, ret, 0);
2498 
2499 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2500 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2501 
2502 	clk_put(new_parent);
2503 	clk_put(clk);
2504 }
2505 
2506 static struct kunit_case clk_mux_notifier_test_cases[] = {
2507 	KUNIT_CASE(clk_mux_notifier_set_parent_test),
2508 	{}
2509 };
2510 
2511 /*
2512  * Test suite for a mux with multiple parents, and a notifier registered
2513  * on the mux.
2514  *
2515  * These tests exercise the behaviour of notifiers.
2516  */
2517 static struct kunit_suite clk_mux_notifier_test_suite = {
2518 	.name = "clk-mux-notifier",
2519 	.init = clk_mux_notifier_test_init,
2520 	.exit = clk_mux_notifier_test_exit,
2521 	.test_cases = clk_mux_notifier_test_cases,
2522 };
2523 
2524 static int
2525 clk_mux_no_reparent_test_init(struct kunit *test)
2526 {
2527 	struct clk_multiple_parent_ctx *ctx;
2528 	const char *parents[2] = { "parent-0", "parent-1"};
2529 	int ret;
2530 
2531 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2532 	if (!ctx)
2533 		return -ENOMEM;
2534 	test->priv = ctx;
2535 
2536 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2537 							    &clk_dummy_rate_ops,
2538 							    0);
2539 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2540 	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
2541 	if (ret)
2542 		return ret;
2543 
2544 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2545 							    &clk_dummy_rate_ops,
2546 							    0);
2547 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2548 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
2549 	if (ret)
2550 		return ret;
2551 
2552 	ctx->current_parent = 0;
2553 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
2554 					   &clk_multiple_parents_no_reparent_mux_ops,
2555 					   0);
2556 	ret = clk_hw_register(NULL, &ctx->hw);
2557 	if (ret)
2558 		return ret;
2559 
2560 	return 0;
2561 }
2562 
2563 static void
2564 clk_mux_no_reparent_test_exit(struct kunit *test)
2565 {
2566 	struct clk_multiple_parent_ctx *ctx = test->priv;
2567 
2568 	clk_hw_unregister(&ctx->hw);
2569 	clk_hw_unregister(&ctx->parents_ctx[0].hw);
2570 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
2571 }
2572 
2573 /*
2574  * Test that if we have a mux that cannot change parent and we call
2575  * clk_round_rate() on it with a rate that should cause it to change
2576  * parent, it won't.
2577  */
2578 static void clk_mux_no_reparent_round_rate(struct kunit *test)
2579 {
2580 	struct clk_multiple_parent_ctx *ctx = test->priv;
2581 	struct clk_hw *hw = &ctx->hw;
2582 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2583 	struct clk *other_parent, *parent;
2584 	unsigned long other_parent_rate;
2585 	unsigned long parent_rate;
2586 	long rounded_rate;
2587 
2588 	parent = clk_get_parent(clk);
2589 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2590 
2591 	parent_rate = clk_get_rate(parent);
2592 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2593 
2594 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2595 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2596 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2597 
2598 	other_parent_rate = clk_get_rate(other_parent);
2599 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2600 	clk_put(other_parent);
2601 
2602 	rounded_rate = clk_round_rate(clk, other_parent_rate);
2603 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
2604 	KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
2605 
2606 	clk_put(clk);
2607 }
2608 
2609 /*
2610  * Test that if we have a mux that cannot change parent and we call
2611  * clk_set_rate() on it with a rate that should cause it to change
2612  * parent, it won't.
2613  */
2614 static void clk_mux_no_reparent_set_rate(struct kunit *test)
2615 {
2616 	struct clk_multiple_parent_ctx *ctx = test->priv;
2617 	struct clk_hw *hw = &ctx->hw;
2618 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2619 	struct clk *other_parent, *parent;
2620 	unsigned long other_parent_rate;
2621 	unsigned long parent_rate;
2622 	unsigned long rate;
2623 	int ret;
2624 
2625 	parent = clk_get_parent(clk);
2626 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2627 
2628 	parent_rate = clk_get_rate(parent);
2629 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2630 
2631 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2632 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2633 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2634 
2635 	other_parent_rate = clk_get_rate(other_parent);
2636 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2637 	clk_put(other_parent);
2638 
2639 	ret = clk_set_rate(clk, other_parent_rate);
2640 	KUNIT_ASSERT_EQ(test, ret, 0);
2641 
2642 	rate = clk_get_rate(clk);
2643 	KUNIT_ASSERT_GT(test, rate, 0);
2644 	KUNIT_EXPECT_EQ(test, rate, parent_rate);
2645 
2646 	clk_put(clk);
2647 }
2648 
2649 static struct kunit_case clk_mux_no_reparent_test_cases[] = {
2650 	KUNIT_CASE(clk_mux_no_reparent_round_rate),
2651 	KUNIT_CASE(clk_mux_no_reparent_set_rate),
2652 	{}
2653 };
2654 
2655 /*
2656  * Test suite for a clock mux that isn't allowed to change parent, using
2657  * the clk_hw_determine_rate_no_reparent() helper.
2658  *
2659  * These tests exercise that helper, and the proper selection of
2660  * rates and parents.
2661  */
2662 static struct kunit_suite clk_mux_no_reparent_test_suite = {
2663 	.name = "clk-mux-no-reparent",
2664 	.init = clk_mux_no_reparent_test_init,
2665 	.exit = clk_mux_no_reparent_test_exit,
2666 	.test_cases = clk_mux_no_reparent_test_cases,
2667 };
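
/*
 * For reference, a mux that must not be reparented on rate changes would
 * typically use clk_hw_determine_rate_no_reparent() directly as its
 * .determine_rate callback. A minimal sketch, with hypothetical
 * foo_mux_get_parent()/foo_mux_set_parent() callbacks, could look like:
 *
 *	static const struct clk_ops foo_mux_no_reparent_ops = {
 *		.determine_rate	= clk_hw_determine_rate_no_reparent,
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *	};
 */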
2668 
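/**
 * struct clk_register_clk_parent_data_test_case - clk_parent_data test parameters
 * @desc: test case description
 * @pdata: clk_parent_data used as the single parent of the clk under test
 */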
2669 struct clk_register_clk_parent_data_test_case {
2670 	const char *desc;
2671 	struct clk_parent_data pdata;
2672 };
2673 
2674 static void
2675 clk_register_clk_parent_data_test_case_to_desc(
2676 		const struct clk_register_clk_parent_data_test_case *t, char *desc)
2677 {
2678 	strcpy(desc, t->desc);
2679 }
2680 
2681 static const struct clk_register_clk_parent_data_test_case
2682 clk_register_clk_parent_data_of_cases[] = {
2683 	{
2684 		/*
2685 		 * Test that a clk registered with a struct device_node can
2686 		 * find a parent based on struct clk_parent_data::index.
2687 		 */
2688 		.desc = "clk_parent_data_of_index_test",
2689 		.pdata.index = 0,
2690 	},
2691 	{
2692 		/*
2693 		 * Test that a clk registered with a struct device_node can
2694 		 * find a parent based on struct clk_parent_data::fwname.
2695 		 * find a parent based on struct clk_parent_data::fw_name.
2696 		.desc = "clk_parent_data_of_fwname_test",
2697 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2698 	},
2699 	{
2700 		/*
2701 		 * Test that a clk registered with a struct device_node can
2702 		 * find a parent based on struct clk_parent_data::name.
2703 		 */
2704 		.desc = "clk_parent_data_of_name_test",
2705 		/* The index must be negative to indicate firmware not used */
2706 		.pdata.index = -1,
2707 		.pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
2708 	},
2709 	{
2710 		/*
2711 		 * Test that a clk registered with a struct device_node can
2712 		 * find a parent based on struct
2713 		 * clk_parent_data::{fw_name,name}.
2714 		 */
2715 		.desc = "clk_parent_data_of_fwname_name_test",
2716 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2717 		.pdata.name = "not_matching",
2718 	},
2719 	{
2720 		/*
2721 		 * Test that a clk registered with a struct device_node can
2722 		 * find a parent based on struct clk_parent_data::{index,name}.
2723 		 * Index takes priority.
2724 		 */
2725 		.desc = "clk_parent_data_of_index_name_priority_test",
2726 		.pdata.index = 0,
2727 		.pdata.name = "not_matching",
2728 	},
2729 	{
2730 		/*
2731 		 * Test that a clk registered with a struct device_node can
2732 		 * find a parent based on struct
2733 		 * clk_parent_data::{index,fw_name,name}. The fw_name takes
2734 		 * priority over index and name.
2735 		 */
2736 		.desc = "clk_parent_data_of_index_fwname_name_priority_test",
2737 		.pdata.index = 1,
2738 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2739 		.pdata.name = "not_matching",
2740 	},
2741 };
2742 
2743 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
2744 		  clk_register_clk_parent_data_test_case_to_desc)
2745 
2746 /**
2747  * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
2748  * @np: device node of clk under test
2749  * @hw: clk_hw for clk under test
2750  */
2751 struct clk_register_clk_parent_data_of_ctx {
2752 	struct device_node *np;
2753 	struct clk_hw hw;
2754 };
2755 
2756 static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
2757 {
2758 	struct clk_register_clk_parent_data_of_ctx *ctx;
2759 
2760 	KUNIT_ASSERT_EQ(test, 0,
2761 			of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
2762 
2763 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2764 	if (!ctx)
2765 		return -ENOMEM;
2766 	test->priv = ctx;
2767 
2768 	ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
2769 	if (!ctx->np)
2770 		return -ENODEV;
2771 
2772 	of_node_put_kunit(test, ctx->np);
2773 
2774 	return 0;
2775 }
2776 
2777 /*
2778  * Test that a clk registered with a struct device_node can find a parent based on
2779  * struct clk_parent_data when the hw member isn't set.
2780  */
2781 static void clk_register_clk_parent_data_of_test(struct kunit *test)
2782 {
2783 	struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
2784 	struct clk_hw *parent_hw;
2785 	const struct clk_register_clk_parent_data_test_case *test_param;
2786 	struct clk_init_data init = { };
2787 	struct clk *expected_parent, *actual_parent;
2788 
2789 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
2790 
2791 	expected_parent = of_clk_get_kunit(test, ctx->np, 0);
2792 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2793 
2794 	test_param = test->param_value;
2795 	init.parent_data = &test_param->pdata;
2796 	init.num_parents = 1;
2797 	init.name = "parent_data_of_test_clk";
2798 	init.ops = &clk_dummy_single_parent_ops;
2799 	ctx->hw.init = &init;
2800 	KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
2801 
2802 	parent_hw = clk_hw_get_parent(&ctx->hw);
2803 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2804 
2805 	actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2806 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2807 
2808 	KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2809 }
2810 
2811 static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
2812 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
2813 			 clk_register_clk_parent_data_of_test_gen_params),
2814 	{}
2815 };
2816 
2817 /*
2818  * Test suite for registering clks with struct clk_parent_data and a struct
2819  * device_node.
2820  */
2821 static struct kunit_suite clk_register_clk_parent_data_of_suite = {
2822 	.name = "clk_register_clk_parent_data_of",
2823 	.init = clk_register_clk_parent_data_of_test_init,
2824 	.test_cases = clk_register_clk_parent_data_of_test_cases,
2825 };
2826 
2827 /**
2828  * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
2829  * @dev: device of clk under test
2830  * @hw: clk_hw for clk under test
2831  * @pdrv: driver to attach to find @dev
2832  */
2833 struct clk_register_clk_parent_data_device_ctx {
2834 	struct device *dev;
2835 	struct clk_hw hw;
2836 	struct platform_driver pdrv;
2837 };
2838 
2839 static inline struct clk_register_clk_parent_data_device_ctx *
2840 clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
2841 {
2842 	return container_of(to_platform_driver(pdev->dev.driver),
2843 			    struct clk_register_clk_parent_data_device_ctx, pdrv);
2844 }
2845 
2846 static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
2847 {
2848 	struct clk_register_clk_parent_data_device_ctx *ctx;
2849 
2850 	ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
2851 	ctx->dev = &pdev->dev;
2852 
2853 	return 0;
2854 }
2855 
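/*
 * Register a platform driver matching the "test,clk-parent-data" node from
 * the applied overlay, so that the probe above can capture the struct device
 * later used to register the clks under test.
 */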
2856 static void clk_register_clk_parent_data_device_driver(struct kunit *test)
2857 {
2858 	struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
2859 	static const struct of_device_id match_table[] = {
2860 		{ .compatible = "test,clk-parent-data" },
2861 		{ }
2862 	};
2863 
2864 	ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
2865 	ctx->pdrv.driver.of_match_table = match_table;
2866 	ctx->pdrv.driver.name = __func__;
2867 	ctx->pdrv.driver.owner = THIS_MODULE;
2868 
2869 	KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
2870 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
2871 }
2872 
2873 static const struct clk_register_clk_parent_data_test_case
2874 clk_register_clk_parent_data_device_cases[] = {
2875 	{
2876 		/*
2877 		 * Test that a clk registered with a struct device can find a
2878 		 * parent based on struct clk_parent_data::index.
2879 		 */
2880 		.desc = "clk_parent_data_device_index_test",
2881 		.pdata.index = 1,
2882 	},
2883 	{
2884 		/*
2885 		 * Test that a clk registered with a struct device can find a
2886 		 * parent based on struct clk_parent_data::fw_name.
2887 		 */
2888 		.desc = "clk_parent_data_device_fwname_test",
2889 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2890 	},
2891 	{
2892 		/*
2893 		 * Test that a clk registered with a struct device can find a
2894 		 * parent based on struct clk_parent_data::name.
2895 		 */
2896 		.desc = "clk_parent_data_device_name_test",
2897 		/* The index must be negative to indicate firmware not used */
2898 		.pdata.index = -1,
2899 		.pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2900 	},
2901 	{
2902 		/*
2903 		 * Test that a clk registered with a struct device can find a
2904 		 * parent based on struct clk_parent_data::{fw_name,name}.
2905 		 */
2906 		.desc = "clk_parent_data_device_fwname_name_test",
2907 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2908 		.pdata.name = "not_matching",
2909 	},
2910 	{
2911 		/*
2912 		 * Test that a clk registered with a struct device can find a
2913 		 * parent based on struct clk_parent_data::{index,name}. Index
2914 		 * takes priority.
2915 		 */
2916 		.desc = "clk_parent_data_device_index_name_priority_test",
2917 		.pdata.index = 1,
2918 		.pdata.name = "not_matching",
2919 	},
2920 	{
2921 		/*
2922 		 * Test that a clk registered with a struct device can find a
2923 		 * parent based on struct clk_parent_data::{index,fw_name,name}.
2924 		 * The fw_name takes priority over index and name.
2925 		 */
2926 		.desc = "clk_parent_data_device_index_fwname_name_priority_test",
2927 		.pdata.index = 0,
2928 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2929 		.pdata.name = "not_matching",
2930 	},
2931 };
2932 
2933 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
2934 		  clk_register_clk_parent_data_device_cases,
2935 		  clk_register_clk_parent_data_test_case_to_desc)
2936 
2937 /*
2938  * Test that a clk registered with a struct device can find a parent based on
2939  * struct clk_parent_data when the hw member isn't set.
2940  */
2941 static void clk_register_clk_parent_data_device_test(struct kunit *test)
2942 {
2943 	struct clk_register_clk_parent_data_device_ctx *ctx;
2944 	const struct clk_register_clk_parent_data_test_case *test_param;
2945 	struct clk_hw *parent_hw;
2946 	struct clk_init_data init = { };
2947 	struct clk *expected_parent, *actual_parent;
2948 
2949 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2950 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
2951 	test->priv = ctx;
2952 
2953 	clk_register_clk_parent_data_device_driver(test);
2954 
2955 	expected_parent = clk_get_kunit(test, ctx->dev, "50");
2956 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2957 
2958 	test_param = test->param_value;
2959 	init.parent_data = &test_param->pdata;
2960 	init.num_parents = 1;
2961 	init.name = "parent_data_device_test_clk";
2962 	init.ops = &clk_dummy_single_parent_ops;
2963 	ctx->hw.init = &init;
2964 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
2965 
2966 	parent_hw = clk_hw_get_parent(&ctx->hw);
2967 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2968 
2969 	actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2970 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2971 
2972 	KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2973 }
2974 
2975 static const struct clk_register_clk_parent_data_test_case
2976 clk_register_clk_parent_data_device_hw_cases[] = {
2977 	{
2978 		/*
2979 		 * Test that a clk registered with a struct device can find a
2980 		 * parent based on struct clk_parent_data::hw.
2981 		 */
2982 		.desc = "clk_parent_data_device_hw_index_test",
2983 		/* The index must be negative to indicate firmware not used */
2984 		.pdata.index = -1,
2985 	},
2986 	{
2987 		/*
2988 		 * Test that a clk registered with a struct device can find a
2989 		 * parent based on struct clk_parent_data::hw when
2990 		 * struct clk_parent_data::fw_name is set.
2991 		 */
2992 		.desc = "clk_parent_data_device_hw_fwname_test",
2993 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2994 	},
2995 	{
2996 		/*
2997 		 * Test that a clk registered with a struct device can find a
2998 		 * parent based on struct clk_parent_data::hw when struct
2999 		 * clk_parent_data::name is set.
3000 		 */
3001 		.desc = "clk_parent_data_device_hw_name_test",
3002 		/* The index must be negative to indicate firmware not used */
3003 		.pdata.index = -1,
3004 		.pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
3005 	},
3006 	{
3007 		/*
3008 		 * Test that a clk registered with a struct device can find a
3009 		 * parent based on struct clk_parent_data::hw when struct
3010 		 * clk_parent_data::{fw_name,name} are set.
3011 		 */
3012 		.desc = "clk_parent_data_device_hw_fwname_name_test",
3013 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
3014 		.pdata.name = "not_matching",
3015 	},
3016 	{
3017 		/*
3018 		 * Test that a clk registered with a struct device can find a
3019 		 * parent based on struct clk_parent_data::hw when struct
3020 		 * clk_parent_data::index is set. The hw pointer takes
3021 		 * priority.
3022 		 */
3023 		.desc = "clk_parent_data_device_hw_index_priority_test",
3024 		.pdata.index = 0,
3025 	},
3026 	{
3027 		/*
3028 		 * Test that a clk registered with a struct device can find a
3029 		 * parent based on struct clk_parent_data::hw when
3030 		 * struct clk_parent_data::{index,fw_name,name} are set.
3031 		 * The hw pointer takes priority over everything else.
3032 		 */
3033 		.desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
3034 		.pdata.index = 0,
3035 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
3036 		.pdata.name = "not_matching",
3037 	},
3038 };
3039 
3040 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
3041 		  clk_register_clk_parent_data_device_hw_cases,
3042 		  clk_register_clk_parent_data_test_case_to_desc)
3043 
3044 /*
3045  * Test that a clk registered with a struct device can find a
3046  * parent based on struct clk_parent_data::hw.
3047  */
3048 static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
3049 {
3050 	struct clk_register_clk_parent_data_device_ctx *ctx;
3051 	const struct clk_register_clk_parent_data_test_case *test_param;
3052 	struct clk_dummy_context *parent;
3053 	struct clk_hw *parent_hw;
3054 	struct clk_parent_data pdata = { };
3055 	struct clk_init_data init = { };
3056 
3057 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
3058 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
3059 	test->priv = ctx;
3060 
3061 	clk_register_clk_parent_data_device_driver(test);
3062 
3063 	parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
3064 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
3065 
3066 	parent_hw = &parent->hw;
3067 	parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
3068 						&clk_dummy_rate_ops, 0);
3069 
3070 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
3071 
3072 	test_param = test->param_value;
3073 	memcpy(&pdata, &test_param->pdata, sizeof(pdata));
3074 	pdata.hw = parent_hw;
3075 	init.parent_data = &pdata;
3076 	init.num_parents = 1;
3077 	init.ops = &clk_dummy_single_parent_ops;
3078 	init.name = "parent_data_device_hw_test_clk";
3079 	ctx->hw.init = &init;
3080 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
3081 
3082 	KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
3083 }
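
/*
 * For reference, a provider would usually describe such parents with a
 * static clk_parent_data table; a minimal sketch (with hypothetical names)
 * could be:
 *
 *	static const struct clk_parent_data foo_parents[] = {
 *		{ .index = 0 },
 *		{ .fw_name = "baud", .name = "baud_clk" },
 *		{ .hw = &foo_pll_hw },
 *	};
 *
 * where, as the cases above exercise, .hw takes priority over .fw_name,
 * which in turn takes priority over .index and .name.
 */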
3084 
3085 static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
3086 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
3087 			 clk_register_clk_parent_data_device_test_gen_params),
3088 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
3089 			 clk_register_clk_parent_data_device_hw_test_gen_params),
3090 	{}
3091 };
3092 
3093 static int clk_register_clk_parent_data_device_init(struct kunit *test)
3094 {
3095 	KUNIT_ASSERT_EQ(test, 0,
3096 			of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
3097 
3098 	return 0;
3099 }
3100 
3101 /*
3102  * Test suite for registering clks with struct clk_parent_data and a struct
3103  * device.
3104  */
3105 static struct kunit_suite clk_register_clk_parent_data_device_suite = {
3106 	.name = "clk_register_clk_parent_data_device",
3107 	.init = clk_register_clk_parent_data_device_init,
3108 	.test_cases = clk_register_clk_parent_data_device_test_cases,
3109 };
3110 
3111 kunit_test_suites(
3112 	&clk_leaf_mux_set_rate_parent_test_suite,
3113 	&clk_test_suite,
3114 	&clk_multiple_parents_mux_test_suite,
3115 	&clk_mux_no_reparent_test_suite,
3116 	&clk_mux_notifier_test_suite,
3117 	&clk_orphan_transparent_multiple_parent_mux_test_suite,
3118 	&clk_orphan_transparent_single_parent_test_suite,
3119 	&clk_orphan_two_level_root_last_test_suite,
3120 	&clk_range_test_suite,
3121 	&clk_range_maximize_test_suite,
3122 	&clk_range_minimize_test_suite,
3123 	&clk_register_clk_parent_data_of_suite,
3124 	&clk_register_clk_parent_data_device_suite,
3125 	&clk_single_parent_mux_test_suite,
3126 	&clk_uncached_test_suite,
3127 );
3128 MODULE_DESCRIPTION("Kunit tests for clk framework");
3129 MODULE_LICENSE("GPL v2");
3130