xref: /linux/drivers/clk/clk_test.c (revision 9f3a2ba62c7226a6604b8aaeb92b5ff906fa4e6b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KUnit tests for the clk framework
4  */
5 #include <linux/clk.h>
6 #include <linux/clk-provider.h>
7 #include <linux/clk/clk-conf.h>
8 #include <linux/of.h>
9 #include <linux/platform_device.h>
10 
11 /* Needed for clk_hw_get_clk() */
12 #include "clk.h"
13 
14 #include <kunit/clk.h>
15 #include <kunit/of.h>
16 #include <kunit/platform_device.h>
17 #include <kunit/test.h>
18 
19 #include "kunit_clk_assigned_rates.h"
20 #include "clk_parent_data_test.h"
21 
22 static const struct clk_ops empty_clk_ops = { };
23 
24 #define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
25 #define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
26 #define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
27 
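/*
 * A dummy rate clock: a bare clk_hw plus the rate it claims to run at,
 * which the ops below read back (.recalc_rate) and update (.set_rate).
 */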
28 struct clk_dummy_context {
29 	struct clk_hw hw;
30 	unsigned long rate;
31 };
32 
33 static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
34 					   unsigned long parent_rate)
35 {
36 	struct clk_dummy_context *ctx =
37 		container_of(hw, struct clk_dummy_context, hw);
38 
39 	return ctx->rate;
40 }
41 
42 static int clk_dummy_determine_rate(struct clk_hw *hw,
43 				    struct clk_rate_request *req)
44 {
45 	/* Just return the same rate without modifying it */
46 	return 0;
47 }
48 
49 static int clk_dummy_maximize_rate(struct clk_hw *hw,
50 				   struct clk_rate_request *req)
51 {
52 	/*
53 	 * If there's a maximum set, always run the clock at the maximum
54 	 * allowed.
55 	 */
56 	if (req->max_rate < ULONG_MAX)
57 		req->rate = req->max_rate;
58 
59 	return 0;
60 }
61 
62 static int clk_dummy_minimize_rate(struct clk_hw *hw,
63 				   struct clk_rate_request *req)
64 {
65 	/*
66 	 * If there's a minimum set, always run the clock at the minimum
67 	 * allowed.
68 	 */
69 	if (req->min_rate > 0)
70 		req->rate = req->min_rate;
71 
72 	return 0;
73 }
74 
75 static int clk_dummy_set_rate(struct clk_hw *hw,
76 			      unsigned long rate,
77 			      unsigned long parent_rate)
78 {
79 	struct clk_dummy_context *ctx =
80 		container_of(hw, struct clk_dummy_context, hw);
81 
82 	ctx->rate = rate;
83 	return 0;
84 }
85 
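/*
 * Parent callbacks for the single-parent mux ops below: set_parent only
 * validates the index, get_parent always reports parent 0.
 */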
86 static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
87 {
88 	if (index >= clk_hw_get_num_parents(hw))
89 		return -EINVAL;
90 
91 	return 0;
92 }
93 
94 static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
95 {
96 	return 0;
97 }
98 
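/*
 * Three flavours of dummy rate ops: one that accepts whatever rate is
 * requested, one that always picks the maximum of the allowed range and
 * one that always picks the minimum.
 */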
99 static const struct clk_ops clk_dummy_rate_ops = {
100 	.recalc_rate = clk_dummy_recalc_rate,
101 	.determine_rate = clk_dummy_determine_rate,
102 	.set_rate = clk_dummy_set_rate,
103 };
104 
105 static const struct clk_ops clk_dummy_maximize_rate_ops = {
106 	.recalc_rate = clk_dummy_recalc_rate,
107 	.determine_rate = clk_dummy_maximize_rate,
108 	.set_rate = clk_dummy_set_rate,
109 };
110 
111 static const struct clk_ops clk_dummy_minimize_rate_ops = {
112 	.recalc_rate = clk_dummy_recalc_rate,
113 	.determine_rate = clk_dummy_minimize_rate,
114 	.set_rate = clk_dummy_set_rate,
115 };
116 
117 static const struct clk_ops clk_dummy_single_parent_ops = {
118 	/*
119 	 * FIXME: Even though we should probably be able to use
120 	 * __clk_mux_determine_rate() here, if we use it and call
121 	 * clk_round_rate() or clk_set_rate() with a rate lower than
122 	 * what all the parents can provide, it will return -EINVAL.
123 	 *
124 	 * This is due to the fact that it has the undocumented
125 	 * behaviour of always picking the closest rate higher than the
126 	 * requested rate. If it gets something lower, it thus considers
127 	 * that rate unacceptable and will return an error.
128 	 *
129 	 * It's somewhat inconsistent and creates a weird threshold:
130 	 * rates above the parent rate would be rounded to what the
131 	 * parent can provide, but rates below it will simply return
132 	 * an error.
133 	 */
134 	.determine_rate = __clk_mux_determine_rate_closest,
135 	.set_parent = clk_dummy_single_set_parent,
136 	.get_parent = clk_dummy_single_get_parent,
137 };
138 
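/*
 * A mux with two dummy rate parents; current_parent tracks the index
 * selected through .set_parent.
 */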
139 struct clk_multiple_parent_ctx {
140 	struct clk_dummy_context parents_ctx[2];
141 	struct clk_hw hw;
142 	u8 current_parent;
143 };
144 
145 static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
146 {
147 	struct clk_multiple_parent_ctx *ctx =
148 		container_of(hw, struct clk_multiple_parent_ctx, hw);
149 
150 	if (index >= clk_hw_get_num_parents(hw))
151 		return -EINVAL;
152 
153 	ctx->current_parent = index;
154 
155 	return 0;
156 }
157 
158 static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
159 {
160 	struct clk_multiple_parent_ctx *ctx =
161 		container_of(hw, struct clk_multiple_parent_ctx, hw);
162 
163 	return ctx->current_parent;
164 }
165 
166 static const struct clk_ops clk_multiple_parents_mux_ops = {
167 	.get_parent = clk_multiple_parents_mux_get_parent,
168 	.set_parent = clk_multiple_parents_mux_set_parent,
169 	.determine_rate = __clk_mux_determine_rate_closest,
170 };
171 
172 static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
173 	.determine_rate = clk_hw_determine_rate_no_reparent,
174 	.get_parent = clk_multiple_parents_mux_get_parent,
175 	.set_parent = clk_multiple_parents_mux_set_parent,
176 };
177 
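/*
 * Shared init helper: registers one dummy clock, "test_dummy_rate",
 * starting at DUMMY_CLOCK_INIT_RATE, with the ops chosen by each
 * suite's init callback.
 */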
178 static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
179 {
180 	struct clk_dummy_context *ctx;
181 	struct clk_init_data init = { };
182 	int ret;
183 
184 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
185 	if (!ctx)
186 		return -ENOMEM;
187 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
188 	test->priv = ctx;
189 
190 	init.name = "test_dummy_rate";
191 	init.ops = ops;
192 	ctx->hw.init = &init;
193 
194 	ret = clk_hw_register(NULL, &ctx->hw);
195 	if (ret)
196 		return ret;
197 
198 	return 0;
199 }
200 
201 static int clk_test_init(struct kunit *test)
202 {
203 	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
204 }
205 
206 static int clk_maximize_test_init(struct kunit *test)
207 {
208 	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
209 }
210 
211 static int clk_minimize_test_init(struct kunit *test)
212 {
213 	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
214 }
215 
216 static void clk_test_exit(struct kunit *test)
217 {
218 	struct clk_dummy_context *ctx = test->priv;
219 
220 	clk_hw_unregister(&ctx->hw);
221 }
222 
223 /*
224  * Test that the actual rate matches what is returned by clk_get_rate()
225  */
226 static void clk_test_get_rate(struct kunit *test)
227 {
228 	struct clk_dummy_context *ctx = test->priv;
229 	struct clk_hw *hw = &ctx->hw;
230 	struct clk *clk = clk_hw_get_clk(hw, NULL);
231 	unsigned long rate;
232 
233 	rate = clk_get_rate(clk);
234 	KUNIT_ASSERT_GT(test, rate, 0);
235 	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
236 
237 	clk_put(clk);
238 }
239 
240 /*
241  * Test that, after a call to clk_set_rate(), the rate returned by
242  * clk_get_rate() matches.
243  *
244  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
245  * modify the requested rate, which is the case for clk_dummy_rate_ops.
246  */
247 static void clk_test_set_get_rate(struct kunit *test)
248 {
249 	struct clk_dummy_context *ctx = test->priv;
250 	struct clk_hw *hw = &ctx->hw;
251 	struct clk *clk = clk_hw_get_clk(hw, NULL);
252 	unsigned long rate;
253 
254 	KUNIT_ASSERT_EQ(test,
255 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
256 			0);
257 
258 	rate = clk_get_rate(clk);
259 	KUNIT_ASSERT_GT(test, rate, 0);
260 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
261 
262 	clk_put(clk);
263 }
264 
265 /*
266  * Test that, after several calls to clk_set_rate(), the rate returned
267  * by clk_get_rate() matches the last one.
268  *
269  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
270  * modify the requested rate, which is the case for clk_dummy_rate_ops.
271  */
272 static void clk_test_set_set_get_rate(struct kunit *test)
273 {
274 	struct clk_dummy_context *ctx = test->priv;
275 	struct clk_hw *hw = &ctx->hw;
276 	struct clk *clk = clk_hw_get_clk(hw, NULL);
277 	unsigned long rate;
278 
279 	KUNIT_ASSERT_EQ(test,
280 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
281 			0);
282 
283 	KUNIT_ASSERT_EQ(test,
284 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
285 			0);
286 
287 	rate = clk_get_rate(clk);
288 	KUNIT_ASSERT_GT(test, rate, 0);
289 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
290 
291 	clk_put(clk);
292 }
293 
294 /*
295  * Test that clk_round_rate() and clk_set_rate() are consistent and
296  * will return the same frequency.
297  */
298 static void clk_test_round_set_get_rate(struct kunit *test)
299 {
300 	struct clk_dummy_context *ctx = test->priv;
301 	struct clk_hw *hw = &ctx->hw;
302 	struct clk *clk = clk_hw_get_clk(hw, NULL);
303 	unsigned long set_rate;
304 	long rounded_rate;
305 
306 	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
307 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
308 	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
309 
310 	KUNIT_ASSERT_EQ(test,
311 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
312 			0);
313 
314 	set_rate = clk_get_rate(clk);
315 	KUNIT_ASSERT_GT(test, set_rate, 0);
316 	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
317 
318 	clk_put(clk);
319 }
320 
321 static struct kunit_case clk_test_cases[] = {
322 	KUNIT_CASE(clk_test_get_rate),
323 	KUNIT_CASE(clk_test_set_get_rate),
324 	KUNIT_CASE(clk_test_set_set_get_rate),
325 	KUNIT_CASE(clk_test_round_set_get_rate),
326 	{}
327 };
328 
329 /*
330  * Test suite for a basic rate clock, without any parent.
331  *
332  * These tests exercise the rate API with simple scenarios
333  */
334 static struct kunit_suite clk_test_suite = {
335 	.name = "clk-test",
336 	.init = clk_test_init,
337 	.exit = clk_test_exit,
338 	.test_cases = clk_test_cases,
339 };
340 
341 static int clk_uncached_test_init(struct kunit *test)
342 {
343 	struct clk_dummy_context *ctx;
344 	int ret;
345 
346 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
347 	if (!ctx)
348 		return -ENOMEM;
349 	test->priv = ctx;
350 
351 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
352 	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
353 					     &clk_dummy_rate_ops,
354 					     CLK_GET_RATE_NOCACHE);
355 
356 	ret = clk_hw_register(NULL, &ctx->hw);
357 	if (ret)
358 		return ret;
359 
360 	return 0;
361 }
362 
363 /*
364  * Test that for an uncached clock, the clock framework doesn't cache
365  * the rate and clk_get_rate() will return the underlying clock rate
366  * even if it changed.
367  */
368 static void clk_test_uncached_get_rate(struct kunit *test)
369 {
370 	struct clk_dummy_context *ctx = test->priv;
371 	struct clk_hw *hw = &ctx->hw;
372 	struct clk *clk = clk_hw_get_clk(hw, NULL);
373 	unsigned long rate;
374 
375 	rate = clk_get_rate(clk);
376 	KUNIT_ASSERT_GT(test, rate, 0);
377 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
378 
379 	/* We change the rate behind the clock framework's back */
380 	ctx->rate = DUMMY_CLOCK_RATE_1;
381 	rate = clk_get_rate(clk);
382 	KUNIT_ASSERT_GT(test, rate, 0);
383 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
384 
385 	clk_put(clk);
386 }
387 
388 /*
389  * Test that for an uncached clock, clk_set_rate_range() will work
390  * properly if the rate hasn't changed.
391  */
392 static void clk_test_uncached_set_range(struct kunit *test)
393 {
394 	struct clk_dummy_context *ctx = test->priv;
395 	struct clk_hw *hw = &ctx->hw;
396 	struct clk *clk = clk_hw_get_clk(hw, NULL);
397 	unsigned long rate;
398 
399 	KUNIT_ASSERT_EQ(test,
400 			clk_set_rate_range(clk,
401 					   DUMMY_CLOCK_RATE_1,
402 					   DUMMY_CLOCK_RATE_2),
403 			0);
404 
405 	rate = clk_get_rate(clk);
406 	KUNIT_ASSERT_GT(test, rate, 0);
407 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
408 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
409 
410 	clk_put(clk);
411 }
412 
413 /*
414  * Test that for an uncached clock, clk_set_rate_range() will work
415  * properly if the rate has changed in hardware.
416  *
417  * In this case, it means that if the rate wasn't initially in the range
418  * we're trying to set, but got changed at some point into the range
419  * without the kernel knowing about it, its rate shouldn't be affected.
420  */
421 static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
422 {
423 	struct clk_dummy_context *ctx = test->priv;
424 	struct clk_hw *hw = &ctx->hw;
425 	struct clk *clk = clk_hw_get_clk(hw, NULL);
426 	unsigned long rate;
427 
428 	/* We change the rate behind the clock framework's back */
429 	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
430 	KUNIT_ASSERT_EQ(test,
431 			clk_set_rate_range(clk,
432 					   DUMMY_CLOCK_RATE_1,
433 					   DUMMY_CLOCK_RATE_2),
434 			0);
435 
436 	rate = clk_get_rate(clk);
437 	KUNIT_ASSERT_GT(test, rate, 0);
438 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
439 
440 	clk_put(clk);
441 }
442 
443 static struct kunit_case clk_uncached_test_cases[] = {
444 	KUNIT_CASE(clk_test_uncached_get_rate),
445 	KUNIT_CASE(clk_test_uncached_set_range),
446 	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
447 	{}
448 };
449 
450 /*
451  * Test suite for a basic, uncached, rate clock, without any parent.
452  *
453  * These tests exercise the rate API with simple scenarios
454  */
455 static struct kunit_suite clk_uncached_test_suite = {
456 	.name = "clk-uncached-test",
457 	.init = clk_uncached_test_init,
458 	.exit = clk_test_exit,
459 	.test_cases = clk_uncached_test_cases,
460 };
461 
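/*
 * Registers two dummy rate parents ("parent-0" at DUMMY_CLOCK_RATE_1,
 * "parent-1" at DUMMY_CLOCK_RATE_2) and a "test-mux" child that starts
 * on parent-0 and has CLK_SET_RATE_PARENT set.
 */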
462 static int
463 clk_multiple_parents_mux_test_init(struct kunit *test)
464 {
465 	struct clk_multiple_parent_ctx *ctx;
466 	const char *parents[2] = { "parent-0", "parent-1"};
467 	int ret;
468 
469 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
470 	if (!ctx)
471 		return -ENOMEM;
472 	test->priv = ctx;
473 
474 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
475 							    &clk_dummy_rate_ops,
476 							    0);
477 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
478 	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[0].hw);
479 	if (ret)
480 		return ret;
481 
482 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
483 							    &clk_dummy_rate_ops,
484 							    0);
485 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
486 	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
487 	if (ret)
488 		return ret;
489 
490 	ctx->current_parent = 0;
491 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
492 					   &clk_multiple_parents_mux_ops,
493 					   CLK_SET_RATE_PARENT);
494 	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
495 	if (ret)
496 		return ret;
497 
498 	return 0;
499 }
500 
501 /*
502  * Test that for a clock with multiple parents, clk_get_parent()
503  * actually returns the current one.
504  */
505 static void
506 clk_test_multiple_parents_mux_get_parent(struct kunit *test)
507 {
508 	struct clk_multiple_parent_ctx *ctx = test->priv;
509 	struct clk_hw *hw = &ctx->hw;
510 	struct clk *clk = clk_hw_get_clk(hw, NULL);
511 	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
512 
513 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
514 
515 	clk_put(parent);
516 	clk_put(clk);
517 }
518 
519 /*
520  * Test that for a clock with multiple parents, clk_has_parent()
521  * actually reports all of them as parents.
522  */
523 static void
524 clk_test_multiple_parents_mux_has_parent(struct kunit *test)
525 {
526 	struct clk_multiple_parent_ctx *ctx = test->priv;
527 	struct clk_hw *hw = &ctx->hw;
528 	struct clk *clk = clk_hw_get_clk(hw, NULL);
529 	struct clk *parent;
530 
531 	parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
532 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
533 	clk_put(parent);
534 
535 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
536 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
537 	clk_put(parent);
538 
539 	clk_put(clk);
540 }
541 
542 /*
543  * Test that for a clock with multiple parents, if we set a range on
544  * that clock and the parent is changed, its rate after the reparenting
545  * is still within the range we asked for.
546  *
547  * FIXME: clk_set_parent() only does the reparenting but doesn't
548  * reevaluate whether the new clock rate is within its boundaries or
549  * not.
550  */
551 static void
552 clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
553 {
554 	struct clk_multiple_parent_ctx *ctx = test->priv;
555 	struct clk_hw *hw = &ctx->hw;
556 	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
557 	struct clk *parent1, *parent2;
558 	unsigned long rate;
559 	int ret;
560 
561 	kunit_skip(test, "This needs to be fixed in the core.");
562 
563 	parent1 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[0].hw, NULL);
564 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
565 	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
566 
567 	parent2 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
568 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
569 
570 	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
571 	KUNIT_ASSERT_EQ(test, ret, 0);
572 
573 	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
574 	KUNIT_ASSERT_EQ(test, ret, 0);
575 
576 	ret = clk_set_rate_range(clk,
577 				 DUMMY_CLOCK_RATE_1 - 1000,
578 				 DUMMY_CLOCK_RATE_1 + 1000);
579 	KUNIT_ASSERT_EQ(test, ret, 0);
580 
581 	ret = clk_set_parent(clk, parent2);
582 	KUNIT_ASSERT_EQ(test, ret, 0);
583 
584 	rate = clk_get_rate(clk);
585 	KUNIT_ASSERT_GT(test, rate, 0);
586 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
587 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
588 }
589 
590 static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
591 	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
592 	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
593 	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
594 	{}
595 };
596 
597 /*
598  * Test suite for a basic mux clock with two parents, with
599  * CLK_SET_RATE_PARENT on the child.
600  *
601  * These tests exercise the consumer API and check that the state of the
602  * child and parents are sane and consistent.
603  */
604 static struct kunit_suite
605 clk_multiple_parents_mux_test_suite = {
606 	.name = "clk-multiple-parents-mux-test",
607 	.init = clk_multiple_parents_mux_test_init,
608 	.test_cases = clk_multiple_parents_mux_test_cases,
609 };
610 
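/*
 * Only the second parent, "proper-parent", is registered here, so the
 * "test-orphan-mux" clock starts out orphan with the unregistered
 * "missing-parent" as its default (index 0) parent.
 */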
611 static int
612 clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
613 {
614 	struct clk_multiple_parent_ctx *ctx;
615 	const char *parents[2] = { "missing-parent", "proper-parent"};
616 	int ret;
617 
618 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
619 	if (!ctx)
620 		return -ENOMEM;
621 	test->priv = ctx;
622 
623 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
624 							    &clk_dummy_rate_ops,
625 							    0);
626 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
627 	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
628 	if (ret)
629 		return ret;
630 
631 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
632 					   &clk_multiple_parents_mux_ops,
633 					   CLK_SET_RATE_PARENT);
634 	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
635 	if (ret)
636 		return ret;
637 
638 	return 0;
639 }
640 
641 /*
642  * Test that, for a mux whose current parent hasn't been registered yet and is
643  * thus orphan, clk_get_parent() will return NULL.
644  */
645 static void
646 clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
647 {
648 	struct clk_multiple_parent_ctx *ctx = test->priv;
649 	struct clk_hw *hw = &ctx->hw;
650 	struct clk *clk = clk_hw_get_clk(hw, NULL);
651 
652 	KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
653 
654 	clk_put(clk);
655 }
656 
657 /*
658  * Test that, for a mux whose current parent hasn't been registered yet,
659  * calling clk_set_parent() to a valid parent will properly update the
660  * mux parent and its orphan status.
661  */
662 static void
663 clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
664 {
665 	struct clk_multiple_parent_ctx *ctx = test->priv;
666 	struct clk_hw *hw = &ctx->hw;
667 	struct clk *clk = clk_hw_get_clk(hw, NULL);
668 	struct clk *parent, *new_parent;
669 	int ret;
670 
671 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
672 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
673 
674 	ret = clk_set_parent(clk, parent);
675 	KUNIT_ASSERT_EQ(test, ret, 0);
676 
677 	new_parent = clk_get_parent(clk);
678 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_parent);
679 	KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
680 
681 	clk_put(parent);
682 	clk_put(clk);
683 }
684 
685 /*
686  * Test that, for a mux that started orphan but got switched to a valid
687  * parent, calling clk_drop_range() on the mux won't affect the parent
688  * rate.
689  */
690 static void
691 clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
692 {
693 	struct clk_multiple_parent_ctx *ctx = test->priv;
694 	struct clk_hw *hw = &ctx->hw;
695 	struct clk *clk = clk_hw_get_clk(hw, NULL);
696 	struct clk *parent;
697 	unsigned long parent_rate, new_parent_rate;
698 	int ret;
699 
700 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
701 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
702 
703 	parent_rate = clk_get_rate(parent);
704 	KUNIT_ASSERT_GT(test, parent_rate, 0);
705 
706 	ret = clk_set_parent(clk, parent);
707 	KUNIT_ASSERT_EQ(test, ret, 0);
708 
709 	ret = clk_drop_range(clk);
710 	KUNIT_ASSERT_EQ(test, ret, 0);
711 
712 	new_parent_rate = clk_get_rate(clk);
713 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
714 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
715 
716 	clk_put(parent);
717 	clk_put(clk);
718 }
719 
720 /*
721  * Test that, for a mux that started orphan but got switched to a valid
722  * parent, the rate of the mux and its new parent are consistent.
723  */
724 static void
725 clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
726 {
727 	struct clk_multiple_parent_ctx *ctx = test->priv;
728 	struct clk_hw *hw = &ctx->hw;
729 	struct clk *clk = clk_hw_get_clk(hw, NULL);
730 	struct clk *parent;
731 	unsigned long parent_rate, rate;
732 	int ret;
733 
734 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
735 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
736 
737 	parent_rate = clk_get_rate(parent);
738 	KUNIT_ASSERT_GT(test, parent_rate, 0);
739 
740 	ret = clk_set_parent(clk, parent);
741 	KUNIT_ASSERT_EQ(test, ret, 0);
742 
743 	rate = clk_get_rate(clk);
744 	KUNIT_ASSERT_GT(test, rate, 0);
745 	KUNIT_EXPECT_EQ(test, parent_rate, rate);
746 
747 	clk_put(parent);
748 	clk_put(clk);
749 }
750 
751 /*
752  * Test that, for a mux that started orphan but got switched to a valid
753  * parent, calling clk_put() on the mux won't affect the parent rate.
754  */
755 static void
756 clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
757 {
758 	struct clk_multiple_parent_ctx *ctx = test->priv;
759 	struct clk *clk, *parent;
760 	unsigned long parent_rate, new_parent_rate;
761 	int ret;
762 
763 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
764 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
765 
766 	clk = clk_hw_get_clk(&ctx->hw, NULL);
767 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
768 
769 	parent_rate = clk_get_rate(parent);
770 	KUNIT_ASSERT_GT(test, parent_rate, 0);
771 
772 	ret = clk_set_parent(clk, parent);
773 	KUNIT_ASSERT_EQ(test, ret, 0);
774 
775 	clk_put(clk);
776 
777 	new_parent_rate = clk_get_rate(parent);
778 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
779 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
780 
781 	clk_put(parent);
782 }
783 
784 /*
785  * Test that, for a mux that started orphan but got switched to a valid
786  * parent, calling clk_set_rate_range() will affect the parent state if
787  * its rate is out of range.
788  */
789 static void
790 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
791 {
792 	struct clk_multiple_parent_ctx *ctx = test->priv;
793 	struct clk_hw *hw = &ctx->hw;
794 	struct clk *clk = clk_hw_get_clk(hw, NULL);
795 	struct clk *parent;
796 	unsigned long rate;
797 	int ret;
798 
799 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
800 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
801 
802 	ret = clk_set_parent(clk, parent);
803 	KUNIT_ASSERT_EQ(test, ret, 0);
804 
805 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
806 	KUNIT_ASSERT_EQ(test, ret, 0);
807 
808 	rate = clk_get_rate(clk);
809 	KUNIT_ASSERT_GT(test, rate, 0);
810 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
811 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
812 
813 	clk_put(parent);
814 	clk_put(clk);
815 }
816 
817 /*
818  * Test that, for a mux that started orphan but got switched to a valid
819  * parent, calling clk_set_rate_range() won't affect the parent state if
820  * its rate is within range.
821  */
822 static void
823 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
824 {
825 	struct clk_multiple_parent_ctx *ctx = test->priv;
826 	struct clk_hw *hw = &ctx->hw;
827 	struct clk *clk = clk_hw_get_clk(hw, NULL);
828 	struct clk *parent;
829 	unsigned long parent_rate, new_parent_rate;
830 	int ret;
831 
832 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
833 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
834 
835 	parent_rate = clk_get_rate(parent);
836 	KUNIT_ASSERT_GT(test, parent_rate, 0);
837 
838 	ret = clk_set_parent(clk, parent);
839 	KUNIT_ASSERT_EQ(test, ret, 0);
840 
841 	ret = clk_set_rate_range(clk,
842 				 DUMMY_CLOCK_INIT_RATE - 1000,
843 				 DUMMY_CLOCK_INIT_RATE + 1000);
844 	KUNIT_ASSERT_EQ(test, ret, 0);
845 
846 	new_parent_rate = clk_get_rate(parent);
847 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
848 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
849 
850 	clk_put(parent);
851 	clk_put(clk);
852 }
853 
854 /*
855  * Test that, for a mux whose current parent hasn't been registered yet,
856  * calling clk_set_rate_range() will succeed, and will be taken into
857  * account when rounding a rate.
858  */
859 static void
860 clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
861 {
862 	struct clk_multiple_parent_ctx *ctx = test->priv;
863 	struct clk_hw *hw = &ctx->hw;
864 	struct clk *clk = clk_hw_get_clk(hw, NULL);
865 	long rate;
866 	int ret;
867 
868 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
869 	KUNIT_ASSERT_EQ(test, ret, 0);
870 
871 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
872 	KUNIT_ASSERT_GT(test, rate, 0);
873 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
874 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
875 
876 	clk_put(clk);
877 }
878 
879 /*
880  * Test that, for a mux that started orphan, was assigned a rate and
881  * then got switched to a valid parent, its rate is eventually within
882  * range.
883  *
884  * FIXME: Even though we update the rate as part of clk_set_parent(), we
885  * don't evaluate whether that new rate is within range and needs to be
886  * adjusted.
887  */
888 static void
889 clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
890 {
891 	struct clk_multiple_parent_ctx *ctx = test->priv;
892 	struct clk_hw *hw = &ctx->hw;
893 	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
894 	struct clk *parent;
895 	unsigned long rate;
896 	int ret;
897 
898 	kunit_skip(test, "This needs to be fixed in the core.");
899 
900 	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
901 
902 	parent = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
903 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
904 
905 	ret = clk_set_parent(clk, parent);
906 	KUNIT_ASSERT_EQ(test, ret, 0);
907 
908 	rate = clk_get_rate(clk);
909 	KUNIT_ASSERT_GT(test, rate, 0);
910 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
911 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
912 }
913 
914 static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
915 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
916 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
917 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
918 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
919 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
920 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
921 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
922 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
923 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
924 	{}
925 };
926 
927 /*
928  * Test suite for a basic mux clock with two parents. The default parent
929  * isn't registered, only the second parent is. By default, the clock
930  * will thus be orphan.
931  *
932  * These tests exercise the behaviour of the consumer API when dealing
933  * with an orphan clock, and how we deal with the transition to a valid
934  * parent.
935  */
936 static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
937 	.name = "clk-orphan-transparent-multiple-parent-mux-test",
938 	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
939 	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
940 };
941 
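/*
 * A clock with a single dummy rate parent, used by the single-parent
 * and orphan-single-parent suites below.
 */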
942 struct clk_single_parent_ctx {
943 	struct clk_dummy_context parent_ctx;
944 	struct clk_hw hw;
945 };
946 
947 static int clk_single_parent_mux_test_init(struct kunit *test)
948 {
949 	struct clk_single_parent_ctx *ctx;
950 	int ret;
951 
952 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
953 	if (!ctx)
954 		return -ENOMEM;
955 	test->priv = ctx;
956 
957 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
958 	ctx->parent_ctx.hw.init =
959 		CLK_HW_INIT_NO_PARENT("parent-clk",
960 				      &clk_dummy_rate_ops,
961 				      0);
962 
963 	ret = clk_hw_register_kunit(test, NULL, &ctx->parent_ctx.hw);
964 	if (ret)
965 		return ret;
966 
967 	ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
968 				   &clk_dummy_single_parent_ops,
969 				   CLK_SET_RATE_PARENT);
970 
971 	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
972 	if (ret)
973 		return ret;
974 
975 	return 0;
976 }
977 
978 static void
979 clk_single_parent_mux_test_exit(struct kunit *test)
980 {
981 	struct clk_single_parent_ctx *ctx = test->priv;
982 
983 	clk_hw_unregister(&ctx->hw);
984 	clk_hw_unregister(&ctx->parent_ctx.hw);
985 }
986 
987 /*
988  * Test that for a clock with a single parent, clk_get_parent() actually
989  * returns the parent.
990  */
991 static void
992 clk_test_single_parent_mux_get_parent(struct kunit *test)
993 {
994 	struct clk_single_parent_ctx *ctx = test->priv;
995 	struct clk_hw *hw = &ctx->hw;
996 	struct clk *clk = clk_hw_get_clk(hw, NULL);
997 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
998 
999 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
1000 
1001 	clk_put(parent);
1002 	clk_put(clk);
1003 }
1004 
1005 /*
1006  * Test that for a clock with a single parent, clk_has_parent() actually
1007  * reports it as a parent.
1008  */
1009 static void
1010 clk_test_single_parent_mux_has_parent(struct kunit *test)
1011 {
1012 	struct clk_single_parent_ctx *ctx = test->priv;
1013 	struct clk_hw *hw = &ctx->hw;
1014 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1015 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1016 
1017 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1018 
1019 	clk_put(parent);
1020 	clk_put(clk);
1021 }
1022 
1023 /*
1024  * Test that for a clock that can't modify its rate and with a single
1025  * parent, if we set disjoint ranges on the parent and then on the
1026  * child, the second call will return an error.
1027  *
1028  * FIXME: clk_set_rate_range() only considers the current clock when
1029  * evaluating whether ranges are disjoint, and not the upstream
1030  * clocks' ranges.
1031  */
1032 static void
1033 clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1034 {
1035 	struct clk_single_parent_ctx *ctx = test->priv;
1036 	struct clk_hw *hw = &ctx->hw;
1037 	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
1038 	struct clk *parent;
1039 	int ret;
1040 
1041 	kunit_skip(test, "This needs to be fixed in the core.");
1042 
1043 	parent = clk_get_parent(clk);
1044 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1045 
1046 	ret = clk_set_rate_range(parent, 1000, 2000);
1047 	KUNIT_ASSERT_EQ(test, ret, 0);
1048 
1049 	ret = clk_set_rate_range(clk, 3000, 4000);
1050 	KUNIT_EXPECT_LT(test, ret, 0);
1051 }
1052 
1053 /*
1054  * Test that for a clock that can't modify its rate and with a single
1055  * parent, if we set disjoint ranges on the child and then on the
1056  * parent, the second call will return an error.
1057  *
1058  * FIXME: clk_set_rate_range() only considers the current clock when
1059  * evaluating whether ranges are disjoint, and not the downstream
1060  * clocks' ranges.
1061  */
1062 static void
1063 clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1064 {
1065 	struct clk_single_parent_ctx *ctx = test->priv;
1066 	struct clk_hw *hw = &ctx->hw;
1067 	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
1068 	struct clk *parent;
1069 	int ret;
1070 
1071 	kunit_skip(test, "This needs to be fixed in the core.");
1072 
1073 	parent = clk_get_parent(clk);
1074 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1075 
1076 	ret = clk_set_rate_range(clk, 1000, 2000);
1077 	KUNIT_ASSERT_EQ(test, ret, 0);
1078 
1079 	ret = clk_set_rate_range(parent, 3000, 4000);
1080 	KUNIT_EXPECT_LT(test, ret, 0);
1081 }
1082 
1083 /*
1084  * Test that for a clock that can't modify its rate and with a single
1085  * parent, if we set a range on the parent and then call
1086  * clk_round_rate(), the boundaries of the parent are taken into
1087  * account.
1088  */
1089 static void
1090 clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1091 {
1092 	struct clk_single_parent_ctx *ctx = test->priv;
1093 	struct clk_hw *hw = &ctx->hw;
1094 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1095 	struct clk *parent;
1096 	long rate;
1097 	int ret;
1098 
1099 	parent = clk_get_parent(clk);
1100 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1101 
1102 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1103 	KUNIT_ASSERT_EQ(test, ret, 0);
1104 
1105 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1106 	KUNIT_ASSERT_GT(test, rate, 0);
1107 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1108 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1109 
1110 	clk_put(clk);
1111 }
1112 
1113 /*
1114  * Test that for a clock that can't modify its rate and with a single
1115  * parent, if we set a range on the parent and a more restrictive one on
1116  * the child, and then call clk_round_rate(), the boundaries of the
1117  * two clocks are taken into account.
1118  */
1119 static void
1120 clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1121 {
1122 	struct clk_single_parent_ctx *ctx = test->priv;
1123 	struct clk_hw *hw = &ctx->hw;
1124 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1125 	struct clk *parent;
1126 	long rate;
1127 	int ret;
1128 
1129 	parent = clk_get_parent(clk);
1130 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1131 
1132 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1133 	KUNIT_ASSERT_EQ(test, ret, 0);
1134 
1135 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1136 	KUNIT_ASSERT_EQ(test, ret, 0);
1137 
1138 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1139 	KUNIT_ASSERT_GT(test, rate, 0);
1140 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1141 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1142 
1143 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1144 	KUNIT_ASSERT_GT(test, rate, 0);
1145 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1146 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1147 
1148 	clk_put(clk);
1149 }
1150 
1151 /*
1152  * Test that for a clock that can't modify its rate and with a single
1153  * parent, if we set a range on the child and a more restrictive one on
1154  * the parent, and then call clk_round_rate(), the boundaries of the
1155  * two clocks are taken into account.
1156  */
1157 static void
1158 clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1159 {
1160 	struct clk_single_parent_ctx *ctx = test->priv;
1161 	struct clk_hw *hw = &ctx->hw;
1162 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1163 	struct clk *parent;
1164 	long rate;
1165 	int ret;
1166 
1167 	parent = clk_get_parent(clk);
1168 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1169 
1170 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1171 	KUNIT_ASSERT_EQ(test, ret, 0);
1172 
1173 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1174 	KUNIT_ASSERT_EQ(test, ret, 0);
1175 
1176 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1177 	KUNIT_ASSERT_GT(test, rate, 0);
1178 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1179 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1180 
1181 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1182 	KUNIT_ASSERT_GT(test, rate, 0);
1183 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1184 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1185 
1186 	clk_put(clk);
1187 }
1188 
1189 static struct kunit_case clk_single_parent_mux_test_cases[] = {
1190 	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
1191 	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
1192 	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
1193 	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
1194 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
1195 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
1196 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
1197 	{}
1198 };
1199 
1200 /*
1201  * Test suite for a basic mux clock with one parent, with
1202  * CLK_SET_RATE_PARENT on the child.
1203  *
1204  * These tests exercise the consumer API and check that the state of the
1205  * child and parent are sane and consistent.
1206  */
1207 static struct kunit_suite
1208 clk_single_parent_mux_test_suite = {
1209 	.name = "clk-single-parent-mux-test",
1210 	.init = clk_single_parent_mux_test_init,
1211 	.test_cases = clk_single_parent_mux_test_cases,
1212 };
1213 
1214 static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1215 {
1216 	struct clk_single_parent_ctx *ctx;
1217 	struct clk_init_data init = { };
1218 	const char * const parents[] = { "orphan_parent" };
1219 	int ret;
1220 
1221 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1222 	if (!ctx)
1223 		return -ENOMEM;
1224 	test->priv = ctx;
1225 
1226 	init.name = "test_orphan_dummy_parent";
1227 	init.ops = &clk_dummy_single_parent_ops;
1228 	init.parent_names = parents;
1229 	init.num_parents = ARRAY_SIZE(parents);
1230 	init.flags = CLK_SET_RATE_PARENT;
1231 	ctx->hw.init = &init;
1232 
1233 	ret = clk_hw_register(NULL, &ctx->hw);
1234 	if (ret)
1235 		return ret;
1236 
1237 	memset(&init, 0, sizeof(init));
1238 	init.name = "orphan_parent";
1239 	init.ops = &clk_dummy_rate_ops;
1240 	ctx->parent_ctx.hw.init = &init;
1241 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1242 
1243 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1244 	if (ret)
1245 		return ret;
1246 
1247 	return 0;
1248 }
1249 
1250 /*
1251  * Test that a mux-only clock, with an initial rate within a range,
1252  * will still have the same rate after the range has been enforced.
1253  *
1254  * See:
1255  * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1256  */
1257 static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1258 {
1259 	struct clk_single_parent_ctx *ctx = test->priv;
1260 	struct clk_hw *hw = &ctx->hw;
1261 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1262 	unsigned long rate, new_rate;
1263 
1264 	rate = clk_get_rate(clk);
1265 	KUNIT_ASSERT_GT(test, rate, 0);
1266 
1267 	KUNIT_ASSERT_EQ(test,
1268 			clk_set_rate_range(clk,
1269 					   ctx->parent_ctx.rate - 1000,
1270 					   ctx->parent_ctx.rate + 1000),
1271 			0);
1272 
1273 	new_rate = clk_get_rate(clk);
1274 	KUNIT_ASSERT_GT(test, new_rate, 0);
1275 	KUNIT_EXPECT_EQ(test, rate, new_rate);
1276 
1277 	clk_put(clk);
1278 }
1279 
1280 static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
1281 	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
1282 	{}
1283 };
1284 
1285 /*
1286  * Test suite for a basic mux clock with one parent. The parent is
1287  * registered after its child. The clock will thus be an orphan when
1288  * registered, but will no longer be when the tests run.
1289  *
1290  * These tests make sure a clock that used to be orphan has a sane,
1291  * consistent, behaviour.
1292  */
1293 static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1294 	.name = "clk-orphan-transparent-single-parent-test",
1295 	.init = clk_orphan_transparent_single_parent_mux_test_init,
1296 	.exit = clk_single_parent_mux_test_exit,
1297 	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1298 };
1299 
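/*
 * A two-level chain: a root rate clock, a transparent intermediate
 * parent and the leaf clock under test.
 */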
1300 struct clk_single_parent_two_lvl_ctx {
1301 	struct clk_dummy_context parent_parent_ctx;
1302 	struct clk_dummy_context parent_ctx;
1303 	struct clk_hw hw;
1304 };
1305 
1306 static int
1307 clk_orphan_two_level_root_last_test_init(struct kunit *test)
1308 {
1309 	struct clk_single_parent_two_lvl_ctx *ctx;
1310 	int ret;
1311 
1312 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1313 	if (!ctx)
1314 		return -ENOMEM;
1315 	test->priv = ctx;
1316 
1317 	ctx->parent_ctx.hw.init =
1318 		CLK_HW_INIT("intermediate-parent",
1319 			    "root-parent",
1320 			    &clk_dummy_single_parent_ops,
1321 			    CLK_SET_RATE_PARENT);
1322 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1323 	if (ret)
1324 		return ret;
1325 
1326 	ctx->hw.init =
1327 		CLK_HW_INIT("test-clk", "intermediate-parent",
1328 			    &clk_dummy_single_parent_ops,
1329 			    CLK_SET_RATE_PARENT);
1330 	ret = clk_hw_register(NULL, &ctx->hw);
1331 	if (ret)
1332 		return ret;
1333 
1334 	ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1335 	ctx->parent_parent_ctx.hw.init =
1336 		CLK_HW_INIT_NO_PARENT("root-parent",
1337 				      &clk_dummy_rate_ops,
1338 				      0);
1339 	ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1340 	if (ret)
1341 		return ret;
1342 
1343 	return 0;
1344 }
1345 
1346 static void
1347 clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1348 {
1349 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1350 
1351 	clk_hw_unregister(&ctx->hw);
1352 	clk_hw_unregister(&ctx->parent_ctx.hw);
1353 	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1354 }
1355 
1356 /*
1357  * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1358  * will return the proper rate.
1359  */
1360 static void
1361 clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1362 {
1363 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1364 	struct clk_hw *hw = &ctx->hw;
1365 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1366 	unsigned long rate;
1367 
1368 	rate = clk_get_rate(clk);
1369 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1370 
1371 	clk_put(clk);
1372 }
1373 
1374 /*
1375  * Test that, for a clock whose parent used to be orphan,
1376  * clk_set_rate_range() won't affect its rate if it is already within
1377  * range.
1378  *
1379  * See (for Exynos 4210):
1380  * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1381  */
1382 static void
1383 clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1384 {
1385 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1386 	struct clk_hw *hw = &ctx->hw;
1387 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1388 	unsigned long rate;
1389 	int ret;
1390 
1391 	ret = clk_set_rate_range(clk,
1392 				 DUMMY_CLOCK_INIT_RATE - 1000,
1393 				 DUMMY_CLOCK_INIT_RATE + 1000);
1394 	KUNIT_ASSERT_EQ(test, ret, 0);
1395 
1396 	rate = clk_get_rate(clk);
1397 	KUNIT_ASSERT_GT(test, rate, 0);
1398 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1399 
1400 	clk_put(clk);
1401 }
1402 
1403 static struct kunit_case
1404 clk_orphan_two_level_root_last_test_cases[] = {
1405 	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
1406 	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
1407 	{}
1408 };
1409 
1410 /*
1411  * Test suite for a basic, transparent, clock with a parent that is also
1412  * such a clock. The parent's parent is registered last, while the
1413  * parent and its child are registered in that order. The intermediate
1414  * and leaf clocks will thus be orphan when registered, but the leaf
1415  * clock itself will always have its parent and will never be
1416  * reparented. Indeed, it's only orphan because its parent is.
1417  *
1418  * These tests exercise the behaviour of the consumer API when dealing
1419  * with an orphan clock, and how we deal with the transition to a valid
1420  * parent.
1421  */
1422 static struct kunit_suite
1423 clk_orphan_two_level_root_last_test_suite = {
1424 	.name = "clk-orphan-two-level-root-last-test",
1425 	.init = clk_orphan_two_level_root_last_test_init,
1426 	.exit = clk_orphan_two_level_root_last_test_exit,
1427 	.test_cases = clk_orphan_two_level_root_last_test_cases,
1428 };
1429 
1430 /*
1431  * Test that clk_set_rate_range won't return an error for a valid range
1432  * and that it will make sure the rate of the clock is within the
1433  * boundaries.
1434  */
1435 static void clk_range_test_set_range(struct kunit *test)
1436 {
1437 	struct clk_dummy_context *ctx = test->priv;
1438 	struct clk_hw *hw = &ctx->hw;
1439 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1440 	unsigned long rate;
1441 
1442 	KUNIT_ASSERT_EQ(test,
1443 			clk_set_rate_range(clk,
1444 					   DUMMY_CLOCK_RATE_1,
1445 					   DUMMY_CLOCK_RATE_2),
1446 			0);
1447 
1448 	rate = clk_get_rate(clk);
1449 	KUNIT_ASSERT_GT(test, rate, 0);
1450 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1451 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1452 
1453 	clk_put(clk);
1454 }
1455 
1456 /*
1457  * Test that calling clk_set_rate_range with a minimum rate higher than
1458  * the maximum rate returns an error.
1459  */
1460 static void clk_range_test_set_range_invalid(struct kunit *test)
1461 {
1462 	struct clk_dummy_context *ctx = test->priv;
1463 	struct clk_hw *hw = &ctx->hw;
1464 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1465 
1466 	KUNIT_EXPECT_LT(test,
1467 			clk_set_rate_range(clk,
1468 					   DUMMY_CLOCK_RATE_1 + 1000,
1469 					   DUMMY_CLOCK_RATE_1),
1470 			0);
1471 
1472 	clk_put(clk);
1473 }
1474 
1475 /*
1476  * Test that users can't set multiple disjoint ranges that would be
1477  * impossible to meet.
1478  */
1479 static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1480 {
1481 	struct clk_dummy_context *ctx = test->priv;
1482 	struct clk_hw *hw = &ctx->hw;
1483 	struct clk *user1, *user2;
1484 
1485 	user1 = clk_hw_get_clk(hw, NULL);
1486 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1487 
1488 	user2 = clk_hw_get_clk(hw, NULL);
1489 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1490 
1491 	KUNIT_ASSERT_EQ(test,
1492 			clk_set_rate_range(user1, 1000, 2000),
1493 			0);
1494 
1495 	KUNIT_EXPECT_LT(test,
1496 			clk_set_rate_range(user2, 3000, 4000),
1497 			0);
1498 
1499 	clk_put(user2);
1500 	clk_put(user1);
1501 }
1502 
1503 /*
1504  * Test that if our clock has some boundaries and we try to round a rate
1505  * lower than the minimum, the returned rate will be within range.
1506  */
1507 static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1508 {
1509 	struct clk_dummy_context *ctx = test->priv;
1510 	struct clk_hw *hw = &ctx->hw;
1511 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1512 	long rate;
1513 
1514 	KUNIT_ASSERT_EQ(test,
1515 			clk_set_rate_range(clk,
1516 					   DUMMY_CLOCK_RATE_1,
1517 					   DUMMY_CLOCK_RATE_2),
1518 			0);
1519 
1520 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1521 	KUNIT_ASSERT_GT(test, rate, 0);
1522 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1523 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1524 
1525 	clk_put(clk);
1526 }
1527 
1528 /*
1529  * Test that if our clock has some boundaries and we try to set a rate
1530  * lower than the minimum, the new rate will be within range.
1531  */
1532 static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1533 {
1534 	struct clk_dummy_context *ctx = test->priv;
1535 	struct clk_hw *hw = &ctx->hw;
1536 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1537 	unsigned long rate;
1538 
1539 	KUNIT_ASSERT_EQ(test,
1540 			clk_set_rate_range(clk,
1541 					   DUMMY_CLOCK_RATE_1,
1542 					   DUMMY_CLOCK_RATE_2),
1543 			0);
1544 
1545 	KUNIT_ASSERT_EQ(test,
1546 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1547 			0);
1548 
1549 	rate = clk_get_rate(clk);
1550 	KUNIT_ASSERT_GT(test, rate, 0);
1551 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1552 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1553 
1554 	clk_put(clk);
1555 }
1556 
1557 /*
1558  * Test that if our clock has some boundaries and we try to round and
1559  * set a rate lower than the minimum, the rate returned by
1560  * clk_round_rate() will be consistent with the new rate set by
1561  * clk_set_rate().
1562  */
1563 static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1564 {
1565 	struct clk_dummy_context *ctx = test->priv;
1566 	struct clk_hw *hw = &ctx->hw;
1567 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1568 	long rounded;
1569 
1570 	KUNIT_ASSERT_EQ(test,
1571 			clk_set_rate_range(clk,
1572 					   DUMMY_CLOCK_RATE_1,
1573 					   DUMMY_CLOCK_RATE_2),
1574 			0);
1575 
1576 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1577 	KUNIT_ASSERT_GT(test, rounded, 0);
1578 
1579 	KUNIT_ASSERT_EQ(test,
1580 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1581 			0);
1582 
1583 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1584 
1585 	clk_put(clk);
1586 }
1587 
1588 /*
1589  * Test that if our clock has some boundaries and we try to round a rate
1590  * higher than the maximum, the returned rate will be within range.
1591  */
1592 static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1593 {
1594 	struct clk_dummy_context *ctx = test->priv;
1595 	struct clk_hw *hw = &ctx->hw;
1596 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1597 	long rate;
1598 
1599 	KUNIT_ASSERT_EQ(test,
1600 			clk_set_rate_range(clk,
1601 					   DUMMY_CLOCK_RATE_1,
1602 					   DUMMY_CLOCK_RATE_2),
1603 			0);
1604 
1605 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1606 	KUNIT_ASSERT_GT(test, rate, 0);
1607 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1608 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1609 
1610 	clk_put(clk);
1611 }
1612 
1613 /*
1614  * Test that if our clock has some boundaries and we try to set a rate
1615  * higher than the maximum, the new rate will be within range.
1616  */
1617 static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1618 {
1619 	struct clk_dummy_context *ctx = test->priv;
1620 	struct clk_hw *hw = &ctx->hw;
1621 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1622 	unsigned long rate;
1623 
1624 	KUNIT_ASSERT_EQ(test,
1625 			clk_set_rate_range(clk,
1626 					   DUMMY_CLOCK_RATE_1,
1627 					   DUMMY_CLOCK_RATE_2),
1628 			0);
1629 
1630 	KUNIT_ASSERT_EQ(test,
1631 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1632 			0);
1633 
1634 	rate = clk_get_rate(clk);
1635 	KUNIT_ASSERT_GT(test, rate, 0);
1636 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1637 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1638 
1639 	clk_put(clk);
1640 }
1641 
1642 /*
1643  * Test that if our clock has some boundaries and we try to round and
1644  * set a rate higher than the maximum, the rate returned by
1645  * clk_round_rate() will be consistent with the new rate set by
1646  * clk_set_rate().
1647  */
1648 static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1649 {
1650 	struct clk_dummy_context *ctx = test->priv;
1651 	struct clk_hw *hw = &ctx->hw;
1652 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1653 	long rounded;
1654 
1655 	KUNIT_ASSERT_EQ(test,
1656 			clk_set_rate_range(clk,
1657 					   DUMMY_CLOCK_RATE_1,
1658 					   DUMMY_CLOCK_RATE_2),
1659 			0);
1660 
1661 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1662 	KUNIT_ASSERT_GT(test, rounded, 0);
1663 
1664 	KUNIT_ASSERT_EQ(test,
1665 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1666 			0);
1667 
1668 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1669 
1670 	clk_put(clk);
1671 }
1672 
1673 /*
1674  * Test that if our clock has a rate lower than the minimum set by a
1675  * call to clk_set_rate_range(), the rate will be raised to match the
1676  * new minimum.
1677  *
1678  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1679  * modify the requested rate, which is our case in clk_dummy_rate_ops.
1680  * modify the requested rate, which is the case for clk_dummy_rate_ops.
1681 static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1682 {
1683 	struct clk_dummy_context *ctx = test->priv;
1684 	struct clk_hw *hw = &ctx->hw;
1685 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1686 	unsigned long rate;
1687 
1688 	KUNIT_ASSERT_EQ(test,
1689 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1690 			0);
1691 
1692 	KUNIT_ASSERT_EQ(test,
1693 			clk_set_rate_range(clk,
1694 					   DUMMY_CLOCK_RATE_1,
1695 					   DUMMY_CLOCK_RATE_2),
1696 			0);
1697 
1698 	rate = clk_get_rate(clk);
1699 	KUNIT_ASSERT_GT(test, rate, 0);
1700 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1701 
1702 	clk_put(clk);
1703 }
1704 
1705 /*
1706  * Test that if our clock has a rate higher than the maximum set by a
1707  * call to clk_set_rate_range(), the rate will be lowered to match the
1708  * new maximum.
1709  *
1710  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1711  * modify the requested rate, which is the case for clk_dummy_rate_ops.
1712  */
1713 static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1714 {
1715 	struct clk_dummy_context *ctx = test->priv;
1716 	struct clk_hw *hw = &ctx->hw;
1717 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1718 	unsigned long rate;
1719 
1720 	KUNIT_ASSERT_EQ(test,
1721 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1722 			0);
1723 
1724 	KUNIT_ASSERT_EQ(test,
1725 			clk_set_rate_range(clk,
1726 					   DUMMY_CLOCK_RATE_1,
1727 					   DUMMY_CLOCK_RATE_2),
1728 			0);
1729 
1730 	rate = clk_get_rate(clk);
1731 	KUNIT_ASSERT_GT(test, rate, 0);
1732 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1733 
1734 	clk_put(clk);
1735 }
1736 
1737 static struct kunit_case clk_range_test_cases[] = {
1738 	KUNIT_CASE(clk_range_test_set_range),
1739 	KUNIT_CASE(clk_range_test_set_range_invalid),
1740 	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
1741 	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
1742 	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
1743 	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
1744 	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
1745 	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
1746 	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
1747 	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
1748 	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
1749 	{}
1750 };
1751 
1752 /*
1753  * Test suite for a basic rate clock, without any parent.
1754  *
1755  * These tests exercise the rate range API: clk_set_rate_range(),
1756  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1757  */
1758 static struct kunit_suite clk_range_test_suite = {
1759 	.name = "clk-range-test",
1760 	.init = clk_test_init,
1761 	.exit = clk_test_exit,
1762 	.test_cases = clk_range_test_cases,
1763 };
1764 
1765 /*
1766  * Test that if we have several subsequent calls to
1767  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1768  * needed each and every time.
1769  *
1770  * With clk_dummy_maximize_rate_ops, this means that the rate will
1771  * trail along the maximum as it evolves.
1772  */
1773 static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1774 {
1775 	struct clk_dummy_context *ctx = test->priv;
1776 	struct clk_hw *hw = &ctx->hw;
1777 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1778 	unsigned long rate;
1779 
1780 	KUNIT_ASSERT_EQ(test,
1781 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1782 			0);
1783 
1784 	KUNIT_ASSERT_EQ(test,
1785 			clk_set_rate_range(clk,
1786 					   DUMMY_CLOCK_RATE_1,
1787 					   DUMMY_CLOCK_RATE_2),
1788 			0);
1789 
1790 	rate = clk_get_rate(clk);
1791 	KUNIT_ASSERT_GT(test, rate, 0);
1792 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1793 
1794 	KUNIT_ASSERT_EQ(test,
1795 			clk_set_rate_range(clk,
1796 					   DUMMY_CLOCK_RATE_1,
1797 					   DUMMY_CLOCK_RATE_2 - 1000),
1798 			0);
1799 
1800 	rate = clk_get_rate(clk);
1801 	KUNIT_ASSERT_GT(test, rate, 0);
1802 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1803 
1804 	KUNIT_ASSERT_EQ(test,
1805 			clk_set_rate_range(clk,
1806 					   DUMMY_CLOCK_RATE_1,
1807 					   DUMMY_CLOCK_RATE_2),
1808 			0);
1809 
1810 	rate = clk_get_rate(clk);
1811 	KUNIT_ASSERT_GT(test, rate, 0);
1812 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1813 
1814 	clk_put(clk);
1815 }
1816 
1817 /*
1818  * Test that if we have several subsequent calls to
1819  * clk_set_rate_range(), across multiple users, the core will reevaluate
1820  * whether a new rate is needed each and every time.
1821  *
1822  * With clk_dummy_maximize_rate_ops, this means that the rate will
1823  * trail along the maximum as it evolves.
1824  */
1825 static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1826 {
1827 	struct clk_dummy_context *ctx = test->priv;
1828 	struct clk_hw *hw = &ctx->hw;
1829 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1830 	struct clk *user1, *user2;
1831 	unsigned long rate;
1832 
1833 	user1 = clk_hw_get_clk(hw, NULL);
1834 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1835 
1836 	user2 = clk_hw_get_clk(hw, NULL);
1837 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1838 
1839 	KUNIT_ASSERT_EQ(test,
1840 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1841 			0);
1842 
1843 	KUNIT_ASSERT_EQ(test,
1844 			clk_set_rate_range(user1,
1845 					   0,
1846 					   DUMMY_CLOCK_RATE_2),
1847 			0);
1848 
1849 	rate = clk_get_rate(clk);
1850 	KUNIT_ASSERT_GT(test, rate, 0);
1851 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1852 
1853 	KUNIT_ASSERT_EQ(test,
1854 			clk_set_rate_range(user2,
1855 					   0,
1856 					   DUMMY_CLOCK_RATE_1),
1857 			0);
1858 
1859 	rate = clk_get_rate(clk);
1860 	KUNIT_ASSERT_GT(test, rate, 0);
1861 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1862 
1863 	KUNIT_ASSERT_EQ(test,
1864 			clk_drop_range(user2),
1865 			0);
1866 
1867 	rate = clk_get_rate(clk);
1868 	KUNIT_ASSERT_GT(test, rate, 0);
1869 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1870 
1871 	clk_put(user2);
1872 	clk_put(user1);
1873 	clk_put(clk);
1874 }
1875 
1876 /*
1877  * Test that if we have several subsequent calls to
1878  * clk_set_rate_range(), across multiple users, the core will reevaluate
1879  * whether a new rate is needed, including when a user drops its clock.
1880  *
1881  * With clk_dummy_maximize_rate_ops, this means that the rate will
1882  * trail along the maximum as it evolves.
1883  */
1884 static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1885 {
1886 	struct clk_dummy_context *ctx = test->priv;
1887 	struct clk_hw *hw = &ctx->hw;
1888 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1889 	struct clk *user1, *user2;
1890 	unsigned long rate;
1891 
1892 	user1 = clk_hw_get_clk(hw, NULL);
1893 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1894 
1895 	user2 = clk_hw_get_clk(hw, NULL);
1896 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1897 
1898 	KUNIT_ASSERT_EQ(test,
1899 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1900 			0);
1901 
1902 	KUNIT_ASSERT_EQ(test,
1903 			clk_set_rate_range(user1,
1904 					   0,
1905 					   DUMMY_CLOCK_RATE_2),
1906 			0);
1907 
1908 	rate = clk_get_rate(clk);
1909 	KUNIT_ASSERT_GT(test, rate, 0);
1910 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1911 
1912 	KUNIT_ASSERT_EQ(test,
1913 			clk_set_rate_range(user2,
1914 					   0,
1915 					   DUMMY_CLOCK_RATE_1),
1916 			0);
1917 
1918 	rate = clk_get_rate(clk);
1919 	KUNIT_ASSERT_GT(test, rate, 0);
1920 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1921 
1922 	clk_put(user2);
1923 
1924 	rate = clk_get_rate(clk);
1925 	KUNIT_ASSERT_GT(test, rate, 0);
1926 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1927 
1928 	clk_put(user1);
1929 	clk_put(clk);
1930 }
1931 
1932 static struct kunit_case clk_range_maximize_test_cases[] = {
1933 	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
1934 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
1935 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
1936 	{}
1937 };
1938 
1939 /*
1940  * Test suite for a basic rate clock, without any parent.
1941  *
1942  * These tests exercise the rate range API: clk_set_rate_range(),
1943  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1944  * driver that will always try to run at the highest possible rate.
1945  */
1946 static struct kunit_suite clk_range_maximize_test_suite = {
1947 	.name = "clk-range-maximize-test",
1948 	.init = clk_maximize_test_init,
1949 	.exit = clk_test_exit,
1950 	.test_cases = clk_range_maximize_test_cases,
1951 };
1952 
1953 /*
1954  * Test that if we have several subsequent calls to
1955  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1956  * needed each and every time.
1957  *
1958  * With clk_dummy_minimize_rate_ops, this means that the rate will
1959  * trail along the minimum as it evolves.
1960  */
1961 static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1962 {
1963 	struct clk_dummy_context *ctx = test->priv;
1964 	struct clk_hw *hw = &ctx->hw;
1965 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1966 	unsigned long rate;
1967 
1968 	KUNIT_ASSERT_EQ(test,
1969 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1970 			0);
1971 
1972 	KUNIT_ASSERT_EQ(test,
1973 			clk_set_rate_range(clk,
1974 					   DUMMY_CLOCK_RATE_1,
1975 					   DUMMY_CLOCK_RATE_2),
1976 			0);
1977 
1978 	rate = clk_get_rate(clk);
1979 	KUNIT_ASSERT_GT(test, rate, 0);
1980 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1981 
1982 	KUNIT_ASSERT_EQ(test,
1983 			clk_set_rate_range(clk,
1984 					   DUMMY_CLOCK_RATE_1 + 1000,
1985 					   DUMMY_CLOCK_RATE_2),
1986 			0);
1987 
1988 	rate = clk_get_rate(clk);
1989 	KUNIT_ASSERT_GT(test, rate, 0);
1990 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1991 
1992 	KUNIT_ASSERT_EQ(test,
1993 			clk_set_rate_range(clk,
1994 					   DUMMY_CLOCK_RATE_1,
1995 					   DUMMY_CLOCK_RATE_2),
1996 			0);
1997 
1998 	rate = clk_get_rate(clk);
1999 	KUNIT_ASSERT_GT(test, rate, 0);
2000 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2001 
2002 	clk_put(clk);
2003 }
2004 
2005 /*
2006  * Test that if we have several subsequent calls to
2007  * clk_set_rate_range(), across multiple users, the core will reevaluate
2008  * whether a new rate is needed each and every time.
2009  *
2010  * With clk_dummy_minimize_rate_ops, this means that the rate will
2011  * trail along the minimum as it evolves.
2012  */
2013 static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2014 {
2015 	struct clk_dummy_context *ctx = test->priv;
2016 	struct clk_hw *hw = &ctx->hw;
2017 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2018 	struct clk *user1, *user2;
2019 	unsigned long rate;
2020 
2021 	user1 = clk_hw_get_clk(hw, NULL);
2022 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2023 
2024 	user2 = clk_hw_get_clk(hw, NULL);
2025 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2026 
2027 	KUNIT_ASSERT_EQ(test,
2028 			clk_set_rate_range(user1,
2029 					   DUMMY_CLOCK_RATE_1,
2030 					   ULONG_MAX),
2031 			0);
2032 
2033 	rate = clk_get_rate(clk);
2034 	KUNIT_ASSERT_GT(test, rate, 0);
2035 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2036 
2037 	KUNIT_ASSERT_EQ(test,
2038 			clk_set_rate_range(user2,
2039 					   DUMMY_CLOCK_RATE_2,
2040 					   ULONG_MAX),
2041 			0);
2042 
2043 	rate = clk_get_rate(clk);
2044 	KUNIT_ASSERT_GT(test, rate, 0);
2045 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2046 
2047 	KUNIT_ASSERT_EQ(test,
2048 			clk_drop_range(user2),
2049 			0);
2050 
2051 	rate = clk_get_rate(clk);
2052 	KUNIT_ASSERT_GT(test, rate, 0);
2053 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2054 
2055 	clk_put(user2);
2056 	clk_put(user1);
2057 	clk_put(clk);
2058 }
2059 
2060 /*
2061  * Test that if we have several subsequent calls to
2062  * clk_set_rate_range(), across multiple users, the core will reevaluate
2063  * whether a new rate is needed, including when a user drops its clock.
2064  *
2065  * With clk_dummy_minimize_rate_ops, this means that the rate will
2066  * trail along the minimum as it evolves.
2067  */
2068 static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2069 {
2070 	struct clk_dummy_context *ctx = test->priv;
2071 	struct clk_hw *hw = &ctx->hw;
2072 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2073 	struct clk *user1, *user2;
2074 	unsigned long rate;
2075 
2076 	user1 = clk_hw_get_clk(hw, NULL);
2077 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2078 
2079 	user2 = clk_hw_get_clk(hw, NULL);
2080 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2081 
2082 	KUNIT_ASSERT_EQ(test,
2083 			clk_set_rate_range(user1,
2084 					   DUMMY_CLOCK_RATE_1,
2085 					   ULONG_MAX),
2086 			0);
2087 
2088 	rate = clk_get_rate(clk);
2089 	KUNIT_ASSERT_GT(test, rate, 0);
2090 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2091 
2092 	KUNIT_ASSERT_EQ(test,
2093 			clk_set_rate_range(user2,
2094 					   DUMMY_CLOCK_RATE_2,
2095 					   ULONG_MAX),
2096 			0);
2097 
2098 	rate = clk_get_rate(clk);
2099 	KUNIT_ASSERT_GT(test, rate, 0);
2100 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2101 
2102 	clk_put(user2);
2103 
2104 	rate = clk_get_rate(clk);
2105 	KUNIT_ASSERT_GT(test, rate, 0);
2106 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2107 
2108 	clk_put(user1);
2109 	clk_put(clk);
2110 }
2111 
2112 static struct kunit_case clk_range_minimize_test_cases[] = {
2113 	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
2114 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
2115 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
2116 	{}
2117 };
2118 
2119 /*
2120  * Test suite for a basic rate clock, without any parent.
2121  *
2122  * These tests exercise the rate range API: clk_set_rate_range(),
2123  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2124  * driver that will always try to run at the lowest possible rate.
2125  */
2126 static struct kunit_suite clk_range_minimize_test_suite = {
2127 	.name = "clk-range-minimize-test",
2128 	.init = clk_minimize_test_init,
2129 	.exit = clk_test_exit,
2130 	.test_cases = clk_range_minimize_test_cases,
2131 };
2132 
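/*
 * Context for the leaf mux tests: mux_ctx holds a two-parent mux, the
 * parent member is a pass-through clock on top of that mux, and hw is
 * the leaf clock under test. The determine_rate helper selected by the
 * test case is stored in determine_rate_func, and the clk_rate_request
 * it fills is written through req so the test can inspect it.
 */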
2133 struct clk_leaf_mux_ctx {
2134 	struct clk_multiple_parent_ctx mux_ctx;
2135 	struct clk_hw hw;
2136 	struct clk_hw parent;
2137 	struct clk_rate_request *req;
2138 	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2139 };
2140 
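/*
 * Forward the rate request to the parent using the determine_rate
 * helper chosen by the test case, and stash the resulting request in
 * ctx->req so the test can check which parent was selected.
 */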
2141 static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
2142 {
2143 	struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
2144 	int ret;
2145 	struct clk_rate_request *parent_req = ctx->req;
2146 
2147 	clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
2148 	ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
2149 	if (ret)
2150 		return ret;
2151 
2152 	req->rate = parent_req->rate;
2153 
2154 	return 0;
2155 }
2156 
2157 static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
2158 	.determine_rate = clk_leaf_mux_determine_rate,
2159 	.set_parent = clk_dummy_single_set_parent,
2160 	.get_parent = clk_dummy_single_get_parent,
2161 };
2162 
2163 static int
2164 clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2165 {
2166 	struct clk_leaf_mux_ctx *ctx;
2167 	const char *top_parents[2] = { "parent-0", "parent-1" };
2168 	int ret;
2169 
2170 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2171 	if (!ctx)
2172 		return -ENOMEM;
2173 	test->priv = ctx;
2174 
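	/*
	 * Build the clock tree bottom-up: two dummy parents feeding a mux,
	 * a pass-through "test-parent" clock on top of the mux, and the
	 * "test-clock" leaf that forwards rate requests to its parent.
	 */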
2175 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2176 								    &clk_dummy_rate_ops,
2177 								    0);
2178 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2179 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2180 	if (ret)
2181 		return ret;
2182 
2183 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2184 								    &clk_dummy_rate_ops,
2185 								    0);
2186 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2187 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2188 	if (ret)
2189 		return ret;
2190 
2191 	ctx->mux_ctx.current_parent = 0;
2192 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2193 						   &clk_multiple_parents_mux_ops,
2194 						   0);
2195 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2196 	if (ret)
2197 		return ret;
2198 
2199 	ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
2200 					  &empty_clk_ops, CLK_SET_RATE_PARENT);
2201 	ret = clk_hw_register(NULL, &ctx->parent);
2202 	if (ret)
2203 		return ret;
2204 
2205 	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
2206 				      &clk_leaf_mux_set_rate_parent_ops,
2207 				      CLK_SET_RATE_PARENT);
2208 	ret = clk_hw_register(NULL, &ctx->hw);
2209 	if (ret)
2210 		return ret;
2211 
2212 	return 0;
2213 }
2214 
2215 static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
2216 {
2217 	struct clk_leaf_mux_ctx *ctx = test->priv;
2218 
2219 	clk_hw_unregister(&ctx->hw);
2220 	clk_hw_unregister(&ctx->parent);
2221 	clk_hw_unregister(&ctx->mux_ctx.hw);
2222 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2223 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2224 }
2225 
2226 struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
2227 	const char *desc;
2228 	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2229 };
2230 
2231 static void
2232 clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
2233 		const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
2234 {
2235 	strcpy(desc, t->desc);
2236 }
2237 
2238 static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
2239 clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
2240 	{
2241 		/*
2242 		 * Test that __clk_determine_rate() on the parent that can't
2243 		 * change rate doesn't return a clk_rate_request structure with
2244 		 * the best_parent_hw pointer pointing to the parent.
2245 		 */
2246 		.desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
2247 		.determine_rate_func = __clk_determine_rate,
2248 	},
2249 	{
2250 		/*
2251 		 * Test that __clk_mux_determine_rate() on the parent that
2252 		 * can't change rate doesn't return a clk_rate_request
2253 		 * structure with the best_parent_hw pointer pointing to
2254 		 * the parent.
2255 		 */
2256 		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
2257 		.determine_rate_func = __clk_mux_determine_rate,
2258 	},
2259 	{
2260 		/*
2261 		 * Test that __clk_mux_determine_rate_closest() on the parent
2262 		 * that can't change rate doesn't return a clk_rate_request
2263 		 * structure with the best_parent_hw pointer pointing to
2264 		 * the parent.
2265 		 */
2266 		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
2267 		.determine_rate_func = __clk_mux_determine_rate_closest,
2268 	},
2269 	{
2270 		/*
2271 		 * Test that clk_hw_determine_rate_no_reparent() on the parent
2272 		 * that can't change rate doesn't return a clk_rate_request
2273 		 * structure with the best_parent_hw pointer pointing to
2274 		 * the parent.
2275 		 */
2276 		.desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
2277 		.determine_rate_func = clk_hw_determine_rate_no_reparent,
2278 	},
2279 };
2280 
2281 KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2282 		  clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
2283 		  clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)
2284 
2285 /*
2286  * Test that when a clk that can't change rate itself calls a function like
2287  * __clk_determine_rate() on its parent it doesn't get back a clk_rate_request
2288  * structure that has the best_parent_hw pointer pointing to the clk_hw passed
2289  * into the determine rate function. See commit 262ca38f4b6e ("clk: Stop
2290  * forwarding clk_rate_requests to the parent") for more background.
2291  */
2292 static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
2293 {
2294 	struct clk_leaf_mux_ctx *ctx = test->priv;
2295 	struct clk_hw *hw = &ctx->hw;
2296 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2297 	struct clk_rate_request req;
2298 	unsigned long rate;
2299 	const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;
2300 
2301 	test_param = test->param_value;
2302 	ctx->determine_rate_func = test_param->determine_rate_func;
2303 
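	/* Let the determine_rate helper under test fill our local request */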
2304 	ctx->req = &req;
2305 	rate = clk_get_rate(clk);
2306 	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2307 	KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));
2308 
2309 	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
2310 	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
2311 	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
2312 
2313 	clk_put(clk);
2314 }
2315 
2316 static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
2317 	KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2318 			 clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
2319 	{}
2320 };
2321 
2322 /*
2323  * Test suite for a clock whose parent is a pass-through clk whose parent is a
2324  * mux with multiple parents. The leaf and pass-through clocks have the
2325  * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
2326  * will then select which parent is the best fit for a given rate.
2327  *
2328  * These tests exercise the behaviour of muxes, and the proper selection
2329  * of parents.
2330  */
2331 static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
2332 	.name = "clk-leaf-mux-set-rate-parent",
2333 	.init = clk_leaf_mux_set_rate_parent_test_init,
2334 	.exit = clk_leaf_mux_set_rate_parent_test_exit,
2335 	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
2336 };
2337 
2338 struct clk_mux_notifier_rate_change {
2339 	bool done;
2340 	unsigned long old_rate;
2341 	unsigned long new_rate;
2342 	wait_queue_head_t wq;
2343 };
2344 
2345 struct clk_mux_notifier_ctx {
2346 	struct clk_multiple_parent_ctx mux_ctx;
2347 	struct clk *clk;
2348 	struct notifier_block clk_nb;
2349 	struct clk_mux_notifier_rate_change pre_rate_change;
2350 	struct clk_mux_notifier_rate_change post_rate_change;
2351 };
2352 
2353 #define NOTIFIER_TIMEOUT_MS 100
2354 
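/*
 * Record the old and new rates reported with the PRE_RATE_CHANGE and
 * POST_RATE_CHANGE notifications and wake up the test waiting on the
 * matching waitqueue.
 */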
2355 static int clk_mux_notifier_callback(struct notifier_block *nb,
2356 				     unsigned long action, void *data)
2357 {
2358 	struct clk_notifier_data *clk_data = data;
2359 	struct clk_mux_notifier_ctx *ctx = container_of(nb,
2360 							struct clk_mux_notifier_ctx,
2361 							clk_nb);
2362 
2363 	if (action & PRE_RATE_CHANGE) {
2364 		ctx->pre_rate_change.old_rate = clk_data->old_rate;
2365 		ctx->pre_rate_change.new_rate = clk_data->new_rate;
2366 		ctx->pre_rate_change.done = true;
2367 		wake_up_interruptible(&ctx->pre_rate_change.wq);
2368 	}
2369 
2370 	if (action & POST_RATE_CHANGE) {
2371 		ctx->post_rate_change.old_rate = clk_data->old_rate;
2372 		ctx->post_rate_change.new_rate = clk_data->new_rate;
2373 		ctx->post_rate_change.done = true;
2374 		wake_up_interruptible(&ctx->post_rate_change.wq);
2375 	}
2376 
2377 	return 0;
2378 }
2379 
2380 static int clk_mux_notifier_test_init(struct kunit *test)
2381 {
2382 	struct clk_mux_notifier_ctx *ctx;
2383 	const char *top_parents[2] = { "parent-0", "parent-1" };
2384 	int ret;
2385 
2386 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2387 	if (!ctx)
2388 		return -ENOMEM;
2389 	test->priv = ctx;
2390 	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2391 	init_waitqueue_head(&ctx->pre_rate_change.wq);
2392 	init_waitqueue_head(&ctx->post_rate_change.wq);
2393 
2394 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2395 								    &clk_dummy_rate_ops,
2396 								    0);
2397 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2398 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2399 	if (ret)
2400 		return ret;
2401 
2402 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2403 								    &clk_dummy_rate_ops,
2404 								    0);
2405 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2406 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2407 	if (ret)
2408 		return ret;
2409 
2410 	ctx->mux_ctx.current_parent = 0;
2411 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2412 						   &clk_multiple_parents_mux_ops,
2413 						   0);
2414 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2415 	if (ret)
2416 		return ret;
2417 
2418 	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2419 	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2420 	if (ret)
2421 		return ret;
2422 
2423 	return 0;
2424 }
2425 
2426 static void clk_mux_notifier_test_exit(struct kunit *test)
2427 {
2428 	struct clk_mux_notifier_ctx *ctx = test->priv;
2429 	struct clk *clk = ctx->clk;
2430 
2431 	clk_notifier_unregister(clk, &ctx->clk_nb);
2432 	clk_put(clk);
2433 
2434 	clk_hw_unregister(&ctx->mux_ctx.hw);
2435 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2436 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2437 }
2438 
2439 /*
2440  * Test that if we have a notifier registered on a mux, the core
2441  * will notify us when we switch to another parent, and with the proper
2442  * old and new rates.
2443  */
2444 static void clk_mux_notifier_set_parent_test(struct kunit *test)
2445 {
2446 	struct clk_mux_notifier_ctx *ctx = test->priv;
2447 	struct clk_hw *hw = &ctx->mux_ctx.hw;
2448 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2449 	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2450 	int ret;
2451 
2452 	ret = clk_set_parent(clk, new_parent);
2453 	KUNIT_ASSERT_EQ(test, ret, 0);
2454 
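	/* Wait, with a timeout, for the PRE_RATE_CHANGE notification */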
2455 	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2456 					       ctx->pre_rate_change.done,
2457 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2458 	KUNIT_ASSERT_GT(test, ret, 0);
2459 
2460 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2461 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2462 
2463 	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2464 					       ctx->post_rate_change.done,
2465 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2466 	KUNIT_ASSERT_GT(test, ret, 0);
2467 
2468 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2469 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2470 
2471 	clk_put(new_parent);
2472 	clk_put(clk);
2473 }
2474 
2475 static struct kunit_case clk_mux_notifier_test_cases[] = {
2476 	KUNIT_CASE(clk_mux_notifier_set_parent_test),
2477 	{}
2478 };
2479 
2480 /*
2481  * Test suite for a mux with multiple parents, and a notifier registered
2482  * on the mux.
2483  *
2484  * These tests exercise the behaviour of notifiers.
2485  */
2486 static struct kunit_suite clk_mux_notifier_test_suite = {
2487 	.name = "clk-mux-notifier",
2488 	.init = clk_mux_notifier_test_init,
2489 	.exit = clk_mux_notifier_test_exit,
2490 	.test_cases = clk_mux_notifier_test_cases,
2491 };
2492 
2493 static int
2494 clk_mux_no_reparent_test_init(struct kunit *test)
2495 {
2496 	struct clk_multiple_parent_ctx *ctx;
2497 	const char *parents[2] = { "parent-0", "parent-1"};
2498 	int ret;
2499 
2500 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2501 	if (!ctx)
2502 		return -ENOMEM;
2503 	test->priv = ctx;
2504 
2505 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2506 							    &clk_dummy_rate_ops,
2507 							    0);
2508 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2509 	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
2510 	if (ret)
2511 		return ret;
2512 
2513 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2514 							    &clk_dummy_rate_ops,
2515 							    0);
2516 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2517 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
2518 	if (ret)
2519 		return ret;
2520 
2521 	ctx->current_parent = 0;
2522 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
2523 					   &clk_multiple_parents_no_reparent_mux_ops,
2524 					   0);
2525 	ret = clk_hw_register(NULL, &ctx->hw);
2526 	if (ret)
2527 		return ret;
2528 
2529 	return 0;
2530 }
2531 
2532 static void
2533 clk_mux_no_reparent_test_exit(struct kunit *test)
2534 {
2535 	struct clk_multiple_parent_ctx *ctx = test->priv;
2536 
2537 	clk_hw_unregister(&ctx->hw);
2538 	clk_hw_unregister(&ctx->parents_ctx[0].hw);
2539 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
2540 }
2541 
2542 /*
2543  * Test that if we have a mux that cannot change parent and we call
2544  * clk_round_rate() on it with a rate that should cause it to change
2545  * parent, it won't.
2546  */
2547 static void clk_mux_no_reparent_round_rate(struct kunit *test)
2548 {
2549 	struct clk_multiple_parent_ctx *ctx = test->priv;
2550 	struct clk_hw *hw = &ctx->hw;
2551 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2552 	struct clk *other_parent, *parent;
2553 	unsigned long other_parent_rate;
2554 	unsigned long parent_rate;
2555 	long rounded_rate;
2556 
2557 	parent = clk_get_parent(clk);
2558 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2559 
2560 	parent_rate = clk_get_rate(parent);
2561 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2562 
2563 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2564 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2565 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2566 
2567 	other_parent_rate = clk_get_rate(other_parent);
2568 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2569 	clk_put(other_parent);
2570 
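	/*
	 * Rounding to the other parent's rate must not trigger a reparent:
	 * we expect a rate based on the current parent instead.
	 */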
2571 	rounded_rate = clk_round_rate(clk, other_parent_rate);
2572 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
2573 	KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
2574 
2575 	clk_put(clk);
2576 }
2577 
2578 /*
2579  * Test that if we have a mux that cannot change parent and we call
2580  * clk_set_rate() on it with a rate that should cause it to change
2581  * parent, it won't.
2582  */
2583 static void clk_mux_no_reparent_set_rate(struct kunit *test)
2584 {
2585 	struct clk_multiple_parent_ctx *ctx = test->priv;
2586 	struct clk_hw *hw = &ctx->hw;
2587 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2588 	struct clk *other_parent, *parent;
2589 	unsigned long other_parent_rate;
2590 	unsigned long parent_rate;
2591 	unsigned long rate;
2592 	int ret;
2593 
2594 	parent = clk_get_parent(clk);
2595 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2596 
2597 	parent_rate = clk_get_rate(parent);
2598 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2599 
2600 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2601 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2602 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2603 
2604 	other_parent_rate = clk_get_rate(other_parent);
2605 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2606 	clk_put(other_parent);
2607 
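	/*
	 * Setting the other parent's rate must not trigger a reparent
	 * either: the clock should keep running at its current parent's
	 * rate.
	 */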
2608 	ret = clk_set_rate(clk, other_parent_rate);
2609 	KUNIT_ASSERT_EQ(test, ret, 0);
2610 
2611 	rate = clk_get_rate(clk);
2612 	KUNIT_ASSERT_GT(test, rate, 0);
2613 	KUNIT_EXPECT_EQ(test, rate, parent_rate);
2614 
2615 	clk_put(clk);
2616 }
2617 
2618 static struct kunit_case clk_mux_no_reparent_test_cases[] = {
2619 	KUNIT_CASE(clk_mux_no_reparent_round_rate),
2620 	KUNIT_CASE(clk_mux_no_reparent_set_rate),
2621 	{}
2622 };
2623 
2624 /*
2625  * Test suite for a clock mux that isn't allowed to change parent, using
2626  * the clk_hw_determine_rate_no_reparent() helper.
2627  *
2628  * These tests exercise that helper, and the proper selection of
2629  * rates and parents.
2630  */
2631 static struct kunit_suite clk_mux_no_reparent_test_suite = {
2632 	.name = "clk-mux-no-reparent",
2633 	.init = clk_mux_no_reparent_test_init,
2634 	.exit = clk_mux_no_reparent_test_exit,
2635 	.test_cases = clk_mux_no_reparent_test_cases,
2636 };
2637 
2638 struct clk_register_clk_parent_data_test_case {
2639 	const char *desc;
2640 	struct clk_parent_data pdata;
2641 };
2642 
2643 static void
2644 clk_register_clk_parent_data_test_case_to_desc(
2645 		const struct clk_register_clk_parent_data_test_case *t, char *desc)
2646 {
2647 	strcpy(desc, t->desc);
2648 }
2649 
2650 static const struct clk_register_clk_parent_data_test_case
2651 clk_register_clk_parent_data_of_cases[] = {
2652 	{
2653 		/*
2654 		 * Test that a clk registered with a struct device_node can
2655 		 * find a parent based on struct clk_parent_data::index.
2656 		 */
2657 		.desc = "clk_parent_data_of_index_test",
2658 		.pdata.index = 0,
2659 	},
2660 	{
2661 		/*
2662 		 * Test that a clk registered with a struct device_node can
2663  * find a parent based on struct clk_parent_data::fw_name.
2664 		 */
2665 		.desc = "clk_parent_data_of_fwname_test",
2666 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2667 	},
2668 	{
2669 		/*
2670 		 * Test that a clk registered with a struct device_node can
2671 		 * find a parent based on struct clk_parent_data::name.
2672 		 */
2673 		.desc = "clk_parent_data_of_name_test",
2674 		/* The index must be negative to indicate firmware not used */
2675 		.pdata.index = -1,
2676 		.pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
2677 	},
2678 	{
2679 		/*
2680 		 * Test that a clk registered with a struct device_node can
2681 		 * find a parent based on struct
2682 		 * clk_parent_data::{fw_name,name}.
2683 		 */
2684 		.desc = "clk_parent_data_of_fwname_name_test",
2685 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2686 		.pdata.name = "not_matching",
2687 	},
2688 	{
2689 		/*
2690 		 * Test that a clk registered with a struct device_node can
2691 		 * find a parent based on struct clk_parent_data::{index,name}.
2692 		 * Index takes priority.
2693 		 */
2694 		.desc = "clk_parent_data_of_index_name_priority_test",
2695 		.pdata.index = 0,
2696 		.pdata.name = "not_matching",
2697 	},
2698 	{
2699 		/*
2700 		 * Test that a clk registered with a struct device_node can
2701 		 * find a parent based on struct
2702  * clk_parent_data::{index,fw_name,name}. The fw_name takes
2703 		 * priority over index and name.
2704 		 */
2705 		.desc = "clk_parent_data_of_index_fwname_name_priority_test",
2706 		.pdata.index = 1,
2707 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2708 		.pdata.name = "not_matching",
2709 	},
2710 };
2711 
2712 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
2713 		  clk_register_clk_parent_data_test_case_to_desc)
2714 
2715 /**
2716  * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
2717  * @np: device node of clk under test
2718  * @hw: clk_hw for clk under test
2719  */
2720 struct clk_register_clk_parent_data_of_ctx {
2721 	struct device_node *np;
2722 	struct clk_hw hw;
2723 };
2724 
2725 static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
2726 {
2727 	struct clk_register_clk_parent_data_of_ctx *ctx;
2728 
2729 	KUNIT_ASSERT_EQ(test, 0,
2730 			of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
2731 
2732 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2733 	if (!ctx)
2734 		return -ENOMEM;
2735 	test->priv = ctx;
2736 
2737 	ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
2738 	if (!ctx->np)
2739 		return -ENODEV;
2740 
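	/* The node reference is dropped automatically when the test exits */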
2741 	of_node_put_kunit(test, ctx->np);
2742 
2743 	return 0;
2744 }
2745 
2746 /*
2747  * Test that a clk registered with a struct device_node can find a parent based on
2748  * struct clk_parent_data when the hw member isn't set.
2749  */
2750 static void clk_register_clk_parent_data_of_test(struct kunit *test)
2751 {
2752 	struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
2753 	struct clk_hw *parent_hw;
2754 	const struct clk_register_clk_parent_data_test_case *test_param;
2755 	struct clk_init_data init = { };
2756 	struct clk *expected_parent, *actual_parent;
2757 
2758 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
2759 
2760 	expected_parent = of_clk_get_kunit(test, ctx->np, 0);
2761 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2762 
2763 	test_param = test->param_value;
2764 	init.parent_data = &test_param->pdata;
2765 	init.num_parents = 1;
2766 	init.name = "parent_data_of_test_clk";
2767 	init.ops = &clk_dummy_single_parent_ops;
2768 	ctx->hw.init = &init;
2769 	KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
2770 
2771 	parent_hw = clk_hw_get_parent(&ctx->hw);
2772 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2773 
2774 	actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2775 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2776 
2777 	KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2778 }
2779 
2780 static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
2781 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
2782 			 clk_register_clk_parent_data_of_test_gen_params),
2783 	{}
2784 };
2785 
2786 /*
2787  * Test suite for registering clks with struct clk_parent_data and a struct
2788  * device_node.
2789  */
2790 static struct kunit_suite clk_register_clk_parent_data_of_suite = {
2791 	.name = "clk_register_clk_parent_data_of",
2792 	.init = clk_register_clk_parent_data_of_test_init,
2793 	.test_cases = clk_register_clk_parent_data_of_test_cases,
2794 };
2795 
2796 /**
2797  * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
2798  * @dev: device of clk under test
2799  * @hw: clk_hw for clk under test
2800  * @pdrv: driver to attach to find @dev
2801  */
2802 struct clk_register_clk_parent_data_device_ctx {
2803 	struct device *dev;
2804 	struct clk_hw hw;
2805 	struct platform_driver pdrv;
2806 };
2807 
2808 static inline struct clk_register_clk_parent_data_device_ctx *
2809 clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
2810 {
2811 	return container_of(to_platform_driver(pdev->dev.driver),
2812 			    struct clk_register_clk_parent_data_device_ctx, pdrv);
2813 }
2814 
2815 static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
2816 {
2817 	struct clk_register_clk_parent_data_device_ctx *ctx;
2818 
2819 	ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
2820 	ctx->dev = &pdev->dev;
2821 
2822 	return 0;
2823 }
2824 
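/*
 * Register a platform driver matching the "test,clk-parent-data" node
 * from the overlay so that probe hands back the struct device used to
 * register the clks under test.
 */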
2825 static void clk_register_clk_parent_data_device_driver(struct kunit *test)
2826 {
2827 	struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
2828 	static const struct of_device_id match_table[] = {
2829 		{ .compatible = "test,clk-parent-data" },
2830 		{ }
2831 	};
2832 
2833 	ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
2834 	ctx->pdrv.driver.of_match_table = match_table;
2835 	ctx->pdrv.driver.name = __func__;
2836 	ctx->pdrv.driver.owner = THIS_MODULE;
2837 
2838 	KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
2839 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
2840 }
2841 
2842 static const struct clk_register_clk_parent_data_test_case
2843 clk_register_clk_parent_data_device_cases[] = {
2844 	{
2845 		/*
2846 		 * Test that a clk registered with a struct device can find a
2847 		 * parent based on struct clk_parent_data::index.
2848 		 */
2849 		.desc = "clk_parent_data_device_index_test",
2850 		.pdata.index = 1,
2851 	},
2852 	{
2853 		/*
2854 		 * Test that a clk registered with a struct device can find a
2855  * parent based on struct clk_parent_data::fw_name.
2856 		 */
2857 		.desc = "clk_parent_data_device_fwname_test",
2858 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2859 	},
2860 	{
2861 		/*
2862 		 * Test that a clk registered with a struct device can find a
2863 		 * parent based on struct clk_parent_data::name.
2864 		 */
2865 		.desc = "clk_parent_data_device_name_test",
2866 		/* The index must be negative to indicate firmware not used */
2867 		.pdata.index = -1,
2868 		.pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2869 	},
2870 	{
2871 		/*
2872 		 * Test that a clk registered with a struct device can find a
2873 		 * parent based on struct clk_parent_data::{fw_name,name}.
2874 		 */
2875 		.desc = "clk_parent_data_device_fwname_name_test",
2876 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2877 		.pdata.name = "not_matching",
2878 	},
2879 	{
2880 		/*
2881 		 * Test that a clk registered with a struct device can find a
2882 		 * parent based on struct clk_parent_data::{index,name}. Index
2883 		 * takes priority.
2884 		 */
2885 		.desc = "clk_parent_data_device_index_name_priority_test",
2886 		.pdata.index = 1,
2887 		.pdata.name = "not_matching",
2888 	},
2889 	{
2890 		/*
2891 		 * Test that a clk registered with a struct device can find a
2892  * parent based on struct clk_parent_data::{index,fw_name,name}.
2893 		 * The fw_name takes priority over index and name.
2894 		 */
2895 		.desc = "clk_parent_data_device_index_fwname_name_priority_test",
2896 		.pdata.index = 0,
2897 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2898 		.pdata.name = "not_matching",
2899 	},
2900 };
2901 
2902 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
2903 		  clk_register_clk_parent_data_device_cases,
2904 		  clk_register_clk_parent_data_test_case_to_desc)
2905 
2906 /*
2907  * Test that a clk registered with a struct device can find a parent based on
2908  * struct clk_parent_data when the hw member isn't set.
2909  */
2910 static void clk_register_clk_parent_data_device_test(struct kunit *test)
2911 {
2912 	struct clk_register_clk_parent_data_device_ctx *ctx;
2913 	const struct clk_register_clk_parent_data_test_case *test_param;
2914 	struct clk_hw *parent_hw;
2915 	struct clk_init_data init = { };
2916 	struct clk *expected_parent, *actual_parent;
2917 
2918 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2919 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
2920 	test->priv = ctx;
2921 
2922 	clk_register_clk_parent_data_device_driver(test);
2923 
2924 	expected_parent = clk_get_kunit(test, ctx->dev, "50");
2925 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2926 
2927 	test_param = test->param_value;
2928 	init.parent_data = &test_param->pdata;
2929 	init.num_parents = 1;
2930 	init.name = "parent_data_device_test_clk";
2931 	init.ops = &clk_dummy_single_parent_ops;
2932 	ctx->hw.init = &init;
2933 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
2934 
2935 	parent_hw = clk_hw_get_parent(&ctx->hw);
2936 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2937 
2938 	actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2939 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2940 
2941 	KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2942 }
2943 
2944 static const struct clk_register_clk_parent_data_test_case
2945 clk_register_clk_parent_data_device_hw_cases[] = {
2946 	{
2947 		/*
2948 		 * Test that a clk registered with a struct device can find a
2949 		 * parent based on struct clk_parent_data::hw.
2950 		 */
2951 		.desc = "clk_parent_data_device_hw_index_test",
2952 		/* The index must be negative to indicate firmware not used */
2953 		.pdata.index = -1,
2954 	},
2955 	{
2956 		/*
2957 		 * Test that a clk registered with a struct device can find a
2958 		 * parent based on struct clk_parent_data::hw when
2959 		 * struct clk_parent_data::fw_name is set.
2960 		 */
2961 		.desc = "clk_parent_data_device_hw_fwname_test",
2962 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2963 	},
2964 	{
2965 		/*
2966 		 * Test that a clk registered with a struct device can find a
2967 		 * parent based on struct clk_parent_data::hw when struct
2968 		 * clk_parent_data::name is set.
2969 		 */
2970 		.desc = "clk_parent_data_device_hw_name_test",
2971 		/* The index must be negative to indicate firmware not used */
2972 		.pdata.index = -1,
2973 		.pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2974 	},
2975 	{
2976 		/*
2977 		 * Test that a clk registered with a struct device can find a
2978 		 * parent based on struct clk_parent_data::hw when struct
2979 		 * clk_parent_data::{fw_name,name} are set.
2980 		 */
2981 		.desc = "clk_parent_data_device_hw_fwname_name_test",
2982 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2983 		.pdata.name = "not_matching",
2984 	},
2985 	{
2986 		/*
2987 		 * Test that a clk registered with a struct device can find a
2988 		 * parent based on struct clk_parent_data::hw when struct
2989 		 * clk_parent_data::index is set. The hw pointer takes
2990 		 * priority.
2991 		 */
2992 		.desc = "clk_parent_data_device_hw_index_priority_test",
2993 		.pdata.index = 0,
2994 	},
2995 	{
2996 		/*
2997 		 * Test that a clk registered with a struct device can find a
2998 		 * parent based on struct clk_parent_data::hw when
2999  * struct clk_parent_data::{index,fw_name,name} are set.
3000 		 * The hw pointer takes priority over everything else.
3001 		 */
3002 		.desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
3003 		.pdata.index = 0,
3004 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
3005 		.pdata.name = "not_matching",
3006 	},
3007 };
3008 
3009 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
3010 		  clk_register_clk_parent_data_device_hw_cases,
3011 		  clk_register_clk_parent_data_test_case_to_desc)
3012 
3013 /*
3014  * Test that a clk registered with a struct device can find a
3015  * parent based on struct clk_parent_data::hw.
3016  */
3017 static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
3018 {
3019 	struct clk_register_clk_parent_data_device_ctx *ctx;
3020 	const struct clk_register_clk_parent_data_test_case *test_param;
3021 	struct clk_dummy_context *parent;
3022 	struct clk_hw *parent_hw;
3023 	struct clk_parent_data pdata = { };
3024 	struct clk_init_data init = { };
3025 
3026 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
3027 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
3028 	test->priv = ctx;
3029 
3030 	clk_register_clk_parent_data_device_driver(test);
3031 
3032 	parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
3033 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
3034 
3035 	parent_hw = &parent->hw;
3036 	parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
3037 						&clk_dummy_rate_ops, 0);
3038 
3039 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
3040 
3041 	test_param = test->param_value;
3042 	memcpy(&pdata, &test_param->pdata, sizeof(pdata));
3043 	pdata.hw = parent_hw;
3044 	init.parent_data = &pdata;
3045 	init.num_parents = 1;
3046 	init.ops = &clk_dummy_single_parent_ops;
3047 	init.name = "parent_data_device_hw_test_clk";
3048 	ctx->hw.init = &init;
3049 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
3050 
3051 	KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
3052 }
3053 
3054 static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
3055 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
3056 			 clk_register_clk_parent_data_device_test_gen_params),
3057 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
3058 			 clk_register_clk_parent_data_device_hw_test_gen_params),
3059 	{}
3060 };
3061 
3062 static int clk_register_clk_parent_data_device_init(struct kunit *test)
3063 {
3064 	KUNIT_ASSERT_EQ(test, 0,
3065 			of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
3066 
3067 	return 0;
3068 }
3069 
3070 /*
3071  * Test suite for registering clks with struct clk_parent_data and a struct
3072  * device.
3073  */
3074 static struct kunit_suite clk_register_clk_parent_data_device_suite = {
3075 	.name = "clk_register_clk_parent_data_device",
3076 	.init = clk_register_clk_parent_data_device_init,
3077 	.test_cases = clk_register_clk_parent_data_device_test_cases,
3078 };
3079 
3080 struct clk_assigned_rates_context {
3081 	struct clk_dummy_context clk0;
3082 	struct clk_dummy_context clk1;
3083 };
3084 
3085 /*
3086  * struct clk_assigned_rates_test_param - Test parameters for clk_assigned_rates test
3087  * @desc: Test description
3088  * @overlay_begin: Pointer to start of DT overlay to apply for test
3089  * @overlay_end: Pointer to end of DT overlay to apply for test
3090  * @rate0: Initial rate of first clk
3091  * @rate1: Initial rate of second clk
3092  * @consumer_test: true if a consumer is being tested
3093  */
3094 struct clk_assigned_rates_test_param {
3095 	const char *desc;
3096 	u8 *overlay_begin;
3097 	u8 *overlay_end;
3098 	unsigned long rate0;
3099 	unsigned long rate1;
3100 	bool consumer_test;
3101 };
3102 
3103 #define TEST_PARAM_OVERLAY(overlay_name)				\
3104 	.overlay_begin = of_overlay_begin(overlay_name),		\
3105 	.overlay_end = of_overlay_end(overlay_name)
3106 
3107 static void
3108 clk_assigned_rates_register_clk(struct kunit *test,
3109 				struct clk_dummy_context *ctx,
3110 				struct device_node *np, const char *name,
3111 				unsigned long rate)
3112 {
3113 	struct clk_init_data init = { };
3114 
3115 	init.name = name;
3116 	init.ops = &clk_dummy_rate_ops;
3117 	ctx->hw.init = &init;
3118 	ctx->rate = rate;
3119 
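	/*
	 * Registering the clk must not change its rate; any assigned rate
	 * is only applied later, when the provider is added or consumer
	 * defaults are processed.
	 */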
3120 	KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, &ctx->hw));
3121 	KUNIT_ASSERT_EQ(test, ctx->rate, rate);
3122 }
3123 
3124 /*
3125  * Does most of the work of the test:
3126  *
3127  * 1. Apply the overlay to test
3128  * 2. Register the clk or clks to test
3129  * 3. Register the clk provider
3130  * 4. Apply clk defaults to the consumer device if this is a consumer test
3131  *
3132  * The tests will set different test_param values to test different scenarios
3133  * and validate that in their test functions.
3134  */
3135 static int clk_assigned_rates_test_init(struct kunit *test)
3136 {
3137 	struct device_node *np, *consumer;
3138 	struct clk_hw_onecell_data *data;
3139 	struct clk_assigned_rates_context *ctx;
3140 	u32 clk_cells;
3141 	const struct clk_assigned_rates_test_param *test_param;
3142 
3143 	test_param = test->param_value;
3144 
3145 	KUNIT_ASSERT_EQ(test, 0, __of_overlay_apply_kunit(test,
3146 							  test_param->overlay_begin,
3147 							  test_param->overlay_end));
3148 
3149 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3150 		ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL));
3151 	test->priv = ctx;
3152 
3153 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3154 		np = of_find_compatible_node(NULL, NULL, "test,clk-assigned-rates"));
3155 	of_node_put_kunit(test, np);
3156 
3157 	KUNIT_ASSERT_EQ(test, 0, of_property_read_u32(np, "#clock-cells", &clk_cells));
3158 	/* Only support #clock-cells = <0> or <1> */
3159 	KUNIT_ASSERT_LT(test, clk_cells, 2);
3160 
3161 	clk_assigned_rates_register_clk(test, &ctx->clk0, np,
3162 					"test_assigned_rate0", test_param->rate0);
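	/*
	 * With #clock-cells = <0> expose clk0 directly; with
	 * #clock-cells = <1> register a second clk and expose both
	 * through a onecell provider.
	 */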
3163 	if (clk_cells == 0) {
3164 		KUNIT_ASSERT_EQ(test, 0,
3165 				of_clk_add_hw_provider_kunit(test, np, of_clk_hw_simple_get,
3166 							     &ctx->clk0.hw));
3167 	} else if (clk_cells == 1) {
3168 		clk_assigned_rates_register_clk(test, &ctx->clk1, np,
3169 						"test_assigned_rate1", test_param->rate1);
3170 
3171 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3172 			data = kunit_kzalloc(test, struct_size(data, hws, 2), GFP_KERNEL));
3173 		data->num = 2;
3174 		data->hws[0] = &ctx->clk0.hw;
3175 		data->hws[1] = &ctx->clk1.hw;
3176 
3177 		KUNIT_ASSERT_EQ(test, 0,
3178 				of_clk_add_hw_provider_kunit(test, np, of_clk_hw_onecell_get, data));
3179 	}
3180 
3181 	/* Consumers are optional */
3182 	if (test_param->consumer_test) {
3183 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
3184 			consumer = of_find_compatible_node(NULL, NULL, "test,clk-consumer"));
3185 		of_node_put_kunit(test, consumer);
3186 
3187 		KUNIT_ASSERT_EQ(test, 0, of_clk_set_defaults(consumer, false));
3188 	}
3189 
3190 	return 0;
3191 }
3192 
3193 static void clk_assigned_rates_assigns_one(struct kunit *test)
3194 {
3195 	struct clk_assigned_rates_context *ctx = test->priv;
3196 
3197 	KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3198 }
3199 
3200 static void clk_assigned_rates_assigns_multiple(struct kunit *test)
3201 {
3202 	struct clk_assigned_rates_context *ctx = test->priv;
3203 
3204 	KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3205 	KUNIT_EXPECT_EQ(test, ctx->clk1.rate, ASSIGNED_RATES_1_RATE);
3206 }
3207 
3208 static void clk_assigned_rates_skips(struct kunit *test)
3209 {
3210 	struct clk_assigned_rates_context *ctx = test->priv;
3211 	const struct clk_assigned_rates_test_param *test_param = test->param_value;
3212 
3213 	KUNIT_EXPECT_NE(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
3214 	KUNIT_EXPECT_EQ(test, ctx->clk0.rate, test_param->rate0);
3215 }
3216 
3217 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one);
3218 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one_consumer);
3219 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one);
3220 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one_consumer);
3221 
3222 /* Test cases that assign one rate */
3223 static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_one_test_params[] = {
3224 	{
3225 		/*
3226 		 * Test that a single cell assigned-clock-rates property
3227 		 * assigns the rate when the property is in the provider.
3228 		 */
3229 		.desc = "provider assigns",
3230 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one),
3231 	},
3232 	{
3233 		/*
3234 		 * Test that a single cell assigned-clock-rates property
3235 		 * assigns the rate when the property is in the consumer.
3236 		 */
3237 		.desc = "consumer assigns",
3238 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one_consumer),
3239 		.consumer_test = true,
3240 	},
3241 	{
3242 		/*
3243 		 * Test that a single cell assigned-clock-rates-u64 property
3244 		 * assigns the rate when the property is in the provider.
3245 		 */
3246 		.desc = "provider assigns u64",
3247 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one),
3248 	},
3249 	{
3250 		/*
3251 		 * Test that a single cell assigned-clock-rates-u64 property
3252 		 * assigns the rate when the property is in the consumer.
3253 		 */
3254 		.desc = "consumer assigns u64",
3255 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one_consumer),
3256 		.consumer_test = true,
3257 	},
3258 };
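/*
 * KUNIT_ARRAY_PARAM_DESC() generates clk_assigned_rates_assigns_one_gen_params,
 * referenced by the kunit_case table at the bottom of this file.
 */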
3259 KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_one,
3260 		       clk_assigned_rates_assigns_one_test_params, desc)
3261 
3262 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple);
3263 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple_consumer);
3264 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple);
3265 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple_consumer);
3266 
3267 /* Test cases that assign multiple rates */
3268 static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_multiple_test_params[] = {
3269 	{
3270 		/*
3271 		 * Test that a multiple cell assigned-clock-rates property
3272 		 * assigns the rates when the property is in the provider.
3273 		 */
3274 		.desc = "provider assigns",
3275 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple),
3276 	},
3277 	{
3278 		/*
3279 		 * Test that a multiple cell assigned-clock-rates property
3280 		 * assigns the rates when the property is in the consumer.
3281 		 */
3282 		.desc = "consumer assigns",
3283 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple_consumer),
3284 		.consumer_test = true,
3285 	},
3286 	{
3287 		/*
3288 		 * Test that a multiple cell assigned-clock-rates-u64 property
3289 		 * assigns the rates when the property is in the provider.
3290 		 */
3291 		.desc = "provider assigns u64",
3292 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple),
3293 	},
3294 	{
3295 		/*
3296 		 * Test that a multiple cell assigned-clock-rates-u64 property
3297 		 * assigns the rates when the property is in the consumer.
3298 		 */
3299 		.desc = "consumer assigns u64",
3300 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple_consumer),
3301 		.consumer_test = true,
3302 	},
3303 };
3304 KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_multiple,
3305 		       clk_assigned_rates_assigns_multiple_test_params,
3306 		       desc)
3307 
3308 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without);
3309 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without_consumer);
3310 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero);
3311 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero_consumer);
3312 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null);
3313 OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null_consumer);
3314 
3315 /* Test cases that skip changing the rate due to malformed DT */
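/* Each case seeds a nonzero rate0 so a skipped assignment is detectable */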
3316 static const struct clk_assigned_rates_test_param clk_assigned_rates_skips_test_params[] = {
3317 	{
3318 		/*
3319 		 * Test that an assigned-clock-rates property without an assigned-clocks
3320 		 * property fails when the property is in the provider.
3321 		 */
3322 		.desc = "provider missing assigned-clocks",
3323 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without),
3324 		.rate0 = 3000,
3325 	},
3326 	{
3327 		/*
3328 		 * Test that an assigned-clock-rates property without an assigned-clocks
3329 		 * property fails when the property is in the consumer.
3330 		 */
3331 		.desc = "consumer missing assigned-clocks",
3332 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without_consumer),
3333 		.rate0 = 3000,
3334 		.consumer_test = true,
3335 	},
3336 	{
3337 		/*
3338 		 * Test that an assigned-clock-rates property of zero doesn't
3339 		 * set a rate when the property is in the provider.
3340 		 */
3341 		.desc = "provider assigned-clock-rates of zero",
3342 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero),
3343 		.rate0 = 3000,
3344 	},
3345 	{
3346 		/*
3347 		 * Test that an assigned-clock-rates property of zero doesn't
3348 		 * set a rate when the property is in the consumer.
3349 		 */
3350 		.desc = "consumer assigned-clock-rates of zero",
3351 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero_consumer),
3352 		.rate0 = 3000,
3353 		.consumer_test = true,
3354 	},
3355 	{
3356 		/*
3357 		 * Test that an assigned-clocks property with a null phandle
3358 		 * doesn't set a rate when the property is in the provider.
3359 		 */
3360 		.desc = "provider assigned-clocks null phandle",
3361 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null),
3362 		.rate0 = 3000,
3363 	},
3364 	{
3365 		/*
3366 		 * Test that an assigned-clocks property with a null phandle
3367 		 * doesn't set a rate when the property is in the consumer.
3368 		 */
3369 		.desc = "consumer assigned-clocks null phandle",
3370 		TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null_consumer),
3371 		.rate0 = 3000,
3372 		.consumer_test = true,
3373 	},
3374 };
3375 KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_skips,
3376 		       clk_assigned_rates_skips_test_params,
3377 		       desc)
3378 
3379 static struct kunit_case clk_assigned_rates_test_cases[] = {
3380 	KUNIT_CASE_PARAM(clk_assigned_rates_assigns_one,
3381 			 clk_assigned_rates_assigns_one_gen_params),
3382 	KUNIT_CASE_PARAM(clk_assigned_rates_assigns_multiple,
3383 			 clk_assigned_rates_assigns_multiple_gen_params),
3384 	KUNIT_CASE_PARAM(clk_assigned_rates_skips,
3385 			 clk_assigned_rates_skips_gen_params),
3386 	{}
3387 };
3388 
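/*
 * Illustrative only (not copied from the test overlays): a consumer node
 * exercising the binding under test might look like
 *
 *	consumer {
 *		clocks = <&clk_provider 1>;
 *		assigned-clocks = <&clk_provider 1>;
 *		assigned-clock-rates = <48000000>;
 *	};
 */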
3389 /*
3390  * Test suite for assigned-clock-rates{-u64} DT property.
3391  */
3392 static struct kunit_suite clk_assigned_rates_suite = {
3393 	.name = "clk_assigned_rates",
3394 	.test_cases = clk_assigned_rates_test_cases,
3395 	.init = clk_assigned_rates_test_init,
3396 };
3397 
3398 kunit_test_suites(
3399 	&clk_assigned_rates_suite,
3400 	&clk_leaf_mux_set_rate_parent_test_suite,
3401 	&clk_test_suite,
3402 	&clk_multiple_parents_mux_test_suite,
3403 	&clk_mux_no_reparent_test_suite,
3404 	&clk_mux_notifier_test_suite,
3405 	&clk_orphan_transparent_multiple_parent_mux_test_suite,
3406 	&clk_orphan_transparent_single_parent_test_suite,
3407 	&clk_orphan_two_level_root_last_test_suite,
3408 	&clk_range_test_suite,
3409 	&clk_range_maximize_test_suite,
3410 	&clk_range_minimize_test_suite,
3411 	&clk_register_clk_parent_data_of_suite,
3412 	&clk_register_clk_parent_data_device_suite,
3413 	&clk_single_parent_mux_test_suite,
3414 	&clk_uncached_test_suite,
3415 );
3416 MODULE_DESCRIPTION("Kunit tests for clk framework");
3417 MODULE_LICENSE("GPL v2");
3418