xref: /linux/drivers/clk/clk_test.c (revision d4b82e5808241239cb3ae2bff5a6c6767ea976cb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kunit tests for clk framework
4  */
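/*
 * These suites are normally run through the KUnit wrapper, for example
 * (assuming the in-tree drivers/clk/.kunitconfig fragment is present):
 *
 *   ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/clk/.kunitconfig
 */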
5 #include <linux/clk.h>
6 #include <linux/clk-provider.h>
7 #include <linux/of.h>
8 #include <linux/platform_device.h>
9 
10 /* Needed for clk_hw_get_clk() */
11 #include "clk.h"
12 
13 #include <kunit/clk.h>
14 #include <kunit/of.h>
15 #include <kunit/platform_device.h>
16 #include <kunit/test.h>
17 
18 #include "clk_parent_data_test.h"
19 
20 static const struct clk_ops empty_clk_ops = { };
21 
22 #define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
23 #define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
24 #define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)
25 
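/*
 * Context for a parent-less dummy clock that simply stores the rate it
 * was last asked to run at.
 */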
26 struct clk_dummy_context {
27 	struct clk_hw hw;
28 	unsigned long rate;
29 };
30 
31 static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
32 					   unsigned long parent_rate)
33 {
34 	struct clk_dummy_context *ctx =
35 		container_of(hw, struct clk_dummy_context, hw);
36 
37 	return ctx->rate;
38 }
39 
40 static int clk_dummy_determine_rate(struct clk_hw *hw,
41 				    struct clk_rate_request *req)
42 {
43 	/* Just return the same rate without modifying it */
44 	return 0;
45 }
46 
47 static int clk_dummy_maximize_rate(struct clk_hw *hw,
48 				   struct clk_rate_request *req)
49 {
50 	/*
51 	 * If there's a maximum set, always run the clock at the maximum
52 	 * allowed.
53 	 */
54 	if (req->max_rate < ULONG_MAX)
55 		req->rate = req->max_rate;
56 
57 	return 0;
58 }
59 
60 static int clk_dummy_minimize_rate(struct clk_hw *hw,
61 				   struct clk_rate_request *req)
62 {
63 	/*
64 	 * If there's a minimum set, always run the clock at the minimum
65 	 * allowed.
66 	 */
67 	if (req->min_rate > 0)
68 		req->rate = req->min_rate;
69 
70 	return 0;
71 }
72 
73 static int clk_dummy_set_rate(struct clk_hw *hw,
74 			      unsigned long rate,
75 			      unsigned long parent_rate)
76 {
77 	struct clk_dummy_context *ctx =
78 		container_of(hw, struct clk_dummy_context, hw);
79 
80 	ctx->rate = rate;
81 	return 0;
82 }
83 
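/*
 * Dummy mux backend for the single-parent clocks below: there is no
 * hardware state to program, so .set_parent only validates the index
 * and .get_parent always reports index 0.
 */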
84 static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
85 {
86 	if (index >= clk_hw_get_num_parents(hw))
87 		return -EINVAL;
88 
89 	return 0;
90 }
91 
92 static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
93 {
94 	return 0;
95 }
96 
97 static const struct clk_ops clk_dummy_rate_ops = {
98 	.recalc_rate = clk_dummy_recalc_rate,
99 	.determine_rate = clk_dummy_determine_rate,
100 	.set_rate = clk_dummy_set_rate,
101 };
102 
103 static const struct clk_ops clk_dummy_maximize_rate_ops = {
104 	.recalc_rate = clk_dummy_recalc_rate,
105 	.determine_rate = clk_dummy_maximize_rate,
106 	.set_rate = clk_dummy_set_rate,
107 };
108 
109 static const struct clk_ops clk_dummy_minimize_rate_ops = {
110 	.recalc_rate = clk_dummy_recalc_rate,
111 	.determine_rate = clk_dummy_minimize_rate,
112 	.set_rate = clk_dummy_set_rate,
113 };
114 
115 static const struct clk_ops clk_dummy_single_parent_ops = {
116 	/*
117 	 * FIXME: Even though we should probably be able to use
118 	 * __clk_mux_determine_rate() here, if we use it and call
119 	 * clk_round_rate() or clk_set_rate() with a rate lower than
120 	 * what all the parents can provide, it will return -EINVAL.
121 	 *
122 	 * This is due to the fact that it has the undocumented
123 	 * behaviour of always picking the closest rate higher than the
124 	 * requested rate. If we get something lower, it thus considers
125 	 * that it's not acceptable and will return an error.
126 	 *
127 	 * It's somewhat inconsistent and creates a weird threshold:
128 	 * rates above the parent rate get rounded to what the parent
129 	 * can provide, while rates below it will simply
130 	 * return an error.
131 	 */
132 	.determine_rate = __clk_mux_determine_rate_closest,
133 	.set_parent = clk_dummy_single_set_parent,
134 	.get_parent = clk_dummy_single_get_parent,
135 };
136 
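/*
 * Context for a mux with two dummy rate parents; current_parent mirrors
 * the index last programmed through .set_parent.
 */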
137 struct clk_multiple_parent_ctx {
138 	struct clk_dummy_context parents_ctx[2];
139 	struct clk_hw hw;
140 	u8 current_parent;
141 };
142 
143 static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
144 {
145 	struct clk_multiple_parent_ctx *ctx =
146 		container_of(hw, struct clk_multiple_parent_ctx, hw);
147 
148 	if (index >= clk_hw_get_num_parents(hw))
149 		return -EINVAL;
150 
151 	ctx->current_parent = index;
152 
153 	return 0;
154 }
155 
156 static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
157 {
158 	struct clk_multiple_parent_ctx *ctx =
159 		container_of(hw, struct clk_multiple_parent_ctx, hw);
160 
161 	return ctx->current_parent;
162 }
163 
164 static const struct clk_ops clk_multiple_parents_mux_ops = {
165 	.get_parent = clk_multiple_parents_mux_get_parent,
166 	.set_parent = clk_multiple_parents_mux_set_parent,
167 	.determine_rate = __clk_mux_determine_rate_closest,
168 };
169 
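/*
 * Same mux backend, but rate requests must not trigger a reparent:
 * clk_hw_determine_rate_no_reparent() keeps the current parent.
 */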
170 static const struct clk_ops clk_multiple_parents_no_reparent_mux_ops = {
171 	.determine_rate = clk_hw_determine_rate_no_reparent,
172 	.get_parent = clk_multiple_parents_mux_get_parent,
173 	.set_parent = clk_multiple_parents_mux_set_parent,
174 };
175 
176 static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
177 {
178 	struct clk_dummy_context *ctx;
179 	struct clk_init_data init = { };
180 	int ret;
181 
182 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
183 	if (!ctx)
184 		return -ENOMEM;
185 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
186 	test->priv = ctx;
187 
188 	init.name = "test_dummy_rate";
189 	init.ops = ops;
190 	ctx->hw.init = &init;
191 
192 	ret = clk_hw_register(NULL, &ctx->hw);
193 	if (ret)
194 		return ret;
195 
196 	return 0;
197 }
198 
199 static int clk_test_init(struct kunit *test)
200 {
201 	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
202 }
203 
204 static int clk_maximize_test_init(struct kunit *test)
205 {
206 	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
207 }
208 
209 static int clk_minimize_test_init(struct kunit *test)
210 {
211 	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
212 }
213 
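/* Common exit helper: unregister the clk_hw set up by a suite's init callback. */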
214 static void clk_test_exit(struct kunit *test)
215 {
216 	struct clk_dummy_context *ctx = test->priv;
217 
218 	clk_hw_unregister(&ctx->hw);
219 }
220 
221 /*
222  * Test that the actual rate matches what is returned by clk_get_rate()
223  */
224 static void clk_test_get_rate(struct kunit *test)
225 {
226 	struct clk_dummy_context *ctx = test->priv;
227 	struct clk_hw *hw = &ctx->hw;
228 	struct clk *clk = clk_hw_get_clk(hw, NULL);
229 	unsigned long rate;
230 
231 	rate = clk_get_rate(clk);
232 	KUNIT_ASSERT_GT(test, rate, 0);
233 	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
234 
235 	clk_put(clk);
236 }
237 
238 /*
239  * Test that, after a call to clk_set_rate(), the rate returned by
240  * clk_get_rate() matches.
241  *
242  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
243  * modify the requested rate, which is the case with clk_dummy_rate_ops.
244  */
245 static void clk_test_set_get_rate(struct kunit *test)
246 {
247 	struct clk_dummy_context *ctx = test->priv;
248 	struct clk_hw *hw = &ctx->hw;
249 	struct clk *clk = clk_hw_get_clk(hw, NULL);
250 	unsigned long rate;
251 
252 	KUNIT_ASSERT_EQ(test,
253 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
254 			0);
255 
256 	rate = clk_get_rate(clk);
257 	KUNIT_ASSERT_GT(test, rate, 0);
258 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
259 
260 	clk_put(clk);
261 }
262 
263 /*
264  * Test that, after several calls to clk_set_rate(), the rate returned
265  * by clk_get_rate() matches the last one.
266  *
267  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
268  * modify the requested rate, which is the case with clk_dummy_rate_ops.
269  */
270 static void clk_test_set_set_get_rate(struct kunit *test)
271 {
272 	struct clk_dummy_context *ctx = test->priv;
273 	struct clk_hw *hw = &ctx->hw;
274 	struct clk *clk = clk_hw_get_clk(hw, NULL);
275 	unsigned long rate;
276 
277 	KUNIT_ASSERT_EQ(test,
278 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
279 			0);
280 
281 	KUNIT_ASSERT_EQ(test,
282 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
283 			0);
284 
285 	rate = clk_get_rate(clk);
286 	KUNIT_ASSERT_GT(test, rate, 0);
287 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
288 
289 	clk_put(clk);
290 }
291 
292 /*
293  * Test that clk_round_rate and clk_set_rate are consistent and will
294  * return the same frequency.
295  */
296 static void clk_test_round_set_get_rate(struct kunit *test)
297 {
298 	struct clk_dummy_context *ctx = test->priv;
299 	struct clk_hw *hw = &ctx->hw;
300 	struct clk *clk = clk_hw_get_clk(hw, NULL);
301 	unsigned long set_rate;
302 	long rounded_rate;
303 
304 	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
305 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
306 	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);
307 
308 	KUNIT_ASSERT_EQ(test,
309 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
310 			0);
311 
312 	set_rate = clk_get_rate(clk);
313 	KUNIT_ASSERT_GT(test, set_rate, 0);
314 	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
315 
316 	clk_put(clk);
317 }
318 
319 static struct kunit_case clk_test_cases[] = {
320 	KUNIT_CASE(clk_test_get_rate),
321 	KUNIT_CASE(clk_test_set_get_rate),
322 	KUNIT_CASE(clk_test_set_set_get_rate),
323 	KUNIT_CASE(clk_test_round_set_get_rate),
324 	{}
325 };
326 
327 /*
328  * Test suite for a basic rate clock, without any parent.
329  *
330  * These tests exercise the rate API with simple scenarios
331  */
332 static struct kunit_suite clk_test_suite = {
333 	.name = "clk-test",
334 	.init = clk_test_init,
335 	.exit = clk_test_exit,
336 	.test_cases = clk_test_cases,
337 };
338 
339 static int clk_uncached_test_init(struct kunit *test)
340 {
341 	struct clk_dummy_context *ctx;
342 	int ret;
343 
344 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
345 	if (!ctx)
346 		return -ENOMEM;
347 	test->priv = ctx;
348 
349 	ctx->rate = DUMMY_CLOCK_INIT_RATE;
350 	ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
351 					     &clk_dummy_rate_ops,
352 					     CLK_GET_RATE_NOCACHE);
353 
354 	ret = clk_hw_register(NULL, &ctx->hw);
355 	if (ret)
356 		return ret;
357 
358 	return 0;
359 }
360 
361 /*
362  * Test that for an uncached clock, the clock framework doesn't cache
363  * the rate and clk_get_rate() will return the underlying clock rate
364  * even if it changed.
365  */
366 static void clk_test_uncached_get_rate(struct kunit *test)
367 {
368 	struct clk_dummy_context *ctx = test->priv;
369 	struct clk_hw *hw = &ctx->hw;
370 	struct clk *clk = clk_hw_get_clk(hw, NULL);
371 	unsigned long rate;
372 
373 	rate = clk_get_rate(clk);
374 	KUNIT_ASSERT_GT(test, rate, 0);
375 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
376 
377 	/* We change the rate behind the clock framework's back */
378 	ctx->rate = DUMMY_CLOCK_RATE_1;
379 	rate = clk_get_rate(clk);
380 	KUNIT_ASSERT_GT(test, rate, 0);
381 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
382 
383 	clk_put(clk);
384 }
385 
386 /*
387  * Test that for an uncached clock, clk_set_rate_range() will work
388  * properly if the rate hasn't changed.
389  */
390 static void clk_test_uncached_set_range(struct kunit *test)
391 {
392 	struct clk_dummy_context *ctx = test->priv;
393 	struct clk_hw *hw = &ctx->hw;
394 	struct clk *clk = clk_hw_get_clk(hw, NULL);
395 	unsigned long rate;
396 
397 	KUNIT_ASSERT_EQ(test,
398 			clk_set_rate_range(clk,
399 					   DUMMY_CLOCK_RATE_1,
400 					   DUMMY_CLOCK_RATE_2),
401 			0);
402 
403 	rate = clk_get_rate(clk);
404 	KUNIT_ASSERT_GT(test, rate, 0);
405 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
406 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
407 
408 	clk_put(clk);
409 }
410 
411 /*
412  * Test that for an uncached clock, clk_set_rate_range() will work
413  * properly if the rate has changed in hardware.
414  *
415  * In this case, it means that if the rate wasn't initially in the range
416  * we're trying to set, but got changed at some point into the range
417  * without the kernel knowing about it, its rate shouldn't be affected.
418  */
419 static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
420 {
421 	struct clk_dummy_context *ctx = test->priv;
422 	struct clk_hw *hw = &ctx->hw;
423 	struct clk *clk = clk_hw_get_clk(hw, NULL);
424 	unsigned long rate;
425 
426 	/* We change the rate behind the clock framework's back */
427 	ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
428 	KUNIT_ASSERT_EQ(test,
429 			clk_set_rate_range(clk,
430 					   DUMMY_CLOCK_RATE_1,
431 					   DUMMY_CLOCK_RATE_2),
432 			0);
433 
434 	rate = clk_get_rate(clk);
435 	KUNIT_ASSERT_GT(test, rate, 0);
436 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
437 
438 	clk_put(clk);
439 }
440 
441 static struct kunit_case clk_uncached_test_cases[] = {
442 	KUNIT_CASE(clk_test_uncached_get_rate),
443 	KUNIT_CASE(clk_test_uncached_set_range),
444 	KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
445 	{}
446 };
447 
448 /*
449  * Test suite for a basic, uncached, rate clock, without any parent.
450  *
451  * These tests exercise the rate API with simple scenarios
452  */
453 static struct kunit_suite clk_uncached_test_suite = {
454 	.name = "clk-uncached-test",
455 	.init = clk_uncached_test_init,
456 	.exit = clk_test_exit,
457 	.test_cases = clk_uncached_test_cases,
458 };
459 
460 static int
461 clk_multiple_parents_mux_test_init(struct kunit *test)
462 {
463 	struct clk_multiple_parent_ctx *ctx;
464 	const char *parents[2] = { "parent-0", "parent-1"};
465 	int ret;
466 
467 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
468 	if (!ctx)
469 		return -ENOMEM;
470 	test->priv = ctx;
471 
472 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
473 							    &clk_dummy_rate_ops,
474 							    0);
475 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
476 	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[0].hw);
477 	if (ret)
478 		return ret;
479 
480 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
481 							    &clk_dummy_rate_ops,
482 							    0);
483 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
484 	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
485 	if (ret)
486 		return ret;
487 
488 	ctx->current_parent = 0;
489 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
490 					   &clk_multiple_parents_mux_ops,
491 					   CLK_SET_RATE_PARENT);
492 	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
493 	if (ret)
494 		return ret;
495 
496 	return 0;
497 }
498 
499 /*
500  * Test that for a clock with multiple parents, clk_get_parent()
501  * actually returns the current one.
502  */
503 static void
504 clk_test_multiple_parents_mux_get_parent(struct kunit *test)
505 {
506 	struct clk_multiple_parent_ctx *ctx = test->priv;
507 	struct clk_hw *hw = &ctx->hw;
508 	struct clk *clk = clk_hw_get_clk(hw, NULL);
509 	struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
510 
511 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
512 
513 	clk_put(parent);
514 	clk_put(clk);
515 }
516 
517 /*
518  * Test that for a clock with multiple parents, clk_has_parent()
519  * actually reports all of them as parents.
520  */
521 static void
522 clk_test_multiple_parents_mux_has_parent(struct kunit *test)
523 {
524 	struct clk_multiple_parent_ctx *ctx = test->priv;
525 	struct clk_hw *hw = &ctx->hw;
526 	struct clk *clk = clk_hw_get_clk(hw, NULL);
527 	struct clk *parent;
528 
529 	parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
530 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
531 	clk_put(parent);
532 
533 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
534 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
535 	clk_put(parent);
536 
537 	clk_put(clk);
538 }
539 
540 /*
541  * Test that for a clock with multiple parents, if we set a range on
542  * that clock and the parent is changed, its rate after the reparenting
543  * is still within the range we asked for.
544  *
545  * FIXME: clk_set_parent() only does the reparenting but doesn't
546  * reevaluate whether the new clock rate is within its boundaries or
547  * not.
548  */
549 static void
550 clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
551 {
552 	struct clk_multiple_parent_ctx *ctx = test->priv;
553 	struct clk_hw *hw = &ctx->hw;
554 	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
555 	struct clk *parent1, *parent2;
556 	unsigned long rate;
557 	int ret;
558 
559 	kunit_skip(test, "This needs to be fixed in the core.");
560 
561 	parent1 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[0].hw, NULL);
562 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
563 	KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
564 
565 	parent2 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
566 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
567 
568 	ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
569 	KUNIT_ASSERT_EQ(test, ret, 0);
570 
571 	ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
572 	KUNIT_ASSERT_EQ(test, ret, 0);
573 
574 	ret = clk_set_rate_range(clk,
575 				 DUMMY_CLOCK_RATE_1 - 1000,
576 				 DUMMY_CLOCK_RATE_1 + 1000);
577 	KUNIT_ASSERT_EQ(test, ret, 0);
578 
579 	ret = clk_set_parent(clk, parent2);
580 	KUNIT_ASSERT_EQ(test, ret, 0);
581 
582 	rate = clk_get_rate(clk);
583 	KUNIT_ASSERT_GT(test, rate, 0);
584 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
585 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
586 }
587 
588 static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
589 	KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
590 	KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
591 	KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
592 	{}
593 };
594 
595 /*
596  * Test suite for a basic mux clock with two parents, with
597  * CLK_SET_RATE_PARENT on the child.
598  *
599  * These tests exercise the consumer API and check that the state of the
600  * child and parents are sane and consistent.
601  */
602 static struct kunit_suite
603 clk_multiple_parents_mux_test_suite = {
604 	.name = "clk-multiple-parents-mux-test",
605 	.init = clk_multiple_parents_mux_test_init,
606 	.test_cases = clk_multiple_parents_mux_test_cases,
607 };
608 
609 static int
610 clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
611 {
612 	struct clk_multiple_parent_ctx *ctx;
613 	const char *parents[2] = { "missing-parent", "proper-parent"};
614 	int ret;
615 
616 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
617 	if (!ctx)
618 		return -ENOMEM;
619 	test->priv = ctx;
620 
621 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
622 							    &clk_dummy_rate_ops,
623 							    0);
624 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
625 	ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
626 	if (ret)
627 		return ret;
628 
629 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
630 					   &clk_multiple_parents_mux_ops,
631 					   CLK_SET_RATE_PARENT);
632 	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
633 	if (ret)
634 		return ret;
635 
636 	return 0;
637 }
638 
639 /*
640  * Test that, for a mux whose current parent hasn't been registered yet and is
641  * thus orphan, clk_get_parent() will return NULL.
642  */
643 static void
644 clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
645 {
646 	struct clk_multiple_parent_ctx *ctx = test->priv;
647 	struct clk_hw *hw = &ctx->hw;
648 	struct clk *clk = clk_hw_get_clk(hw, NULL);
649 
650 	KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
651 
652 	clk_put(clk);
653 }
654 
655 /*
656  * Test that, for a mux whose current parent hasn't been registered yet,
657  * calling clk_set_parent() to a valid parent will properly update the
658  * mux parent and its orphan status.
659  */
660 static void
661 clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
662 {
663 	struct clk_multiple_parent_ctx *ctx = test->priv;
664 	struct clk_hw *hw = &ctx->hw;
665 	struct clk *clk = clk_hw_get_clk(hw, NULL);
666 	struct clk *parent, *new_parent;
667 	int ret;
668 
669 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
670 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
671 
672 	ret = clk_set_parent(clk, parent);
673 	KUNIT_ASSERT_EQ(test, ret, 0);
674 
675 	new_parent = clk_get_parent(clk);
676 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
677 	KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
678 
679 	clk_put(parent);
680 	clk_put(clk);
681 }
682 
683 /*
684  * Test that, for a mux that started orphan but got switched to a valid
685  * parent, calling clk_drop_range() on the mux won't affect the parent
686  * rate.
687  */
688 static void
689 clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
690 {
691 	struct clk_multiple_parent_ctx *ctx = test->priv;
692 	struct clk_hw *hw = &ctx->hw;
693 	struct clk *clk = clk_hw_get_clk(hw, NULL);
694 	struct clk *parent;
695 	unsigned long parent_rate, new_parent_rate;
696 	int ret;
697 
698 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
699 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
700 
701 	parent_rate = clk_get_rate(parent);
702 	KUNIT_ASSERT_GT(test, parent_rate, 0);
703 
704 	ret = clk_set_parent(clk, parent);
705 	KUNIT_ASSERT_EQ(test, ret, 0);
706 
707 	ret = clk_drop_range(clk);
708 	KUNIT_ASSERT_EQ(test, ret, 0);
709 
710 	new_parent_rate = clk_get_rate(clk);
711 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
712 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
713 
714 	clk_put(parent);
715 	clk_put(clk);
716 }
717 
718 /*
719  * Test that, for a mux that started orphan but got switched to a valid
720  * parent, the rate of the mux and its new parent are consistent.
721  */
722 static void
723 clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
724 {
725 	struct clk_multiple_parent_ctx *ctx = test->priv;
726 	struct clk_hw *hw = &ctx->hw;
727 	struct clk *clk = clk_hw_get_clk(hw, NULL);
728 	struct clk *parent;
729 	unsigned long parent_rate, rate;
730 	int ret;
731 
732 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
733 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
734 
735 	parent_rate = clk_get_rate(parent);
736 	KUNIT_ASSERT_GT(test, parent_rate, 0);
737 
738 	ret = clk_set_parent(clk, parent);
739 	KUNIT_ASSERT_EQ(test, ret, 0);
740 
741 	rate = clk_get_rate(clk);
742 	KUNIT_ASSERT_GT(test, rate, 0);
743 	KUNIT_EXPECT_EQ(test, parent_rate, rate);
744 
745 	clk_put(parent);
746 	clk_put(clk);
747 }
748 
749 /*
750  * Test that, for a mux that started orphan but got switched to a valid
751  * parent, calling clk_put() on the mux won't affect the parent rate.
752  */
753 static void
754 clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
755 {
756 	struct clk_multiple_parent_ctx *ctx = test->priv;
757 	struct clk *clk, *parent;
758 	unsigned long parent_rate, new_parent_rate;
759 	int ret;
760 
761 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
762 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
763 
764 	clk = clk_hw_get_clk(&ctx->hw, NULL);
765 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
766 
767 	parent_rate = clk_get_rate(parent);
768 	KUNIT_ASSERT_GT(test, parent_rate, 0);
769 
770 	ret = clk_set_parent(clk, parent);
771 	KUNIT_ASSERT_EQ(test, ret, 0);
772 
773 	clk_put(clk);
774 
775 	new_parent_rate = clk_get_rate(parent);
776 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
777 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
778 
779 	clk_put(parent);
780 }
781 
782 /*
783  * Test that, for a mux that started orphan but got switched to a valid
784  * parent, calling clk_set_rate_range() will affect the parent state if
785  * its rate is out of range.
786  */
787 static void
788 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
789 {
790 	struct clk_multiple_parent_ctx *ctx = test->priv;
791 	struct clk_hw *hw = &ctx->hw;
792 	struct clk *clk = clk_hw_get_clk(hw, NULL);
793 	struct clk *parent;
794 	unsigned long rate;
795 	int ret;
796 
797 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
798 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
799 
800 	ret = clk_set_parent(clk, parent);
801 	KUNIT_ASSERT_EQ(test, ret, 0);
802 
803 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
804 	KUNIT_ASSERT_EQ(test, ret, 0);
805 
806 	rate = clk_get_rate(clk);
807 	KUNIT_ASSERT_GT(test, rate, 0);
808 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
809 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
810 
811 	clk_put(parent);
812 	clk_put(clk);
813 }
814 
815 /*
816  * Test that, for a mux that started orphan but got switched to a valid
817  * parent, calling clk_set_rate_range() won't affect the parent state if
818  * its rate is within range.
819  */
820 static void
821 clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
822 {
823 	struct clk_multiple_parent_ctx *ctx = test->priv;
824 	struct clk_hw *hw = &ctx->hw;
825 	struct clk *clk = clk_hw_get_clk(hw, NULL);
826 	struct clk *parent;
827 	unsigned long parent_rate, new_parent_rate;
828 	int ret;
829 
830 	parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
831 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
832 
833 	parent_rate = clk_get_rate(parent);
834 	KUNIT_ASSERT_GT(test, parent_rate, 0);
835 
836 	ret = clk_set_parent(clk, parent);
837 	KUNIT_ASSERT_EQ(test, ret, 0);
838 
839 	ret = clk_set_rate_range(clk,
840 				 DUMMY_CLOCK_INIT_RATE - 1000,
841 				 DUMMY_CLOCK_INIT_RATE + 1000);
842 	KUNIT_ASSERT_EQ(test, ret, 0);
843 
844 	new_parent_rate = clk_get_rate(parent);
845 	KUNIT_ASSERT_GT(test, new_parent_rate, 0);
846 	KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
847 
848 	clk_put(parent);
849 	clk_put(clk);
850 }
851 
852 /*
853  * Test that, for a mux whose current parent hasn't been registered yet,
854  * calling clk_set_rate_range() will succeed, and will be taken into
855  * account when rounding a rate.
856  */
857 static void
858 clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
859 {
860 	struct clk_multiple_parent_ctx *ctx = test->priv;
861 	struct clk_hw *hw = &ctx->hw;
862 	struct clk *clk = clk_hw_get_clk(hw, NULL);
863 	long rate;
864 	int ret;
865 
866 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
867 	KUNIT_ASSERT_EQ(test, ret, 0);
868 
869 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
870 	KUNIT_ASSERT_GT(test, rate, 0);
871 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
872 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
873 
874 	clk_put(clk);
875 }
876 
877 /*
878  * Test that, for a mux that started orphan, was assigned a rate and
879  * then got switched to a valid parent, its rate is eventually within
880  * range.
881  *
882  * FIXME: Even though we update the rate as part of clk_set_parent(), we
883  * don't evaluate whether that new rate is within range and needs to be
884  * adjusted.
885  */
886 static void
887 clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
888 {
889 	struct clk_multiple_parent_ctx *ctx = test->priv;
890 	struct clk_hw *hw = &ctx->hw;
891 	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
892 	struct clk *parent;
893 	unsigned long rate;
894 	int ret;
895 
896 	kunit_skip(test, "This needs to be fixed in the core.");
897 
898 	clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
899 
900 	parent = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
901 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
902 
903 	ret = clk_set_parent(clk, parent);
904 	KUNIT_ASSERT_EQ(test, ret, 0);
905 
906 	rate = clk_get_rate(clk);
907 	KUNIT_ASSERT_GT(test, rate, 0);
908 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
909 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
910 }
911 
912 static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
913 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
914 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
915 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
916 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
917 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
918 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
919 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
920 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
921 	KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
922 	{}
923 };
924 
925 /*
926  * Test suite for a basic mux clock with two parents. The default parent
927  * isn't registered, only the second parent is. By default, the clock
928  * will thus be orphan.
929  *
930  * These tests exercise the behaviour of the consumer API when dealing
931  * with an orphan clock, and how we deal with the transition to a valid
932  * parent.
933  */
934 static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
935 	.name = "clk-orphan-transparent-multiple-parent-mux-test",
936 	.init = clk_orphan_transparent_multiple_parent_mux_test_init,
937 	.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
938 };
939 
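/*
 * Context for a single-parent mux: one dummy rate parent and the child
 * clock under test.
 */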
940 struct clk_single_parent_ctx {
941 	struct clk_dummy_context parent_ctx;
942 	struct clk_hw hw;
943 };
944 
945 static int clk_single_parent_mux_test_init(struct kunit *test)
946 {
947 	struct clk_single_parent_ctx *ctx;
948 	int ret;
949 
950 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
951 	if (!ctx)
952 		return -ENOMEM;
953 	test->priv = ctx;
954 
955 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
956 	ctx->parent_ctx.hw.init =
957 		CLK_HW_INIT_NO_PARENT("parent-clk",
958 				      &clk_dummy_rate_ops,
959 				      0);
960 
961 	ret = clk_hw_register_kunit(test, NULL, &ctx->parent_ctx.hw);
962 	if (ret)
963 		return ret;
964 
965 	ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
966 				   &clk_dummy_single_parent_ops,
967 				   CLK_SET_RATE_PARENT);
968 
969 	ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
970 	if (ret)
971 		return ret;
972 
973 	return 0;
974 }
975 
976 static void
977 clk_single_parent_mux_test_exit(struct kunit *test)
978 {
979 	struct clk_single_parent_ctx *ctx = test->priv;
980 
981 	clk_hw_unregister(&ctx->hw);
982 	clk_hw_unregister(&ctx->parent_ctx.hw);
983 }
984 
985 /*
986  * Test that for a clock with a single parent, clk_get_parent() actually
987  * returns the parent.
988  */
989 static void
990 clk_test_single_parent_mux_get_parent(struct kunit *test)
991 {
992 	struct clk_single_parent_ctx *ctx = test->priv;
993 	struct clk_hw *hw = &ctx->hw;
994 	struct clk *clk = clk_hw_get_clk(hw, NULL);
995 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
996 
997 	KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
998 
999 	clk_put(parent);
1000 	clk_put(clk);
1001 }
1002 
1003 /*
1004  * Test that for a clock with a single parent, clk_has_parent() actually
1005  * reports it as a parent.
1006  */
1007 static void
1008 clk_test_single_parent_mux_has_parent(struct kunit *test)
1009 {
1010 	struct clk_single_parent_ctx *ctx = test->priv;
1011 	struct clk_hw *hw = &ctx->hw;
1012 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1013 	struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
1014 
1015 	KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
1016 
1017 	clk_put(parent);
1018 	clk_put(clk);
1019 }
1020 
1021 /*
1022  * Test that for a clock that can't modify its rate and with a single
1023  * parent, if we set disjoint ranges on the parent and then the child,
1024  * the second will return an error.
1025  *
1026  * FIXME: clk_set_rate_range() only considers the current clock when
1027  * evaluating whether ranges are disjoint, and not the upstream clocks'
1028  * ranges.
1029  */
1030 static void
1031 clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
1032 {
1033 	struct clk_single_parent_ctx *ctx = test->priv;
1034 	struct clk_hw *hw = &ctx->hw;
1035 	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
1036 	struct clk *parent;
1037 	int ret;
1038 
1039 	kunit_skip(test, "This needs to be fixed in the core.");
1040 
1041 	parent = clk_get_parent(clk);
1042 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1043 
1044 	ret = clk_set_rate_range(parent, 1000, 2000);
1045 	KUNIT_ASSERT_EQ(test, ret, 0);
1046 
1047 	ret = clk_set_rate_range(clk, 3000, 4000);
1048 	KUNIT_EXPECT_LT(test, ret, 0);
1049 }
1050 
1051 /*
1052  * Test that for a clock that can't modify its rate and with a single
1053  * parent, if we set disjoint ranges on the child and then the parent,
1054  * the second will return an error.
1055  *
1056  * FIXME: clk_set_rate_range() only considers the current clock when
1057  * evaluating whether ranges are disjoint, and not the downstream clocks'
1058  * ranges.
1059  */
1060 static void
1061 clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
1062 {
1063 	struct clk_single_parent_ctx *ctx = test->priv;
1064 	struct clk_hw *hw = &ctx->hw;
1065 	struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
1066 	struct clk *parent;
1067 	int ret;
1068 
1069 	kunit_skip(test, "This needs to be fixed in the core.");
1070 
1071 	parent = clk_get_parent(clk);
1072 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1073 
1074 	ret = clk_set_rate_range(clk, 1000, 2000);
1075 	KUNIT_ASSERT_EQ(test, ret, 0);
1076 
1077 	ret = clk_set_rate_range(parent, 3000, 4000);
1078 	KUNIT_EXPECT_LT(test, ret, 0);
1079 }
1080 
1081 /*
1082  * Test that for a clock that can't modify its rate and with a single
1083  * parent, if we set a range on the parent and then call
1084  * clk_round_rate(), the boundaries of the parent are taken into
1085  * account.
1086  */
1087 static void
1088 clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
1089 {
1090 	struct clk_single_parent_ctx *ctx = test->priv;
1091 	struct clk_hw *hw = &ctx->hw;
1092 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1093 	struct clk *parent;
1094 	long rate;
1095 	int ret;
1096 
1097 	parent = clk_get_parent(clk);
1098 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1099 
1100 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1101 	KUNIT_ASSERT_EQ(test, ret, 0);
1102 
1103 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1104 	KUNIT_ASSERT_GT(test, rate, 0);
1105 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1106 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1107 
1108 	clk_put(clk);
1109 }
1110 
1111 /*
1112  * Test that for a clock that can't modify its rate and with a single
1113  * parent, if we set a range on the parent and a more restrictive one on
1114  * the child, and then call clk_round_rate(), the boundaries of the
1115  * two clocks are taken into account.
1116  */
1117 static void
1118 clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
1119 {
1120 	struct clk_single_parent_ctx *ctx = test->priv;
1121 	struct clk_hw *hw = &ctx->hw;
1122 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1123 	struct clk *parent;
1124 	long rate;
1125 	int ret;
1126 
1127 	parent = clk_get_parent(clk);
1128 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1129 
1130 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1131 	KUNIT_ASSERT_EQ(test, ret, 0);
1132 
1133 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1134 	KUNIT_ASSERT_EQ(test, ret, 0);
1135 
1136 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1137 	KUNIT_ASSERT_GT(test, rate, 0);
1138 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1139 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1140 
1141 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1142 	KUNIT_ASSERT_GT(test, rate, 0);
1143 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1144 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1145 
1146 	clk_put(clk);
1147 }
1148 
1149 /*
1150  * Test that for a clock that can't modify its rate and with a single
1151  * parent, if we set a range on the child and a more restrictive one on
1152  * the parent, and then call clk_round_rate(), the boundaries of the
1153  * two clocks are taken into account.
1154  */
1155 static void
1156 clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
1157 {
1158 	struct clk_single_parent_ctx *ctx = test->priv;
1159 	struct clk_hw *hw = &ctx->hw;
1160 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1161 	struct clk *parent;
1162 	long rate;
1163 	int ret;
1164 
1165 	parent = clk_get_parent(clk);
1166 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
1167 
1168 	ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
1169 	KUNIT_ASSERT_EQ(test, ret, 0);
1170 
1171 	ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
1172 	KUNIT_ASSERT_EQ(test, ret, 0);
1173 
1174 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1175 	KUNIT_ASSERT_GT(test, rate, 0);
1176 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1177 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1178 
1179 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1180 	KUNIT_ASSERT_GT(test, rate, 0);
1181 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1182 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1183 
1184 	clk_put(clk);
1185 }
1186 
1187 static struct kunit_case clk_single_parent_mux_test_cases[] = {
1188 	KUNIT_CASE(clk_test_single_parent_mux_get_parent),
1189 	KUNIT_CASE(clk_test_single_parent_mux_has_parent),
1190 	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
1191 	KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
1192 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
1193 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
1194 	KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
1195 	{}
1196 };
1197 
1198 /*
1199  * Test suite for a basic mux clock with one parent, with
1200  * CLK_SET_RATE_PARENT on the child.
1201  *
1202  * These tests exercise the consumer API and check that the state of the
1203  * child and parent are sane and consistent.
1204  */
1205 static struct kunit_suite
1206 clk_single_parent_mux_test_suite = {
1207 	.name = "clk-single-parent-mux-test",
1208 	.init = clk_single_parent_mux_test_init,
1209 	.test_cases = clk_single_parent_mux_test_cases,
1210 };
1211 
1212 static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
1213 {
1214 	struct clk_single_parent_ctx *ctx;
1215 	struct clk_init_data init = { };
1216 	const char * const parents[] = { "orphan_parent" };
1217 	int ret;
1218 
1219 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1220 	if (!ctx)
1221 		return -ENOMEM;
1222 	test->priv = ctx;
1223 
1224 	init.name = "test_orphan_dummy_parent";
1225 	init.ops = &clk_dummy_single_parent_ops;
1226 	init.parent_names = parents;
1227 	init.num_parents = ARRAY_SIZE(parents);
1228 	init.flags = CLK_SET_RATE_PARENT;
1229 	ctx->hw.init = &init;
1230 
1231 	ret = clk_hw_register(NULL, &ctx->hw);
1232 	if (ret)
1233 		return ret;
1234 
1235 	memset(&init, 0, sizeof(init));
1236 	init.name = "orphan_parent";
1237 	init.ops = &clk_dummy_rate_ops;
1238 	ctx->parent_ctx.hw.init = &init;
1239 	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1240 
1241 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1242 	if (ret)
1243 		return ret;
1244 
1245 	return 0;
1246 }
1247 
1248 /*
1249  * Test that a mux-only clock, with an initial rate within a range,
1250  * will still have the same rate after the range has been enforced.
1251  *
1252  * See:
1253  * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
1254  */
1255 static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
1256 {
1257 	struct clk_single_parent_ctx *ctx = test->priv;
1258 	struct clk_hw *hw = &ctx->hw;
1259 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1260 	unsigned long rate, new_rate;
1261 
1262 	rate = clk_get_rate(clk);
1263 	KUNIT_ASSERT_GT(test, rate, 0);
1264 
1265 	KUNIT_ASSERT_EQ(test,
1266 			clk_set_rate_range(clk,
1267 					   ctx->parent_ctx.rate - 1000,
1268 					   ctx->parent_ctx.rate + 1000),
1269 			0);
1270 
1271 	new_rate = clk_get_rate(clk);
1272 	KUNIT_ASSERT_GT(test, new_rate, 0);
1273 	KUNIT_EXPECT_EQ(test, rate, new_rate);
1274 
1275 	clk_put(clk);
1276 }
1277 
1278 static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
1279 	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
1280 	{}
1281 };
1282 
1283 /*
1284  * Test suite for a basic mux clock with one parent. The parent is
1285  * registered after its child. The clock will thus be an orphan when
1286  * registered, but will no longer be when the tests run.
1287  *
1288  * These tests make sure a clock that used to be orphan has a sane,
1289  * consistent, behaviour.
1290  */
1291 static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
1292 	.name = "clk-orphan-transparent-single-parent-test",
1293 	.init = clk_orphan_transparent_single_parent_mux_test_init,
1294 	.exit = clk_single_parent_mux_test_exit,
1295 	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
1296 };
1297 
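/*
 * Context for a two-level chain: a root rate clock, a transparent
 * intermediate mux, and the leaf clock under test.
 */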
1298 struct clk_single_parent_two_lvl_ctx {
1299 	struct clk_dummy_context parent_parent_ctx;
1300 	struct clk_dummy_context parent_ctx;
1301 	struct clk_hw hw;
1302 };
1303 
1304 static int
1305 clk_orphan_two_level_root_last_test_init(struct kunit *test)
1306 {
1307 	struct clk_single_parent_two_lvl_ctx *ctx;
1308 	int ret;
1309 
1310 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
1311 	if (!ctx)
1312 		return -ENOMEM;
1313 	test->priv = ctx;
1314 
1315 	ctx->parent_ctx.hw.init =
1316 		CLK_HW_INIT("intermediate-parent",
1317 			    "root-parent",
1318 			    &clk_dummy_single_parent_ops,
1319 			    CLK_SET_RATE_PARENT);
1320 	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
1321 	if (ret)
1322 		return ret;
1323 
1324 	ctx->hw.init =
1325 		CLK_HW_INIT("test-clk", "intermediate-parent",
1326 			    &clk_dummy_single_parent_ops,
1327 			    CLK_SET_RATE_PARENT);
1328 	ret = clk_hw_register(NULL, &ctx->hw);
1329 	if (ret)
1330 		return ret;
1331 
1332 	ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
1333 	ctx->parent_parent_ctx.hw.init =
1334 		CLK_HW_INIT_NO_PARENT("root-parent",
1335 				      &clk_dummy_rate_ops,
1336 				      0);
1337 	ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
1338 	if (ret)
1339 		return ret;
1340 
1341 	return 0;
1342 }
1343 
1344 static void
1345 clk_orphan_two_level_root_last_test_exit(struct kunit *test)
1346 {
1347 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1348 
1349 	clk_hw_unregister(&ctx->hw);
1350 	clk_hw_unregister(&ctx->parent_ctx.hw);
1351 	clk_hw_unregister(&ctx->parent_parent_ctx.hw);
1352 }
1353 
1354 /*
1355  * Test that, for a clock whose parent used to be orphan, clk_get_rate()
1356  * will return the proper rate.
1357  */
1358 static void
1359 clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
1360 {
1361 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1362 	struct clk_hw *hw = &ctx->hw;
1363 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1364 	unsigned long rate;
1365 
1366 	rate = clk_get_rate(clk);
1367 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1368 
1369 	clk_put(clk);
1370 }
1371 
1372 /*
1373  * Test that, for a clock whose parent used to be orphan,
1374  * clk_set_rate_range() won't affect its rate if it is already within
1375  * range.
1376  *
1377  * See (for Exynos 4210):
1378  * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
1379  */
1380 static void
1381 clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
1382 {
1383 	struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
1384 	struct clk_hw *hw = &ctx->hw;
1385 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1386 	unsigned long rate;
1387 	int ret;
1388 
1389 	ret = clk_set_rate_range(clk,
1390 				 DUMMY_CLOCK_INIT_RATE - 1000,
1391 				 DUMMY_CLOCK_INIT_RATE + 1000);
1392 	KUNIT_ASSERT_EQ(test, ret, 0);
1393 
1394 	rate = clk_get_rate(clk);
1395 	KUNIT_ASSERT_GT(test, rate, 0);
1396 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
1397 
1398 	clk_put(clk);
1399 }
1400 
1401 static struct kunit_case
1402 clk_orphan_two_level_root_last_test_cases[] = {
1403 	KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
1404 	KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
1405 	{}
1406 };
1407 
1408 /*
1409  * Test suite for a basic, transparent, clock with a parent that is also
1410  * such a clock. The parent's parent is registered last, while the
1411  * parent and its child are registered in that order. The intermediate
1412  * and leaf clocks will thus be orphan when registered, but the leaf
1413  * clock itself will always have its parent and will never be
1414  * reparented. Indeed, it's only orphan because its parent is.
1415  *
1416  * These tests exercise the behaviour of the consumer API when dealing
1417  * with an orphan clock, and how we deal with the transition to a valid
1418  * parent.
1419  */
1420 static struct kunit_suite
1421 clk_orphan_two_level_root_last_test_suite = {
1422 	.name = "clk-orphan-two-level-root-last-test",
1423 	.init = clk_orphan_two_level_root_last_test_init,
1424 	.exit = clk_orphan_two_level_root_last_test_exit,
1425 	.test_cases = clk_orphan_two_level_root_last_test_cases,
1426 };
1427 
1428 /*
1429  * Test that clk_set_rate_range won't return an error for a valid range
1430  * and that it will make sure the rate of the clock is within the
1431  * boundaries.
1432  */
1433 static void clk_range_test_set_range(struct kunit *test)
1434 {
1435 	struct clk_dummy_context *ctx = test->priv;
1436 	struct clk_hw *hw = &ctx->hw;
1437 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1438 	unsigned long rate;
1439 
1440 	KUNIT_ASSERT_EQ(test,
1441 			clk_set_rate_range(clk,
1442 					   DUMMY_CLOCK_RATE_1,
1443 					   DUMMY_CLOCK_RATE_2),
1444 			0);
1445 
1446 	rate = clk_get_rate(clk);
1447 	KUNIT_ASSERT_GT(test, rate, 0);
1448 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1449 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1450 
1451 	clk_put(clk);
1452 }
1453 
1454 /*
1455  * Test that calling clk_set_rate_range with a minimum rate higher than
1456  * the maximum rate returns an error.
1457  */
1458 static void clk_range_test_set_range_invalid(struct kunit *test)
1459 {
1460 	struct clk_dummy_context *ctx = test->priv;
1461 	struct clk_hw *hw = &ctx->hw;
1462 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1463 
1464 	KUNIT_EXPECT_LT(test,
1465 			clk_set_rate_range(clk,
1466 					   DUMMY_CLOCK_RATE_1 + 1000,
1467 					   DUMMY_CLOCK_RATE_1),
1468 			0);
1469 
1470 	clk_put(clk);
1471 }
1472 
1473 /*
1474  * Test that users can't set multiple, disjoint ranges that would be
1475  * impossible to meet.
1476  */
1477 static void clk_range_test_multiple_disjoints_range(struct kunit *test)
1478 {
1479 	struct clk_dummy_context *ctx = test->priv;
1480 	struct clk_hw *hw = &ctx->hw;
1481 	struct clk *user1, *user2;
1482 
1483 	user1 = clk_hw_get_clk(hw, NULL);
1484 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1485 
1486 	user2 = clk_hw_get_clk(hw, NULL);
1487 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1488 
1489 	KUNIT_ASSERT_EQ(test,
1490 			clk_set_rate_range(user1, 1000, 2000),
1491 			0);
1492 
1493 	KUNIT_EXPECT_LT(test,
1494 			clk_set_rate_range(user2, 3000, 4000),
1495 			0);
1496 
1497 	clk_put(user2);
1498 	clk_put(user1);
1499 }
1500 
1501 /*
1502  * Test that if our clock has some boundaries and we try to round a rate
1503  * lower than the minimum, the returned rate will be within range.
1504  */
1505 static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
1506 {
1507 	struct clk_dummy_context *ctx = test->priv;
1508 	struct clk_hw *hw = &ctx->hw;
1509 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1510 	long rate;
1511 
1512 	KUNIT_ASSERT_EQ(test,
1513 			clk_set_rate_range(clk,
1514 					   DUMMY_CLOCK_RATE_1,
1515 					   DUMMY_CLOCK_RATE_2),
1516 			0);
1517 
1518 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1519 	KUNIT_ASSERT_GT(test, rate, 0);
1520 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1521 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1522 
1523 	clk_put(clk);
1524 }
1525 
1526 /*
1527  * Test that if our clock has some boundaries and we try to set a rate
1528  * lower than the minimum, the new rate will be within range.
1529  */
1530 static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
1531 {
1532 	struct clk_dummy_context *ctx = test->priv;
1533 	struct clk_hw *hw = &ctx->hw;
1534 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1535 	unsigned long rate;
1536 
1537 	KUNIT_ASSERT_EQ(test,
1538 			clk_set_rate_range(clk,
1539 					   DUMMY_CLOCK_RATE_1,
1540 					   DUMMY_CLOCK_RATE_2),
1541 			0);
1542 
1543 	KUNIT_ASSERT_EQ(test,
1544 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1545 			0);
1546 
1547 	rate = clk_get_rate(clk);
1548 	KUNIT_ASSERT_GT(test, rate, 0);
1549 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1550 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1551 
1552 	clk_put(clk);
1553 }
1554 
1555 /*
1556  * Test that if our clock has some boundaries and we try to round and
1557  * set a rate lower than the minimum, the rate returned by
1558  * clk_round_rate() will be consistent with the new rate set by
1559  * clk_set_rate().
1560  */
1561 static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
1562 {
1563 	struct clk_dummy_context *ctx = test->priv;
1564 	struct clk_hw *hw = &ctx->hw;
1565 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1566 	long rounded;
1567 
1568 	KUNIT_ASSERT_EQ(test,
1569 			clk_set_rate_range(clk,
1570 					   DUMMY_CLOCK_RATE_1,
1571 					   DUMMY_CLOCK_RATE_2),
1572 			0);
1573 
1574 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
1575 	KUNIT_ASSERT_GT(test, rounded, 0);
1576 
1577 	KUNIT_ASSERT_EQ(test,
1578 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1579 			0);
1580 
1581 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1582 
1583 	clk_put(clk);
1584 }
1585 
1586 /*
1587  * Test that if our clock has some boundaries and we try to round a rate
1588  * higher than the maximum, the returned rate will be within range.
1589  */
1590 static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
1591 {
1592 	struct clk_dummy_context *ctx = test->priv;
1593 	struct clk_hw *hw = &ctx->hw;
1594 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1595 	long rate;
1596 
1597 	KUNIT_ASSERT_EQ(test,
1598 			clk_set_rate_range(clk,
1599 					   DUMMY_CLOCK_RATE_1,
1600 					   DUMMY_CLOCK_RATE_2),
1601 			0);
1602 
1603 	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1604 	KUNIT_ASSERT_GT(test, rate, 0);
1605 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1606 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1607 
1608 	clk_put(clk);
1609 }
1610 
1611 /*
1612  * Test that if our clock has some boundaries and we try to set a rate
1613  * higher than the maximum, the new rate will be within range.
1614  */
1615 static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
1616 {
1617 	struct clk_dummy_context *ctx = test->priv;
1618 	struct clk_hw *hw = &ctx->hw;
1619 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1620 	unsigned long rate;
1621 
1622 	KUNIT_ASSERT_EQ(test,
1623 			clk_set_rate_range(clk,
1624 					   DUMMY_CLOCK_RATE_1,
1625 					   DUMMY_CLOCK_RATE_2),
1626 			0);
1627 
1628 	KUNIT_ASSERT_EQ(test,
1629 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1630 			0);
1631 
1632 	rate = clk_get_rate(clk);
1633 	KUNIT_ASSERT_GT(test, rate, 0);
1634 	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
1635 	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
1636 
1637 	clk_put(clk);
1638 }
1639 
1640 /*
1641  * Test that if our clock has some boundaries and we try to round and
1642  * set a rate higher than the maximum, the rate returned by
1643  * clk_round_rate() will be consistent with the new rate set by
1644  * clk_set_rate().
1645  */
1646 static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
1647 {
1648 	struct clk_dummy_context *ctx = test->priv;
1649 	struct clk_hw *hw = &ctx->hw;
1650 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1651 	long rounded;
1652 
1653 	KUNIT_ASSERT_EQ(test,
1654 			clk_set_rate_range(clk,
1655 					   DUMMY_CLOCK_RATE_1,
1656 					   DUMMY_CLOCK_RATE_2),
1657 			0);
1658 
1659 	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
1660 	KUNIT_ASSERT_GT(test, rounded, 0);
1661 
1662 	KUNIT_ASSERT_EQ(test,
1663 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1664 			0);
1665 
1666 	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
1667 
1668 	clk_put(clk);
1669 }
1670 
1671 /*
1672  * Test that if our clock has a rate lower than the minimum set by a
1673  * call to clk_set_rate_range(), the rate will be raised to match the
1674  * new minimum.
1675  *
1676  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1677  * modify the requested rate, which is the case for clk_dummy_rate_ops.
1678  */
1679 static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
1680 {
1681 	struct clk_dummy_context *ctx = test->priv;
1682 	struct clk_hw *hw = &ctx->hw;
1683 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1684 	unsigned long rate;
1685 
1686 	KUNIT_ASSERT_EQ(test,
1687 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1688 			0);
1689 
1690 	KUNIT_ASSERT_EQ(test,
1691 			clk_set_rate_range(clk,
1692 					   DUMMY_CLOCK_RATE_1,
1693 					   DUMMY_CLOCK_RATE_2),
1694 			0);
1695 
1696 	rate = clk_get_rate(clk);
1697 	KUNIT_ASSERT_GT(test, rate, 0);
1698 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1699 
1700 	clk_put(clk);
1701 }
1702 
1703 /*
1704  * Test that if our clock has a rate higher than the maximum set by a
1705  * call to clk_set_rate_range(), the rate will be lowered to match the
1706  * new maximum.
1707  *
1708  * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
1709  * modify the requested rate, which is the case for clk_dummy_rate_ops.
1710  */
1711 static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
1712 {
1713 	struct clk_dummy_context *ctx = test->priv;
1714 	struct clk_hw *hw = &ctx->hw;
1715 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1716 	unsigned long rate;
1717 
1718 	KUNIT_ASSERT_EQ(test,
1719 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1720 			0);
1721 
1722 	KUNIT_ASSERT_EQ(test,
1723 			clk_set_rate_range(clk,
1724 					   DUMMY_CLOCK_RATE_1,
1725 					   DUMMY_CLOCK_RATE_2),
1726 			0);
1727 
1728 	rate = clk_get_rate(clk);
1729 	KUNIT_ASSERT_GT(test, rate, 0);
1730 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1731 
1732 	clk_put(clk);
1733 }
1734 
1735 static struct kunit_case clk_range_test_cases[] = {
1736 	KUNIT_CASE(clk_range_test_set_range),
1737 	KUNIT_CASE(clk_range_test_set_range_invalid),
1738 	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
1739 	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
1740 	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
1741 	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
1742 	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
1743 	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
1744 	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
1745 	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
1746 	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
1747 	{}
1748 };
1749 
1750 /*
1751  * Test suite for a basic rate clock, without any parent.
1752  *
1753  * These tests exercise the rate range API: clk_set_rate_range(),
1754  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
1755  */
1756 static struct kunit_suite clk_range_test_suite = {
1757 	.name = "clk-range-test",
1758 	.init = clk_test_init,
1759 	.exit = clk_test_exit,
1760 	.test_cases = clk_range_test_cases,
1761 };
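
/*
 * For reference, the consumer-facing pattern exercised above looks roughly
 * like this (illustrative sketch only, not part of the tests):
 *
 *	clk_set_rate_range(clk, min, max);	// constrain the boundaries
 *	clk_set_rate(clk, rate);		// result is clamped into [min, max]
 *	clk_drop_range(clk);			// back to the full 0..ULONG_MAX range
 */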
1762 
1763 /*
1764  * Test that if we have several subsequent calls to
1765  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1766  * needed each and every time.
1767  *
1768  * With clk_dummy_maximize_rate_ops, this means that the rate will
1769  * trail along the maximum as it evolves.
1770  */
1771 static void clk_range_test_set_range_rate_maximized(struct kunit *test)
1772 {
1773 	struct clk_dummy_context *ctx = test->priv;
1774 	struct clk_hw *hw = &ctx->hw;
1775 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1776 	unsigned long rate;
1777 
1778 	KUNIT_ASSERT_EQ(test,
1779 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1780 			0);
1781 
1782 	KUNIT_ASSERT_EQ(test,
1783 			clk_set_rate_range(clk,
1784 					   DUMMY_CLOCK_RATE_1,
1785 					   DUMMY_CLOCK_RATE_2),
1786 			0);
1787 
1788 	rate = clk_get_rate(clk);
1789 	KUNIT_ASSERT_GT(test, rate, 0);
1790 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1791 
1792 	KUNIT_ASSERT_EQ(test,
1793 			clk_set_rate_range(clk,
1794 					   DUMMY_CLOCK_RATE_1,
1795 					   DUMMY_CLOCK_RATE_2 - 1000),
1796 			0);
1797 
1798 	rate = clk_get_rate(clk);
1799 	KUNIT_ASSERT_GT(test, rate, 0);
1800 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
1801 
1802 	KUNIT_ASSERT_EQ(test,
1803 			clk_set_rate_range(clk,
1804 					   DUMMY_CLOCK_RATE_1,
1805 					   DUMMY_CLOCK_RATE_2),
1806 			0);
1807 
1808 	rate = clk_get_rate(clk);
1809 	KUNIT_ASSERT_GT(test, rate, 0);
1810 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1811 
1812 	clk_put(clk);
1813 }
1814 
1815 /*
1816  * Test that if we have several subsequent calls to
1817  * clk_set_rate_range(), across multiple users, the core will reevaluate
1818  * whether a new rate is needed each and every time.
1819  *
1820  * With clk_dummy_maximize_rate_ops, this means that the rate will
1821  * trail along the maximum as it evolves.
1822  */
1823 static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
1824 {
1825 	struct clk_dummy_context *ctx = test->priv;
1826 	struct clk_hw *hw = &ctx->hw;
1827 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1828 	struct clk *user1, *user2;
1829 	unsigned long rate;
1830 
1831 	user1 = clk_hw_get_clk(hw, NULL);
1832 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1833 
1834 	user2 = clk_hw_get_clk(hw, NULL);
1835 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1836 
1837 	KUNIT_ASSERT_EQ(test,
1838 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1839 			0);
1840 
1841 	KUNIT_ASSERT_EQ(test,
1842 			clk_set_rate_range(user1,
1843 					   0,
1844 					   DUMMY_CLOCK_RATE_2),
1845 			0);
1846 
1847 	rate = clk_get_rate(clk);
1848 	KUNIT_ASSERT_GT(test, rate, 0);
1849 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1850 
1851 	KUNIT_ASSERT_EQ(test,
1852 			clk_set_rate_range(user2,
1853 					   0,
1854 					   DUMMY_CLOCK_RATE_1),
1855 			0);
1856 
1857 	rate = clk_get_rate(clk);
1858 	KUNIT_ASSERT_GT(test, rate, 0);
1859 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1860 
1861 	KUNIT_ASSERT_EQ(test,
1862 			clk_drop_range(user2),
1863 			0);
1864 
1865 	rate = clk_get_rate(clk);
1866 	KUNIT_ASSERT_GT(test, rate, 0);
1867 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1868 
1869 	clk_put(user2);
1870 	clk_put(user1);
1871 	clk_put(clk);
1872 }
1873 
1874 /*
1875  * Test that if we have several subsequent calls to
1876  * clk_set_rate_range(), across multiple users, the core will reevaluate
1877  * whether a new rate is needed, including when a user drops its clock.
1878  *
1879  * With clk_dummy_maximize_rate_ops, this means that the rate will
1880  * trail along the maximum as it evolves.
1881  */
1882 static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
1883 {
1884 	struct clk_dummy_context *ctx = test->priv;
1885 	struct clk_hw *hw = &ctx->hw;
1886 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1887 	struct clk *user1, *user2;
1888 	unsigned long rate;
1889 
1890 	user1 = clk_hw_get_clk(hw, NULL);
1891 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
1892 
1893 	user2 = clk_hw_get_clk(hw, NULL);
1894 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
1895 
1896 	KUNIT_ASSERT_EQ(test,
1897 			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
1898 			0);
1899 
1900 	KUNIT_ASSERT_EQ(test,
1901 			clk_set_rate_range(user1,
1902 					   0,
1903 					   DUMMY_CLOCK_RATE_2),
1904 			0);
1905 
1906 	rate = clk_get_rate(clk);
1907 	KUNIT_ASSERT_GT(test, rate, 0);
1908 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1909 
1910 	KUNIT_ASSERT_EQ(test,
1911 			clk_set_rate_range(user2,
1912 					   0,
1913 					   DUMMY_CLOCK_RATE_1),
1914 			0);
1915 
1916 	rate = clk_get_rate(clk);
1917 	KUNIT_ASSERT_GT(test, rate, 0);
1918 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1919 
1920 	clk_put(user2);
1921 
1922 	rate = clk_get_rate(clk);
1923 	KUNIT_ASSERT_GT(test, rate, 0);
1924 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
1925 
1926 	clk_put(user1);
1927 	clk_put(clk);
1928 }
1929 
1930 static struct kunit_case clk_range_maximize_test_cases[] = {
1931 	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
1932 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
1933 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
1934 	{}
1935 };
1936 
1937 /*
1938  * Test suite for a basic rate clock, without any parent.
1939  *
1940  * These tests exercise the rate range API: clk_set_rate_range(),
1941  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
1942  * driver that will always try to run at the highest possible rate.
1943  */
1944 static struct kunit_suite clk_range_maximize_test_suite = {
1945 	.name = "clk-range-maximize-test",
1946 	.init = clk_maximize_test_init,
1947 	.exit = clk_test_exit,
1948 	.test_cases = clk_range_maximize_test_cases,
1949 };
1950 
1951 /*
1952  * Test that if we have several subsequent calls to
1953  * clk_set_rate_range(), the core will reevaluate whether a new rate is
1954  * needed each and every time.
1955  *
1956  * With clk_dummy_minimize_rate_ops, this means that the rate will
1957  * trail along the minimum as it evolves.
1958  */
1959 static void clk_range_test_set_range_rate_minimized(struct kunit *test)
1960 {
1961 	struct clk_dummy_context *ctx = test->priv;
1962 	struct clk_hw *hw = &ctx->hw;
1963 	struct clk *clk = clk_hw_get_clk(hw, NULL);
1964 	unsigned long rate;
1965 
1966 	KUNIT_ASSERT_EQ(test,
1967 			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
1968 			0);
1969 
1970 	KUNIT_ASSERT_EQ(test,
1971 			clk_set_rate_range(clk,
1972 					   DUMMY_CLOCK_RATE_1,
1973 					   DUMMY_CLOCK_RATE_2),
1974 			0);
1975 
1976 	rate = clk_get_rate(clk);
1977 	KUNIT_ASSERT_GT(test, rate, 0);
1978 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1979 
1980 	KUNIT_ASSERT_EQ(test,
1981 			clk_set_rate_range(clk,
1982 					   DUMMY_CLOCK_RATE_1 + 1000,
1983 					   DUMMY_CLOCK_RATE_2),
1984 			0);
1985 
1986 	rate = clk_get_rate(clk);
1987 	KUNIT_ASSERT_GT(test, rate, 0);
1988 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
1989 
1990 	KUNIT_ASSERT_EQ(test,
1991 			clk_set_rate_range(clk,
1992 					   DUMMY_CLOCK_RATE_1,
1993 					   DUMMY_CLOCK_RATE_2),
1994 			0);
1995 
1996 	rate = clk_get_rate(clk);
1997 	KUNIT_ASSERT_GT(test, rate, 0);
1998 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
1999 
2000 	clk_put(clk);
2001 }
2002 
2003 /*
2004  * Test that if we have several subsequent calls to
2005  * clk_set_rate_range(), across multiple users, the core will reevaluate
2006  * whether a new rate is needed each and every time.
2007  *
2008  * With clk_dummy_minimize_rate_ops, this means that the rate will
2009  * trail along the minimum as it evolves.
2010  */
2011 static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
2012 {
2013 	struct clk_dummy_context *ctx = test->priv;
2014 	struct clk_hw *hw = &ctx->hw;
2015 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2016 	struct clk *user1, *user2;
2017 	unsigned long rate;
2018 
2019 	user1 = clk_hw_get_clk(hw, NULL);
2020 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2021 
2022 	user2 = clk_hw_get_clk(hw, NULL);
2023 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2024 
2025 	KUNIT_ASSERT_EQ(test,
2026 			clk_set_rate_range(user1,
2027 					   DUMMY_CLOCK_RATE_1,
2028 					   ULONG_MAX),
2029 			0);
2030 
2031 	rate = clk_get_rate(clk);
2032 	KUNIT_ASSERT_GT(test, rate, 0);
2033 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2034 
2035 	KUNIT_ASSERT_EQ(test,
2036 			clk_set_rate_range(user2,
2037 					   DUMMY_CLOCK_RATE_2,
2038 					   ULONG_MAX),
2039 			0);
2040 
2041 	rate = clk_get_rate(clk);
2042 	KUNIT_ASSERT_GT(test, rate, 0);
2043 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2044 
2045 	KUNIT_ASSERT_EQ(test,
2046 			clk_drop_range(user2),
2047 			0);
2048 
2049 	rate = clk_get_rate(clk);
2050 	KUNIT_ASSERT_GT(test, rate, 0);
2051 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2052 
2053 	clk_put(user2);
2054 	clk_put(user1);
2055 	clk_put(clk);
2056 }
2057 
2058 /*
2059  * Test that if we have several subsequent calls to
2060  * clk_set_rate_range(), across multiple users, the core will reevaluate
2061  * whether a new rate is needed, including when a user drops its clock.
2062  *
2063  * With clk_dummy_minimize_rate_ops, this means that the rate will
2064  * trail along the minimum as it evolves.
2065  */
2066 static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
2067 {
2068 	struct clk_dummy_context *ctx = test->priv;
2069 	struct clk_hw *hw = &ctx->hw;
2070 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2071 	struct clk *user1, *user2;
2072 	unsigned long rate;
2073 
2074 	user1 = clk_hw_get_clk(hw, NULL);
2075 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
2076 
2077 	user2 = clk_hw_get_clk(hw, NULL);
2078 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
2079 
2080 	KUNIT_ASSERT_EQ(test,
2081 			clk_set_rate_range(user1,
2082 					   DUMMY_CLOCK_RATE_1,
2083 					   ULONG_MAX),
2084 			0);
2085 
2086 	rate = clk_get_rate(clk);
2087 	KUNIT_ASSERT_GT(test, rate, 0);
2088 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2089 
2090 	KUNIT_ASSERT_EQ(test,
2091 			clk_set_rate_range(user2,
2092 					   DUMMY_CLOCK_RATE_2,
2093 					   ULONG_MAX),
2094 			0);
2095 
2096 	rate = clk_get_rate(clk);
2097 	KUNIT_ASSERT_GT(test, rate, 0);
2098 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
2099 
2100 	clk_put(user2);
2101 
2102 	rate = clk_get_rate(clk);
2103 	KUNIT_ASSERT_GT(test, rate, 0);
2104 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2105 
2106 	clk_put(user1);
2107 	clk_put(clk);
2108 }
2109 
2110 static struct kunit_case clk_range_minimize_test_cases[] = {
2111 	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
2112 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
2113 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
2114 	{}
2115 };
2116 
2117 /*
2118  * Test suite for a basic rate clock, without any parent.
2119  *
2120  * These tests exercise the rate range API: clk_set_rate_range(),
2121  * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
2122  * driver that will always try to run at the lowest possible rate.
2123  */
2124 static struct kunit_suite clk_range_minimize_test_suite = {
2125 	.name = "clk-range-minimize-test",
2126 	.init = clk_minimize_test_init,
2127 	.exit = clk_test_exit,
2128 	.test_cases = clk_range_minimize_test_cases,
2129 };
2130 
2131 struct clk_leaf_mux_ctx {
2132 	struct clk_multiple_parent_ctx mux_ctx;
2133 	struct clk_hw hw;
2134 	struct clk_hw parent;
2135 	struct clk_rate_request *req;
2136 	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2137 };
2138 
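/*
 * Forward the incoming request to the parent and run the determine_rate
 * helper selected by the test case on it. The test provides the
 * clk_rate_request storage through ctx->req so it can inspect what the
 * helper reported for the parent afterwards.
 */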
2139 static int clk_leaf_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
2140 {
2141 	struct clk_leaf_mux_ctx *ctx = container_of(hw, struct clk_leaf_mux_ctx, hw);
2142 	int ret;
2143 	struct clk_rate_request *parent_req = ctx->req;
2144 
2145 	clk_hw_forward_rate_request(hw, req, req->best_parent_hw, parent_req, req->rate);
2146 	ret = ctx->determine_rate_func(req->best_parent_hw, parent_req);
2147 	if (ret)
2148 		return ret;
2149 
2150 	req->rate = parent_req->rate;
2151 
2152 	return 0;
2153 }
2154 
2155 static const struct clk_ops clk_leaf_mux_set_rate_parent_ops = {
2156 	.determine_rate = clk_leaf_mux_determine_rate,
2157 	.set_parent = clk_dummy_single_set_parent,
2158 	.get_parent = clk_dummy_single_get_parent,
2159 };
2160 
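/*
 * Clock topology registered by the init callback below:
 *
 *	parent-0 (DUMMY_CLOCK_RATE_1) ---\
 *	                                  +-- test-mux -- test-parent -- test-clock
 *	parent-1 (DUMMY_CLOCK_RATE_2) ---/
 *
 * Both test-parent and test-clock carry CLK_SET_RATE_PARENT, so rate
 * requests on test-clock propagate up to the mux.
 */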
2161 static int
2162 clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
2163 {
2164 	struct clk_leaf_mux_ctx *ctx;
2165 	const char *top_parents[2] = { "parent-0", "parent-1" };
2166 	int ret;
2167 
2168 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2169 	if (!ctx)
2170 		return -ENOMEM;
2171 	test->priv = ctx;
2172 
2173 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2174 								    &clk_dummy_rate_ops,
2175 								    0);
2176 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2177 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2178 	if (ret)
2179 		return ret;
2180 
2181 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2182 								    &clk_dummy_rate_ops,
2183 								    0);
2184 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2185 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2186 	if (ret)
2187 		return ret;
2188 
2189 	ctx->mux_ctx.current_parent = 0;
2190 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2191 						   &clk_multiple_parents_mux_ops,
2192 						   0);
2193 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2194 	if (ret)
2195 		return ret;
2196 
2197 	ctx->parent.init = CLK_HW_INIT_HW("test-parent", &ctx->mux_ctx.hw,
2198 					  &empty_clk_ops, CLK_SET_RATE_PARENT);
2199 	ret = clk_hw_register(NULL, &ctx->parent);
2200 	if (ret)
2201 		return ret;
2202 
2203 	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->parent,
2204 				      &clk_leaf_mux_set_rate_parent_ops,
2205 				      CLK_SET_RATE_PARENT);
2206 	ret = clk_hw_register(NULL, &ctx->hw);
2207 	if (ret)
2208 		return ret;
2209 
2210 	return 0;
2211 }
2212 
2213 static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
2214 {
2215 	struct clk_leaf_mux_ctx *ctx = test->priv;
2216 
2217 	clk_hw_unregister(&ctx->hw);
2218 	clk_hw_unregister(&ctx->parent);
2219 	clk_hw_unregister(&ctx->mux_ctx.hw);
2220 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2221 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2222 }
2223 
2224 struct clk_leaf_mux_set_rate_parent_determine_rate_test_case {
2225 	const char *desc;
2226 	int (*determine_rate_func)(struct clk_hw *hw, struct clk_rate_request *req);
2227 };
2228 
2229 static void
2230 clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc(
2231 		const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *t, char *desc)
2232 {
2233 	strcpy(desc, t->desc);
2234 }
2235 
2236 static const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case
2237 clk_leaf_mux_set_rate_parent_determine_rate_test_cases[] = {
2238 	{
2239 		/*
2240 		 * Test that __clk_determine_rate() on the parent that can't
2241 		 * change rate doesn't return a clk_rate_request structure with
2242 		 * the best_parent_hw pointer pointing to the parent.
2243 		 */
2244 		.desc = "clk_leaf_mux_set_rate_parent__clk_determine_rate_proper_parent",
2245 		.determine_rate_func = __clk_determine_rate,
2246 	},
2247 	{
2248 		/*
2249 		 * Test that __clk_mux_determine_rate() on the parent that
2250 		 * can't change rate doesn't return a clk_rate_request
2251 		 * structure with the best_parent_hw pointer pointing to
2252 		 * the parent.
2253 		 */
2254 		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_proper_parent",
2255 		.determine_rate_func = __clk_mux_determine_rate,
2256 	},
2257 	{
2258 		/*
2259 		 * Test that __clk_mux_determine_rate_closest() on the parent
2260 		 * that can't change rate doesn't return a clk_rate_request
2261 		 * structure with the best_parent_hw pointer pointing to
2262 		 * the parent.
2263 		 */
2264 		.desc = "clk_leaf_mux_set_rate_parent__clk_mux_determine_rate_closest_proper_parent",
2265 		.determine_rate_func = __clk_mux_determine_rate_closest,
2266 	},
2267 	{
2268 		/*
2269 		 * Test that clk_hw_determine_rate_no_reparent() on the parent
2270 		 * that can't change rate doesn't return a clk_rate_request
2271 		 * structure with the best_parent_hw pointer pointing to
2272 		 * the parent.
2273 		 */
2274 		.desc = "clk_leaf_mux_set_rate_parent_clk_hw_determine_rate_no_reparent_proper_parent",
2275 		.determine_rate_func = clk_hw_determine_rate_no_reparent,
2276 	},
2277 };
2278 
2279 KUNIT_ARRAY_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2280 		  clk_leaf_mux_set_rate_parent_determine_rate_test_cases,
2281 		  clk_leaf_mux_set_rate_parent_determine_rate_test_case_to_desc)
2282 
2283 /*
2284  * Test that when a clk that can't change rate itself calls a function like
2285  * __clk_determine_rate() on its parent, it doesn't get back a clk_rate_request
2286  * structure whose best_parent_hw pointer points to the clk_hw passed
2287  * into the determine_rate function. See commit 262ca38f4b6e ("clk: Stop
2288  * forwarding clk_rate_requests to the parent") for more background.
2289  */
2290 static void clk_leaf_mux_set_rate_parent_determine_rate_test(struct kunit *test)
2291 {
2292 	struct clk_leaf_mux_ctx *ctx = test->priv;
2293 	struct clk_hw *hw = &ctx->hw;
2294 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2295 	struct clk_rate_request req;
2296 	unsigned long rate;
2297 	const struct clk_leaf_mux_set_rate_parent_determine_rate_test_case *test_param;
2298 
2299 	test_param = test->param_value;
2300 	ctx->determine_rate_func = test_param->determine_rate_func;
2301 
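	/*
	 * Hand the on-stack clk_rate_request to clk_leaf_mux_determine_rate()
	 * through ctx->req so its contents can be checked once
	 * clk_round_rate() has returned.
	 */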
2302 	ctx->req = &req;
2303 	rate = clk_get_rate(clk);
2304 	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
2305 	KUNIT_ASSERT_EQ(test, DUMMY_CLOCK_RATE_2, clk_round_rate(clk, DUMMY_CLOCK_RATE_2));
2306 
2307 	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
2308 	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
2309 	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
2310 
2311 	clk_put(clk);
2312 }
2313 
2314 static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
2315 	KUNIT_CASE_PARAM(clk_leaf_mux_set_rate_parent_determine_rate_test,
2316 			 clk_leaf_mux_set_rate_parent_determine_rate_test_gen_params),
2317 	{}
2318 };
2319 
2320 /*
2321  * Test suite for a clock whose parent is a pass-through clk whose parent is a
2322  * mux with multiple parents. The leaf and pass-through clocks have the
2323  * CLK_SET_RATE_PARENT flag, and will forward rate requests to the mux, which
2324  * will then select which parent is the best fit for a given rate.
2325  *
2326  * These tests exercise the behaviour of muxes, and the proper selection
2327  * of parents.
2328  */
2329 static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
2330 	.name = "clk-leaf-mux-set-rate-parent",
2331 	.init = clk_leaf_mux_set_rate_parent_test_init,
2332 	.exit = clk_leaf_mux_set_rate_parent_test_exit,
2333 	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
2334 };
2335 
2336 struct clk_mux_notifier_rate_change {
2337 	bool done;
2338 	unsigned long old_rate;
2339 	unsigned long new_rate;
2340 	wait_queue_head_t wq;
2341 };
2342 
2343 struct clk_mux_notifier_ctx {
2344 	struct clk_multiple_parent_ctx mux_ctx;
2345 	struct clk *clk;
2346 	struct notifier_block clk_nb;
2347 	struct clk_mux_notifier_rate_change pre_rate_change;
2348 	struct clk_mux_notifier_rate_change post_rate_change;
2349 };
2350 
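/* How long the test waits for the pre/post rate-change notifiers to fire. */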
2351 #define NOTIFIER_TIMEOUT_MS 100
2352 
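/*
 * Record the rates reported with PRE_RATE_CHANGE and POST_RATE_CHANGE and
 * wake up the test waiting on the corresponding wait queue.
 */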
2353 static int clk_mux_notifier_callback(struct notifier_block *nb,
2354 				     unsigned long action, void *data)
2355 {
2356 	struct clk_notifier_data *clk_data = data;
2357 	struct clk_mux_notifier_ctx *ctx = container_of(nb,
2358 							struct clk_mux_notifier_ctx,
2359 							clk_nb);
2360 
2361 	if (action & PRE_RATE_CHANGE) {
2362 		ctx->pre_rate_change.old_rate = clk_data->old_rate;
2363 		ctx->pre_rate_change.new_rate = clk_data->new_rate;
2364 		ctx->pre_rate_change.done = true;
2365 		wake_up_interruptible(&ctx->pre_rate_change.wq);
2366 	}
2367 
2368 	if (action & POST_RATE_CHANGE) {
2369 		ctx->post_rate_change.old_rate = clk_data->old_rate;
2370 		ctx->post_rate_change.new_rate = clk_data->new_rate;
2371 		ctx->post_rate_change.done = true;
2372 		wake_up_interruptible(&ctx->post_rate_change.wq);
2373 	}
2374 
2375 	return 0;
2376 }
2377 
2378 static int clk_mux_notifier_test_init(struct kunit *test)
2379 {
2380 	struct clk_mux_notifier_ctx *ctx;
2381 	const char *top_parents[2] = { "parent-0", "parent-1" };
2382 	int ret;
2383 
2384 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2385 	if (!ctx)
2386 		return -ENOMEM;
2387 	test->priv = ctx;
2388 	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
2389 	init_waitqueue_head(&ctx->pre_rate_change.wq);
2390 	init_waitqueue_head(&ctx->post_rate_change.wq);
2391 
2392 	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2393 								    &clk_dummy_rate_ops,
2394 								    0);
2395 	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2396 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
2397 	if (ret)
2398 		return ret;
2399 
2400 	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2401 								    &clk_dummy_rate_ops,
2402 								    0);
2403 	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2404 	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
2405 	if (ret)
2406 		return ret;
2407 
2408 	ctx->mux_ctx.current_parent = 0;
2409 	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
2410 						   &clk_multiple_parents_mux_ops,
2411 						   0);
2412 	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
2413 	if (ret)
2414 		return ret;
2415 
2416 	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
2417 	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
2418 	if (ret)
2419 		return ret;
2420 
2421 	return 0;
2422 }
2423 
2424 static void clk_mux_notifier_test_exit(struct kunit *test)
2425 {
2426 	struct clk_mux_notifier_ctx *ctx = test->priv;
2427 	struct clk *clk = ctx->clk;
2428 
2429 	clk_notifier_unregister(clk, &ctx->clk_nb);
2430 	clk_put(clk);
2431 
2432 	clk_hw_unregister(&ctx->mux_ctx.hw);
2433 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
2434 	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
2435 }
2436 
2437 /*
2438  * Test that if we have a notifier registered on a mux, the core
2439  * will notify us when we switch to another parent, with the proper
2440  * old and new rates.
2441  */
2442 static void clk_mux_notifier_set_parent_test(struct kunit *test)
2443 {
2444 	struct clk_mux_notifier_ctx *ctx = test->priv;
2445 	struct clk_hw *hw = &ctx->mux_ctx.hw;
2446 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2447 	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
2448 	int ret;
2449 
2450 	ret = clk_set_parent(clk, new_parent);
2451 	KUNIT_ASSERT_EQ(test, ret, 0);
2452 
2453 	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
2454 					       ctx->pre_rate_change.done,
2455 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2456 	KUNIT_ASSERT_GT(test, ret, 0);
2457 
2458 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2459 	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2460 
2461 	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
2462 					       ctx->post_rate_change.done,
2463 					       msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
2464 	KUNIT_ASSERT_GT(test, ret, 0);
2465 
2466 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
2467 	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
2468 
2469 	clk_put(new_parent);
2470 	clk_put(clk);
2471 }
2472 
2473 static struct kunit_case clk_mux_notifier_test_cases[] = {
2474 	KUNIT_CASE(clk_mux_notifier_set_parent_test),
2475 	{}
2476 };
2477 
2478 /*
2479  * Test suite for a mux with multiple parents, and a notifier registered
2480  * on the mux.
2481  *
2482  * These tests exercise the behaviour of notifiers.
2483  */
2484 static struct kunit_suite clk_mux_notifier_test_suite = {
2485 	.name = "clk-mux-notifier",
2486 	.init = clk_mux_notifier_test_init,
2487 	.exit = clk_mux_notifier_test_exit,
2488 	.test_cases = clk_mux_notifier_test_cases,
2489 };
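
/*
 * For reference, a consumer would typically hook such notifications roughly
 * like this (illustrative sketch only, names are made up):
 *
 *	static int my_rate_cb(struct notifier_block *nb, unsigned long action,
 *			      void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		// react to ndata->old_rate / ndata->new_rate here
 *		return NOTIFY_OK;
 *	}
 *
 *	clk_notifier_register(clk, &my_nb);
 */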
2490 
2491 static int
2492 clk_mux_no_reparent_test_init(struct kunit *test)
2493 {
2494 	struct clk_multiple_parent_ctx *ctx;
2495 	const char *parents[2] = { "parent-0", "parent-1"};
2496 	int ret;
2497 
2498 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2499 	if (!ctx)
2500 		return -ENOMEM;
2501 	test->priv = ctx;
2502 
2503 	ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
2504 							    &clk_dummy_rate_ops,
2505 							    0);
2506 	ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
2507 	ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
2508 	if (ret)
2509 		return ret;
2510 
2511 	ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
2512 							    &clk_dummy_rate_ops,
2513 							    0);
2514 	ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
2515 	ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
2516 	if (ret)
2517 		return ret;
2518 
2519 	ctx->current_parent = 0;
2520 	ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
2521 					   &clk_multiple_parents_no_reparent_mux_ops,
2522 					   0);
2523 	ret = clk_hw_register(NULL, &ctx->hw);
2524 	if (ret)
2525 		return ret;
2526 
2527 	return 0;
2528 }
2529 
2530 static void
2531 clk_mux_no_reparent_test_exit(struct kunit *test)
2532 {
2533 	struct clk_multiple_parent_ctx *ctx = test->priv;
2534 
2535 	clk_hw_unregister(&ctx->hw);
2536 	clk_hw_unregister(&ctx->parents_ctx[0].hw);
2537 	clk_hw_unregister(&ctx->parents_ctx[1].hw);
2538 }
2539 
2540 /*
2541  * Test that if we have a mux that cannot change parent and we call
2542  * clk_round_rate() on it with a rate that should cause it to change
2543  * parent, it won't.
2544  */
2545 static void clk_mux_no_reparent_round_rate(struct kunit *test)
2546 {
2547 	struct clk_multiple_parent_ctx *ctx = test->priv;
2548 	struct clk_hw *hw = &ctx->hw;
2549 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2550 	struct clk *other_parent, *parent;
2551 	unsigned long other_parent_rate;
2552 	unsigned long parent_rate;
2553 	long rounded_rate;
2554 
2555 	parent = clk_get_parent(clk);
2556 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2557 
2558 	parent_rate = clk_get_rate(parent);
2559 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2560 
2561 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2562 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2563 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2564 
2565 	other_parent_rate = clk_get_rate(other_parent);
2566 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2567 	clk_put(other_parent);
2568 
2569 	rounded_rate = clk_round_rate(clk, other_parent_rate);
2570 	KUNIT_ASSERT_GT(test, rounded_rate, 0);
2571 	KUNIT_EXPECT_EQ(test, rounded_rate, parent_rate);
2572 
2573 	clk_put(clk);
2574 }
2575 
2576 /*
2577  * Test that if we have a mux that cannot change parent and we call
2578  * clk_set_rate() on it with a rate that should cause it to change
2579  * parent, it won't.
2580  */
2581 static void clk_mux_no_reparent_set_rate(struct kunit *test)
2582 {
2583 	struct clk_multiple_parent_ctx *ctx = test->priv;
2584 	struct clk_hw *hw = &ctx->hw;
2585 	struct clk *clk = clk_hw_get_clk(hw, NULL);
2586 	struct clk *other_parent, *parent;
2587 	unsigned long other_parent_rate;
2588 	unsigned long parent_rate;
2589 	unsigned long rate;
2590 	int ret;
2591 
2592 	parent = clk_get_parent(clk);
2593 	KUNIT_ASSERT_PTR_NE(test, parent, NULL);
2594 
2595 	parent_rate = clk_get_rate(parent);
2596 	KUNIT_ASSERT_GT(test, parent_rate, 0);
2597 
2598 	other_parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
2599 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, other_parent);
2600 	KUNIT_ASSERT_FALSE(test, clk_is_match(parent, other_parent));
2601 
2602 	other_parent_rate = clk_get_rate(other_parent);
2603 	KUNIT_ASSERT_GT(test, other_parent_rate, 0);
2604 	clk_put(other_parent);
2605 
2606 	ret = clk_set_rate(clk, other_parent_rate);
2607 	KUNIT_ASSERT_EQ(test, ret, 0);
2608 
2609 	rate = clk_get_rate(clk);
2610 	KUNIT_ASSERT_GT(test, rate, 0);
2611 	KUNIT_EXPECT_EQ(test, rate, parent_rate);
2612 
2613 	clk_put(clk);
2614 }
2615 
2616 static struct kunit_case clk_mux_no_reparent_test_cases[] = {
2617 	KUNIT_CASE(clk_mux_no_reparent_round_rate),
2618 	KUNIT_CASE(clk_mux_no_reparent_set_rate),
2619 	{}
2620 };
2621 
2622 /*
2623  * Test suite for a clock mux that isn't allowed to change parent, using
2624  * the clk_hw_determine_rate_no_reparent() helper.
2625  *
2626  * These tests exercise that helper, and the proper selection of
2627  * rates and parents.
2628  */
2629 static struct kunit_suite clk_mux_no_reparent_test_suite = {
2630 	.name = "clk-mux-no-reparent",
2631 	.init = clk_mux_no_reparent_test_init,
2632 	.exit = clk_mux_no_reparent_test_exit,
2633 	.test_cases = clk_mux_no_reparent_test_cases,
2634 };
2635 
2636 struct clk_register_clk_parent_data_test_case {
2637 	const char *desc;
2638 	struct clk_parent_data pdata;
2639 };
2640 
2641 static void
2642 clk_register_clk_parent_data_test_case_to_desc(
2643 		const struct clk_register_clk_parent_data_test_case *t, char *desc)
2644 {
2645 	strcpy(desc, t->desc);
2646 }
2647 
2648 static const struct clk_register_clk_parent_data_test_case
2649 clk_register_clk_parent_data_of_cases[] = {
2650 	{
2651 		/*
2652 		 * Test that a clk registered with a struct device_node can
2653 		 * find a parent based on struct clk_parent_data::index.
2654 		 */
2655 		.desc = "clk_parent_data_of_index_test",
2656 		.pdata.index = 0,
2657 	},
2658 	{
2659 		/*
2660 		 * Test that a clk registered with a struct device_node can
2661 		 * find a parent based on struct clk_parent_data::fw_name.
2662 		 */
2663 		.desc = "clk_parent_data_of_fwname_test",
2664 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2665 	},
2666 	{
2667 		/*
2668 		 * Test that a clk registered with a struct device_node can
2669 		 * find a parent based on struct clk_parent_data::name.
2670 		 */
2671 		.desc = "clk_parent_data_of_name_test",
2672 		/* The index must be negative to indicate firmware not used */
2673 		.pdata.index = -1,
2674 		.pdata.name = CLK_PARENT_DATA_1MHZ_NAME,
2675 	},
2676 	{
2677 		/*
2678 		 * Test that a clk registered with a struct device_node can
2679 		 * find a parent based on struct
2680 		 * clk_parent_data::{fw_name,name}.
2681 		 */
2682 		.desc = "clk_parent_data_of_fwname_name_test",
2683 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2684 		.pdata.name = "not_matching",
2685 	},
2686 	{
2687 		/*
2688 		 * Test that a clk registered with a struct device_node can
2689 		 * find a parent based on struct clk_parent_data::{index,name}.
2690 		 * Index takes priority.
2691 		 */
2692 		.desc = "clk_parent_data_of_index_name_priority_test",
2693 		.pdata.index = 0,
2694 		.pdata.name = "not_matching",
2695 	},
2696 	{
2697 		/*
2698 		 * Test that a clk registered with a struct device_node can
2699 		 * find a parent based on struct
2700 		 * clk_parent_data::{index,fw_name,name}. The fw_name takes
2701 		 * priority over index and name.
2702 		 */
2703 		.desc = "clk_parent_data_of_index_fwname_name_priority_test",
2704 		.pdata.index = 1,
2705 		.pdata.fw_name = CLK_PARENT_DATA_PARENT1,
2706 		.pdata.name = "not_matching",
2707 	},
2708 };
2709 
2710 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_of_test, clk_register_clk_parent_data_of_cases,
2711 		  clk_register_clk_parent_data_test_case_to_desc)
2712 
2713 /**
2714  * struct clk_register_clk_parent_data_of_ctx - Context for clk_parent_data OF tests
2715  * @np: device node of clk under test
2716  * @hw: clk_hw for clk under test
2717  */
2718 struct clk_register_clk_parent_data_of_ctx {
2719 	struct device_node *np;
2720 	struct clk_hw hw;
2721 };
2722 
2723 static int clk_register_clk_parent_data_of_test_init(struct kunit *test)
2724 {
2725 	struct clk_register_clk_parent_data_of_ctx *ctx;
2726 
2727 	KUNIT_ASSERT_EQ(test, 0,
2728 			of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
2729 
2730 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2731 	if (!ctx)
2732 		return -ENOMEM;
2733 	test->priv = ctx;
2734 
2735 	ctx->np = of_find_compatible_node(NULL, NULL, "test,clk-parent-data");
2736 	if (!ctx->np)
2737 		return -ENODEV;
2738 
2739 	of_node_put_kunit(test, ctx->np);
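	/*
	 * Hand the node reference over to KUnit so it is put automatically
	 * when the test finishes.
	 */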
2740 
2741 	return 0;
2742 }
2743 
2744 /*
2745  * Test that a clk registered with a struct device_node can find a parent based on
2746  * struct clk_parent_data when the hw member isn't set.
2747  */
2748 static void clk_register_clk_parent_data_of_test(struct kunit *test)
2749 {
2750 	struct clk_register_clk_parent_data_of_ctx *ctx = test->priv;
2751 	struct clk_hw *parent_hw;
2752 	const struct clk_register_clk_parent_data_test_case *test_param;
2753 	struct clk_init_data init = { };
2754 	struct clk *expected_parent, *actual_parent;
2755 
2756 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->np);
2757 
2758 	expected_parent = of_clk_get_kunit(test, ctx->np, 0);
2759 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2760 
2761 	test_param = test->param_value;
2762 	init.parent_data = &test_param->pdata;
2763 	init.num_parents = 1;
2764 	init.name = "parent_data_of_test_clk";
2765 	init.ops = &clk_dummy_single_parent_ops;
2766 	ctx->hw.init = &init;
2767 	KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, ctx->np, &ctx->hw));
2768 
2769 	parent_hw = clk_hw_get_parent(&ctx->hw);
2770 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2771 
2772 	actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2773 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2774 
2775 	KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2776 }
2777 
2778 static struct kunit_case clk_register_clk_parent_data_of_test_cases[] = {
2779 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_of_test,
2780 			 clk_register_clk_parent_data_of_test_gen_params),
2781 	{}
2782 };
2783 
2784 /*
2785  * Test suite for registering clks with struct clk_parent_data and a struct
2786  * device_node.
2787  */
2788 static struct kunit_suite clk_register_clk_parent_data_of_suite = {
2789 	.name = "clk_register_clk_parent_data_of",
2790 	.init = clk_register_clk_parent_data_of_test_init,
2791 	.test_cases = clk_register_clk_parent_data_of_test_cases,
2792 };
2793 
2794 /**
2795  * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
2796  * @dev: device of clk under test
2797  * @hw: clk_hw for clk under test
2798  * @pdrv: driver to attach to find @dev
2799  */
2800 struct clk_register_clk_parent_data_device_ctx {
2801 	struct device *dev;
2802 	struct clk_hw hw;
2803 	struct platform_driver pdrv;
2804 };
2805 
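/*
 * Recover the test context from a probed platform device: the driver is
 * embedded in the context, so container_of() on the device's driver leads
 * back to the enclosing structure.
 */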
2806 static inline struct clk_register_clk_parent_data_device_ctx *
2807 clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
2808 {
2809 	return container_of(to_platform_driver(pdev->dev.driver),
2810 			    struct clk_register_clk_parent_data_device_ctx, pdrv);
2811 }
2812 
2813 static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
2814 {
2815 	struct clk_register_clk_parent_data_device_ctx *ctx;
2816 
2817 	ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
2818 	ctx->dev = &pdev->dev;
2819 
2820 	return 0;
2821 }
2822 
2823 static void clk_register_clk_parent_data_device_driver(struct kunit *test)
2824 {
2825 	struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
2826 	static const struct of_device_id match_table[] = {
2827 		{ .compatible = "test,clk-parent-data" },
2828 		{ }
2829 	};
2830 
2831 	ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
2832 	ctx->pdrv.driver.of_match_table = match_table;
2833 	ctx->pdrv.driver.name = __func__;
2834 	ctx->pdrv.driver.owner = THIS_MODULE;
2835 
2836 	KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
2837 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
2838 }
2839 
2840 static const struct clk_register_clk_parent_data_test_case
2841 clk_register_clk_parent_data_device_cases[] = {
2842 	{
2843 		/*
2844 		 * Test that a clk registered with a struct device can find a
2845 		 * parent based on struct clk_parent_data::index.
2846 		 */
2847 		.desc = "clk_parent_data_device_index_test",
2848 		.pdata.index = 1,
2849 	},
2850 	{
2851 		/*
2852 		 * Test that a clk registered with a struct device can find a
2853 		 * parent based on struct clk_parent_data::fw_name.
2854 		 */
2855 		.desc = "clk_parent_data_device_fwname_test",
2856 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2857 	},
2858 	{
2859 		/*
2860 		 * Test that a clk registered with a struct device can find a
2861 		 * parent based on struct clk_parent_data::name.
2862 		 */
2863 		.desc = "clk_parent_data_device_name_test",
2864 		/* The index must be negative to indicate firmware not used */
2865 		.pdata.index = -1,
2866 		.pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2867 	},
2868 	{
2869 		/*
2870 		 * Test that a clk registered with a struct device can find a
2871 		 * parent based on struct clk_parent_data::{fw_name,name}.
2872 		 */
2873 		.desc = "clk_parent_data_device_fwname_name_test",
2874 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2875 		.pdata.name = "not_matching",
2876 	},
2877 	{
2878 		/*
2879 		 * Test that a clk registered with a struct device can find a
2880 		 * parent based on struct clk_parent_data::{index,name}. Index
2881 		 * takes priority.
2882 		 */
2883 		.desc = "clk_parent_data_device_index_name_priority_test",
2884 		.pdata.index = 1,
2885 		.pdata.name = "not_matching",
2886 	},
2887 	{
2888 		/*
2889 		 * Test that a clk registered with a struct device can find a
2890 		 * parent based on struct clk_parent_data::{index,fwname,name}.
2891 		 * parent based on struct clk_parent_data::{index,fw_name,name}.
2892 		 */
2893 		.desc = "clk_parent_data_device_index_fwname_name_priority_test",
2894 		.pdata.index = 0,
2895 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2896 		.pdata.name = "not_matching",
2897 	},
2898 };
2899 
2900 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
2901 		  clk_register_clk_parent_data_device_cases,
2902 		  clk_register_clk_parent_data_test_case_to_desc)
2903 
2904 /*
2905  * Test that a clk registered with a struct device can find a parent based on
2906  * struct clk_parent_data when the hw member isn't set.
2907  */
2908 static void clk_register_clk_parent_data_device_test(struct kunit *test)
2909 {
2910 	struct clk_register_clk_parent_data_device_ctx *ctx;
2911 	const struct clk_register_clk_parent_data_test_case *test_param;
2912 	struct clk_hw *parent_hw;
2913 	struct clk_init_data init = { };
2914 	struct clk *expected_parent, *actual_parent;
2915 
2916 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
2917 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
2918 	test->priv = ctx;
2919 
2920 	clk_register_clk_parent_data_device_driver(test);
2921 
2922 	expected_parent = clk_get_kunit(test, ctx->dev, "50");
2923 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
2924 
2925 	test_param = test->param_value;
2926 	init.parent_data = &test_param->pdata;
2927 	init.num_parents = 1;
2928 	init.name = "parent_data_device_test_clk";
2929 	init.ops = &clk_dummy_single_parent_ops;
2930 	ctx->hw.init = &init;
2931 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
2932 
2933 	parent_hw = clk_hw_get_parent(&ctx->hw);
2934 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
2935 
2936 	actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
2937 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, actual_parent);
2938 
2939 	KUNIT_EXPECT_TRUE(test, clk_is_match(expected_parent, actual_parent));
2940 }
2941 
2942 static const struct clk_register_clk_parent_data_test_case
2943 clk_register_clk_parent_data_device_hw_cases[] = {
2944 	{
2945 		/*
2946 		 * Test that a clk registered with a struct device can find a
2947 		 * parent based on struct clk_parent_data::hw.
2948 		 */
2949 		.desc = "clk_parent_data_device_hw_index_test",
2950 		/* The index must be negative to indicate firmware not used */
2951 		.pdata.index = -1,
2952 	},
2953 	{
2954 		/*
2955 		 * Test that a clk registered with a struct device can find a
2956 		 * parent based on struct clk_parent_data::hw when
2957 		 * struct clk_parent_data::fw_name is set.
2958 		 */
2959 		.desc = "clk_parent_data_device_hw_fwname_test",
2960 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2961 	},
2962 	{
2963 		/*
2964 		 * Test that a clk registered with a struct device can find a
2965 		 * parent based on struct clk_parent_data::hw when struct
2966 		 * clk_parent_data::name is set.
2967 		 */
2968 		.desc = "clk_parent_data_device_hw_name_test",
2969 		/* The index must be negative to indicate firmware not used */
2970 		.pdata.index = -1,
2971 		.pdata.name = CLK_PARENT_DATA_50MHZ_NAME,
2972 	},
2973 	{
2974 		/*
2975 		 * Test that a clk registered with a struct device can find a
2976 		 * parent based on struct clk_parent_data::hw when struct
2977 		 * clk_parent_data::{fw_name,name} are set.
2978 		 */
2979 		.desc = "clk_parent_data_device_hw_fwname_name_test",
2980 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
2981 		.pdata.name = "not_matching",
2982 	},
2983 	{
2984 		/*
2985 		 * Test that a clk registered with a struct device can find a
2986 		 * parent based on struct clk_parent_data::hw when struct
2987 		 * clk_parent_data::index is set. The hw pointer takes
2988 		 * priority.
2989 		 */
2990 		.desc = "clk_parent_data_device_hw_index_priority_test",
2991 		.pdata.index = 0,
2992 	},
2993 	{
2994 		/*
2995 		 * Test that a clk registered with a struct device can find a
2996 		 * parent based on struct clk_parent_data::hw when
2997 		 * struct clk_parent_data::{index,fw_name,name} are set.
2998 		 * The hw pointer takes priority over everything else.
2999 		 */
3000 		.desc = "clk_parent_data_device_hw_index_fwname_name_priority_test",
3001 		.pdata.index = 0,
3002 		.pdata.fw_name = CLK_PARENT_DATA_PARENT2,
3003 		.pdata.name = "not_matching",
3004 	},
3005 };
3006 
3007 KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
3008 		  clk_register_clk_parent_data_device_hw_cases,
3009 		  clk_register_clk_parent_data_test_case_to_desc)
3010 
3011 /*
3012  * Test that a clk registered with a struct device can find a
3013  * parent based on struct clk_parent_data::hw.
3014  */
3015 static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
3016 {
3017 	struct clk_register_clk_parent_data_device_ctx *ctx;
3018 	const struct clk_register_clk_parent_data_test_case *test_param;
3019 	struct clk_dummy_context *parent;
3020 	struct clk_hw *parent_hw;
3021 	struct clk_parent_data pdata = { };
3022 	struct clk_init_data init = { };
3023 
3024 	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
3025 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
3026 	test->priv = ctx;
3027 
3028 	clk_register_clk_parent_data_device_driver(test);
3029 
3030 	parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
3031 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
3032 
3033 	parent_hw = &parent->hw;
3034 	parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
3035 						&clk_dummy_rate_ops, 0);
3036 
3037 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
3038 
3039 	test_param = test->param_value;
3040 	memcpy(&pdata, &test_param->pdata, sizeof(pdata));
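	/* The hw pointer should take priority over any index/fw_name/name set by the case. */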
3041 	pdata.hw = parent_hw;
3042 	init.parent_data = &pdata;
3043 	init.num_parents = 1;
3044 	init.ops = &clk_dummy_single_parent_ops;
3045 	init.name = "parent_data_device_hw_test_clk";
3046 	ctx->hw.init = &init;
3047 	KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
3048 
3049 	KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
3050 }
3051 
3052 static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
3053 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_test,
3054 			 clk_register_clk_parent_data_device_test_gen_params),
3055 	KUNIT_CASE_PARAM(clk_register_clk_parent_data_device_hw_test,
3056 			 clk_register_clk_parent_data_device_hw_test_gen_params),
3057 	{}
3058 };
3059 
3060 static int clk_register_clk_parent_data_device_init(struct kunit *test)
3061 {
3062 	KUNIT_ASSERT_EQ(test, 0,
3063 			of_overlay_apply_kunit(test, kunit_clk_parent_data_test));
3064 
3065 	return 0;
3066 }
3067 
3068 /*
3069  * Test suite for registering clks with struct clk_parent_data and a struct
3070  * device.
3071  */
3072 static struct kunit_suite clk_register_clk_parent_data_device_suite = {
3073 	.name = "clk_register_clk_parent_data_device",
3074 	.init = clk_register_clk_parent_data_device_init,
3075 	.test_cases = clk_register_clk_parent_data_device_test_cases,
3076 };
3077 
3078 kunit_test_suites(
3079 	&clk_leaf_mux_set_rate_parent_test_suite,
3080 	&clk_test_suite,
3081 	&clk_multiple_parents_mux_test_suite,
3082 	&clk_mux_no_reparent_test_suite,
3083 	&clk_mux_notifier_test_suite,
3084 	&clk_orphan_transparent_multiple_parent_mux_test_suite,
3085 	&clk_orphan_transparent_single_parent_test_suite,
3086 	&clk_orphan_two_level_root_last_test_suite,
3087 	&clk_range_test_suite,
3088 	&clk_range_maximize_test_suite,
3089 	&clk_range_minimize_test_suite,
3090 	&clk_register_clk_parent_data_of_suite,
3091 	&clk_register_clk_parent_data_device_suite,
3092 	&clk_single_parent_mux_test_suite,
3093 	&clk_uncached_test_suite,
3094 );
3095 MODULE_DESCRIPTION("Kunit tests for clk framework");
3096 MODULE_LICENSE("GPL v2");
3097