xref: /linux/drivers/clk/nuvoton/clk-ma35d1-divider.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Nuvoton Technology Corp.
 * Author: Chi-Fang Li <cfli0@nuvoton.com>
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>

#include "clk-ma35d1.h"

struct ma35d1_adc_clk_div {
	struct clk_hw hw;
	void __iomem *reg;
	u8 shift;		/* bit offset of the divider field in the register */
	u8 width;		/* width of the divider field in bits */
	u32 mask;		/* extra bit kept set on every update, 0 if unused */
	const struct clk_div_table *table;
	/* protects concurrent access to clock divider registers */
	spinlock_t *lock;
};

static inline struct ma35d1_adc_clk_div *to_ma35d1_adc_clk_div(struct clk_hw *_hw)
{
	return container_of(_hw, struct ma35d1_adc_clk_div, hw);
}
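
/*
 * The divider field holds the table value minus one, and each table
 * entry divides by twice its value, so the effective divide ratio is
 * 2 * (field + 1). For example (illustrative numbers only), a field
 * value of 3 on an 8 MHz parent gives 8 MHz / (2 * 4) = 1 MHz.
 */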
static unsigned long ma35d1_clkdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	unsigned int val;
	struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);

	/* the register field stores the table value minus one */
	val = readl_relaxed(dclk->reg) >> dclk->shift;
	val &= clk_div_mask(dclk->width);
	val += 1;
	return divider_recalc_rate(hw, parent_rate, val, dclk->table,
				   CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}

static long ma35d1_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
{
	struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);

	return divider_round_rate(hw, rate, prate, dclk->table,
				  dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
}
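
/*
 * Pick the closest supported divider for the requested rate, then
 * read-modify-write the divider field under the shared lock. The field
 * is programmed as the table value minus one, and dclk->mask (when
 * nonzero) is OR'd back in so its control bit stays set.
 */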
static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
{
	int value;
	unsigned long flags;
	u32 data;
	struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);

	value = divider_get_val(rate, parent_rate, dclk->table,
				dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
	if (value < 0)
		return value;

	spin_lock_irqsave(dclk->lock, flags);

	/* program the field as (table value - 1) and keep the mask bit set */
	data = readl_relaxed(dclk->reg);
	data &= ~(clk_div_mask(dclk->width) << dclk->shift);
	data |= (value - 1) << dclk->shift;
	data |= dclk->mask;
	writel_relaxed(data, dclk->reg);

	spin_unlock_irqrestore(dclk->lock, flags);
	return 0;
}

static const struct clk_ops ma35d1_adc_clkdiv_ops = {
	.recalc_rate = ma35d1_clkdiv_recalc_rate,
	.round_rate = ma35d1_clkdiv_round_rate,
	.set_rate = ma35d1_clkdiv_set_rate,
};
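
/**
 * ma35d1_reg_adc_clkdiv - register a MA35D1 ADC clock divider
 * @dev: device registering the clock, used for devm allocations
 * @name: name of the new clock
 * @parent_hw: parent clock
 * @lock: spinlock protecting the divider register
 * @flags: framework-level clock flags (CLK_*)
 * @reg: address of the divider register
 * @shift: bit offset of the divider field in @reg
 * @width: width of the divider field in bits
 * @mask_bit: extra register bit kept set on updates, or 0 for none
 *
 * The supported divide ratios are 2 * (field value + 1). Returns the
 * registered clk_hw on success or an ERR_PTR() on failure.
 */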
struct clk_hw *ma35d1_reg_adc_clkdiv(struct device *dev, const char *name,
				     struct clk_hw *parent_hw, spinlock_t *lock,
				     unsigned long flags, void __iomem *reg,
				     u8 shift, u8 width, u32 mask_bit)
{
	struct ma35d1_adc_clk_div *div;
	struct clk_init_data init;
	struct clk_div_table *table;
	struct clk_parent_data pdata = { .index = 0 };
	u32 max_div, min_div;
	struct clk_hw *hw;
	int ret;
	int i;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	max_div = clk_div_mask(width) + 1;
	min_div = 1;

	/* one extra entry for the zero-terminated sentinel */
	table = devm_kcalloc(dev, max_div + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	/* each table value maps to twice its value as the divide ratio */
	for (i = 0; i < max_div; i++) {
		table[i].val = min_div + i;
		table[i].div = 2 * table[i].val;
	}
	table[max_div].val = 0;
	table[max_div].div = 0;

	memset(&init, 0, sizeof(init));
	init.name = name;
	init.ops = &ma35d1_adc_clkdiv_ops;
	init.flags = flags;
	pdata.hw = parent_hw;
	init.parent_data = &pdata;
	init.num_parents = 1;

	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->mask = mask_bit ? BIT(mask_bit) : 0;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);
	return hw;
}
EXPORT_SYMBOL_GPL(ma35d1_reg_adc_clkdiv);
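
/*
 * Example usage (an illustrative sketch, not taken from this file): a
 * clock provider could register a 3-bit ADC divider roughly as follows.
 * The names "hws", "EADC_DIV", "parent_hw", "clk_base", and the 0x30
 * offset below are hypothetical placeholders.
 *
 *	static DEFINE_SPINLOCK(ma35d1_lock);
 *
 *	hws[EADC_DIV] = ma35d1_reg_adc_clkdiv(dev, "eadc_div", parent_hw,
 *					      &ma35d1_lock, 0,
 *					      clk_base + 0x30, 24, 3, 0);
 *
 * With width = 3 the table covers divide ratios 2, 4, ..., 16, and
 * mask_bit = 0 means no extra control bit is kept set on writes.
 */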