// SPDX-License-Identifier: GPL-2.0-only

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <soc/tegra/common.h>

#include "clk.h"
/*
 * This driver manages the performance state of the core power domain for the
 * independent PLLs and system clocks. A virtual clock device is created for
 * each such clock, see tegra_clk_dev_register().
 */

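/**
 * struct tegra_clk_device - per-clock virtual device data
 * @clk_nb: clock rate-change notifier block
 * @dev: virtual clock device created by tegra_clk_dev_register()
 * @hw: clk_hw of the clock managed by this device
 * @lock: protects power domain performance-state updates
 */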
struct tegra_clk_device {
	struct notifier_block clk_nb;
	struct device *dev;
	struct clk_hw *hw;
	struct mutex lock;
};

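/*
 * Translate a clock rate into a performance state of the core power domain:
 * look up the OPP covering the given rate and apply the required pstate from
 * the OPP table to the generic power domain of the virtual clock device.
 */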
static int tegra_clock_set_pd_state(struct tegra_clk_device *clk_dev,
				    unsigned long rate)
{
	struct device *dev = clk_dev->dev;
	struct dev_pm_opp *opp;
	unsigned int pstate;

	opp = dev_pm_opp_find_freq_ceil(dev, &rate);
	if (opp == ERR_PTR(-ERANGE)) {
		/*
		 * Some clocks may be unused by a particular board and may
		 * have an uninitialized clock rate that is overly high. In
		 * this case the clock is expected to be disabled, but we
		 * still need to set up the performance state of the power
		 * domain and must not error out clk initialization. A
		 * typical example is a PCIe clock on Android tablets.
		 */
		dev_dbg(dev, "failed to find ceil OPP for %luHz\n", rate);
		opp = dev_pm_opp_find_freq_floor(dev, &rate);
	}

	if (IS_ERR(opp)) {
		dev_err(dev, "failed to find OPP for %luHz: %pe\n", rate, opp);
		return PTR_ERR(opp);
	}

	pstate = dev_pm_opp_get_required_pstate(opp, 0);
	dev_pm_opp_put(opp);

	return dev_pm_genpd_set_performance_state(dev, pstate);
}

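/*
 * Clk rate-change notifier: raise the power domain performance state before
 * the rate goes up (PRE_RATE_CHANGE), lower it only after the rate has gone
 * down (POST_RATE_CHANGE), and restore the old state if the change aborts,
 * so the domain is always performant enough for the current clock rate.
 */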
static int tegra_clock_change_notify(struct notifier_block *nb,
				     unsigned long msg, void *data)
{
	struct clk_notifier_data *cnd = data;
	struct tegra_clk_device *clk_dev;
	int err = 0;

	clk_dev = container_of(nb, struct tegra_clk_device, clk_nb);

	mutex_lock(&clk_dev->lock);
	switch (msg) {
	case PRE_RATE_CHANGE:
		if (cnd->new_rate > cnd->old_rate)
			err = tegra_clock_set_pd_state(clk_dev, cnd->new_rate);
		break;

	case ABORT_RATE_CHANGE:
		err = tegra_clock_set_pd_state(clk_dev, cnd->old_rate);
		break;

	case POST_RATE_CHANGE:
		if (cnd->new_rate < cnd->old_rate)
			err = tegra_clock_set_pd_state(clk_dev, cnd->new_rate);
		break;

	default:
		break;
	}
	mutex_unlock(&clk_dev->lock);

	return notifier_from_errno(err);
}

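/*
 * Bring the power domain performance state in line with the current clock
 * rate, which may have been set up before this driver attached.
 */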
static int tegra_clock_sync_pd_state(struct tegra_clk_device *clk_dev)
{
	unsigned long rate;
	int ret;

	mutex_lock(&clk_dev->lock);

	rate = clk_hw_get_rate(clk_dev->hw);
	ret = tegra_clock_set_pd_state(clk_dev, rate);

	mutex_unlock(&clk_dev->lock);

	return ret;
}

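/*
 * Probe a virtual clock device: require a power domain, look up the clock,
 * initialize the OPP table, subscribe to rate-change notifications and sync
 * the initial power domain performance state to the current clock rate.
 */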
static int tegra_clock_probe(struct platform_device *pdev)
{
	struct tegra_core_opp_params opp_params = {};
	struct tegra_clk_device *clk_dev;
	struct device *dev = &pdev->dev;
	struct clk *clk;
	int err;

	if (!dev->pm_domain)
		return -EINVAL;

	clk_dev = devm_kzalloc(dev, sizeof(*clk_dev), GFP_KERNEL);
	if (!clk_dev)
		return -ENOMEM;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_dev->dev = dev;
	clk_dev->hw = __clk_get_hw(clk);
	clk_dev->clk_nb.notifier_call = tegra_clock_change_notify;
	mutex_init(&clk_dev->lock);

	platform_set_drvdata(pdev, clk_dev);

	/*
	 * Runtime PM was already enabled for this device by the parent clk
	 * driver, and the power domain state must be synced under the clk_dev
	 * lock, hence we don't use the common OPP helper that initializes the
	 * OPP state. The common helper may also fail to find a ceil rate for
	 * some clocks; that case is handled by this driver.
	 */
	err = devm_tegra_core_dev_init_opp_table(dev, &opp_params);
	if (err)
		return err;

	err = clk_notifier_register(clk, &clk_dev->clk_nb);
	if (err) {
		dev_err(dev, "failed to register clk notifier: %d\n", err);
		return err;
	}

	/*
	 * The driver is attaching to a potentially active/resumed clock, hence
	 * we need to sync the power domain performance state in accordance
	 * with the clock rate if the clock is resumed.
	 */
	err = tegra_clock_sync_pd_state(clk_dev);
	if (err)
		goto unreg_clk;

	return 0;

unreg_clk:
	clk_notifier_unregister(clk, &clk_dev->clk_nb);

	return err;
}

/*
 * The Tegra GENPD driver enables clocks during the NOIRQ phase. This can't be
 * done for clocks served by this driver because runtime PM is unavailable in
 * the NOIRQ phase. We keep the clocks resumed during suspend to mitigate this
 * problem. In practice this makes no difference from a power management
 * perspective since the voltage is kept at a nominal level during suspend
 * anyway.
 */
static const struct dev_pm_ops tegra_clock_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_resume_and_get, pm_runtime_put)
};

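/*
 * The virtual clock devices matched below are created by
 * tegra_clk_dev_register() in the parent clk driver. As an illustrative
 * sketch only (property values and phandle names depend on the SoC dtsi),
 * such a device is expected to carry a power domain and an OPP table, e.g.:
 *
 *	sclk {
 *		compatible = "nvidia,tegra30-sclk";
 *		operating-points-v2 = <&sclk_opp_table>;
 *		power-domains = <&pd_core>;
 *	};
 */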
static const struct of_device_id tegra_clock_match[] = {
	{ .compatible = "nvidia,tegra20-sclk" },
	{ .compatible = "nvidia,tegra30-sclk" },
	{ .compatible = "nvidia,tegra30-pllc" },
	{ .compatible = "nvidia,tegra30-plle" },
	{ .compatible = "nvidia,tegra30-pllm" },
	{ }
};

static struct platform_driver tegra_clock_driver = {
	.driver = {
		.name = "tegra-clock",
		.of_match_table = tegra_clock_match,
		.pm = &tegra_clock_pm,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_clock_probe,
};
builtin_platform_driver(tegra_clock_driver);