// SPDX-License-Identifier: GPL-2.0-only
/*
 * DT idle states parsing code.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 */

#define pr_fmt(fmt) "DT idle-states: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>

#include "dt_idle_states.h"

static int init_state_node(struct cpuidle_state *idle_state,
			   const struct of_device_id *match_id,
			   struct device_node *state_node)
{
	int err;
	const char *desc;

	/*
	 * CPUidle drivers are expected to initialize the const void *data
	 * pointer of the passed in struct of_device_id array to the idle
	 * state enter function (see the illustrative sketch following this
	 * function).
	 */
	idle_state->enter = match_id->data;
	/*
	 * Since this is not a "coupled" state, it's safe to assume interrupts
	 * won't be enabled when it exits, allowing the tick to be frozen
	 * safely. So the enter() callback can also be used as the
	 * enter_s2idle() callback.
	 */
	idle_state->enter_s2idle = match_id->data;

	err = of_property_read_u32(state_node, "wakeup-latency-us",
				   &idle_state->exit_latency);
	if (err) {
		u32 entry_latency, exit_latency;

		err = of_property_read_u32(state_node, "entry-latency-us",
					   &entry_latency);
		if (err) {
			pr_debug(" * %pOF missing entry-latency-us property\n",
				 state_node);
			return -EINVAL;
		}

		err = of_property_read_u32(state_node, "exit-latency-us",
					   &exit_latency);
		if (err) {
			pr_debug(" * %pOF missing exit-latency-us property\n",
				 state_node);
			return -EINVAL;
		}
		/*
		 * If wakeup-latency-us is missing, default to entry+exit
		 * latencies as defined in the idle states bindings.
		 */
		idle_state->exit_latency = entry_latency + exit_latency;
	}

	err = of_property_read_u32(state_node, "min-residency-us",
				   &idle_state->target_residency);
	if (err) {
		pr_debug(" * %pOF missing min-residency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_string(state_node, "idle-state-name", &desc);
	if (err)
		desc = state_node->name;

	idle_state->flags = CPUIDLE_FLAG_RCU_IDLE;
	if (of_property_read_bool(state_node, "local-timer-stop"))
		idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
	/*
	 * TODO:
	 *	replace with kstrdup and pointer assignment when name
	 *	and desc become string pointers
	 */
	strscpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN);
	strscpy(idle_state->desc, desc, CPUIDLE_DESC_LEN);
	return 0;
}
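
/*
 * Illustrative sketch, not taken from any in-tree platform driver: the kind
 * of of_device_id array whose .data pointers init_state_node() consumes. The
 * "vendor,idle-state" compatible, plat_enter_idle() and plat_idle_state_match
 * names below are hypothetical placeholders.
 *
 *	static int plat_enter_idle(struct cpuidle_device *dev,
 *				   struct cpuidle_driver *drv, int index)
 *	{
 *		// Ask the platform power controller/firmware to enter the
 *		// idle state described by drv->states[index] here.
 *		return index;
 *	}
 *
 *	static const struct of_device_id plat_idle_state_match[] = {
 *		{ .compatible = "vendor,idle-state", .data = plat_enter_idle },
 *		{ },
 *	};
 *
 * For each matching DT node, init_state_node() sets exit_latency from
 * "wakeup-latency-us" (or "entry-latency-us" + "exit-latency-us"),
 * target_residency from "min-residency-us", and points both enter() and
 * enter_s2idle() at the .data callback.
 */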

/*
 * Check that the idle state is uniform across all CPUs in the CPUidle driver
 * cpumask.
 */
static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
			     const cpumask_t *cpumask)
{
	int cpu;
	struct device_node *cpu_node, *curr_state_node;

	/*
	 * Compare idle state phandles for index idx on all CPUs in the
	 * CPUidle driver cpumask. Start from the next logical cpu following
	 * cpumask_first(cpumask), since that's the CPU state_node was
	 * retrieved from. If a mismatch is found, bail out straight away
	 * since we certainly hit a firmware misconfiguration.
	 */
	cpu = cpumask_first(cpumask) + 1;
	for_each_cpu_from(cpu, cpumask) {
		cpu_node = of_cpu_device_node_get(cpu);
		curr_state_node = of_get_cpu_state_node(cpu_node, idx);
		of_node_put(curr_state_node);
		of_node_put(cpu_node);
		if (state_node != curr_state_node)
			return false;
	}

	return true;
}

/**
 * dt_init_idle_driver() - Parse the DT idle states and initialize the
 *			   idle driver states array
 * @drv:	  Pointer to CPU idle driver to be initialized
 * @matches:	  Array of of_device_id match structures to search in for
 *		  compatible idle state nodes. The data pointer for each valid
 *		  struct of_device_id entry in the matches array must point to
 *		  a function with the following signature, which corresponds to
 *		  the CPUidle state enter function signature:
 *
 *		  int (*)(struct cpuidle_device *dev,
 *			  struct cpuidle_driver *drv,
 *			  int index);
 *
 * @start_idx:	  First idle state index to be initialized
 *
 * If DT idle states are detected and are valid, the state count and states
 * array entries in the cpuidle driver are initialized accordingly, starting
 * from index start_idx. An illustrative usage sketch is provided at the end
 * of this file.
 *
 * Return: number of valid DT idle states parsed, <0 on failure
 */
int dt_init_idle_driver(struct cpuidle_driver *drv,
			const struct of_device_id *matches,
			unsigned int start_idx)
{
	struct cpuidle_state *idle_state;
	struct device_node *state_node, *cpu_node;
	const struct of_device_id *match_id;
	int i, err = 0;
	const cpumask_t *cpumask;
	unsigned int state_idx = start_idx;

	if (state_idx >= CPUIDLE_STATE_MAX)
		return -EINVAL;
	/*
	 * We get the idle states for the first logical cpu in the
	 * driver mask (or cpu_possible_mask if the driver cpumask is not set)
	 * and we check through idle_state_valid() that they are uniform
	 * across CPUs; otherwise we hit a firmware misconfiguration.
	 */
	cpumask = drv->cpumask ? : cpu_possible_mask;
	cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));

	for (i = 0; ; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i);
		if (!state_node)
			break;

		match_id = of_match_node(matches, state_node);
		if (!match_id) {
			err = -ENODEV;
			break;
		}

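		/* Skip idle states that are disabled in the DT ("status" property). */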
		if (!of_device_is_available(state_node)) {
			of_node_put(state_node);
			continue;
		}

		if (!idle_state_valid(state_node, i, cpumask)) {
			pr_warn("%pOF idle state not valid, bailing out\n",
				state_node);
			err = -EINVAL;
			break;
		}

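		/* drv->states[] is full; stop here and keep what was parsed. */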
		if (state_idx == CPUIDLE_STATE_MAX) {
			pr_warn("State index reached static CPU idle driver states array size\n");
			break;
		}

		idle_state = &drv->states[state_idx++];
		err = init_state_node(idle_state, match_id, state_node);
		if (err) {
			pr_err("Parsing idle state node %pOF failed with err %d\n",
			       state_node, err);
			err = -EINVAL;
			break;
		}
		of_node_put(state_node);
	}

	of_node_put(state_node);
	of_node_put(cpu_node);
	if (err)
		return err;

	/* Set the number of total supported idle states. */
	drv->state_count = state_idx;

	/*
	 * Return the number of present and valid DT idle states, which can
	 * also be 0 on platforms with missing DT idle states or legacy DT
	 * configuration predating the DT idle states bindings.
	 */
	return state_idx - start_idx;
}
EXPORT_SYMBOL_GPL(dt_init_idle_driver);
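
/*
 * Illustrative usage sketch, not an actual in-tree driver: a platform CPUidle
 * driver typically declares its state 0 (e.g. plain WFI) statically and lets
 * dt_init_idle_driver() fill in the remaining states from the DT, reusing an
 * of_device_id array such as the hypothetical plat_idle_state_match[]
 * sketched after init_state_node() above.
 *
 *	static struct cpuidle_driver plat_idle_driver = {
 *		.name = "plat_idle",
 *		.owner = THIS_MODULE,
 *		// .states[0] would be filled in with the default WFI state.
 *	};
 *
 *	static int __init plat_idle_init(void)
 *	{
 *		int ret;
 *
 *		// DT parsing starts at index 1, past the static WFI state.
 *		ret = dt_init_idle_driver(&plat_idle_driver,
 *					  plat_idle_state_match, 1);
 *		if (ret <= 0)
 *			return ret ? : -ENODEV;
 *
 *		return cpuidle_register(&plat_idle_driver, NULL);
 *	}
 */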