// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/string_choices.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "registers.h"
#include "trace.h"

#define AVS_ADSPCS_DELAY_US	1000
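
/*
 * avs_dsp_core_power - Power the selected DSP cores up or down.
 *
 * Sets or clears the set-power-active (SPA) bits in ADSPCS for @core_mask and
 * then polls the current-power-active (CPA) bits until hardware reports the
 * requested state or the poll times out.
 */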
int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "power", power);

	mask = AVS_ADSPCS_SPA_MASK(core_mask);
	value = power ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);
	/* Delay the polling to avoid false positives. */
	usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);

	mask = AVS_ADSPCS_CPA_MASK(core_mask);
	value = power ? mask : 0;

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret)
		dev_err(adev->dev, "core_mask %d power %s failed: %d\n",
			core_mask, str_on_off(power), ret);

	return ret;
}
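
/*
 * avs_dsp_core_reset - Assert or de-assert reset for the selected DSP cores.
 *
 * Updates the core-reset (CRST) bits in ADSPCS for @core_mask and polls the
 * register until it reflects the requested reset state.
 */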
int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "reset", reset);

	mask = AVS_ADSPCS_CRST_MASK(core_mask);
	value = reset ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret)
		dev_err(adev->dev, "core_mask %d %s reset failed: %d\n",
			core_mask, reset ? "enter" : "exit", ret);

	return ret;
}
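
/*
 * avs_dsp_core_stall - Stall or unstall the selected DSP cores.
 *
 * Updates the core-stall (CSTALL) bits in ADSPCS for @core_mask, polls for the
 * new value and then sleeps briefly so the change can settle in hardware.
 */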
int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "stall", stall);

	mask = AVS_ADSPCS_CSTALL_MASK(core_mask);
	value = stall ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS,
				       reg, (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret) {
		dev_err(adev->dev, "core_mask %d %sstall failed: %d\n",
			core_mask, stall ? "" : "un", ret);
		return ret;
	}

	/* Give HW time to propagate the change. */
	usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);
	return 0;
}
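
/*
 * avs_dsp_core_enable - Bring the selected cores fully up: power them on,
 * take them out of reset and unstall them. Any failing step aborts the
 * sequence and its status is returned.
 */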
int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	ret = avs_dsp_op(adev, power, core_mask, true);
	if (ret)
		return ret;

	ret = avs_dsp_op(adev, reset, core_mask, false);
	if (ret)
		return ret;

	return avs_dsp_op(adev, stall, core_mask, false);
}
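
/*
 * avs_dsp_core_disable - Mirror of avs_dsp_core_enable(): stall the cores,
 * put them back into reset and power them off. Intermediate errors are
 * ignored so the shutdown always runs to completion; only the final power-off
 * status is returned.
 */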
int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask)
{
	/* No error checks to allow for complete DSP shutdown. */
	avs_dsp_op(adev, stall, core_mask, true);
	avs_dsp_op(adev, reset, core_mask, true);

	return avs_dsp_op(adev, power, core_mask, false);
}
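
/*
 * Enable @core_mask in hardware and, for cores other than the main one,
 * request D0 for them through the SET_DX IPC.
 */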
static int avs_dsp_enable(struct avs_dev *adev, u32 core_mask)
{
	u32 mask;
	int ret;

	ret = avs_dsp_core_enable(adev, core_mask);
	if (ret < 0)
		return ret;

	mask = core_mask & ~AVS_MAIN_CORE_MASK;
	if (!mask)
		/*
		 * Without the main core, FW is dead anyway,
		 * so setting D0 for it is futile.
		 */
		return 0;

	ret = avs_ipc_set_dx(adev, mask, true);
	return AVS_IPC_RET(ret);
}
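
/*
 * Counterpart of avs_dsp_enable(): request power-down for @core_mask through
 * the SET_DX IPC first, then disable the cores in hardware.
 */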
static int avs_dsp_disable(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	ret = avs_ipc_set_dx(adev, core_mask, false);
	if (ret)
		return AVS_IPC_RET(ret);

	return avs_dsp_core_disable(adev, core_mask);
}
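
/*
 * Take a reference on a non-main core. The first user disables d0ix and
 * powers the core up; requests for the main core are a no-op here.
 */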
static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id)
{
	u32 mask;
	int ret;

	mask = BIT_MASK(core_id);
	if (mask == AVS_MAIN_CORE_MASK)
		/* nothing to do for main core */
		return 0;
	if (core_id >= adev->hw_cfg.dsp_cores) {
		ret = -EINVAL;
		goto err;
	}

	adev->core_refs[core_id]++;
	if (adev->core_refs[core_id] == 1) {
		/*
		 * No cores other than the main core can be running for the
		 * DSP to achieve d0ix. A conscious SET_D0IX IPC failure is
		 * permitted; the d0ix power state simply won't be attempted
		 * anymore.
		 */
		ret = avs_dsp_disable_d0ix(adev);
		if (ret && ret != -AVS_EIPC)
			goto err_disable_d0ix;

		ret = avs_dsp_enable(adev, mask);
		if (ret)
			goto err_enable_dsp;
	}

	return 0;

err_enable_dsp:
	avs_dsp_enable_d0ix(adev);
err_disable_d0ix:
	adev->core_refs[core_id]--;
err:
	dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret);
	return ret;
}
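
/*
 * Drop a reference taken with avs_dsp_get_core(). When the last reference to
 * a non-main core is gone, the core is disabled and d0ix is allowed again.
 */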
static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id)
{
	u32 mask;
	int ret;

	mask = BIT_MASK(core_id);
	if (mask == AVS_MAIN_CORE_MASK)
		/* nothing to do for main core */
		return 0;
	if (core_id >= adev->hw_cfg.dsp_cores) {
		ret = -EINVAL;
		goto err;
	}

	adev->core_refs[core_id]--;
	if (!adev->core_refs[core_id]) {
		ret = avs_dsp_disable(adev, mask);
		if (ret)
			goto err;

		/* Match disable_d0ix in avs_dsp_get_core(). */
		avs_dsp_enable_d0ix(adev);
	}

	return 0;
err:
	dev_err(adev->dev, "put core %d failed: %d\n", core_id, ret);
	return ret;
}
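
/*
 * Instantiate a module on the DSP: allocate an instance id, take a core
 * reference, transfer the module code if this is the first instance and it is
 * not loaded yet, then send INIT_INSTANCE over IPC. On success the new id is
 * stored in @instance_id.
 */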
int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
			u8 core_id, u8 domain, void *param, u32 param_size,
			u8 *instance_id)
{
	struct avs_module_entry mentry;
	bool was_loaded = false;
	int ret, id;

	id = avs_module_id_alloc(adev, module_id);
	if (id < 0)
		return id;

	ret = avs_get_module_id_entry(adev, module_id, &mentry);
	if (ret)
		goto err_mod_entry;

	ret = avs_dsp_get_core(adev, core_id);
	if (ret)
		goto err_mod_entry;

	/* Load code into memory if this is the first instance. */
	if (!id && !avs_module_entry_is_loaded(&mentry)) {
		ret = avs_dsp_op(adev, transfer_mods, true, &mentry, 1);
		if (ret) {
			dev_err(adev->dev, "load modules failed: %d\n", ret);
			goto err_mod_entry;
		}
		was_loaded = true;
	}

	ret = avs_ipc_init_instance(adev, module_id, id, ppl_instance_id,
				    core_id, domain, param, param_size);
	if (ret) {
		ret = AVS_IPC_RET(ret);
		goto err_ipc;
	}

	*instance_id = id;
	return 0;

err_ipc:
	if (was_loaded)
		avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
	avs_dsp_put_core(adev, core_id);
err_mod_entry:
	avs_module_id_free(adev, module_id, id);
	return ret;
}
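
/*
 * Tear down a module instance: delete it over IPC when no pipeline owns it,
 * free its instance id, unload the module code once the last instance of a
 * loadable module is gone and drop the core reference.
 */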
void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u8 instance_id,
			   u8 ppl_instance_id, u8 core_id)
{
	struct avs_module_entry mentry;
	int ret;

	/* Modules not owned by any pipeline need to be freed explicitly. */
	if (ppl_instance_id == INVALID_PIPELINE_ID)
		avs_ipc_delete_instance(adev, module_id, instance_id);

	avs_module_id_free(adev, module_id, instance_id);

	ret = avs_get_module_id_entry(adev, module_id, &mentry);
	/* Unload occupied memory if this was the last instance. */
	if (!ret && mentry.type.load_type == AVS_MODULE_LOAD_TYPE_LOADABLE) {
		if (avs_is_module_ida_empty(adev, module_id)) {
			ret = avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
			if (ret)
				dev_err(adev->dev, "unload modules failed: %d\n", ret);
		}
	}

	avs_dsp_put_core(adev, core_id);
}
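
/*
 * Allocate a pipeline instance id and create the pipeline on the DSP via IPC.
 *
 * A schematic lifecycle built from the helpers in this file (ids, parameters
 * and error handling are illustrative only):
 *
 *	u8 ppl_id, inst_id;
 *
 *	avs_dsp_create_pipeline(adev, req_size, prio, lp, attrs, &ppl_id);
 *	avs_dsp_init_module(adev, mod_id, ppl_id, core, domain, cfg, cfg_size,
 *			    &inst_id);
 *	...
 *	avs_dsp_delete_module(adev, mod_id, inst_id, ppl_id, core);
 *	avs_dsp_delete_pipeline(adev, ppl_id);
 */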
int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
			    bool lp, u16 attributes, u8 *instance_id)
{
	struct avs_fw_cfg *fw_cfg = &adev->fw_cfg;
	int ret, id;

	id = ida_alloc_max(&adev->ppl_ida, fw_cfg->max_ppl_count - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	ret = avs_ipc_create_pipeline(adev, req_size, priority, id, lp, attributes);
	if (ret) {
		ida_free(&adev->ppl_ida, id);
		return AVS_IPC_RET(ret);
	}

	*instance_id = id;
	return 0;
}
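
/*
 * Delete the pipeline on the DSP via IPC and release its instance id. The id
 * is freed even when the IPC fails; the IPC status is still returned.
 */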
int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id)
{
	int ret;

	ret = avs_ipc_delete_pipeline(adev, instance_id);
	if (ret)
		ret = AVS_IPC_RET(ret);

	ida_free(&adev->ppl_ida, instance_id);
	return ret;
}