xref: /linux/sound/soc/intel/avs/ipc.c (revision 2f1f570cd730c81807ae143a83766068dd82d577)
1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
4 //
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 //          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7 //
8 
9 #include <linux/slab.h>
10 #include <sound/hdaudio_ext.h>
11 #include "avs.h"
12 #include "messages.h"
13 #include "registers.h"
14 
15 #define AVS_IPC_TIMEOUT_MS	300
16 
/*
 * Recover from a fatal DSP fault: disconnect every active PCM stream,
 * forcibly power off all cores and attempt a full firmware reboot.
 * Executed from the recovery workqueue; clears the 'recovering' guard
 * set by avs_dsp_exception_caught() once done.
 */
static void avs_dsp_recovery(struct avs_dev *adev)
{
	struct avs_soc_component *acomp;
	unsigned int core_mask;
	int ret;

	mutex_lock(&adev->comp_list_mutex);
	/* disconnect all running streams */
	list_for_each_entry(acomp, &adev->comp_list, node) {
		struct snd_soc_pcm_runtime *rtd;
		struct snd_soc_card *card;

		card = acomp->base.card;
		if (!card)
			continue;

		for_each_card_rtds(card, rtd) {
			struct snd_pcm *pcm;
			int dir;

			pcm = rtd->pcm;
			/* BE links (no_pcm) have no userspace-facing device to stop. */
			if (!pcm || rtd->dai_link->no_pcm)
				continue;

			for_each_pcm_streams(dir) {
				struct snd_pcm_substream *substream;

				substream = pcm->streams[dir].substream;
				if (!substream || !substream->runtime)
					continue;

				/*
				 * NOTE(review): snd_pcm_stop() documents that the
				 * stream lock should be held by the caller - confirm
				 * this call path is safe without it.
				 */
				snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
			}
		}
	}
	mutex_unlock(&adev->comp_list_mutex);

	/* forcibly shutdown all cores */
	core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
	avs_dsp_core_disable(adev, core_mask);

	/* attempt dsp reboot */
	ret = avs_dsp_boot_firmware(adev, true);
	if (ret < 0)
		dev_err(adev->dev, "dsp reboot failed: %d\n", ret);

	/* Runtime PM was disabled in avs_dsp_exception_caught(); re-enable it. */
	pm_runtime_mark_last_busy(adev->dev);
	pm_runtime_enable(adev->dev);
	pm_request_autosuspend(adev->dev);

	/* Allow a subsequent exception to trigger a new recovery. */
	atomic_set(&adev->ipc->recovering, 0);
}
69 
70 static void avs_dsp_recovery_work(struct work_struct *work)
71 {
72 	struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);
73 
74 	avs_dsp_recovery(to_avs_dev(ipc->dev));
75 }
76 
/*
 * Handle an EXCEPTION_CAUGHT notification - or an IPC timeout treated as
 * one: sever communication, dump firmware state and schedule recovery.
 * Called from the IRQ thread and from avs_dsp_do_send_msg() on timeout.
 */
static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Account for the double-exception case. */
	ipc->ready = false;

	/* 0 -> 1 transition guard; only one recovery may be in flight. */
	if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
		dev_err(adev->dev, "dsp recovery is already in progress\n");
		return;
	}

	dev_crit(adev->dev, "communication severed, rebooting dsp..\n");

	/* Re-enabled on recovery completion. */
	pm_runtime_disable(adev->dev);

	/* Process received notification. */
	avs_dsp_op(adev, coredump, msg);

	schedule_work(&ipc->recovery_work);
}
99 
100 static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
101 {
102 	struct avs_ipc *ipc = adev->ipc;
103 	union avs_reply_msg msg = AVS_MSG(header);
104 
105 	ipc->rx.header = header;
106 	/* Abort copying payload if request processing was unsuccessful. */
107 	if (!msg.status) {
108 		/* update size in case of LARGE_CONFIG_GET */
109 		if (msg.msg_target == AVS_MOD_MSG &&
110 		    msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
111 			ipc->rx.size = msg.ext.large_config.data_off_size;
112 
113 		memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
114 	}
115 }
116 
/*
 * Handle an unsolicited (notification) message from the DSP.
 *
 * Two-phase: first derive the payload size from the notification type and
 * copy it out of the uplink mailbox, then perform the type-specific action.
 * Invoked from the IRQ thread (see avs_dsp_irq_thread()), so sleeping
 * allocations are permitted.
 */
static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
{
	struct avs_notify_mod_data mod_data;
	union avs_notify_msg msg = AVS_MSG(header);
	size_t data_size = 0;
	void *data = NULL;

	/* Ignore spurious notifications until handshake is established. */
	if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
		dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
		return;
	}

	/* Calculate notification payload size. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		break;

	case AVS_NOTIFY_PHRASE_DETECTED:
		data_size = sizeof(struct avs_notify_voice_data);
		break;

	case AVS_NOTIFY_RESOURCE_EVENT:
		data_size = sizeof(struct avs_notify_res_data);
		break;

	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		break;

	case AVS_NOTIFY_MODULE_EVENT:
		/* To know the total payload size, header needs to be read first. */
		memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
		/*
		 * NOTE(review): data_size originates from the DSP and is not
		 * bounded here - confirm it cannot exceed the mailbox window.
		 */
		data_size = sizeof(mod_data) + mod_data.data_size;
		break;

	default:
		dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
		break;
	}

	if (data_size) {
		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return;

		memcpy_fromio(data, avs_uplink_addr(adev), data_size);
	}

	/* Perform notification-specific operations. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
		adev->ipc->ready = true;
		/* Unblocks the boot path waiting on fw_ready. */
		complete(&adev->fw_ready);
		break;

	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		avs_dsp_exception_caught(adev, &msg);
		break;

	default:
		break;
	}

	kfree(data);
}
183 
/*
 * Dispatch a message received from the DSP.
 *
 * Response may either be solicited - a reply for a request that has
 * been sent beforehand - or unsolicited (notification).
 */
void avs_dsp_process_response(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;

	if (avs_msg_is_reply(header)) {
		/* Response processing is invoked from IRQ thread. */
		spin_lock_irq(&ipc->rx_lock);
		avs_dsp_receive_rx(adev, header);
		/* Must be set under rx_lock, before waking the sender. */
		ipc->rx_completed = true;
		spin_unlock_irq(&ipc->rx_lock);
	} else {
		avs_dsp_process_notification(adev, header);
	}

	/* Wake the sender blocked in avs_ipc_wait_busy_completion(). */
	complete(&ipc->busy_completion);
}
204 
/*
 * Top-half IPC interrupt handler.
 *
 * Acknowledges DONE (DSP accepted host's request) in place and defers
 * BUSY (new message from DSP) to the IRQ thread. Returns IRQ_NONE when
 * the interrupt is not IPC-related or the register reads back all-ones
 * (device likely gone/suspended).
 */
irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	struct avs_ipc *ipc = adev->ipc;
	u32 adspis, hipc_rsp, hipc_ack;
	irqreturn_t ret = IRQ_NONE;

	adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS);
	if (adspis == UINT_MAX || !(adspis & AVS_ADSP_ADSPIS_IPC))
		return ret;

	hipc_ack = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCIE);
	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);

	/* DSP acked host's request */
	if (hipc_ack & SKL_ADSP_HIPCIE_DONE) {
		/*
		 * As an extra precaution, mask done interrupt. Code executed
		 * due to complete() found below does not assume any masking.
		 */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_DONE, 0);

		complete(&ipc->done_completion);

		/* tell DSP it has our attention */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCIE,
				      SKL_ADSP_HIPCIE_DONE,
				      SKL_ADSP_HIPCIE_DONE);
		/* unmask done interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_DONE,
				      AVS_ADSP_HIPCCTL_DONE);
		ret = IRQ_HANDLED;
	}

	/* DSP sent new response to process */
	if (hipc_rsp & SKL_ADSP_HIPCT_BUSY) {
		/* mask busy interrupt; re-enabled by the IRQ thread */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_BUSY, 0);

		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}
252 
/*
 * Bottom-half: process the message the DSP sent, then acknowledge BUSY
 * and unmask the interrupt that the top-half masked. Acknowledging only
 * after processing keeps the DSP from reusing the mailbox prematurely.
 */
irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	union avs_reply_msg msg;
	u32 hipct, hipcte;

	hipct = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	hipcte = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE);

	/* ensure DSP sent new response to process */
	if (!(hipct & SKL_ADSP_HIPCT_BUSY))
		return IRQ_NONE;

	/* Reassemble the 64-bit header from primary and extension registers. */
	msg.primary = hipct;
	msg.ext.val = hipcte;
	avs_dsp_process_response(adev, msg.val);

	/* tell DSP we accepted its message */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT,
			      SKL_ADSP_HIPCT_BUSY, SKL_ADSP_HIPCT_BUSY);
	/* unmask busy interrupt */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
			      AVS_ADSP_HIPCCTL_BUSY, AVS_ADSP_HIPCCTL_BUSY);

	return IRQ_HANDLED;
}
279 
280 static bool avs_ipc_is_busy(struct avs_ipc *ipc)
281 {
282 	struct avs_dev *adev = to_avs_dev(ipc->dev);
283 	u32 hipc_rsp;
284 
285 	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
286 	return hipc_rsp & SKL_ADSP_HIPCT_BUSY;
287 }
288 
/*
 * Wait until the in-flight request has been fully processed.
 *
 * A notification can complete busy_completion before the actual reply
 * arrives; in that case waiting restarts, bounded by a repeat counter.
 *
 * Return: 0 on success, -EPERM when the DSP link went down,
 * -ETIMEDOUT when no reply arrived in time.
 */
static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
	u32 repeats_left = 128; /* to avoid infinite looping */
	int ret;

again:
	ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

	/* DSP could be unresponsive at this point. */
	if (!ipc->ready)
		return -EPERM;

	if (!ret) {
		if (!avs_ipc_is_busy(ipc))
			return -ETIMEDOUT;
		/*
		 * Firmware did its job, either notification or reply
		 * has been received - now wait until it's processed.
		 */
		wait_for_completion_killable(&ipc->busy_completion);
	}

	/* Ongoing notification's bottom-half may cause early wakeup */
	spin_lock(&ipc->rx_lock);
	if (!ipc->rx_completed) {
		if (repeats_left) {
			/* Reply delayed due to notification. */
			repeats_left--;
			reinit_completion(&ipc->busy_completion);
			spin_unlock(&ipc->rx_lock);
			goto again;
		}

		spin_unlock(&ipc->rx_lock);
		return -ETIMEDOUT;
	}

	spin_unlock(&ipc->rx_lock);
	return 0;
}
329 
330 static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
331 {
332 	lockdep_assert_held(&ipc->rx_lock);
333 
334 	ipc->rx.header = 0;
335 	ipc->rx.size = reply ? reply->size : 0;
336 	ipc->rx_completed = false;
337 
338 	reinit_completion(&ipc->done_completion);
339 	reinit_completion(&ipc->busy_completion);
340 }
341 
/*
 * Copy the request payload into the downlink mailbox and ring the
 * doorbell. The payload must land before the header registers are
 * written; the extension (upper 32 bits) goes first as the primary
 * register write - carrying the BUSY bit - is what triggers the DSP.
 */
static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	tx->header |= SKL_ADSP_HIPCI_BUSY;

	if (tx->size)
		memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX);
}
351 
/*
 * Send a request and wait for the DSP's reply.
 *
 * Serialized by msg_mutex - a single exchange at a time. On timeout the
 * DSP is treated as dead and recovery is kicked off. On success the
 * reply header and (optionally) payload are copied into @reply.
 *
 * Return: 0 or a positive firmware status on success, negative errno on
 * transport failure.
 */
static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			       struct avs_ipc_msg *reply, int timeout)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	if (!ipc->ready)
		return -EPERM;

	mutex_lock(&ipc->msg_mutex);

	/* RX state must be armed before the doorbell rings. */
	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, reply);
	avs_dsp_send_tx(adev, request);
	spin_unlock(&ipc->rx_lock);

	ret = avs_ipc_wait_busy_completion(ipc, timeout);
	if (ret) {
		if (ret == -ETIMEDOUT) {
			union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);

			/* Same treatment as on exception, just stack_dump=0. */
			avs_dsp_exception_caught(adev, &msg);
		}
		goto exit;
	}

	ret = ipc->rx.rsp.status;
	if (reply) {
		reply->header = ipc->rx.header;
		if (reply->data && ipc->rx.size)
			memcpy(reply->data, ipc->rx.data, reply->size);
	}

exit:
	mutex_unlock(&ipc->msg_mutex);
	return ret;
}
390 
/*
 * Send an IPC request with a caller-supplied timeout (in milliseconds)
 * and wait for the DSP's reply. See avs_dsp_do_send_msg().
 */
int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
			     struct avs_ipc_msg *reply, int timeout)
{
	return avs_dsp_do_send_msg(adev, request, reply, timeout);
}
396 
/*
 * Send an IPC request using the default timeout configured at
 * avs_ipc_init() time (AVS_IPC_TIMEOUT_MS).
 */
int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
		     struct avs_ipc_msg *reply)
{
	return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms);
}
402 
/*
 * Send a ROM (pre-firmware) request.
 *
 * Unlike regular messages, no reply payload is expected and the ipc->ready
 * check is skipped - firmware is not up yet. Only the hardware DONE
 * acknowledgment (done_completion) is awaited.
 */
static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, NULL);
	avs_dsp_send_tx(adev, request);
	spin_unlock(&ipc->rx_lock);

	/* ROM messages must be sent before main core is unstalled */
	ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
	if (!ret) {
		/* wait_for_completion_timeout() returns 0 only on timeout */
		ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
		ret = ret ? 0 : -ETIMEDOUT;
	}

	mutex_unlock(&ipc->msg_mutex);

	return ret;
}
426 
/*
 * Send a ROM request with a caller-supplied timeout (in milliseconds).
 * See avs_dsp_do_send_rom_msg().
 */
int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
	return avs_dsp_do_send_rom_msg(adev, request, timeout);
}
431 
/* Send a ROM request using the default timeout (AVS_IPC_TIMEOUT_MS). */
int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request)
{
	return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms);
}
436 
437 void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
438 {
439 	u32 value, mask;
440 
441 	/*
442 	 * No particular bit setting order. All of these are required
443 	 * to have a functional SW <-> FW communication.
444 	 */
445 	value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
446 	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);
447 
448 	mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
449 	value = enable ? mask : 0;
450 	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, mask, value);
451 }
452 
453 int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
454 {
455 	ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
456 	if (!ipc->rx.data)
457 		return -ENOMEM;
458 
459 	ipc->dev = dev;
460 	ipc->ready = false;
461 	ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
462 	INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
463 	init_completion(&ipc->done_completion);
464 	init_completion(&ipc->busy_completion);
465 	spin_lock_init(&ipc->rx_lock);
466 	mutex_init(&ipc->msg_mutex);
467 
468 	return 0;
469 }
470 
/*
 * Block IPC activity: mark the link down so new requests fail with
 * -EPERM, then wait for any in-flight recovery work to finish.
 * Clearing 'ready' first prevents the canceled work from being
 * immediately re-armed by a new exception path.
 */
void avs_ipc_block(struct avs_ipc *ipc)
{
	ipc->ready = false;
	cancel_work_sync(&ipc->recovery_work);
}
476