Lines Matching +full:data +full:- +full:ready in the Intel avs audio DSP driver's IPC code (sound/soc/intel/avs/ipc.c in the Linux kernel). Each matched line below is prefixed with its line number in that file.
1 // SPDX-License-Identifier: GPL-2.0-only
3 // Copyright(c) 2021-2022 Intel Corporation
9 #include <linux/io-64-nonatomic-lo-hi.h>
24 struct avs_ipc *ipc = adev->ipc;
28 if (ipc->in_d0ix == enable)
34 if (ret == -AVS_EIPC)
35 atomic_inc(&ipc->d0ix_disable_depth);
37 ipc->in_d0ix = false;
41 ipc->in_d0ix = enable;
47 if (atomic_read(&adev->ipc->d0ix_disable_depth))
50 mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
58 avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
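
The single line at 58 is the body of the delayed-work handler registered at line 568 below; lines 47-50 arm it with mod_delayed_work() so each new request pushes the D0ix entry further out. Its presumed shape, reconstructed here for context (a sketch, not the verbatim source):

static void avs_dsp_d0ix_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

	/* Enter D0ix once the delay elapses with no new IPC traffic. */
	avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
}
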
63 struct avs_ipc *ipc = adev->ipc;
65 if (!atomic_read(&ipc->d0ix_disable_depth)) {
66 cancel_delayed_work_sync(&ipc->d0ix_work);
75 struct avs_ipc *ipc = adev->ipc;
78 if (atomic_inc_return(&ipc->d0ix_disable_depth) == 1) {
79 cancel_delayed_work_sync(&ipc->d0ix_work);
88 struct avs_ipc *ipc = adev->ipc;
90 if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
91 queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
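
Lines 75-91 implement a disable-depth counter around D0ix: the first disable (depth 0 to 1, line 78) cancels any pending d0ix_work and leaves D0ix, while the matching enable re-queues the delayed entry once the depth drops back to zero (lines 90-91). A hypothetical caller, assuming the two helpers are the driver's avs_dsp_disable_d0ix() and avs_dsp_enable_d0ix() and that both return 0 on success; avs_example_keep_d0i0() and avs_example_do_work() are made-up names:

static int avs_example_keep_d0i0(struct avs_dev *adev)
{
	int ret;

	ret = avs_dsp_disable_d0ix(adev);	/* depth 0 -> 1: cancels d0ix_work */
	if (ret)
		return ret;

	ret = avs_example_do_work(adev);	/* placeholder for work requiring D0i0 */

	avs_dsp_enable_d0ix(adev);		/* depth 1 -> 0: re-arms d0ix_work */
	return ret;
}
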
102 mutex_lock(&adev->comp_list_mutex);
104 list_for_each_entry(acomp, &adev->comp_list, node) {
108 card = acomp->base.card;
116 pcm = rtd->pcm;
117 if (!pcm || rtd->dai_link->no_pcm)
123 substream = pcm->streams[dir].substream;
124 if (!substream || !substream->runtime)
134 mutex_unlock(&adev->comp_list_mutex);
137 core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
143 dev_err(adev->dev, "dsp reboot failed: %d\n", ret);
145 pm_runtime_enable(adev->dev);
146 pm_request_autosuspend(adev->dev);
148 atomic_set(&adev->ipc->recovering, 0);
155 avs_dsp_recovery(to_avs_dev(ipc->dev));
160 struct avs_ipc *ipc = adev->ipc;
162 /* Account for the double-exception case. */
163 ipc->ready = false;
165 if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
166 dev_err(adev->dev, "dsp recovery is already in progress\n");
170 dev_crit(adev->dev, "communication severed, rebooting dsp..\n");
173 if (current_work() != &ipc->d0ix_work.work)
174 cancel_delayed_work_sync(&ipc->d0ix_work);
175 ipc->in_d0ix = false;
176 /* Re-enabled on recovery completion. */
177 pm_runtime_disable(adev->dev);
182 schedule_work(&ipc->recovery_work);
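
Two details worth noting in this handler: the guard at line 165 uses atomic_add_unless(&ipc->recovering, 1, 1), which only increments when the counter is not already 1, so a second exception arriving mid-recovery just logs and returns; the counter is reset with atomic_set() at line 148 once the reboot completes. The current_work() check at line 173 skips cancel_delayed_work_sync() when the exception is reported from within d0ix_work itself, avoiding a self-deadlock.
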
187 struct avs_ipc *ipc = adev->ipc;
195 ipc->rx.header = header;
201 ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
204 memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
205 trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
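
The copy at line 204 pulls the reply payload from the uplink mailbox window into ipc->rx.data; the size recomputed at line 201 is clamped to AVS_MAILBOX_SIZE, which matches the buffer allocated at line 560, so the memcpy_fromio() cannot overrun it.
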
214 void *data = NULL;
222 if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
223 dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
251 dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
256 data = kmalloc(data_size, GFP_KERNEL);
257 if (!data)
260 memcpy_fromio(data, avs_uplink_addr(adev), data_size);
261 trace_avs_msg_payload(data, data_size);
264 /* Perform notification-specific operations. */
267 dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
268 adev->ipc->ready = true;
269 complete(&adev->fw_ready);
284 kfree(data);
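
Line 268 marks the link ready and line 269 completes adev->fw_ready once the FW_READY notification arrives. A hypothetical boot-path waiter for that completion (the timeout constant here is made up for illustration):

	if (!wait_for_completion_timeout(&adev->fw_ready,
					 msecs_to_jiffies(EXAMPLE_FW_READY_TIMEOUT_MS))) {
		dev_err(adev->dev, "timed out waiting for FW_READY\n");
		return -ETIMEDOUT;
	}
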
289 struct avs_ipc *ipc = adev->ipc;
292 * Response may either be solicited - a reply for a request that has
293 * been sent beforehand - or unsolicited (notification).
297 spin_lock_irq(&ipc->rx_lock);
299 ipc->rx_completed = true;
300 spin_unlock_irq(&ipc->rx_lock);
305 complete(&ipc->busy_completion);
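
This is the receiver half of the request/reply handshake: a solicited reply is latched into ipc->rx (the receive path at lines 187-205) and rx_completed is flipped, both under rx_lock, while an unsolicited message is routed to the notification handler instead; completing busy_completion then wakes the sender blocked in the wait loop at lines 324-355 below.
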
310 struct avs_dev *adev = to_avs_dev(ipc->dev);
311 const struct avs_spec *const spec = adev->spec;
314 hipc_rsp = snd_hdac_adsp_readl(adev, spec->hipc->rsp_offset);
315 return hipc_rsp & spec->hipc->rsp_busy_mask;
324 ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));
327 if (!ipc->ready)
328 return -EPERM;
332 return -ETIMEDOUT;
335 * has been received - now wait until it's processed.
337 wait_for_completion_killable(&ipc->busy_completion);
340 /* Ongoing notification's bottom-half may cause early wakeup */
341 spin_lock(&ipc->rx_lock);
342 if (!ipc->rx_completed) {
345 repeats_left--;
346 reinit_completion(&ipc->busy_completion);
347 spin_unlock(&ipc->rx_lock);
351 spin_unlock(&ipc->rx_lock);
352 return -ETIMEDOUT;
355 spin_unlock(&ipc->rx_lock);
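
The bounded retry here exists because a notification's bottom half can complete busy_completion before the actual reply arrives (the comment at line 340): in that case rx_completed is still false, so the completion is re-armed with reinit_completion() and the wait is repeated a limited number of times (repeats_left) before giving up with -ETIMEDOUT.
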
361 lockdep_assert_held(&ipc->rx_lock);
363 ipc->rx.header = 0;
364 ipc->rx.size = reply ? reply->size : 0;
365 ipc->rx_completed = false;
367 reinit_completion(&ipc->done_completion);
368 reinit_completion(&ipc->busy_completion);
373 const struct avs_spec *const spec = adev->spec;
377 tx->header |= spec->hipc->req_busy_mask;
385 if (tx->size)
386 memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
387 snd_hdac_adsp_writel(adev, spec->hipc->req_ext_offset, tx->header >> 32);
388 snd_hdac_adsp_writel(adev, spec->hipc->req_offset, tx->header & UINT_MAX);
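
Note the transmit ordering: the payload is copied into the downlink mailbox first (line 386), then the 64-bit header is split across the extension and primary request registers (lines 387-388). The busy flag OR-ed in at line 377 sits in the low 32 bits of the header, so the final write to the primary register at line 388 is what actually raises the request to the DSP.
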
394 struct avs_ipc *ipc = adev->ipc;
397 if (!ipc->ready)
398 return -EPERM;
400 mutex_lock(&ipc->msg_mutex);
402 spin_lock(&ipc->rx_lock);
405 spin_unlock(&ipc->rx_lock);
409 if (ret == -ETIMEDOUT) {
418 ret = ipc->rx.rsp.status;
421 * -EPERM error code is expected and thus it's not an actual error.
425 if (ret == -EPERM || ret == AVS_IPC_NOT_SUPPORTED)
426 dev_dbg(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
427 name, request->glb.primary, request->glb.ext.val, ret);
429 dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
430 name, request->glb.primary, request->glb.ext.val, ret);
433 reply->header = ipc->rx.header;
434 reply->size = ipc->rx.size;
435 if (reply->data && ipc->rx.size)
436 memcpy(reply->data, ipc->rx.data, reply->size);
440 mutex_unlock(&ipc->msg_mutex);
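
This is the request/reply workhorse: the caller passes an optional reply descriptor whose .size seeds the expected payload size (line 364) and whose .data receives a copy of ipc->rx on success (lines 433-436). A hypothetical caller, assuming the wrapper at line 481 is avs_dsp_send_msg(adev, request, reply, name); struct avs_example_cfg, avs_example_get_cfg() and the omitted message contents are made up:

static int avs_example_get_cfg(struct avs_dev *adev, struct avs_example_cfg *cfg)
{
	struct avs_ipc_msg request = {{0}};
	struct avs_ipc_msg reply = {{0}};
	int ret;

	/* request.glb.* would be filled in per the firmware ABI here. */
	reply.data = cfg;
	reply.size = sizeof(*cfg);

	ret = avs_dsp_send_msg(adev, &request, &reply, "get example cfg");
	if (ret)
		return ret;

	/* reply.size now mirrors ipc->rx.size after the exchange (line 434). */
	return 0;
}
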
450 trace_avs_d0ix("wake", wake_d0i0, request->header);
461 trace_avs_d0ix("schedule", schedule_d0ix, request->header);
481 return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, name);
494 return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
501 struct avs_ipc *ipc = adev->ipc;
504 mutex_lock(&ipc->msg_mutex);
506 spin_lock(&ipc->rx_lock);
513 spin_unlock(&ipc->rx_lock);
518 ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
519 ret = ret ? 0 : -ETIMEDOUT;
522 dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
523 name, request->glb.primary, request->glb.ext.val, ret);
525 mutex_unlock(&ipc->msg_mutex);
538 return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms, name);
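
Lines 501-525 cover ROM/boot-time requests: there is no reply descriptor, and instead of the busy/reply handshake the sender waits only on done_completion with a timeout (lines 518-519), presumably just for the DONE acknowledgment rather than a payload reply; the wrapper at line 538 applies the default timeout.
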
543 const struct avs_spec *const spec = adev->spec;
548 * to have a functional SW <-> FW communication.
555 snd_hdac_adsp_updatel(adev, spec->hipc->ctl_offset, mask, value);
560 ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
561 if (!ipc->rx.data)
562 return -ENOMEM;
564 ipc->dev = dev;
565 ipc->ready = false;
566 ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
567 INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
568 INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
569 init_completion(&ipc->done_completion);
570 init_completion(&ipc->busy_completion);
571 spin_lock_init(&ipc->rx_lock);
572 mutex_init(&ipc->msg_mutex);
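
The initializer only prepares the context: the rx bounce buffer, the recovery and D0ix work items, both completions and the locks; enabling interrupts and booting firmware happen elsewhere. A sketch of how a probe path might wire it up, assuming the function at lines 560-572 is the driver's avs_ipc_init(struct avs_ipc *ipc, struct device *dev); avs_example_probe_ipc() is a made-up name:

static int avs_example_probe_ipc(struct avs_dev *adev, struct device *dev)
{
	struct avs_ipc *ipc;
	int ret;

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;

	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	adev->ipc = ipc;
	return 0;
}
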
579 ipc->ready = false;
580 cancel_work_sync(&ipc->recovery_work);
581 cancel_delayed_work_sync(&ipc->d0ix_work);
582 ipc->in_d0ix = false;
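
This teardown is the "link down" switch mirroring the init above: clearing ready makes subsequent senders fail fast with -EPERM (lines 397-398), and both the recovery and D0ix works are cancelled before the in_d0ix flag is reset.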