// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"
#include "registers.h"
#include "trace.h"

#define AVS_IPC_TIMEOUT_MS	300
#define AVS_D0IX_DELAY_MS	300

static int avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	/* Is transition required? */
	if (ipc->in_d0ix == enable)
		return 0;

	ret = avs_dsp_op(adev, set_d0ix, enable);
	if (ret) {
		/* Prevent further d0ix attempts on conscious IPC failure. */
		if (ret == -AVS_EIPC)
			atomic_inc(&ipc->d0ix_disable_depth);

		ipc->in_d0ix = false;
		return ret;
	}

	ipc->in_d0ix = enable;
	return 0;
}

static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	if (atomic_read(&adev->ipc->d0ix_disable_depth))
		return;

	mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
			 msecs_to_jiffies(AVS_D0IX_DELAY_MS));
}

static void avs_dsp_d0ix_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

	avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
}

static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	struct avs_ipc *ipc = adev->ipc;

	if (!atomic_read(&ipc->d0ix_disable_depth)) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}
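/*
 * D0ix disabling is depth-counted so callers may nest their requests; power
 * gating stays blocked until every avs_dsp_disable_d0ix() has been balanced
 * by a matching avs_dsp_enable_d0ix(). Illustrative caller sketch, with
 * do_ipc_heavy_work() being a hypothetical placeholder:
 *
 *	avs_dsp_disable_d0ix(adev);
 *	ret = do_ipc_heavy_work(adev);
 *	avs_dsp_enable_d0ix(adev);
 */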
int avs_dsp_disable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Prevent PG only on the first disable. */
	if (atomic_inc_return(&ipc->d0ix_disable_depth) == 1) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}

int avs_dsp_enable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
		queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
				   msecs_to_jiffies(AVS_D0IX_DELAY_MS));
	return 0;
}

static void avs_dsp_recovery(struct avs_dev *adev)
{
	struct avs_soc_component *acomp;
	unsigned int core_mask;
	int ret;

	mutex_lock(&adev->comp_list_mutex);
	/* disconnect all running streams */
	list_for_each_entry(acomp, &adev->comp_list, node) {
		struct snd_soc_pcm_runtime *rtd;
		struct snd_soc_card *card;

		card = acomp->base.card;
		if (!card)
			continue;

		for_each_card_rtds(card, rtd) {
			struct snd_pcm *pcm;
			int dir;

			pcm = rtd->pcm;
			if (!pcm || rtd->dai_link->no_pcm)
				continue;

			for_each_pcm_streams(dir) {
				struct snd_pcm_substream *substream;

				substream = pcm->streams[dir].substream;
				if (!substream || !substream->runtime)
					continue;

				/* No need for _irq() as we are in nonatomic context. */
				snd_pcm_stream_lock(substream);
				snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
				snd_pcm_stream_unlock(substream);
			}
		}
	}
	mutex_unlock(&adev->comp_list_mutex);

	/* forcibly shutdown all cores */
	core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
	avs_dsp_core_disable(adev, core_mask);

	/* attempt dsp reboot */
	ret = avs_dsp_boot_firmware(adev, true);
	if (ret < 0)
		dev_err(adev->dev, "dsp reboot failed: %d\n", ret);

	pm_runtime_mark_last_busy(adev->dev);
	pm_runtime_enable(adev->dev);
	pm_request_autosuspend(adev->dev);

	atomic_set(&adev->ipc->recovering, 0);
}

static void avs_dsp_recovery_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);

	avs_dsp_recovery(to_avs_dev(ipc->dev));
}

static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Account for the double-exception case. */
	ipc->ready = false;

	if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
		dev_err(adev->dev, "dsp recovery is already in progress\n");
		return;
	}

	dev_crit(adev->dev, "communication severed, rebooting dsp..\n");

	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
	/* Re-enabled on recovery completion. */
	pm_runtime_disable(adev->dev);

	/* Process received notification. */
	avs_dsp_op(adev, coredump, msg);

	schedule_work(&ipc->recovery_work);
}

static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;
	union avs_reply_msg msg = AVS_MSG(header);
	u64 reg;

	reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
	trace_avs_ipc_reply_msg(header, reg);

	ipc->rx.header = header;
	/* Abort copying payload if request processing was unsuccessful. */
	if (!msg.status) {
		/* update size in case of LARGE_CONFIG_GET */
		if (msg.msg_target == AVS_MOD_MSG &&
		    msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
			ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
					     msg.ext.large_config.data_off_size);

		memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
		trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
	}
}

static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
{
	struct avs_notify_mod_data mod_data;
	union avs_notify_msg msg = AVS_MSG(header);
	size_t data_size = 0;
	void *data = NULL;
	u64 reg;

	reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
	trace_avs_ipc_notify_msg(header, reg);

	/* Ignore spurious notifications until handshake is established. */
	if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
		dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
		return;
	}

	/* Calculate notification payload size. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		break;

	case AVS_NOTIFY_PHRASE_DETECTED:
		data_size = sizeof(struct avs_notify_voice_data);
		break;

	case AVS_NOTIFY_RESOURCE_EVENT:
		data_size = sizeof(struct avs_notify_res_data);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		break;

	case AVS_NOTIFY_MODULE_EVENT:
		/* To know the total payload size, header needs to be read first. */
		memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
		data_size = sizeof(mod_data) + mod_data.data_size;
		break;

	default:
		dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
		break;
	}

	if (data_size) {
		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return;

		memcpy_fromio(data, avs_uplink_addr(adev), data_size);
		trace_avs_msg_payload(data, data_size);
	}

	/* Perform notification-specific operations. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
		adev->ipc->ready = true;
		complete(&adev->fw_ready);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
		avs_log_buffer_status_locked(adev, &msg);
		break;

	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		avs_dsp_exception_caught(adev, &msg);
		break;

	default:
		break;
	}

	kfree(data);
}

void avs_dsp_process_response(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;

	/*
	 * Response may either be solicited - a reply for a request that has
	 * been sent beforehand - or unsolicited (notification).
	 */
	if (avs_msg_is_reply(header)) {
		/* Response processing is invoked from IRQ thread. */
		spin_lock_irq(&ipc->rx_lock);
		avs_dsp_receive_rx(adev, header);
		ipc->rx_completed = true;
		spin_unlock_irq(&ipc->rx_lock);
	} else {
		avs_dsp_process_notification(adev, header);
	}

	complete(&ipc->busy_completion);
}
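/*
 * Interrupt handling is split in two: the hard handler below acknowledges
 * DONE (DSP accepted host's request) on its own, while BUSY (DSP sent a new
 * response) is deferred to the IRQ thread, which reads the message registers
 * and unmasks the interrupt once processing is finished.
 */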
irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	struct avs_ipc *ipc = adev->ipc;
	u32 adspis, hipc_rsp, hipc_ack;
	irqreturn_t ret = IRQ_NONE;

	adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS);
	if (adspis == UINT_MAX || !(adspis & AVS_ADSP_ADSPIS_IPC))
		return ret;

	hipc_ack = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCIE);
	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);

	/* DSP acked host's request */
	if (hipc_ack & SKL_ADSP_HIPCIE_DONE) {
		/*
		 * As an extra precaution, mask done interrupt. Code executed
		 * due to complete() found below does not assume any masking.
		 */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_DONE, 0);

		complete(&ipc->done_completion);

		/* tell DSP it has our attention */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCIE,
				      SKL_ADSP_HIPCIE_DONE,
				      SKL_ADSP_HIPCIE_DONE);
		/* unmask done interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_DONE,
				      AVS_ADSP_HIPCCTL_DONE);
		ret = IRQ_HANDLED;
	}

	/* DSP sent new response to process */
	if (hipc_rsp & SKL_ADSP_HIPCT_BUSY) {
		/* mask busy interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_BUSY, 0);

		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	union avs_reply_msg msg;
	u32 hipct, hipcte;

	hipct = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	hipcte = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE);

	/* ensure DSP sent new response to process */
	if (!(hipct & SKL_ADSP_HIPCT_BUSY))
		return IRQ_NONE;

	msg.primary = hipct;
	msg.ext.val = hipcte;
	avs_dsp_process_response(adev, msg.val);

	/* tell DSP we accepted its message */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT,
			      SKL_ADSP_HIPCT_BUSY, SKL_ADSP_HIPCT_BUSY);
	/* unmask busy interrupt */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
			      AVS_ADSP_HIPCCTL_BUSY, AVS_ADSP_HIPCCTL_BUSY);

	return IRQ_HANDLED;
}

static bool avs_ipc_is_busy(struct avs_ipc *ipc)
{
	struct avs_dev *adev = to_avs_dev(ipc->dev);
	u32 hipc_rsp;

	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	return hipc_rsp & SKL_ADSP_HIPCT_BUSY;
}
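/*
 * Wait for the DSP to complete an outstanding request. Completion may be
 * signaled early by an interleaved notification, so the wait is retried a
 * bounded number of times until the actual reply lands in ipc->rx.
 */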
static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
	u32 repeats_left = 128; /* to avoid infinite looping */
	int ret;

again:
	ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

	/* DSP could be unresponsive at this point. */
	if (!ipc->ready)
		return -EPERM;

	if (!ret) {
		if (!avs_ipc_is_busy(ipc))
			return -ETIMEDOUT;
		/*
		 * Firmware did its job, either notification or reply
		 * has been received - now wait until it's processed.
		 */
		wait_for_completion_killable(&ipc->busy_completion);
	}

	/* Ongoing notification's bottom-half may cause early wakeup */
	spin_lock(&ipc->rx_lock);
	if (!ipc->rx_completed) {
		if (repeats_left) {
			/* Reply delayed due to notification. */
			repeats_left--;
			reinit_completion(&ipc->busy_completion);
			spin_unlock(&ipc->rx_lock);
			goto again;
		}

		spin_unlock(&ipc->rx_lock);
		return -ETIMEDOUT;
	}

	spin_unlock(&ipc->rx_lock);
	return 0;
}

static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
{
	lockdep_assert_held(&ipc->rx_lock);

	ipc->rx.header = 0;
	ipc->rx.size = reply ? reply->size : 0;
	ipc->rx_completed = false;

	reinit_completion(&ipc->done_completion);
	reinit_completion(&ipc->busy_completion);
}

static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
	u64 reg = ULONG_MAX;

	tx->header |= SKL_ADSP_HIPCI_BUSY;
	if (read_fwregs)
		reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));

	trace_avs_request(tx, reg);

	if (tx->size)
		memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX);
}
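/*
 * Send a request and synchronously wait for its reply: requests are
 * serialized with msg_mutex, the rx descriptor is primed under rx_lock
 * before the doorbell is rung, and a timeout is escalated to the exception
 * path so that full DSP recovery kicks in.
 */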
static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			       struct avs_ipc_msg *reply, int timeout, const char *name)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	if (!ipc->ready)
		return -EPERM;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, reply);
	avs_dsp_send_tx(adev, request, true);
	spin_unlock(&ipc->rx_lock);

	ret = avs_ipc_wait_busy_completion(ipc, timeout);
	if (ret) {
		if (ret == -ETIMEDOUT) {
			union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);

			/* Same treatment as on exception, just stack_dump=0. */
			avs_dsp_exception_caught(adev, &msg);
		}
		goto exit;
	}

	ret = ipc->rx.rsp.status;
	/*
	 * If IPC channel is blocked e.g.: due to ongoing recovery,
	 * -EPERM error code is expected and thus it's not an actual error.
	 *
	 * Unsupported IPCs are of no harm either.
	 */
	if (ret == -EPERM || ret == AVS_IPC_NOT_SUPPORTED)
		dev_dbg(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
			name, request->glb.primary, request->glb.ext.val, ret);
	else if (ret)
		dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
			name, request->glb.primary, request->glb.ext.val, ret);

	if (reply) {
		reply->header = ipc->rx.header;
		reply->size = ipc->rx.size;
		if (reply->data && ipc->rx.size)
			memcpy(reply->data, ipc->rx.data, reply->size);
	}

exit:
	mutex_unlock(&ipc->msg_mutex);
	return ret;
}

static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request,
				     struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
				     bool schedule_d0ix, const char *name)
{
	int ret;

	trace_avs_d0ix("wake", wake_d0i0, request->header);
	if (wake_d0i0) {
		ret = avs_dsp_wake_d0i0(adev, request);
		if (ret)
			return ret;
	}

	ret = avs_dsp_do_send_msg(adev, request, reply, timeout, name);
	if (ret)
		return ret;

	trace_avs_d0ix("schedule", schedule_d0ix, request->header);
	if (schedule_d0ix)
		avs_dsp_schedule_d0ix(adev, request);

	return 0;
}

int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
			     struct avs_ipc_msg *reply, int timeout, const char *name)
{
	bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true);
	bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false);

	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix,
					 name);
}

int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
		     struct avs_ipc_msg *reply, const char *name)
{
	return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms, name);
}

int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
				struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
				const char *name)
{
	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false, name);
}

int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			struct avs_ipc_msg *reply, bool wake_d0i0, const char *name)
{
	return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
					   wake_d0i0, name);
}
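/*
 * ROM messages are sent while the main core is still stalled and memory
 * windows may not be configured yet, so only the DONE acknowledgement is
 * awaited and no reply payload is read back from SRAM.
 */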
static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout,
				   const char *name)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, NULL);
	/*
	 * with hw still stalled, memory windows may not be
	 * configured properly so avoid accessing SRAM
	 */
	avs_dsp_send_tx(adev, request, false);
	spin_unlock(&ipc->rx_lock);

	/* ROM messages must be sent before main core is unstalled */
	ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
	if (!ret) {
		ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
		ret = ret ? 0 : -ETIMEDOUT;
	}
	if (ret)
		dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
			name, request->glb.primary, request->glb.ext.val, ret);

	mutex_unlock(&ipc->msg_mutex);

	return ret;
}

int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout,
				 const char *name)
{
	return avs_dsp_do_send_rom_msg(adev, request, timeout, name);
}

int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, const char *name)
{
	return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms, name);
}

void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
{
	u32 value, mask;

	/*
	 * No particular bit setting order. All of these are required
	 * to have a functional SW <-> FW communication.
	 */
	value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);

	mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
	value = enable ? mask : 0;
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, mask, value);
}

int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
{
	ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!ipc->rx.data)
		return -ENOMEM;

	ipc->dev = dev;
	ipc->ready = false;
	ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
	INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
	INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
	init_completion(&ipc->done_completion);
	init_completion(&ipc->busy_completion);
	spin_lock_init(&ipc->rx_lock);
	mutex_init(&ipc->msg_mutex);

	return 0;
}

void avs_ipc_block(struct avs_ipc *ipc)
{
	ipc->ready = false;
	cancel_work_sync(&ipc->recovery_work);
	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
}