// SPDX-License-Identifier: GPL-2.0-only
/*
 * cmt_speech.c - HSI CMT speech driver
 *
 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
 */

#include <linux/hsi/cs-protocol.h>

/* Number of pre-allocated command buffers */

/* cs_notify() */
	dev_err(&cs_char_data.cl->device,
	entry->msg = message;
	list_add_tail(&entry->list, head);

/* cs_pop_entry() */
	entry = list_entry(head->next, struct char_queue, list);
	data = entry->msg;
	list_del(&entry->list);

/* cs_notify_data() */
	dev_dbg(&cs_char_data.cl->device, "data notification "
	cs_char_data.dataind_pending--;
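
/*
 * Illustrative sketch (not from the original driver): cs_notify() and
 * cs_pop_entry() above implement a plain list-backed FIFO of one-word
 * events.  The demo_* names below are invented for illustration;
 * locking and reader wakeup are left to the caller, as in the driver.
 */
struct demo_entry {
	struct list_head list;
	u32 msg;
};

static int demo_queue_push(struct list_head *head, u32 message)
{
	struct demo_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);

	if (!entry)
		return -ENOMEM;
	entry->msg = message;
	list_add_tail(&entry->list, head);	/* producer end */
	return 0;
}

/* Caller must ensure the list is non-empty, as cs_pop_entry() does. */
static u32 demo_queue_pop(struct list_head *head)
{
	struct demo_entry *entry = list_entry(head->next,
					      struct demo_entry, list);
	u32 data = entry->msg;

	list_del(&entry->list);
	kfree(entry);
	return data;
}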

/* cs_set_cmd() */
	u32 *data = sg_virt(msg->sgt.sgl);

/* cs_get_cmd() */
	u32 *data = sg_virt(msg->sgt.sgl);

/* cs_release_cmd() */
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);

/* cs_cmd_destructor() */
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	spin_unlock(&hi->lock);

static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface *ssi)
{
	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));

/* cs_alloc_cmds() */
	INIT_LIST_HEAD(&hi->cmdqueue);

		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);

	return -ENOMEM;
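
/*
 * Illustrative sketch of how a pool like hi->cmdqueue is typically
 * filled; CS_MAX_CMDS is assumed from the "pre-allocated command
 * buffers" comment above, and the one-word (u32) command buffer
 * matches the fragments.  Error unwinding is simplified.
 */
static int demo_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}
	return 0;
out:
	cs_free_cmds(hi);	/* free whatever was queued so far */
	return -ENOMEM;
}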

/* cs_hsi_data_destructor() */
	struct cs_hsi_iface *hi = msg->context;
	const char *dir = (msg->ttype == HSI_MSG_READ) ? "RX" : "TX";

	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&cs_char_data.cl->device,
	if (msg->ttype == HSI_MSG_READ)
		hi->data_state &=
	else
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;

	msg->status = HSI_STATUS_COMPLETED;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);

	spin_unlock(&hi->lock);

/* cs_hsi_alloc_data() */
		res = -ENOMEM;
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

		res = -ENOMEM;
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

/* cs_hsi_free_data_msg() */
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
		msg->status != HSI_STATUS_ERROR);

/* cs_hsi_free_data() */
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);

/* __cs_hsi_error_pre() */
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);

/* __cs_hsi_error_post() */
	spin_unlock(&hi->lock);

/* cs_hsi_control_read_error() */
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	__cs_hsi_error_read_bits(&hi->control_state);

/* cs_hsi_control_write_error() */
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	__cs_hsi_error_write_bits(&hi->control_state);

/* cs_hsi_data_read_error() */
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);

/* cs_hsi_data_write_error() */
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);

/* cs_hsi_read_on_control_complete() */
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control RX error detected\n");
		spin_unlock(&hi->lock);

	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);

	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
			&hi->mmap_cfg->tstamp_rx_ctrl;

		tstamp->tv_sec = (__u32) tspec.tv_sec;
		tstamp->tv_nsec = (__u32) tspec.tv_nsec;

	spin_unlock(&hi->lock);

/* cs_hsi_peek_on_control_complete() */
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);

/* cs_hsi_read_on_control() */
	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");

	spin_unlock(&hi->lock);

	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
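
/*
 * Illustrative sketch (invented demo_* names) of the two-phase control
 * RX seen above: phase 1 submits a zero-length "peek" so the port
 * reports incoming traffic, and its completion rearms the same hsi_msg
 * as a real one-word read.  The state flags and error paths from the
 * fragments are elided.
 */
static void demo_read_complete(struct hsi_msg *msg);

static void demo_peek_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	msg->sgt.nents = 1;			/* now a real one-word read */
	msg->complete = demo_read_complete;
	hsi_async_read(hi->cl, msg);
}

static void demo_start_peek(struct cs_hsi_iface *hi, struct hsi_msg *msg)
{
	msg->sgt.nents = 0;			/* peek only, no data */
	msg->complete = demo_peek_complete;
	hsi_async_read(hi->cl, msg);
}

static void demo_read_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 cmd = cs_get_cmd(msg);		/* first word of the sg buffer */

	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
	demo_start_peek(hi, msg);		/* keep a read always pending */
}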

/* cs_hsi_write_on_control_complete() */
	struct cs_hsi_iface *hi = msg->context;
	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device,
			msg->status);

/* cs_hsi_write_on_control() */
	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
		spin_unlock(&hi->lock);
		return -EBUSY;
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;

	spin_unlock(&hi->lock);

	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
	ret = hsi_async_write(hi->cl, msg);
		dev_err(&hi->cl->device,

	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");

/* cs_hsi_read_on_data_complete() */
	struct cs_hsi_iface *hi = msg->context;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);

/* cs_hsi_peek_on_data_complete() */
	struct cs_hsi_iface *hi = msg->context;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
			hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);

/* cs_hsi_read_on_data() */
	spin_lock(&hi->lock);
	if (hi->data_state &
		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
			hi->data_state);
		spin_unlock(&hi->lock);
	hi->data_state |= SSI_CHANNEL_STATE_POLL;
	spin_unlock(&hi->lock);

	rxmsg = hi->data_rx_msg;
	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
	rxmsg->sgt.nents = 0;
	rxmsg->complete = cs_hsi_peek_on_data_complete;

	ret = hsi_async_read(hi->cl, rxmsg);

/* cs_hsi_write_on_data_complete() */
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		if (unlikely(waitqueue_active(&hi->datawait)))
			wake_up_interruptible(&hi->datawait);
		spin_unlock(&hi->lock);

/* cs_hsi_write_on_data() */
	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CONFIGURED) {
		dev_err(&hi->cl->device, "Not configured, aborting\n");
		ret = -EINVAL;
	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "HSI error, aborting\n");
		ret = -EIO;
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device, "Write pending on data channel.\n");
		ret = -EBUSY;
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	hi->tx_slot = slot;
	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
	txmsg = hi->data_tx_msg;
	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
	txmsg->complete = cs_hsi_write_on_data_complete;
	ret = hsi_async_write(hi->cl, txmsg);

	spin_unlock(&hi->lock);
	if (ret == -EIO)
		cs_hsi_data_write_error(hi, hi->data_tx_msg);
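
/*
 * Illustrative sketch of the flag discipline in cs_hsi_write_on_data()
 * above: the WRITING bit is claimed under the spinlock before the
 * async call and rolled back if submission fails.  The demo_* name is
 * invented; the error accounting of the original is simplified.
 */
static int demo_claimed_write(struct cs_hsi_iface *hi, struct hsi_msg *txmsg)
{
	int ret;

	spin_lock(&hi->lock);
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		spin_unlock(&hi->lock);
		return -EBUSY;			/* only one write in flight */
	}
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	ret = hsi_async_write(hi->cl, txmsg);
	if (ret) {
		spin_lock(&hi->lock);		/* roll the claim back */
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		spin_unlock(&hi->lock);
	}
	return ret;
}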

/* cs_hsi_get_state() */
	return hi->iface_state;

/* cs_hsi_command() */
		ret = -EINVAL;

		ret = -EINVAL;

/* cs_hsi_set_wakeline() */
	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	spin_unlock_bh(&hi->lock);

		ssip_slave_start_tx(hi->master);
		ssip_slave_stop_tx(hi->master);

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);

/* set_buffer_sizes() */
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * For overrun detection, let the rx pointer run in
		 * range 0..'boundary-1'.  Boundary is a multiple of
		 * rx_bufs, limited in size by RX_PTR_MAX_SHIFT to
		 * allow for fast ptr-diff calculation.
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
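
/*
 * Worked example (illustrative; RX_PTR_BOUNDARY_SHIFT == 8 is an
 * assumption, not taken from the excerpt): with rx_bufs == 2 the
 * rolling pointer wraps at 2 << 8 == 512 rather than at 2, so a reader
 * can tell a stalled pointer from one that advanced a whole multiple
 * of rx_bufs.  The buffer actually holding slot N is always
 * N % rx_bufs.
 */
static unsigned int demo_rx_buf_index(unsigned int rx_slot,
				      unsigned int rx_bufs)
{
	return rx_slot % rx_bufs;		/* e.g. slot 511 -> buffer 1 */
}

static unsigned int demo_rx_advance(unsigned int rx_slot,
				    unsigned int rx_ptr_boundary)
{
	return (rx_slot + 1) % rx_ptr_boundary;	/* wrap at the boundary */
}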

/* check_buf_params() */
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
			buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device, "No space for the requested buffer "
		r = -ENOBUFS;

/* cs_hsi_data_sync() */
	spin_lock_bh(&hi->lock);

	if (!cs_state_xfer_active(hi->data_state)) {
		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");

		if (!cs_state_xfer_active(hi->data_state))
			break;
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			break;
		}
		/* prepare_to_wait must be called with hi->lock held */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
			dev_dbg(&hi->cl->device,
			r = -EIO;

	spin_unlock_bh(&hi->lock);
	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);
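
/*
 * Illustrative sketch of the sleep pattern in cs_hsi_data_sync():
 * prepare_to_wait() is entered with hi->lock held, the lock is dropped
 * only around schedule(), and the exit condition is retested under the
 * lock on every iteration.  The timeout/error handling of the original
 * is simplified to a plain schedule().
 */
static int demo_wait_data_idle(struct cs_hsi_iface *hi)
{
	DEFINE_WAIT(wait);
	int r = 0;

	spin_lock_bh(&hi->lock);
	for (;;) {
		if (!cs_state_xfer_active(hi->data_state))
			break;			/* idle: done waiting */
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			break;
		}
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		schedule();
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
	}
	spin_unlock_bh(&hi->lock);
	return r;
}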

/* cs_hsi_data_enable() */
	BUG_ON(hi->buf_size == 0);

	set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);

	hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
	dev_dbg(&hi->cl->device,
		hi->slot_size, hi->buf_size, L1_CACHE_BYTES);

	data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	dev_dbg(&hi->cl->device,
		data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);

	for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
		hi->rx_offsets[i] = data_start + i * hi->slot_size;
		hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
		dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
			i, hi->rx_offsets[i]);
	}
	for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
		hi->tx_offsets[i] = data_start +
			(i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
		hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
			i, hi->tx_offsets[i]);
	}

	hi->iface_state = CS_STATE_CONFIGURED;
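
/*
 * Worked example of the layout built above (illustrative numbers, not
 * from the excerpt): assuming buf_size == 320, L1_CACHE_BYTES == 64,
 * rx_bufs == tx_bufs == 2 and a config block that fits one cache line,
 * slot_size = L1_CACHE_ALIGN(320) = 320 and data_start = 64, giving:
 *
 *	   0 ..   63	cs_mmap_config_block (control area)
 *	  64 ..  383	DL (RX) buffer 0
 *	 384 ..  703	DL (RX) buffer 1
 *	 704 .. 1023	UL (TX) buffer 0
 *	1024 .. 1343	UL (TX) buffer 1
 */
static unsigned int demo_rx_offset(unsigned int data_start,
				   unsigned int slot_size, unsigned int i)
{
	return data_start + i * slot_size;
}

static unsigned int demo_tx_offset(unsigned int data_start,
				   unsigned int slot_size,
				   unsigned int rx_bufs, unsigned int i)
{
	return data_start + (rx_bufs + i) * slot_size;
}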

/* cs_hsi_data_disable() */
	dev_dbg(&hi->cl->device,
	hi->iface_state = CS_STATE_OPENED;

/* cs_hsi_buf_config() */
	unsigned int old_state = hi->iface_state;

	spin_lock_bh(&hi->lock);
	hi->iface_state = CS_STATE_OPENED;
	spin_unlock_bh(&hi->lock);

	/*
	 * make sure that no non-zero data reads are ongoing before
	 * changing the buffer configuration
	 */
	WARN_ON(cs_state_xfer_active(hi->data_state));

	spin_lock_bh(&hi->lock);

	hi->buf_size = buf_cfg->buf_size;
	hi->mmap_cfg->buf_size = hi->buf_size;
	hi->flags = buf_cfg->flags;

	hi->rx_slot = 0;
	hi->tx_slot = 0;
	hi->slot_size = 0;

	if (hi->buf_size)

	spin_unlock_bh(&hi->lock);

	if (old_state != hi->iface_state) {
		if (hi->iface_state == CS_STATE_CONFIGURED) {
			cpu_latency_qos_add_request(&hi->pm_qos_req,

			cpu_latency_qos_remove_request(&hi->pm_qos_req);

	spin_unlock_bh(&hi->lock);

/* cs_hsi_start() */
	dev_dbg(&cl->device, "cs_hsi_start\n");

		err = -ENOMEM;

	spin_lock_init(&hsi_if->lock);
	hsi_if->cl = cl;
	hsi_if->iface_state = CS_STATE_CLOSED;
	hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
	hsi_if->mmap_base = mmap_base;
	hsi_if->mmap_size = mmap_size;
	memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
	init_waitqueue_head(&hsi_if->datawait);

		dev_err(&cl->device, "Unable to alloc HSI messages\n");

		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");

		dev_err(&cl->device,

	hsi_if->master = ssip_slave_get_master(cl);
	if (IS_ERR(hsi_if->master)) {
		err = PTR_ERR(hsi_if->master);
		dev_err(&cl->device, "Could not get HSI master client\n");
	if (!ssip_slave_running(hsi_if->master)) {
		err = -ENODEV;
		dev_err(&cl->device,

	hsi_if->iface_state = CS_STATE_OPENED;

	dev_dbg(&cl->device, "cs_hsi_start...done\n");

	dev_dbg(&cl->device, "cs_hsi_start...done/error\n");

/* cs_hsi_stop() */
	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");

	ssip_slave_put_master(hi->master);

	hi->iface_state = CS_STATE_CLOSED;
	hsi_release_port(hi->cl);

	/*
	 * hsi_release_port() should flush out all the pending
	 * messages, so both channels should be idle by now
	 */
	WARN_ON(!cs_state_idle(hi->control_state));
	WARN_ON(!cs_state_idle(hi->data_state));

	if (cpu_latency_qos_request_active(&hi->pm_qos_req))
		cpu_latency_qos_remove_request(&hi->pm_qos_req);

	spin_lock_bh(&hi->lock);
	spin_unlock_bh(&hi->lock);

/* cs_char_vma_fault() */
	struct cs_char *csdata = vmf->vma->vm_private_data;

	page = virt_to_page((void *)csdata->mmap_base);
	vmf->page = page;
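
/*
 * Illustrative sketch of the single-page fault handler above: the
 * whole shared area is one kernel page, so every fault resolves to the
 * same page.  The standard pattern also takes a reference with
 * get_page() before handing the page to the VM; demo_vma_fault is an
 * invented name.
 */
static vm_fault_t demo_vma_fault(struct vm_fault *vmf)
{
	struct cs_char *csdata = vmf->vma->vm_private_data;
	struct page *page;

	page = virt_to_page((void *)csdata->mmap_base);
	get_page(page);			/* VM drops this reference later */
	vmf->page = page;
	return 0;
}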

/* cs_char_fasync() */
	struct cs_char *csdata = file->private_data;

	if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
		return -EIO;

/* cs_char_poll() */
	struct cs_char *csdata = file->private_data;

	spin_lock_bh(&csdata->lock);
	if (!list_empty(&csdata->chardev_queue))
	else if (!list_empty(&csdata->dataind_queue))
	spin_unlock_bh(&csdata->lock);

/* cs_char_read() */
	struct cs_char *csdata = file->private_data;

		return -EINVAL;

	spin_lock_bh(&csdata->lock);
	if (!list_empty(&csdata->chardev_queue)) {
		data = cs_pop_entry(&csdata->chardev_queue);
	} else if (!list_empty(&csdata->dataind_queue)) {
		data = cs_pop_entry(&csdata->dataind_queue);
		csdata->dataind_pending--;
	}
	spin_unlock_bh(&csdata->lock);

	if (file->f_flags & O_NONBLOCK) {
		retval = -EAGAIN;

		retval = -ERESTARTSYS;

	prepare_to_wait_exclusive(&csdata->wait, &wait,
	finish_wait(&csdata->wait, &wait);

/* cs_char_write() */
	struct cs_char *csdata = file->private_data;

		return -EINVAL;

		retval = -EFAULT;

	err = cs_hsi_command(csdata->hi, data);

/* cs_char_ioctl() */
	struct cs_char *csdata = file->private_data;

		state = cs_hsi_get_state(csdata->hi);
			r = -EFAULT;

			r = -EFAULT;
			r = -EINVAL;

		cs_hsi_set_wakeline(csdata->hi, !!state);

			r = -EFAULT;

			r = -EFAULT;
		r = cs_hsi_buf_config(csdata->hi, &buf_cfg);

		r = -ENOTTY;

/* cs_char_mmap() */
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

		return -EINVAL;

	vma->vm_ops = &cs_char_vm_ops;
	vma->vm_private_data = file->private_data;

/* cs_char_open() */
		ret = -EBUSY;

		ret = -ENOMEM;

		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");

	file->private_data = &cs_char_data;

/* cs_free_char_queue() */
		list_del(&entry->list);

/* cs_char_release() */
	struct cs_char *csdata = file->private_data;

	cs_hsi_stop(csdata->hi);
	spin_lock_bh(&csdata->lock);
	csdata->hi = NULL;
	free_page(csdata->mmap_base);
	cs_free_char_queue(&csdata->chardev_queue);
	cs_free_char_queue(&csdata->dataind_queue);
	csdata->opened = 0;
	spin_unlock_bh(&csdata->lock);

/* cs_hsi_client_probe() */
			"speech-control");
			"speech-data");

/* driver registration */
	.name = "cmt-speech",

MODULE_ALIAS("hsi:cmt-speech");