Lines Matching +full:tx +full:- +full:mailbox +full:- +full:count
1 // SPDX-License-Identifier: GPL-2.0-only
11 * SoC specific ring manager driver is implemented as a mailbox controller
19 * hardware devices for achieving high throughput.
22 * except submitting requests to the SBA hardware device via mailbox channels.
24 * mailbox channel provided by Broadcom SoC specific ring manager driver.
32 #include <linux/dma-mapping.h>
36 #include <linux/mailbox/brcm-message.h>
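The header comments above describe the split of work: the Broadcom SoC-specific ring manager is a mailbox controller, and this SBA RAID driver is a mailbox client that only packages requests into brcm-message submissions. As orientation, a minimal sketch of that client wiring follows; the field values mirror the sba_probe() fragments later in this listing, and everything else is simplified rather than verbatim driver code.

    /*
     * Hedged sketch of the mailbox-client setup used by this driver.
     * Field values mirror the sba_probe() fragments below; error handling
     * and surrounding probe logic are omitted.
     */
    sba->client.dev          = &pdev->dev;
    sba->client.rx_callback  = sba_receive_message;  /* completion notifications */
    sba->client.tx_block     = false;                /* sends do not block */
    sba->client.knows_txdone = true;                 /* driver calls mbox_client_txdone() itself */
    sba->client.tx_tout      = 0;

    sba->mchan = mbox_request_channel(&sba->client, 0);
    if (IS_ERR(sba->mchan))
            return PTR_ERR(sba->mchan);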
85 #define to_sba_request(tx) \ argument
86 container_of(tx, struct sba_request, tx)
113 struct dma_async_tx_descriptor tx; member
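to_sba_request() is the usual container_of() pattern: every sba_request embeds the generic descriptor as its tx member (shown just above), so a descriptor pointer handed in by the dmaengine core can be mapped back to the enclosing request. A small illustration with a hypothetical helper, the same pattern sba_tx_submit() uses further down:

    /* Illustration only: recover the enclosing request from the descriptor. */
    static struct sba_request *example_to_request(struct dma_async_tx_descriptor *tx)
    {
            /* to_sba_request() expands to container_of(tx, struct sba_request, tx) */
            return to_sba_request(tx);
    }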
138 /* Mailbox client and Mailbox channels */
201 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_alloc_request()
202 list_for_each_entry(req, &sba->reqs_free_list, node) { in sba_alloc_request()
203 if (async_tx_test_ack(&req->tx)) { in sba_alloc_request()
204 list_move_tail(&req->node, &sba->reqs_alloc_list); in sba_alloc_request()
209 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_alloc_request()
214 * mailbox channels, hoping a few active requests in sba_alloc_request()
218 mbox_client_peek_data(sba->mchan); in sba_alloc_request()
222 req->flags = SBA_REQUEST_STATE_ALLOCED; in sba_alloc_request()
223 req->first = req; in sba_alloc_request()
224 INIT_LIST_HEAD(&req->next); in sba_alloc_request()
225 atomic_set(&req->next_pending_count, 1); in sba_alloc_request()
227 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_alloc_request()
228 async_tx_ack(&req->tx); in sba_alloc_request()
233 /* Note: Must be called with sba->reqs_lock held */
237 lockdep_assert_held(&sba->reqs_lock); in _sba_pending_request()
238 req->flags &= ~SBA_REQUEST_STATE_MASK; in _sba_pending_request()
239 req->flags |= SBA_REQUEST_STATE_PENDING; in _sba_pending_request()
240 list_move_tail(&req->node, &sba->reqs_pending_list); in _sba_pending_request()
241 if (list_empty(&sba->reqs_active_list)) in _sba_pending_request()
242 sba->reqs_fence = false; in _sba_pending_request()
245 /* Note: Must be called with sba->reqs_lock held */
249 lockdep_assert_held(&sba->reqs_lock); in _sba_active_request()
250 if (list_empty(&sba->reqs_active_list)) in _sba_active_request()
251 sba->reqs_fence = false; in _sba_active_request()
252 if (sba->reqs_fence) in _sba_active_request()
254 req->flags &= ~SBA_REQUEST_STATE_MASK; in _sba_active_request()
255 req->flags |= SBA_REQUEST_STATE_ACTIVE; in _sba_active_request()
256 list_move_tail(&req->node, &sba->reqs_active_list); in _sba_active_request()
257 if (req->flags & SBA_REQUEST_FENCE) in _sba_active_request()
258 sba->reqs_fence = true; in _sba_active_request()
262 /* Note: Must be called with sba->reqs_lock held */
266 lockdep_assert_held(&sba->reqs_lock); in _sba_abort_request()
267 req->flags &= ~SBA_REQUEST_STATE_MASK; in _sba_abort_request()
268 req->flags |= SBA_REQUEST_STATE_ABORTED; in _sba_abort_request()
269 list_move_tail(&req->node, &sba->reqs_aborted_list); in _sba_abort_request()
270 if (list_empty(&sba->reqs_active_list)) in _sba_abort_request()
271 sba->reqs_fence = false; in _sba_abort_request()
274 /* Note: Must be called with sba->reqs_lock held */
278 lockdep_assert_held(&sba->reqs_lock); in _sba_free_request()
279 req->flags &= ~SBA_REQUEST_STATE_MASK; in _sba_free_request()
280 req->flags |= SBA_REQUEST_STATE_FREE; in _sba_free_request()
281 list_move_tail(&req->node, &sba->reqs_free_list); in _sba_free_request()
282 if (list_empty(&sba->reqs_active_list)) in _sba_free_request()
283 sba->reqs_fence = false; in _sba_free_request()
290 struct sba_device *sba = req->sba; in sba_free_chained_requests()
292 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_free_chained_requests()
295 list_for_each_entry(nreq, &req->next, next) in sba_free_chained_requests()
298 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_free_chained_requests()
305 struct sba_device *sba = req->sba; in sba_chain_request()
307 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_chain_request()
309 list_add_tail(&req->next, &first->next); in sba_chain_request()
310 req->first = first; in sba_chain_request()
311 atomic_inc(&first->next_pending_count); in sba_chain_request()
313 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_chain_request()
321 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
324 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) in sba_cleanup_nonpending_requests()
328 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) in sba_cleanup_nonpending_requests()
336 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_nonpending_requests()
344 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
347 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) in sba_cleanup_pending_requests()
350 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_cleanup_pending_requests()
359 req->msg.error = 0; in sba_send_mbox_request()
360 ret = mbox_send_message(sba->mchan, &req->msg); in sba_send_mbox_request()
362 dev_err(sba->dev, "send message failed with error %d", ret); in sba_send_mbox_request()
366 /* Check error returned by mailbox controller */ in sba_send_mbox_request()
367 ret = req->msg.error; in sba_send_mbox_request()
369 dev_err(sba->dev, "message error %d", ret); in sba_send_mbox_request()
372 /* Signal txdone for mailbox channel */ in sba_send_mbox_request()
373 mbox_client_txdone(sba->mchan, ret); in sba_send_mbox_request()
378 /* Note: Must be called with sba->reqs_lock held */
382 u32 count; in _sba_process_pending_requests() local
386 count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL; in _sba_process_pending_requests()
387 while (!list_empty(&sba->reqs_pending_list) && count) { in _sba_process_pending_requests()
389 req = list_first_entry(&sba->reqs_pending_list, in _sba_process_pending_requests()
396 /* Send request to mailbox channel */ in _sba_process_pending_requests()
403 count--; in _sba_process_pending_requests()
411 struct dma_async_tx_descriptor *tx; in sba_process_received_request() local
412 struct sba_request *nreq, *first = req->first; in sba_process_received_request()
415 if (!atomic_dec_return(&first->next_pending_count)) { in sba_process_received_request()
416 tx = &first->tx; in sba_process_received_request()
418 WARN_ON(tx->cookie < 0); in sba_process_received_request()
419 if (tx->cookie > 0) { in sba_process_received_request()
420 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
421 dma_cookie_complete(tx); in sba_process_received_request()
422 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
423 dmaengine_desc_get_callback_invoke(tx, NULL); in sba_process_received_request()
424 dma_descriptor_unmap(tx); in sba_process_received_request()
425 tx->callback = NULL; in sba_process_received_request()
426 tx->callback_result = NULL; in sba_process_received_request()
429 dma_run_dependencies(tx); in sba_process_received_request()
431 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_process_received_request()
434 list_for_each_entry(nreq, &first->next, next) in sba_process_received_request()
436 INIT_LIST_HEAD(&first->next); in sba_process_received_request()
444 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_process_received_request()
456 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
458 list_for_each_entry(req, &sba->reqs_free_list, node) in sba_write_stats_in_seqfile()
459 if (async_tx_test_ack(&req->tx)) in sba_write_stats_in_seqfile()
462 list_for_each_entry(req, &sba->reqs_alloc_list, node) in sba_write_stats_in_seqfile()
465 list_for_each_entry(req, &sba->reqs_pending_list, node) in sba_write_stats_in_seqfile()
468 list_for_each_entry(req, &sba->reqs_active_list, node) in sba_write_stats_in_seqfile()
471 list_for_each_entry(req, &sba->reqs_aborted_list, node) in sba_write_stats_in_seqfile()
474 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_write_stats_in_seqfile()
476 seq_printf(file, "maximum requests = %d\n", sba->max_req); in sba_write_stats_in_seqfile()
489 * Channel resources are pre-allocated, so we just free up in sba_free_chan_resources()
490 * whatever we can so that we can reuse pre-allocated in sba_free_chan_resources()
510 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_issue_pending()
512 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_issue_pending()
515 static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx) in sba_tx_submit() argument
522 if (unlikely(!tx)) in sba_tx_submit()
523 return -EINVAL; in sba_tx_submit()
525 sba = to_sba_device(tx->chan); in sba_tx_submit()
526 req = to_sba_request(tx); in sba_tx_submit()
529 spin_lock_irqsave(&sba->reqs_lock, flags); in sba_tx_submit()
530 cookie = dma_cookie_assign(tx); in sba_tx_submit()
532 list_for_each_entry(nreq, &req->next, next) in sba_tx_submit()
534 spin_unlock_irqrestore(&sba->reqs_lock, flags); in sba_tx_submit()
550 mbox_client_peek_data(sba->mchan); in sba_tx_status()
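sba_tx_submit(), sba_issue_pending() and sba_tx_status() above are the standard dmaengine provider hooks, so the channel is exercised through the ordinary consumer API. The sketch below is generic dmaengine usage, not code from this driver; chan, dst, src and len are placeholders.

    /*
     * Hedged consumer-side sketch showing how the provider hooks above are
     * reached. Placeholders only; real users typically rely on completion
     * callbacks rather than synchronous waiting.
     */
    static int example_issue_memcpy(struct dma_chan *chan,
                                    dma_addr_t dst, dma_addr_t src, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            dma_cookie_t cookie;

            tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
            if (!tx)
                    return -ENOMEM;

            cookie = dmaengine_submit(tx);   /* lands in sba_tx_submit() */
            dma_async_issue_pending(chan);   /* lands in sba_issue_pending() */

            /* status queries land in sba_tx_status() */
            return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
    }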
561 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_interrupt_msg()
564 /* Type-B command to load dummy data into buf0 */ in sba_fillup_interrupt_msg()
567 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
574 cmdsp->cmd = cmd; in sba_fillup_interrupt_msg()
575 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_interrupt_msg()
576 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_interrupt_msg()
577 cmdsp->data = resp_dma; in sba_fillup_interrupt_msg()
578 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
581 /* Type-A command to write buf0 to dummy location */ in sba_fillup_interrupt_msg()
584 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, in sba_fillup_interrupt_msg()
593 cmdsp->cmd = cmd; in sba_fillup_interrupt_msg()
594 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_interrupt_msg()
595 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_interrupt_msg()
596 if (req->sba->hw_resp_size) { in sba_fillup_interrupt_msg()
597 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_interrupt_msg()
598 cmdsp->resp = resp_dma; in sba_fillup_interrupt_msg()
599 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
601 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_interrupt_msg()
602 cmdsp->data = resp_dma; in sba_fillup_interrupt_msg()
603 cmdsp->data_len = req->sba->hw_resp_size; in sba_fillup_interrupt_msg()
607 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_interrupt_msg()
608 msg->sba.cmds = cmds; in sba_fillup_interrupt_msg()
609 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_interrupt_msg()
610 msg->ctx = req; in sba_fillup_interrupt_msg()
611 msg->error = 0; in sba_fillup_interrupt_msg()
629 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_interrupt()
632 sba_fillup_interrupt_msg(req, req->cmds, &req->msg); in sba_prep_dma_interrupt()
635 req->tx.flags = flags; in sba_prep_dma_interrupt()
636 req->tx.cookie = -EBUSY; in sba_prep_dma_interrupt()
638 return &req->tx; in sba_prep_dma_interrupt()
649 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_memcpy_msg()
652 /* Type-B command to load data into buf0 */ in sba_fillup_memcpy_msg()
662 cmdsp->cmd = cmd; in sba_fillup_memcpy_msg()
663 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_memcpy_msg()
664 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_memcpy_msg()
665 cmdsp->data = src + msg_offset; in sba_fillup_memcpy_msg()
666 cmdsp->data_len = msg_len; in sba_fillup_memcpy_msg()
669 /* Type-A command to write buf0 */ in sba_fillup_memcpy_msg()
681 cmdsp->cmd = cmd; in sba_fillup_memcpy_msg()
682 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_memcpy_msg()
683 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_memcpy_msg()
684 if (req->sba->hw_resp_size) { in sba_fillup_memcpy_msg()
685 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_memcpy_msg()
686 cmdsp->resp = resp_dma; in sba_fillup_memcpy_msg()
687 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_memcpy_msg()
689 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_memcpy_msg()
690 cmdsp->data = dst + msg_offset; in sba_fillup_memcpy_msg()
691 cmdsp->data_len = msg_len; in sba_fillup_memcpy_msg()
695 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_memcpy_msg()
696 msg->sba.cmds = cmds; in sba_fillup_memcpy_msg()
697 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_memcpy_msg()
698 msg->ctx = req; in sba_fillup_memcpy_msg()
699 msg->error = 0; in sba_fillup_memcpy_msg()
714 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_memcpy_req()
717 sba_fillup_memcpy_msg(req, req->cmds, &req->msg, in sba_prep_dma_memcpy_req()
721 req->tx.flags = flags; in sba_prep_dma_memcpy_req()
722 req->tx.cookie = -EBUSY; in sba_prep_dma_memcpy_req()
738 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_memcpy()
754 len -= req_len; in sba_prep_dma_memcpy()
757 return (first) ? &first->tx : NULL; in sba_prep_dma_memcpy()
769 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_xor_msg()
772 /* Type-B command to load data into buf0 */ in sba_fillup_xor_msg()
782 cmdsp->cmd = cmd; in sba_fillup_xor_msg()
783 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_xor_msg()
784 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_xor_msg()
785 cmdsp->data = src[0] + msg_offset; in sba_fillup_xor_msg()
786 cmdsp->data_len = msg_len; in sba_fillup_xor_msg()
789 /* Type-B commands to xor data with buf0 and put it back in buf0 */ in sba_fillup_xor_msg()
800 cmdsp->cmd = cmd; in sba_fillup_xor_msg()
801 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_xor_msg()
802 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_xor_msg()
803 cmdsp->data = src[i] + msg_offset; in sba_fillup_xor_msg()
804 cmdsp->data_len = msg_len; in sba_fillup_xor_msg()
808 /* Type-A command to write buf0 */ in sba_fillup_xor_msg()
820 cmdsp->cmd = cmd; in sba_fillup_xor_msg()
821 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_xor_msg()
822 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_xor_msg()
823 if (req->sba->hw_resp_size) { in sba_fillup_xor_msg()
824 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_xor_msg()
825 cmdsp->resp = resp_dma; in sba_fillup_xor_msg()
826 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_xor_msg()
828 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_xor_msg()
829 cmdsp->data = dst + msg_offset; in sba_fillup_xor_msg()
830 cmdsp->data_len = msg_len; in sba_fillup_xor_msg()
834 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_xor_msg()
835 msg->sba.cmds = cmds; in sba_fillup_xor_msg()
836 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_xor_msg()
837 msg->ctx = req; in sba_fillup_xor_msg()
838 msg->error = 0; in sba_fillup_xor_msg()
853 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_xor_req()
856 sba_fillup_xor_msg(req, req->cmds, &req->msg, in sba_prep_dma_xor_req()
860 req->tx.flags = flags; in sba_prep_dma_xor_req()
861 req->tx.cookie = -EBUSY; in sba_prep_dma_xor_req()
876 if (unlikely(src_cnt > sba->max_xor_srcs)) in sba_prep_dma_xor()
881 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_xor()
897 len -= req_len; in sba_prep_dma_xor()
900 return (first) ? &first->tx : NULL; in sba_prep_dma_xor()
914 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_pq_msg()
918 /* Type-B command to load old P into buf0 */ in sba_fillup_pq_msg()
929 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
930 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
931 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_msg()
932 cmdsp->data = *dst_p + msg_offset; in sba_fillup_pq_msg()
933 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
937 /* Type-B command to load old Q into buf1 */ in sba_fillup_pq_msg()
948 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
949 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
950 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_msg()
951 cmdsp->data = *dst_q + msg_offset; in sba_fillup_pq_msg()
952 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
956 /* Type-A command to zero all buffers */ in sba_fillup_pq_msg()
963 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
964 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
965 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_msg()
969 /* Type-B commands for generate P onto buf0 and Q onto buf1 */ in sba_fillup_pq_msg()
982 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
983 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
984 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_msg()
985 cmdsp->data = src[i] + msg_offset; in sba_fillup_pq_msg()
986 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
990 /* Type-A command to write buf0 */ in sba_fillup_pq_msg()
1003 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
1004 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
1005 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_msg()
1006 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1007 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_pq_msg()
1008 cmdsp->resp = resp_dma; in sba_fillup_pq_msg()
1009 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1011 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_pq_msg()
1012 cmdsp->data = *dst_p + msg_offset; in sba_fillup_pq_msg()
1013 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
1017 /* Type-A command to write buf1 */ in sba_fillup_pq_msg()
1030 cmdsp->cmd = cmd; in sba_fillup_pq_msg()
1031 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_msg()
1032 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_msg()
1033 if (req->sba->hw_resp_size) { in sba_fillup_pq_msg()
1034 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_pq_msg()
1035 cmdsp->resp = resp_dma; in sba_fillup_pq_msg()
1036 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_msg()
1038 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_pq_msg()
1039 cmdsp->data = *dst_q + msg_offset; in sba_fillup_pq_msg()
1040 cmdsp->data_len = msg_len; in sba_fillup_pq_msg()
1045 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_pq_msg()
1046 msg->sba.cmds = cmds; in sba_fillup_pq_msg()
1047 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_msg()
1048 msg->ctx = req; in sba_fillup_pq_msg()
1049 msg->error = 0; in sba_fillup_pq_msg()
1064 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_pq_req()
1068 req->cmds, &req->msg, in sba_prep_dma_pq_req()
1072 req->tx.flags = flags; in sba_prep_dma_pq_req()
1073 req->tx.cookie = -EBUSY; in sba_prep_dma_pq_req()
1089 dma_addr_t resp_dma = req->tx.phys; in sba_fillup_pq_single_msg()
1096 /* Type-B command to load old P into buf0 */ in sba_fillup_pq_single_msg()
1106 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1107 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1108 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1109 cmdsp->data = *dst_p + msg_offset; in sba_fillup_pq_single_msg()
1110 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1114 * Type-B commands to xor data with buf0 and put it in sba_fillup_pq_single_msg()
1126 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1127 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1128 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1129 cmdsp->data = src + msg_offset; in sba_fillup_pq_single_msg()
1130 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1133 /* Type-B command to load old P into buf0 */ in sba_fillup_pq_single_msg()
1143 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1144 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1145 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1146 cmdsp->data = src + msg_offset; in sba_fillup_pq_single_msg()
1147 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1151 /* Type-A command to write buf0 */ in sba_fillup_pq_single_msg()
1163 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1164 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1165 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_single_msg()
1166 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1167 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_pq_single_msg()
1168 cmdsp->resp = resp_dma; in sba_fillup_pq_single_msg()
1169 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1171 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_pq_single_msg()
1172 cmdsp->data = *dst_p + msg_offset; in sba_fillup_pq_single_msg()
1173 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1180 /* Type-A command to zero all buffers */ in sba_fillup_pq_single_msg()
1187 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1188 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1189 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_single_msg()
1194 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1195 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1198 * Type-B command to generate initial Q from data in sba_fillup_pq_single_msg()
1212 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1213 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1214 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1215 cmdsp->data = src + msg_offset; in sba_fillup_pq_single_msg()
1216 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1219 dpos -= pos; in sba_fillup_pq_single_msg()
1221 /* Multiple Type-A command to generate final Q */ in sba_fillup_pq_single_msg()
1223 pos = (dpos < req->sba->max_pq_coefs) ? in sba_fillup_pq_single_msg()
1224 dpos : (req->sba->max_pq_coefs - 1); in sba_fillup_pq_single_msg()
1227 * Type-A command to generate Q with buf0 and in sba_fillup_pq_single_msg()
1241 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1242 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1243 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_single_msg()
1246 dpos -= pos; in sba_fillup_pq_single_msg()
1252 * Type-B command to XOR previous output with in sba_fillup_pq_single_msg()
1264 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1265 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1266 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; in sba_fillup_pq_single_msg()
1267 cmdsp->data = *dst_q + msg_offset; in sba_fillup_pq_single_msg()
1268 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1272 /* Type-A command to write buf0 */ in sba_fillup_pq_single_msg()
1284 cmdsp->cmd = cmd; in sba_fillup_pq_single_msg()
1285 *cmdsp->cmd_dma = cpu_to_le64(cmd); in sba_fillup_pq_single_msg()
1286 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; in sba_fillup_pq_single_msg()
1287 if (req->sba->hw_resp_size) { in sba_fillup_pq_single_msg()
1288 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; in sba_fillup_pq_single_msg()
1289 cmdsp->resp = resp_dma; in sba_fillup_pq_single_msg()
1290 cmdsp->resp_len = req->sba->hw_resp_size; in sba_fillup_pq_single_msg()
1292 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; in sba_fillup_pq_single_msg()
1293 cmdsp->data = *dst_q + msg_offset; in sba_fillup_pq_single_msg()
1294 cmdsp->data_len = msg_len; in sba_fillup_pq_single_msg()
1299 msg->type = BRCM_MESSAGE_SBA; in sba_fillup_pq_single_msg()
1300 msg->sba.cmds = cmds; in sba_fillup_pq_single_msg()
1301 msg->sba.cmds_count = cmdsp - cmds; in sba_fillup_pq_single_msg()
1302 msg->ctx = req; in sba_fillup_pq_single_msg()
1303 msg->error = 0; in sba_fillup_pq_single_msg()
1319 req->flags |= SBA_REQUEST_FENCE; in sba_prep_dma_pq_single_req()
1323 req->cmds, &req->msg, off, len, in sba_prep_dma_pq_single_req()
1327 req->tx.flags = flags; in sba_prep_dma_pq_single_req()
1328 req->tx.cookie = -EBUSY; in sba_prep_dma_pq_single_req()
1346 if (unlikely(src_cnt > sba->max_pq_srcs)) in sba_prep_dma_pq()
1349 if (sba->max_pq_coefs <= raid6_gflog[scf[i]]) in sba_prep_dma_pq()
1352 /* Figure out P and Q destination addresses */ in sba_prep_dma_pq()
1360 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; in sba_prep_dma_pq()
1421 len -= req_len; in sba_prep_dma_pq()
1424 return (first) ? &first->tx : NULL; in sba_prep_dma_pq()
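The memcpy, xor and pq prep callbacks above share one pattern: a transfer longer than hw_buf_size is split into per-chunk sba_requests, each chunk after the first is chained onto the first request, and only the first request's descriptor is returned to the caller. A simplified sketch of that loop follows (modelled on the memcpy case; fence handling and the exact helper signature are abbreviated, not verbatim driver code).

    /*
     * Simplified chunk-and-chain loop shared by the prep callbacks above.
     */
    first = NULL;
    while (len) {
            req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

            req = sba_prep_dma_memcpy_req(sba, off, dst, src, req_len, flags);
            if (!req) {
                    if (first)
                            sba_free_chained_requests(first);
                    return NULL;
            }

            if (first)
                    sba_chain_request(first, req);  /* bumps first->next_pending_count */
            else
                    first = req;

            off += req_len;
            len -= req_len;
    }

    return first ? &first->tx : NULL;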
1432 /* ====== Mailbox callbacks ====== */
1437 struct sba_request *req = m->ctx; in sba_receive_message()
1438 struct sba_device *sba = req->sba; in sba_receive_message()
1440 /* Report an error if the message has an error */ in sba_receive_message()
1441 if (m->error < 0) in sba_receive_message()
1442 dev_err(sba->dev, "%s got message with error %d", in sba_receive_message()
1443 dma_chan_name(&sba->dma_chan), m->error); in sba_receive_message()
1453 struct sba_device *sba = dev_get_drvdata(file->private); in sba_debugfs_stats_show()
1468 sba->resp_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1469 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1470 &sba->resp_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1471 if (!sba->resp_base) in sba_prealloc_channel_resources()
1472 return -ENOMEM; in sba_prealloc_channel_resources()
1474 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1475 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1476 &sba->cmds_dma_base, GFP_KERNEL); in sba_prealloc_channel_resources()
1477 if (!sba->cmds_base) { in sba_prealloc_channel_resources()
1478 ret = -ENOMEM; in sba_prealloc_channel_resources()
1482 spin_lock_init(&sba->reqs_lock); in sba_prealloc_channel_resources()
1483 sba->reqs_fence = false; in sba_prealloc_channel_resources()
1484 INIT_LIST_HEAD(&sba->reqs_alloc_list); in sba_prealloc_channel_resources()
1485 INIT_LIST_HEAD(&sba->reqs_pending_list); in sba_prealloc_channel_resources()
1486 INIT_LIST_HEAD(&sba->reqs_active_list); in sba_prealloc_channel_resources()
1487 INIT_LIST_HEAD(&sba->reqs_aborted_list); in sba_prealloc_channel_resources()
1488 INIT_LIST_HEAD(&sba->reqs_free_list); in sba_prealloc_channel_resources()
1490 for (i = 0; i < sba->max_req; i++) { in sba_prealloc_channel_resources()
1491 req = devm_kzalloc(sba->dev, in sba_prealloc_channel_resources()
1492 struct_size(req, cmds, sba->max_cmd_per_req), in sba_prealloc_channel_resources()
1495 ret = -ENOMEM; in sba_prealloc_channel_resources()
1498 INIT_LIST_HEAD(&req->node); in sba_prealloc_channel_resources()
1499 req->sba = sba; in sba_prealloc_channel_resources()
1500 req->flags = SBA_REQUEST_STATE_FREE; in sba_prealloc_channel_resources()
1501 INIT_LIST_HEAD(&req->next); in sba_prealloc_channel_resources()
1502 atomic_set(&req->next_pending_count, 0); in sba_prealloc_channel_resources()
1503 for (j = 0; j < sba->max_cmd_per_req; j++) { in sba_prealloc_channel_resources()
1504 req->cmds[j].cmd = 0; in sba_prealloc_channel_resources()
1505 req->cmds[j].cmd_dma = sba->cmds_base + in sba_prealloc_channel_resources()
1506 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1507 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base + in sba_prealloc_channel_resources()
1508 (i * sba->max_cmd_per_req + j) * sizeof(u64); in sba_prealloc_channel_resources()
1509 req->cmds[j].flags = 0; in sba_prealloc_channel_resources()
1511 memset(&req->msg, 0, sizeof(req->msg)); in sba_prealloc_channel_resources()
1512 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); in sba_prealloc_channel_resources()
1513 async_tx_ack(&req->tx); in sba_prealloc_channel_resources()
1514 req->tx.tx_submit = sba_tx_submit; in sba_prealloc_channel_resources()
1515 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size; in sba_prealloc_channel_resources()
1516 list_add_tail(&req->node, &sba->reqs_free_list); in sba_prealloc_channel_resources()
1522 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1523 sba->max_cmds_pool_size, in sba_prealloc_channel_resources()
1524 sba->cmds_base, sba->cmds_dma_base); in sba_prealloc_channel_resources()
1526 dma_free_coherent(sba->mbox_dev, in sba_prealloc_channel_resources()
1527 sba->max_resp_pool_size, in sba_prealloc_channel_resources()
1528 sba->resp_base, sba->resp_dma_base); in sba_prealloc_channel_resources()
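The pre-allocation loop above slices two coherent pools into per-request slots: request i gets hw_resp_size bytes of response space at resp_dma_base + i * hw_resp_size, and max_cmd_per_req 64-bit command words starting at cmds_dma_base + i * max_cmd_per_req * sizeof(u64). A small worked example using the SBA_VER_1 numbers visible later in this listing (hw_resp_size = 8, max_pq_srcs = 6, hence max_cmd_per_req = 9); the base addresses are placeholders.

    /* Worked example of the slot arithmetic above; purely illustrative. */
    dma_addr_t resp_dma_base = 0x1000, cmds_dma_base = 0x2000;  /* placeholders */
    size_t hw_resp_size = 8;
    u32 max_cmd_per_req = 6 + 3;  /* max_pq_srcs + 3 = 9 */
    u32 i = 2, j = 4;             /* request 2, command word 4 */

    dma_addr_t resp = resp_dma_base + i * hw_resp_size;                        /* 0x1000 + 16  */
    dma_addr_t cmd  = cmds_dma_base + (i * max_cmd_per_req + j) * sizeof(u64); /* 0x2000 + 176 */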
1534 dmaengine_terminate_all(&sba->dma_chan); in sba_freeup_channel_resources()
1535 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, in sba_freeup_channel_resources()
1536 sba->cmds_base, sba->cmds_dma_base); in sba_freeup_channel_resources()
1537 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, in sba_freeup_channel_resources()
1538 sba->resp_base, sba->resp_dma_base); in sba_freeup_channel_resources()
1539 sba->resp_base = NULL; in sba_freeup_channel_resources()
1540 sba->resp_dma_base = 0; in sba_freeup_channel_resources()
1546 struct dma_device *dma_dev = &sba->dma_dev; in sba_async_register()
1549 sba->dma_chan.device = dma_dev; in sba_async_register()
1550 dma_cookie_init(&sba->dma_chan); in sba_async_register()
1553 dma_cap_zero(dma_dev->cap_mask); in sba_async_register()
1554 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); in sba_async_register()
1555 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); in sba_async_register()
1556 dma_cap_set(DMA_XOR, dma_dev->cap_mask); in sba_async_register()
1557 dma_cap_set(DMA_PQ, dma_dev->cap_mask); in sba_async_register()
1560 * Set mailbox channel device as the base device of in sba_async_register()
1562 * will be done by mailbox controller in sba_async_register()
1564 dma_dev->dev = sba->mbox_dev; in sba_async_register()
1567 dma_dev->device_free_chan_resources = sba_free_chan_resources; in sba_async_register()
1568 dma_dev->device_terminate_all = sba_device_terminate_all; in sba_async_register()
1569 dma_dev->device_issue_pending = sba_issue_pending; in sba_async_register()
1570 dma_dev->device_tx_status = sba_tx_status; in sba_async_register()
1573 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) in sba_async_register()
1574 dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt; in sba_async_register()
1577 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) in sba_async_register()
1578 dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy; in sba_async_register()
1581 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { in sba_async_register()
1582 dma_dev->device_prep_dma_xor = sba_prep_dma_xor; in sba_async_register()
1583 dma_dev->max_xor = sba->max_xor_srcs; in sba_async_register()
1587 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { in sba_async_register()
1588 dma_dev->device_prep_dma_pq = sba_prep_dma_pq; in sba_async_register()
1589 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0); in sba_async_register()
1593 INIT_LIST_HEAD(&dma_dev->channels); in sba_async_register()
1594 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels); in sba_async_register()
1599 dev_err(sba->dev, "async device register error %d", ret); in sba_async_register()
1603 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n", in sba_async_register()
1604 dma_chan_name(&sba->dma_chan), in sba_async_register()
1605 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "", in sba_async_register()
1606 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "", in sba_async_register()
1607 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", in sba_async_register()
1608 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : ""); in sba_async_register()
1621 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL); in sba_probe()
1623 return -ENOMEM; in sba_probe()
1625 sba->dev = &pdev->dev; in sba_probe()
1628 /* Number of mailbox channels should be at least 1 */ in sba_probe()
1629 ret = of_count_phandle_with_args(pdev->dev.of_node, in sba_probe()
1630 "mboxes", "#mbox-cells"); in sba_probe()
1632 return -ENODEV; in sba_probe()
1635 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) in sba_probe()
1636 sba->ver = SBA_VER_1; in sba_probe()
1637 else if (of_device_is_compatible(sba->dev->of_node, in sba_probe()
1638 "brcm,iproc-sba-v2")) in sba_probe()
1639 sba->ver = SBA_VER_2; in sba_probe()
1641 return -ENODEV; in sba_probe()
1644 switch (sba->ver) { in sba_probe()
1646 sba->hw_buf_size = 4096; in sba_probe()
1647 sba->hw_resp_size = 8; in sba_probe()
1648 sba->max_pq_coefs = 6; in sba_probe()
1649 sba->max_pq_srcs = 6; in sba_probe()
1652 sba->hw_buf_size = 4096; in sba_probe()
1653 sba->hw_resp_size = 8; in sba_probe()
1654 sba->max_pq_coefs = 30; in sba_probe()
1660 sba->max_pq_srcs = 12; in sba_probe()
1663 return -EINVAL; in sba_probe()
1665 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL; in sba_probe()
1666 sba->max_cmd_per_req = sba->max_pq_srcs + 3; in sba_probe()
1667 sba->max_xor_srcs = sba->max_cmd_per_req - 1; in sba_probe()
1668 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; in sba_probe()
1669 sba->max_cmds_pool_size = sba->max_req * in sba_probe()
1670 sba->max_cmd_per_req * sizeof(u64); in sba_probe()
1672 /* Setup mailbox client */ in sba_probe()
1673 sba->client.dev = &pdev->dev; in sba_probe()
1674 sba->client.rx_callback = sba_receive_message; in sba_probe()
1675 sba->client.tx_block = false; in sba_probe()
1676 sba->client.knows_txdone = true; in sba_probe()
1677 sba->client.tx_tout = 0; in sba_probe()
1679 /* Request mailbox channel */ in sba_probe()
1680 sba->mchan = mbox_request_channel(&sba->client, 0); in sba_probe()
1681 if (IS_ERR(sba->mchan)) { in sba_probe()
1682 ret = PTR_ERR(sba->mchan); in sba_probe()
1686 /* Find out the underlying mailbox device */ in sba_probe()
1687 ret = of_parse_phandle_with_args(pdev->dev.of_node, in sba_probe()
1688 "mboxes", "#mbox-cells", 0, &args); in sba_probe()
1694 ret = -ENODEV; in sba_probe()
1697 sba->mbox_dev = &mbox_pdev->dev; in sba_probe()
1709 sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); in sba_probe()
1712 debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, in sba_probe()
1723 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s", in sba_probe()
1724 dma_chan_name(&sba->dma_chan), sba->ver+1, in sba_probe()
1725 dev_name(sba->mbox_dev)); in sba_probe()
1730 debugfs_remove_recursive(sba->root); in sba_probe()
1733 mbox_free_channel(sba->mchan); in sba_probe()
1741 dma_async_device_unregister(&sba->dma_dev); in sba_remove()
1743 debugfs_remove_recursive(sba->root); in sba_remove()
1747 mbox_free_channel(sba->mchan); in sba_remove()
1751 { .compatible = "brcm,iproc-sba", },
1752 { .compatible = "brcm,iproc-sba-v2", },
1761 .name = "bcm-sba-raid",