Lines Matching +full:dma +full:-poll +full:-cnt

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright 2018-2020 Broadcom.
12 #include <linux/poll.h>
46 u32 q_num = msg->trans_id & BCM_VK_MSG_Q_MASK; in get_q_num()
62 msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | trans_q; in set_q_num()
67 return ((msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK); in get_msg_id()
72 msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg); in set_msg_id()
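
The trans_id helpers above pack the destination queue number and the message id into a single field with shift/mask arithmetic. A minimal standalone sketch of the same packing, assuming placeholder values for BCM_VK_MSG_Q_SHIFT, BCM_VK_MSG_Q_MASK and BCM_VK_MSG_ID_MASK (the driver defines its own constants in its headers):

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder values for illustration; not the driver's definitions. */
	#define BCM_VK_MSG_Q_SHIFT	4
	#define BCM_VK_MSG_Q_MASK	0xf
	#define BCM_VK_MSG_ID_MASK	0xfff

	struct vk_msg_blk { uint16_t trans_id; };

	static unsigned int get_q_num(const struct vk_msg_blk *msg)
	{
		return msg->trans_id & BCM_VK_MSG_Q_MASK;
	}

	static void set_msg_id(struct vk_msg_blk *msg, unsigned int val)
	{
		/* keep the queue bits, replace the id bits */
		msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg);
	}

	static unsigned int get_msg_id(const struct vk_msg_blk *msg)
	{
		return (msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK;
	}

	int main(void)
	{
		struct vk_msg_blk m = { .trans_id = 2 };	/* destined for queue 2 */

		set_msg_id(&m, 0x123);
		printf("q=%u id=0x%x\n", get_q_num(&m), get_msg_id(&m));	/* q=2 id=0x123 */
		return 0;
	}
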
77 return ((idx + inc) & qinfo->q_mask); in msgq_inc()
84 return qinfo->q_start + (VK_MSGQ_BLK_SIZE * idx); in msgq_blk_addr()
92 wr_idx = readl_relaxed(&msgq->wr_idx); in msgq_occupied()
93 rd_idx = readl_relaxed(&msgq->rd_idx); in msgq_occupied()
95 return ((wr_idx - rd_idx) & qinfo->q_mask); in msgq_occupied()
102 return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1); in msgq_avail_space()
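
msgq_occupied() and msgq_avail_space() rely on the queue size being a power of two, so (wr_idx - rd_idx) & q_mask gives the number of used blocks even across index wrap-around, with one slot kept free to distinguish full from empty. A small sketch of that arithmetic with an assumed example queue size:

	#include <stdint.h>
	#include <stdio.h>

	/* Example geometry: the size must be a power of two for the mask trick. */
	#define Q_SIZE	16u
	#define Q_MASK	(Q_SIZE - 1)

	static uint32_t occupied(uint32_t wr, uint32_t rd)
	{
		return (wr - rd) & Q_MASK;		/* correct even after wrap-around */
	}

	static uint32_t avail_space(uint32_t wr, uint32_t rd)
	{
		return Q_SIZE - occupied(wr, rd) - 1;	/* one slot kept free */
	}

	int main(void)
	{
		/* wr has wrapped past rd: 2 blocks in use, 13 free */
		printf("used=%u free=%u\n", occupied(1, 15), avail_space(1, 15));
		return 0;
	}
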
111 return (!!atomic_read(&vk->msgq_inited)); in bcm_vk_drv_access_ok()
116 struct bcm_vk_alert *alert = &vk->host_alert; in bcm_vk_set_host_alert()
120 spin_lock_irqsave(&vk->host_alert_lock, flags); in bcm_vk_set_host_alert()
121 alert->notfs |= bit_mask; in bcm_vk_set_host_alert()
122 spin_unlock_irqrestore(&vk->host_alert_lock, flags); in bcm_vk_set_host_alert()
124 if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0) in bcm_vk_set_host_alert()
125 queue_work(vk->wq_thread, &vk->wq_work); in bcm_vk_set_host_alert()
151 if (uptime_s == hb->last_uptime) in bcm_vk_hb_poll()
152 hb->lost_cnt++; in bcm_vk_hb_poll()
154 hb->lost_cnt = 0; in bcm_vk_hb_poll()
156 dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n", in bcm_vk_hb_poll()
157 hb->last_uptime, uptime_s, hb->lost_cnt); in bcm_vk_hb_poll()
164 hb->last_uptime = uptime_s; in bcm_vk_hb_poll()
166 /* reset heart beat lost cnt */ in bcm_vk_hb_poll()
167 hb->lost_cnt = 0; in bcm_vk_hb_poll()
171 if (hb->lost_cnt > BCM_VK_HB_LOST_MAX) { in bcm_vk_hb_poll()
172 dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n", in bcm_vk_hb_poll()
179 /* re-arm timer */ in bcm_vk_hb_poll()
180 schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE); in bcm_vk_hb_poll()
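
bcm_vk_hb_poll() compares the card's uptime counter against the value seen on the previous poll: an unchanged value counts as a miss, any change resets the miss counter, and too many consecutive misses raise an alert before the delayed work re-arms itself. A hedged, self-contained sketch of that bookkeeping (the threshold here is illustrative, not the driver's BCM_VK_HB_LOST_MAX):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HB_LOST_MAX 5	/* illustrative threshold */

	struct hb_ctrl {
		uint32_t last_uptime;
		uint32_t lost_cnt;
	};

	/* One poll tick: returns true once too many consecutive misses accumulate. */
	static bool hb_poll_once(struct hb_ctrl *hb, uint32_t uptime_s)
	{
		if (uptime_s == hb->last_uptime)
			hb->lost_cnt++;		/* card made no visible progress */
		else
			hb->lost_cnt = 0;	/* progress seen, reset the miss count */

		hb->last_uptime = uptime_s;
		return hb->lost_cnt > HB_LOST_MAX;
	}

	int main(void)
	{
		struct hb_ctrl hb = { 0 };

		/* uptime stuck at 100 s across repeated polls */
		for (int i = 0; i < 8; i++)
			if (hb_poll_once(&hb, 100))
				printf("poll %d: heartbeat lost\n", i);
		return 0;
	}
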
185 struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl; in bcm_vk_hb_init()
187 INIT_DELAYED_WORK(&hb->work, bcm_vk_hb_poll); in bcm_vk_hb_init()
188 schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE); in bcm_vk_hb_init()
193 struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl; in bcm_vk_hb_deinit()
195 cancel_delayed_work_sync(&hb->work); in bcm_vk_hb_deinit()
202 spin_lock(&vk->msg_id_lock); in bcm_vk_msgid_bitmap_clear()
203 bitmap_clear(vk->bmap, start, nbits); in bcm_vk_msgid_bitmap_clear()
204 spin_unlock(&vk->msg_id_lock); in bcm_vk_msgid_bitmap_clear()
216 spin_lock(&vk->ctx_lock); in bcm_vk_get_ctx()
219 if (vk->reset_pid) { in bcm_vk_get_ctx()
220 dev_err(&vk->pdev->dev, in bcm_vk_get_ctx()
222 vk->reset_pid); in bcm_vk_get_ctx()
227 for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) { in bcm_vk_get_ctx()
228 if (!vk->ctx[i].in_use) { in bcm_vk_get_ctx()
229 vk->ctx[i].in_use = true; in bcm_vk_get_ctx()
230 ctx = &vk->ctx[i]; in bcm_vk_get_ctx()
236 dev_err(&vk->pdev->dev, "All context in use\n"); in bcm_vk_get_ctx()
242 ctx->pid = pid; in bcm_vk_get_ctx()
243 ctx->hash_idx = hash_idx; in bcm_vk_get_ctx()
244 list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head); in bcm_vk_get_ctx()
247 kref_get(&vk->kref); in bcm_vk_get_ctx()
250 atomic_set(&ctx->pend_cnt, 0); in bcm_vk_get_ctx()
251 atomic_set(&ctx->dma_cnt, 0); in bcm_vk_get_ctx()
252 init_waitqueue_head(&ctx->rd_wq); in bcm_vk_get_ctx()
256 spin_unlock(&vk->ctx_lock); in bcm_vk_get_ctx()
266 spin_lock(&vk->msg_id_lock); in bcm_vk_get_msg_id()
267 while (test_bit_count < (VK_MSG_ID_BITMAP_SIZE - 1)) { in bcm_vk_get_msg_id()
274 vk->msg_id++; in bcm_vk_get_msg_id()
275 if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE) in bcm_vk_get_msg_id()
276 vk->msg_id = 1; in bcm_vk_get_msg_id()
278 if (test_bit(vk->msg_id, vk->bmap)) { in bcm_vk_get_msg_id()
282 rc = vk->msg_id; in bcm_vk_get_msg_id()
283 bitmap_set(vk->bmap, vk->msg_id, 1); in bcm_vk_get_msg_id()
286 spin_unlock(&vk->msg_id_lock); in bcm_vk_get_msg_id()
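
bcm_vk_get_msg_id() hands out transaction ids from a bitmap under a spinlock, wrapping back to 1 (so id 0 is never issued) and skipping ids whose bits are still set because a previous message is outstanding. A userspace approximation of that allocator using a plain bit array (the bitmap size is an assumption for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define ID_BITMAP_SIZE 512u	/* illustrative size */

	struct id_alloc {
		uint64_t bmap[ID_BITMAP_SIZE / 64];
		uint32_t last_id;
	};

	static int id_test(const struct id_alloc *a, uint32_t id)
	{
		return (a->bmap[id / 64] >> (id % 64)) & 1;
	}

	static void id_set(struct id_alloc *a, uint32_t id)
	{
		a->bmap[id / 64] |= 1ull << (id % 64);
	}

	/* Returns a free id in [1, ID_BITMAP_SIZE) or -1 when every id is in flight. */
	static int alloc_msg_id(struct id_alloc *a)
	{
		uint32_t tested = 0;

		while (tested++ < ID_BITMAP_SIZE - 1) {
			if (++a->last_id == ID_BITMAP_SIZE)
				a->last_id = 1;		/* id 0 is never handed out */

			if (id_test(a, a->last_id))
				continue;		/* still outstanding, keep scanning */

			id_set(a, a->last_id);
			return a->last_id;
		}
		return -1;
	}

	int main(void)
	{
		struct id_alloc a = { 0 };
		int first = alloc_msg_id(&a);
		int second = alloc_msg_id(&a);

		printf("%d %d\n", first, second);	/* 1 2 */
		return 0;
	}
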
300 dev_err(&vk->pdev->dev, "NULL context detected\n"); in bcm_vk_free_ctx()
301 return -EINVAL; in bcm_vk_free_ctx()
303 idx = ctx->idx; in bcm_vk_free_ctx()
304 pid = ctx->pid; in bcm_vk_free_ctx()
306 spin_lock(&vk->ctx_lock); in bcm_vk_free_ctx()
308 if (!vk->ctx[idx].in_use) { in bcm_vk_free_ctx()
309 dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx); in bcm_vk_free_ctx()
311 vk->ctx[idx].in_use = false; in bcm_vk_free_ctx()
312 vk->ctx[idx].miscdev = NULL; in bcm_vk_free_ctx()
315 list_del(&ctx->node); in bcm_vk_free_ctx()
316 hash_idx = ctx->hash_idx; in bcm_vk_free_ctx()
317 list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) { in bcm_vk_free_ctx()
318 if (entry->pid == pid) in bcm_vk_free_ctx()
323 spin_unlock(&vk->ctx_lock); in bcm_vk_free_ctx()
332 bcm_vk_sg_free(dev, entry->dma, VK_DMA_MAX_ADDRS, &proc_cnt); in bcm_vk_free_wkent()
334 atomic_dec(&entry->ctx->dma_cnt); in bcm_vk_free_wkent()
336 kfree(entry->to_h_msg); in bcm_vk_free_wkent()
350 vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); in bcm_vk_drain_all_pend()
353 spin_lock(&chan->pendq_lock); in bcm_vk_drain_all_pend()
354 for (num = 0; num < chan->q_nr; num++) { in bcm_vk_drain_all_pend()
355 list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) { in bcm_vk_drain_all_pend()
356 if ((!ctx) || (entry->ctx->idx == ctx->idx)) { in bcm_vk_drain_all_pend()
357 list_move_tail(&entry->node, &del_q); in bcm_vk_drain_all_pend()
361 spin_unlock(&chan->pendq_lock); in bcm_vk_drain_all_pend()
366 list_del(&entry->node); in bcm_vk_drain_all_pend()
375 msg = entry->to_v_msg; in bcm_vk_drain_all_pend()
377 bit_set = test_bit(msg_id, vk->bmap); in bcm_vk_drain_all_pend()
378 responded = entry->to_h_msg ? true : false; in bcm_vk_drain_all_pend()
381 … "Drained: fid %u size %u msg 0x%x(seq-%x) ctx 0x%x[fd-%d] args:[0x%x 0x%x] resp %s, bmap %d\n", in bcm_vk_drain_all_pend()
382 msg->function_id, msg->size, in bcm_vk_drain_all_pend()
383 msg_id, entry->seq_num, in bcm_vk_drain_all_pend()
384 msg->context_id, entry->ctx->idx, in bcm_vk_drain_all_pend()
385 msg->cmd, msg->arg, in bcm_vk_drain_all_pend()
388 atomic_dec(&ctx->pend_cnt); in bcm_vk_drain_all_pend()
395 dev_info(dev, "Total drained items %d [fd-%d]\n", in bcm_vk_drain_all_pend()
396 num, ctx->idx); in bcm_vk_drain_all_pend()
401 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL); in bcm_vk_drain_msg_on_reset()
402 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL); in bcm_vk_drain_msg_on_reset()
411 struct device *dev = &vk->pdev->dev; in bcm_vk_sync_msgq()
414 struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan, in bcm_vk_sync_msgq()
415 &vk->to_h_msg_chan}; in bcm_vk_sync_msgq()
422 * the msgq-info may not be available until a later time. In in bcm_vk_sync_msgq()
428 return -EAGAIN; in bcm_vk_sync_msgq()
437 "Advertised msgq %d error - max %d allowed\n", in bcm_vk_sync_msgq()
439 return -EINVAL; in bcm_vk_sync_msgq()
442 vk->to_v_msg_chan.q_nr = num_q; in bcm_vk_sync_msgq()
443 vk->to_h_msg_chan.q_nr = num_q; in bcm_vk_sync_msgq()
446 msgq = vk->bar[BAR_1] + msgq_off; in bcm_vk_sync_msgq()
454 return -EPERM; in bcm_vk_sync_msgq()
459 memset(chan->sync_qinfo, 0, sizeof(chan->sync_qinfo)); in bcm_vk_sync_msgq()
468 chan->msgq[j] = msgq; in bcm_vk_sync_msgq()
469 msgq_start = readl_relaxed(&msgq->start); in bcm_vk_sync_msgq()
470 msgq_size = readl_relaxed(&msgq->size); in bcm_vk_sync_msgq()
471 msgq_nxt = readl_relaxed(&msgq->nxt); in bcm_vk_sync_msgq()
472 msgq_db_offset = readl_relaxed(&msgq->db_offset); in bcm_vk_sync_msgq()
473 q_db_offset = (msgq_db_offset & ((1 << DB_SHIFT) - 1)); in bcm_vk_sync_msgq()
483 readw_relaxed(&msgq->type), in bcm_vk_sync_msgq()
484 readw_relaxed(&msgq->num), in bcm_vk_sync_msgq()
487 readl_relaxed(&msgq->rd_idx), in bcm_vk_sync_msgq()
488 readl_relaxed(&msgq->wr_idx), in bcm_vk_sync_msgq()
492 qinfo = &chan->sync_qinfo[j]; in bcm_vk_sync_msgq()
494 qinfo->q_start = vk->bar[BAR_1] + msgq_start; in bcm_vk_sync_msgq()
495 qinfo->q_size = msgq_size; in bcm_vk_sync_msgq()
497 qinfo->q_low = qinfo->q_size >> 1; in bcm_vk_sync_msgq()
498 qinfo->q_mask = qinfo->q_size - 1; in bcm_vk_sync_msgq()
499 qinfo->q_db_offset = msgq_db_offset; in bcm_vk_sync_msgq()
504 atomic_set(&vk->msgq_inited, 1); in bcm_vk_sync_msgq()
513 mutex_init(&chan->msgq_mutex); in bcm_vk_msg_chan_init()
514 spin_lock_init(&chan->pendq_lock); in bcm_vk_msg_chan_init()
516 INIT_LIST_HEAD(&chan->pendq[i]); in bcm_vk_msg_chan_init()
526 spin_lock(&chan->pendq_lock); in bcm_vk_append_pendq()
527 list_add_tail(&entry->node, &chan->pendq[q_num]); in bcm_vk_append_pendq()
528 if (entry->to_h_msg) { in bcm_vk_append_pendq()
529 ctx = entry->ctx; in bcm_vk_append_pendq()
530 atomic_inc(&ctx->pend_cnt); in bcm_vk_append_pendq()
531 wake_up_interruptible(&ctx->rd_wq); in bcm_vk_append_pendq()
533 spin_unlock(&chan->pendq_lock); in bcm_vk_append_pendq()
543 struct device *dev = &vk->pdev->dev; in bcm_vk_append_ib_sgl()
544 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; in bcm_vk_append_ib_sgl()
545 struct vk_msg_blk *msg = &entry->to_v_msg[0]; in bcm_vk_append_ib_sgl()
549 u8 *buf = (u8 *)&entry->to_v_msg[entry->to_v_blks]; in bcm_vk_append_ib_sgl()
555 msgq = chan->msgq[q_num]; in bcm_vk_append_ib_sgl()
556 qinfo = &chan->sync_qinfo[q_num]; in bcm_vk_append_ib_sgl()
558 if (avail < qinfo->q_low) { in bcm_vk_append_ib_sgl()
560 avail, qinfo->q_size); in bcm_vk_append_ib_sgl()
566 (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) { in bcm_vk_append_ib_sgl()
568 memcpy(buf, entry->dma[i].sglist, data[i].size); in bcm_vk_append_ib_sgl()
575 item_cnt, ib_sgl_size, vk->ib_sgl_size); in bcm_vk_append_ib_sgl()
578 ib_sgl_size = (ib_sgl_size + VK_MSGQ_BLK_SIZE - 1) in bcm_vk_append_ib_sgl()
586 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; in bcm_to_v_q_doorbell()
587 struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num]; in bcm_to_v_q_doorbell()
589 vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset); in bcm_to_v_q_doorbell()
595 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; in bcm_to_v_msg_enqueue()
596 struct device *dev = &vk->pdev->dev; in bcm_to_v_msg_enqueue()
597 struct vk_msg_blk *src = &entry->to_v_msg[0]; in bcm_to_v_msg_enqueue()
608 if (entry->to_v_blks != src->size + 1) { in bcm_to_v_msg_enqueue()
610 entry->to_v_blks, in bcm_to_v_msg_enqueue()
611 src->size + 1, in bcm_to_v_msg_enqueue()
613 src->function_id, in bcm_to_v_msg_enqueue()
614 src->context_id); in bcm_to_v_msg_enqueue()
615 return -EMSGSIZE; in bcm_to_v_msg_enqueue()
618 msgq = chan->msgq[q_num]; in bcm_to_v_msg_enqueue()
619 qinfo = &chan->sync_qinfo[q_num]; in bcm_to_v_msg_enqueue()
621 mutex_lock(&chan->msgq_mutex); in bcm_to_v_msg_enqueue()
627 while ((avail < entry->to_v_blks) && in bcm_to_v_msg_enqueue()
629 mutex_unlock(&chan->msgq_mutex); in bcm_to_v_msg_enqueue()
632 mutex_lock(&chan->msgq_mutex); in bcm_to_v_msg_enqueue()
636 mutex_unlock(&chan->msgq_mutex); in bcm_to_v_msg_enqueue()
637 return -EAGAIN; in bcm_to_v_msg_enqueue()
641 entry->seq_num = seq_num++; /* update debug seq number */ in bcm_to_v_msg_enqueue()
642 wr_idx = readl_relaxed(&msgq->wr_idx); in bcm_to_v_msg_enqueue()
644 if (wr_idx >= qinfo->q_size) { in bcm_to_v_msg_enqueue()
646 wr_idx, qinfo->q_size); in bcm_to_v_msg_enqueue()
653 for (i = 0; i < entry->to_v_blks; i++) { in bcm_to_v_msg_enqueue()
662 writel(wr_idx, &msgq->wr_idx); in bcm_to_v_msg_enqueue()
666 "MsgQ[%d] [Rd Wr] = [%d %d] blks inserted %d - Q = [u-%d a-%d]/%d\n", in bcm_to_v_msg_enqueue()
667 readl_relaxed(&msgq->num), in bcm_to_v_msg_enqueue()
668 readl_relaxed(&msgq->rd_idx), in bcm_to_v_msg_enqueue()
670 entry->to_v_blks, in bcm_to_v_msg_enqueue()
673 readl_relaxed(&msgq->size)); in bcm_to_v_msg_enqueue()
681 mutex_unlock(&chan->msgq_mutex); in bcm_to_v_msg_enqueue()
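
bcm_to_v_msg_enqueue() holds the channel mutex while writing message blocks, but when the queue lacks enough free blocks it drops the mutex, waits, re-checks, and eventually gives up with -EAGAIN (the loop's second condition is elided in the listing above). A hedged userspace sketch of a bounded retry of that general shape, with an illustrative retry count and sleep interval:

	#include <errno.h>
	#include <stdio.h>
	#include <time.h>

	#define ENQ_RETRY_MAX		10			/* illustrative bound */
	#define ENQ_RETRY_DELAY_NS	(1000L * 1000L)		/* 1 ms between re-checks */

	/* Stand-in for msgq_avail_space(); pretend space frees up on the 3rd check. */
	static unsigned int avail_blocks(void)
	{
		static int calls;

		return (++calls >= 3) ? 8 : 0;
	}

	static int enqueue(unsigned int needed_blks)
	{
		struct timespec ts = { .tv_nsec = ENQ_RETRY_DELAY_NS };
		int retry = 0;

		while (avail_blocks() < needed_blks) {
			if (++retry > ENQ_RETRY_MAX)
				return -EAGAIN;	/* queue stayed full, let the caller retry later */
			nanosleep(&ts, NULL);	/* the driver drops its mutex while it waits */
		}

		/* ...copy the message blocks and advance the write index here... */
		return 0;
	}

	int main(void)
	{
		printf("enqueue rc=%d\n", enqueue(4));
		return 0;
	}
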
690 struct device *dev = &vk->pdev->dev; in bcm_vk_send_shutdown_msg()
698 dev_info(dev, "PCIe comm chan - invalid marker (0x%x)!\n", in bcm_vk_send_shutdown_msg()
700 return -EINVAL; in bcm_vk_send_shutdown_msg()
705 return -ENOMEM; in bcm_vk_send_shutdown_msg()
706 entry->to_v_blks = 1; /* always 1 block */ in bcm_vk_send_shutdown_msg()
709 entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN; in bcm_vk_send_shutdown_msg()
710 set_q_num(&entry->to_v_msg[0], q_num); in bcm_vk_send_shutdown_msg()
711 set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID); in bcm_vk_send_shutdown_msg()
713 entry->to_v_msg[0].cmd = shut_type; in bcm_vk_send_shutdown_msg()
714 entry->to_v_msg[0].arg = pid; in bcm_vk_send_shutdown_msg()
720 get_q_num(&entry->to_v_msg[0]), pid); in bcm_vk_send_shutdown_msg()
731 struct device *dev = &vk->pdev->dev; in bcm_vk_handle_last_sess()
738 if (vk->reset_pid == pid) in bcm_vk_handle_last_sess()
739 vk->reset_pid = 0; in bcm_vk_handle_last_sess()
740 return -EPERM; in bcm_vk_handle_last_sess()
746 if (vk->reset_pid != pid) in bcm_vk_handle_last_sess()
750 vk->reset_pid = 0; in bcm_vk_handle_last_sess()
762 spin_lock(&chan->pendq_lock); in bcm_vk_dequeue_pending()
763 list_for_each_entry(iter, &chan->pendq[q_num], node) { in bcm_vk_dequeue_pending()
764 if (get_msg_id(&iter->to_v_msg[0]) == msg_id) { in bcm_vk_dequeue_pending()
765 list_del(&iter->node); in bcm_vk_dequeue_pending()
771 spin_unlock(&chan->pendq_lock); in bcm_vk_dequeue_pending()
777 struct device *dev = &vk->pdev->dev; in bcm_to_h_msg_dequeue()
778 struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan; in bcm_to_h_msg_dequeue()
789 int cnt = 0; in bcm_to_h_msg_dequeue() local
800 mutex_lock(&chan->msgq_mutex); in bcm_to_h_msg_dequeue()
802 for (q_num = 0; q_num < chan->q_nr; q_num++) { in bcm_to_h_msg_dequeue()
803 msgq = chan->msgq[q_num]; in bcm_to_h_msg_dequeue()
804 qinfo = &chan->sync_qinfo[q_num]; in bcm_to_h_msg_dequeue()
805 max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size; in bcm_to_h_msg_dequeue()
807 rd_idx = readl_relaxed(&msgq->rd_idx); in bcm_to_h_msg_dequeue()
808 wr_idx = readl_relaxed(&msgq->wr_idx); in bcm_to_h_msg_dequeue()
819 * the buffer, but subsequent src->size check would be in bcm_to_h_msg_dequeue()
822 src = msgq_blk_addr(qinfo, rd_idx & qinfo->q_mask); in bcm_to_h_msg_dequeue()
823 src_size = readb(&src->size); in bcm_to_h_msg_dequeue()
825 if ((rd_idx >= qinfo->q_size) || in bcm_to_h_msg_dequeue()
826 (src_size > (qinfo->q_size - 1))) { in bcm_to_h_msg_dequeue()
829 rd_idx, src_size, qinfo->q_size); in bcm_to_h_msg_dequeue()
855 total = -ENOMEM; in bcm_to_h_msg_dequeue()
860 writel(rd_idx, &msgq->rd_idx); in bcm_to_h_msg_dequeue()
864 "MsgQ[%d] [Rd Wr] = [%d %d] blks extracted %d - Q = [u-%d a-%d]/%d\n", in bcm_to_h_msg_dequeue()
865 readl_relaxed(&msgq->num), in bcm_to_h_msg_dequeue()
871 readl_relaxed(&msgq->size)); in bcm_to_h_msg_dequeue()
874 * No need to search if it is an autonomous one-way in bcm_to_h_msg_dequeue()
879 if (data->function_id == VK_FID_SHUTDOWN) { in bcm_to_h_msg_dequeue()
887 &vk->to_v_msg_chan, in bcm_to_h_msg_dequeue()
896 entry->to_h_blks = num_blks; in bcm_to_h_msg_dequeue()
897 entry->to_h_msg = data; in bcm_to_h_msg_dequeue()
898 bcm_vk_append_pendq(&vk->to_h_msg_chan, in bcm_to_h_msg_dequeue()
902 if (cnt++ < batch_log) in bcm_to_h_msg_dequeue()
905 msg_id, data->function_id, in bcm_to_h_msg_dequeue()
906 test_bit(msg_id, vk->bmap)); in bcm_to_h_msg_dequeue()
909 /* Fetch wr_idx to handle more back-to-back events */ in bcm_to_h_msg_dequeue()
910 wr_idx = readl(&msgq->wr_idx); in bcm_to_h_msg_dequeue()
913 * cap the max so that even we try to handle more back-to-back events, in bcm_to_h_msg_dequeue()
925 mutex_unlock(&chan->msgq_mutex); in bcm_to_h_msg_dequeue()
938 spin_lock_init(&vk->ctx_lock); in bcm_vk_data_init()
939 for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) { in bcm_vk_data_init()
940 vk->ctx[i].in_use = false; in bcm_vk_data_init()
941 vk->ctx[i].idx = i; /* self identity */ in bcm_vk_data_init()
942 vk->ctx[i].miscdev = NULL; in bcm_vk_data_init()
944 spin_lock_init(&vk->msg_id_lock); in bcm_vk_data_init()
945 spin_lock_init(&vk->host_alert_lock); in bcm_vk_data_init()
946 vk->msg_id = 0; in bcm_vk_data_init()
950 INIT_LIST_HEAD(&vk->pid_ht[i].head); in bcm_vk_data_init()
960 dev_err(&vk->pdev->dev, in bcm_vk_msgq_irqhandler()
965 queue_work(vk->wq_thread, &vk->wq_work); in bcm_vk_msgq_irqhandler()
974 struct miscdevice *miscdev = (struct miscdevice *)p_file->private_data; in bcm_vk_open()
976 struct device *dev = &vk->pdev->dev; in bcm_vk_open()
983 rc = -ENOMEM; in bcm_vk_open()
991 * returned. The context->idx will be used for such binding in bcm_vk_open()
993 ctx->miscdev = miscdev; in bcm_vk_open()
994 p_file->private_data = ctx; in bcm_vk_open()
996 ctx->idx, ctx->pid); in bcm_vk_open()
1006 ssize_t rc = -ENOMSG; in bcm_vk_read()
1007 struct bcm_vk_ctx *ctx = p_file->private_data; in bcm_vk_read()
1008 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, in bcm_vk_read()
1010 struct device *dev = &vk->pdev->dev; in bcm_vk_read()
1011 struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan; in bcm_vk_read()
1017 return -EPERM; in bcm_vk_read()
1026 spin_lock(&chan->pendq_lock); in bcm_vk_read()
1027 for (q_num = 0; q_num < chan->q_nr; q_num++) { in bcm_vk_read()
1028 list_for_each_entry(iter, &chan->pendq[q_num], node) { in bcm_vk_read()
1029 if (iter->ctx->idx == ctx->idx) { in bcm_vk_read()
1031 (iter->to_h_blks * VK_MSGQ_BLK_SIZE)) { in bcm_vk_read()
1032 list_del(&iter->node); in bcm_vk_read()
1033 atomic_dec(&ctx->pend_cnt); in bcm_vk_read()
1037 rc = -EMSGSIZE; in bcm_vk_read()
1044 spin_unlock(&chan->pendq_lock); in bcm_vk_read()
1048 set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id); in bcm_vk_read()
1049 rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE; in bcm_vk_read()
1050 if (copy_to_user(buf, entry->to_h_msg, rsp_length) == 0) in bcm_vk_read()
1054 } else if (rc == -EMSGSIZE) { in bcm_vk_read()
1055 struct vk_msg_blk tmp_msg = entry->to_h_msg[0]; in bcm_vk_read()
1061 set_msg_id(&tmp_msg, entry->usr_msg_id); in bcm_vk_read()
1062 tmp_msg.size = entry->to_h_blks - 1; in bcm_vk_read()
1064 dev_err(dev, "Error return 1st block in -EMSGSIZE\n"); in bcm_vk_read()
1065 rc = -EFAULT; in bcm_vk_read()
1077 struct bcm_vk_ctx *ctx = p_file->private_data; in bcm_vk_write()
1078 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, in bcm_vk_write()
1081 struct device *dev = &vk->pdev->dev; in bcm_vk_write()
1089 return -EPERM; in bcm_vk_write()
1094 if (count & (VK_MSGQ_BLK_SIZE - 1)) { in bcm_vk_write()
1097 rc = -EINVAL; in bcm_vk_write()
1102 entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size, in bcm_vk_write()
1105 rc = -ENOMEM; in bcm_vk_write()
1110 if (copy_from_user(&entry->to_v_msg[0], buf, count)) { in bcm_vk_write()
1111 rc = -EFAULT; in bcm_vk_write()
1115 entry->to_v_blks = count >> VK_MSGQ_BLK_SZ_SHIFT; in bcm_vk_write()
1116 entry->ctx = ctx; in bcm_vk_write()
1119 q_num = get_q_num(&entry->to_v_msg[0]); in bcm_vk_write()
1120 msgq = vk->to_v_msg_chan.msgq[q_num]; in bcm_vk_write()
1121 msgq_size = readl_relaxed(&msgq->size); in bcm_vk_write()
1122 if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT) in bcm_vk_write()
1123 > (msgq_size - 1)) { in bcm_vk_write()
1125 entry->to_v_blks, msgq_size - 1); in bcm_vk_write()
1126 rc = -EINVAL; in bcm_vk_write()
1131 entry->usr_msg_id = get_msg_id(&entry->to_v_msg[0]); in bcm_vk_write()
1135 rc = -EOVERFLOW; in bcm_vk_write()
1138 set_msg_id(&entry->to_v_msg[0], rc); in bcm_vk_write()
1139 ctx->q_num = q_num; in bcm_vk_write()
1142 "[Q-%d]Message ctx id %d, usr_msg_id 0x%x sent msg_id 0x%x\n", in bcm_vk_write()
1143 ctx->q_num, ctx->idx, entry->usr_msg_id, in bcm_vk_write()
1144 get_msg_id(&entry->to_v_msg[0])); in bcm_vk_write()
1146 if (entry->to_v_msg[0].function_id == VK_FID_TRANS_BUF) { in bcm_vk_write()
1156 if (vk->reset_pid) { in bcm_vk_write()
1158 ctx->pid); in bcm_vk_write()
1159 rc = -EACCES; in bcm_vk_write()
1163 num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK; in bcm_vk_write()
1164 if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD) in bcm_vk_write()
1171 msg_size = entry->to_v_msg[0].size; in bcm_vk_write()
1172 if (msg_size > entry->to_v_blks) { in bcm_vk_write()
1173 rc = -EMSGSIZE; in bcm_vk_write()
1177 data = (struct _vk_data *)&entry->to_v_msg[msg_size + 1]; in bcm_vk_write()
1180 data -= num_planes; in bcm_vk_write()
1182 /* Convert user addresses to DMA SG List */ in bcm_vk_write()
1183 rc = bcm_vk_sg_alloc(dev, entry->dma, dir, data, num_planes); in bcm_vk_write()
1187 atomic_inc(&ctx->dma_cnt); in bcm_vk_write()
1191 entry->to_v_blks += sgl_extra_blks; in bcm_vk_write()
1192 entry->to_v_msg[0].size += sgl_extra_blks; in bcm_vk_write()
1193 } else if (entry->to_v_msg[0].function_id == VK_FID_INIT && in bcm_vk_write()
1194 entry->to_v_msg[0].context_id == VK_NEW_CTX) { in bcm_vk_write()
1208 org_pid = (entry->to_v_msg[0].arg & VK_MSG_PID_MASK) in bcm_vk_write()
1212 entry->to_v_msg[0].arg = in bcm_vk_write()
1213 (entry->to_v_msg[0].arg & ~VK_MSG_PID_MASK) | in bcm_vk_write()
1224 bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry); in bcm_vk_write()
1233 &vk->to_v_msg_chan, in bcm_vk_write()
1235 get_msg_id(&entry->to_v_msg[0])); in bcm_vk_write()
1242 bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1); in bcm_vk_write()
1252 int cnt; in bcm_vk_poll() local
1253 struct bcm_vk_ctx *ctx = p_file->private_data; in bcm_vk_poll()
1254 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); in bcm_vk_poll()
1255 struct device *dev = &vk->pdev->dev; in bcm_vk_poll()
1257 poll_wait(p_file, &ctx->rd_wq, wait); in bcm_vk_poll()
1259 cnt = atomic_read(&ctx->pend_cnt); in bcm_vk_poll()
1260 if (cnt) { in bcm_vk_poll()
1262 if (cnt < 0) { in bcm_vk_poll()
1263 dev_err(dev, "Error cnt %d, setting back to 0", cnt); in bcm_vk_poll()
1264 atomic_set(&ctx->pend_cnt, 0); in bcm_vk_poll()
1274 struct bcm_vk_ctx *ctx = p_file->private_data; in bcm_vk_release()
1275 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); in bcm_vk_release()
1276 struct device *dev = &vk->pdev->dev; in bcm_vk_release()
1277 pid_t pid = ctx->pid; in bcm_vk_release()
1282 * if there are outstanding DMA transactions, need to delay long enough in bcm_vk_release()
1285 * abruptly, eg kill -9, while some DMA transfer orders are still inflight. in bcm_vk_release()
1293 dev_warn(dev, "%d dma still pending for [fd-%d] pid %d\n", in bcm_vk_release()
1294 dma_cnt, ctx->idx, pid); in bcm_vk_release()
1297 dma_cnt = atomic_read(&ctx->dma_cnt); in bcm_vk_release()
1301 dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n", in bcm_vk_release()
1302 ctx->idx, pid, jiffies_to_msecs(jiffies - start_time)); in bcm_vk_release()
1304 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx); in bcm_vk_release()
1305 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx); in bcm_vk_release()
1309 ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num); in bcm_vk_release()
1313 kref_put(&vk->kref, bcm_vk_release_data); in bcm_vk_release()
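
bcm_vk_release() keeps polling ctx->dma_cnt and warns while DMA is still pending, because a process killed abruptly (e.g. kill -9) may still have transfers in flight that touch its buffers, and only then drains the pending queues. A small, hedged sketch of a bounded drain-wait of that general shape (the timeout and poll interval are illustrative choices, not the driver's values):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	#define DRAIN_TIMEOUT_MS	2000	/* illustrative upper bound */
	#define DRAIN_POLL_MS		10

	static long elapsed_ms(const struct timespec *start)
	{
		struct timespec now;

		clock_gettime(CLOCK_MONOTONIC, &now);
		return (now.tv_sec - start->tv_sec) * 1000 +
		       (now.tv_nsec - start->tv_nsec) / 1000000;
	}

	/* Returns true once all DMA drained, false if we gave up after the timeout. */
	static bool drain_dma(atomic_int *dma_cnt)
	{
		struct timespec start;
		struct timespec delay = { .tv_nsec = DRAIN_POLL_MS * 1000000L };

		clock_gettime(CLOCK_MONOTONIC, &start);
		while (atomic_load(dma_cnt) > 0) {
			if (elapsed_ms(&start) > DRAIN_TIMEOUT_MS) {
				fprintf(stderr, "%d dma still pending, giving up\n",
					atomic_load(dma_cnt));
				return false;
			}
			nanosleep(&delay, NULL);	/* completions decrement dma_cnt elsewhere */
		}
		return true;
	}

	int main(void)
	{
		atomic_int cnt = 0;	/* nothing outstanding in this toy example */

		printf("drained=%d\n", drain_dma(&cnt));
		return 0;
	}
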
1320 struct device *dev = &vk->pdev->dev; in bcm_vk_msg_init()
1325 return -EINVAL; in bcm_vk_msg_init()
1328 if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) || in bcm_vk_msg_init()
1329 bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) { in bcm_vk_msg_init()
1331 return -EIO; in bcm_vk_msg_init()
1336 if (ret && (ret != -EAGAIN)) { in bcm_vk_msg_init()
1338 return -EIO; in bcm_vk_msg_init()
1349 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL); in bcm_vk_msg_remove()
1350 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL); in bcm_vk_msg_remove()