Lines Matching refs:qdev

236 struct qaic_device *qdev; member
297 static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources, in save_dbc_buf() argument
303 wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use); in save_dbc_buf()
304 qdev->dbc[dbc_id].req_q_base = resources->buf; in save_dbc_buf()
305 qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base; in save_dbc_buf()
306 qdev->dbc[dbc_id].dma_addr = resources->dma_addr; in save_dbc_buf()
307 qdev->dbc[dbc_id].total_size = resources->total_size; in save_dbc_buf()
308 qdev->dbc[dbc_id].nelem = resources->nelem; in save_dbc_buf()
309 enable_dbc(qdev, dbc_id, usr); in save_dbc_buf()
310 qdev->dbc[dbc_id].in_use = true; in save_dbc_buf()
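
Note: lines 297-310 outline how an activated DBC adopts the queue memory staged in ioctl_resources. A minimal reconstruction from the listed lines follows; the resources->buf guard, the source of dbc_id, and the final NULL-out are assumptions about the parts the listing elides.

static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
			 struct qaic_user *usr)
{
	u32 dbc_id = resources->dbc_id; /* assumed: id carried in resources */

	if (resources->buf) { /* assumed guard: only save if queues were allocated */
		/* Block until any previous owner has released this DBC. */
		wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
		qdev->dbc[dbc_id].req_q_base = resources->buf;
		qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
		qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
		qdev->dbc[dbc_id].total_size = resources->total_size;
		qdev->dbc[dbc_id].nelem = resources->nelem;
		enable_dbc(qdev, dbc_id, usr);
		qdev->dbc[dbc_id].in_use = true;
		resources->buf = NULL; /* assumed: ownership moves to the DBC */
	}
}
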
315 static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources) in free_dbc_buf() argument
318 dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf, in free_dbc_buf()
323 static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources) in free_dma_xfers() argument
330 dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0); in free_dma_xfers()
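
Note: line 330 is the unmap half of the DMA-transfer cleanup. A sketch of the loop around it; the list name dma_xfers, the page unpinning, and the kfree steps are assumptions about the elided body.

static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
{
	struct dma_xfer *xfer, *x;

	list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
		dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
		sg_free_table(xfer->sgt);
		kfree(xfer->sgt);
		/* Assumed: pages pinned in find_and_map_user_pages() are dropped here. */
		unpin_user_pages(xfer->pages, xfer->nr_pages);
		kfree(xfer->pages);
		list_del(&xfer->list);
		kfree(xfer);
	}
}
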
353 static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, in encode_passthrough() argument
391 static int find_and_map_user_pages(struct qaic_device *qdev, in find_and_map_user_pages() argument
457 ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0); in find_and_map_user_pages()
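
Note: lines 391 and 457 bracket the usual user-buffer-to-device pipeline: pin the pages, wrap them in a scatter-gather table, map the table for DMA. A happy-path sketch under those assumptions; pin_user_pages_fast() and sg_alloc_table_from_pages() are my guesses at the elided body, and xfer_start_addr, npages, offset, and size are assumed to come from the transaction. Each step is expected to unwind the previous one on failure.

	/* Pin the user pages backing the transfer. */
	nr_pages = pin_user_pages_fast(xfer_start_addr, npages, 0, pages);

	/* Wrap the pinned pages in a scatter-gather table. */
	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, offset, size, GFP_KERNEL);

	/* Hand the table to the DMA layer; this is the call at line 457. */
	ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
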
548 static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer) in cleanup_xfer() argument
552 dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0); in cleanup_xfer()
560 static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, in encode_dma() argument
586 ret = find_and_map_user_pages(qdev, in_trans, resources, xfer); in encode_dma()
623 cleanup_xfer(qdev, xfer); in encode_dma()
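
Note: lines 586 and 623 show the pairing between the mapping step and its unwind. A sketch of that shape; the xfer allocation, the cast to the uapi struct, and the elided encode step are assumptions.

static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
		      u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
	struct qaic_manage_trans_dma_xfer *in_trans = trans;
	struct dma_xfer *xfer;
	int ret;

	xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
	if (!xfer)
		return -ENOMEM;

	/* Pin the user buffer and map it DMA_TO_DEVICE (line 586). */
	ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
	if (ret)
		goto free_xfer;

	/* Elided: encode the mapped sgtable into wire transactions. Any
	 * failure past this point must undo the mapping, which is what
	 * the cleanup_xfer() call at line 623 does.
	 */
	ret = 0; /* placeholder for the elided encode step */
	if (ret)
		goto unmap;

	return 0;

unmap:
	cleanup_xfer(qdev, xfer); /* dma_unmap_sgtable() + page release, line 552 */
free_xfer:
	kfree(xfer);
	return ret;
}
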
629 static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, in encode_activate() argument
667 buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL); in encode_activate()
701 dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr); in encode_activate()
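
Note: lines 667 and 701 show encode_activate() allocating the DBC queue memory coherently and freeing it on failure. A sketch of the shape; how size is derived and the hand-off into resources are assumptions, though the resources fields match those consumed by save_dbc_buf() at lines 304-308.

	buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Elided: fill the activate transaction with dma_addr and the queue
	 * geometry. On failure, give the queues back (line 701).
	 */
	if (ret) {
		dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
		return ret;
	}

	/* Assumed: park the allocation in resources so decode_activate() /
	 * save_dbc_buf() can hand it to the DBC, and free_dbc_buf()
	 * (line 318) can release it on any later failure path.
	 */
	resources->buf = buf;
	resources->dma_addr = dma_addr;
	resources->total_size = size;
	return 0;
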
705 static int encode_deactivate(struct qaic_device *qdev, void *trans, in encode_deactivate() argument
710 if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad) in encode_deactivate()
715 return disable_dbc(qdev, in_trans->dbc_id, usr); in encode_deactivate()
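
Note: lines 705-715 are nearly the whole of encode_deactivate(). A sketch that fills the small gap between them; the user_len bookkeeping is an assumption mirroring the other encoders.

static int encode_deactivate(struct qaic_device *qdev, void *trans,
			     u32 *user_len, struct qaic_user *usr)
{
	struct qaic_manage_trans_deactivate *in_trans = trans;

	/* Reject out-of-range DBC ids and nonzero padding from userspace. */
	if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
		return -EINVAL;

	*user_len += sizeof(*in_trans); /* assumed bookkeeping */

	/* Tear down this user's hold on the DBC before telling the device. */
	return disable_dbc(qdev, in_trans->dbc_id, usr);
}
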
718 static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, in encode_status() argument
751 static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg, in encode_message() argument
774 ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr); in encode_message()
793 ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len); in encode_message()
796 ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr); in encode_message()
799 ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources); in encode_message()
802 ret = encode_deactivate(qdev, trans_hdr, &user_len, usr); in encode_message()
805 ret = encode_status(qdev, trans_hdr, wrappers, &user_len); in encode_message()
820 free_dma_xfers(qdev, resources); in encode_message()
821 free_dbc_buf(qdev, resources); in encode_message()
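
Note: lines 793-805 are the arms of a per-transaction dispatch, and line 774 shows a resumed chunked DMA transfer going straight to encode_dma() before that loop. A sketch of the switch; the QAIC_TRANS_* constants are from the QAIC uapi header, while the loop framing and the default arm are assumptions. Lines 820-821 then show that any failure frees both the DMA mappings and the DBC queue buffer, so a partially encoded message leaves nothing behind.

	switch (trans_hdr->type) {
	case QAIC_TRANS_PASSTHROUGH_FROM_USR:
		ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
		break;
	case QAIC_TRANS_DMA_XFER_FROM_USR:
		ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
		break;
	case QAIC_TRANS_ACTIVATE_FROM_USR:
		ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
		break;
	case QAIC_TRANS_DEACTIVATE_FROM_USR:
		ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
		break;
	case QAIC_TRANS_STATUS_FROM_USR:
		ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
		break;
	default:
		ret = -EINVAL;
		break;
	}
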
828 static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, in decode_passthrough() argument
853 static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, in decode_activate() argument
878 if (out_trans->dbc_id >= qdev->num_dbc) in decode_activate()
894 save_dbc_buf(qdev, resources, usr); in decode_activate()
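
Note: lines 878 and 894 show decode_activate() validating the device-chosen DBC id before handing over the queue memory. A sketch; the wire struct name, the le32 conversion, and the errno are assumptions.

	struct wire_trans_activate_from_dev *out_trans = trans; /* assumed name */
	u32 dbc_id = le32_to_cpu(out_trans->dbc_id);

	if (dbc_id >= qdev->num_dbc)
		return -ENODEV; /* device returned an id we cannot index */

	resources->dbc_id = dbc_id;            /* assumed hand-off */
	save_dbc_buf(qdev, resources, usr);    /* queue memory goes to the live DBC */
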
899 static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len, in decode_deactivate() argument
906 if (dbc_id >= qdev->num_dbc) in decode_deactivate()
919 enable_dbc(qdev, dbc_id, usr); in decode_deactivate()
923 release_dbc(qdev, dbc_id); in decode_deactivate()
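
Note: lines 906-923 show two outcomes: one path re-enables the DBC, the other releases it. A sketch of the likely split; the wire struct, the status field, and the errnos are assumptions, but the enable/release pairing is in the listing.

	struct wire_trans_deactivate_from_dev *in_trans = trans; /* assumed name */
	u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
	u32 status = le32_to_cpu(in_trans->status); /* assumed field */

	if (dbc_id >= qdev->num_dbc)
		return -ENODEV;

	if (status) {
		/* The device refused: undo the disable_dbc() done while
		 * encoding (line 715) so the DBC keeps working.
		 */
		enable_dbc(qdev, dbc_id, usr);
		return -ENODEV;
	}

	/* Deactivation confirmed; free the queue memory and wake waiters. */
	release_dbc(qdev, dbc_id);
	return 0;
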
929 static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, in decode_status() argument
959 static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg, in decode_message() argument
990 ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len); in decode_message()
993 ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr); in decode_message()
996 ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr); in decode_message()
999 ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg); in decode_message()
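
Note: unlike encode_message(), decode_message() at lines 990-999 parses a buffer built by the device, so each transaction header must be bounds-checked before dispatch. Only the dispatch arms appear in the listing; the walk, the tail variable, and the overrun checks below are assumptions about how such a buffer has to be parsed.

	while (msg_len < tail) {
		trans_hdr = (void *)msg->data + msg_len;
		if (msg_len + sizeof(*trans_hdr) > tail ||
		    msg_len + le32_to_cpu(trans_hdr->len) > tail)
			return -EINVAL; /* response overruns its own buffer */

		switch (le32_to_cpu(trans_hdr->type)) {
		case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
			ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_DEV:
			ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_DEV:
			ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_DEV:
			ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;
	}
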
1015 static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num, in msg_xfer() argument
1026 if (qdev->dev_state == QAIC_OFFLINE) { in msg_xfer()
1027 mutex_unlock(&qdev->cntl_mutex); in msg_xfer()
1036 if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) { in msg_xfer()
1046 mutex_unlock(&qdev->cntl_mutex); in msg_xfer()
1053 if (likely(!qdev->cntl_lost_buf)) { in msg_xfer()
1060 mutex_unlock(&qdev->cntl_mutex); in msg_xfer()
1064 ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf, in msg_xfer()
1067 mutex_unlock(&qdev->cntl_mutex); in msg_xfer()
1077 qdev->cntl_lost_buf = false; in msg_xfer()
1083 ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len, in msg_xfer()
1086 qdev->cntl_lost_buf = true; in msg_xfer()
1088 mutex_unlock(&qdev->cntl_mutex); in msg_xfer()
1093 list_add_tail(&elem.list, &qdev->cntl_xfer_list); in msg_xfer()
1094 mutex_unlock(&qdev->cntl_mutex); in msg_xfer()
1105 mutex_lock(&qdev->cntl_mutex); in msg_xfer()
1112 mutex_unlock(&qdev->cntl_mutex); in msg_xfer()
1117 } else if (!qdev->valid_crc(elem.buf)) { in msg_xfer()
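
Note: lines 1015-1117 sketch the control-channel send/receive protocol: the caller enters msg_xfer() holding cntl_mutex, and every exit path drops it. A skeleton reconstructed from the listed lines; the xfer_queue_elem fields, the wrappers->list name, and the out_buf allocation are assumptions, the retry loop implied by line 1036 is collapsed to a single check, and the wait/timeout/CRC tail (lines 1105-1117) is left as a comment.

static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
		      bool ignore_signal)
{
	struct xfer_queue_elem elem; /* on stack; resp_worker() completes it */
	struct wrapper_msg *w;
	int xfer_count = 0;
	void *out_buf;
	int ret;

	if (qdev->dev_state == QAIC_OFFLINE) {
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(-ENODEV);
	}

	/* Refuse to start unless every fragment fits in the TX ring now,
	 * so a message is never half-queued.
	 */
	list_for_each_entry(w, &wrappers->list, list)
		xfer_count++;
	if (xfer_count > mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(-EAGAIN);
	}

	/* Post an RX buffer unless one survives from an earlier failed send. */
	if (!qdev->cntl_lost_buf) {
		out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
		if (!out_buf) {
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(-ENOMEM);
		}
		ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
				    QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
		if (ret) {
			kfree(out_buf);
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
		}
	}
	qdev->cntl_lost_buf = false;

	list_for_each_entry(w, &wrappers->list, list) {
		ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
				    list_is_last(&w->list, &wrappers->list) ?
							MHI_EOT : MHI_CHAIN);
		if (ret) {
			/* The RX buffer is already posted; remember it. */
			qdev->cntl_lost_buf = true;
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
		}
	}

	elem.seq_num = seq_num;
	elem.buf = NULL;
	init_completion(&elem.xfer_done);
	list_add_tail(&elem.list, &qdev->cntl_xfer_list);
	mutex_unlock(&qdev->cntl_mutex);

	/* Elided: wait on elem.xfer_done (honoring signals unless
	 * ignore_signal is set), retake cntl_mutex to dequeue elem on
	 * timeout (lines 1105-1112), and reject responses for which
	 * qdev->valid_crc(elem.buf) fails (line 1117).
	 */
	return elem.buf;
}
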
1126 static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id) in abort_dma_cont() argument
1174 static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr, in qaic_manage_msg_xfer() argument
1198 ret = encode_message(qdev, user_msg, wrappers, resources, usr); in qaic_manage_msg_xfer()
1200 ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id); in qaic_manage_msg_xfer()
1204 ret = mutex_lock_interruptible(&qdev->cntl_mutex); in qaic_manage_msg_xfer()
1209 msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++); in qaic_manage_msg_xfer()
1220 msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers)); in qaic_manage_msg_xfer()
1223 *rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false); in qaic_manage_msg_xfer()
1228 free_dma_xfers(qdev, resources); in qaic_manage_msg_xfer()
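
Note: lines 1204-1223 show the ordering constraints around sending. A fragment stitched from them; the error label is an assumption.

	ret = mutex_lock_interruptible(&qdev->cntl_mutex);
	if (ret)
		goto free_msg; /* assumed label */

	/* Sequence numbers are allocated under cntl_mutex so resp_worker()
	 * can match responses; the CRC is computed last, over the finished
	 * wrappers.
	 */
	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));

	/* msg_xfer() drops cntl_mutex on every path, success or failure. */
	*rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
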
1241 static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg) in qaic_manage() argument
1257 ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp); in qaic_manage()
1277 ret = decode_message(qdev, user_msg, rsp, &resources, usr); in qaic_manage()
1280 free_dbc_buf(qdev, &resources); in qaic_manage()
1288 struct qaic_device *qdev; in qaic_manage_ioctl() local
1307 qdev = usr->qddev->qdev; in qaic_manage_ioctl()
1309 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); in qaic_manage_ioctl()
1310 if (qdev->dev_state != QAIC_ONLINE) { in qaic_manage_ioctl()
1311 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); in qaic_manage_ioctl()
1332 ret = qaic_manage(qdev, usr, msg); in qaic_manage_ioctl()
1352 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); in qaic_manage_ioctl()
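
Note: lines 1307-1352 show the ioctl pinning the device with an SRCU read section for the duration of the manage transaction. A fragment from the listed lines; the errno and the reset-side synchronize_srcu() are assumptions.

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
		return -ENODEV; /* errno assumed */
	}

	/* The whole manage transaction runs inside the SRCU read section,
	 * so a reset path (assumed to use synchronize_srcu()) cannot tear
	 * the device down underneath it.
	 */
	ret = qaic_manage(qdev, usr, msg);

	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
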
1357 int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor) in get_cntl_version() argument
1376 ret = qaic_manage(qdev, usr, user_msg); in get_cntl_version()
1385 qdev->valid_crc = valid_crc; in get_cntl_version()
1388 qdev->gen_crc = gen_crc_stub; in get_cntl_version()
1400 struct qaic_device *qdev = resp->qdev; in resp_worker() local
1406 mutex_lock(&qdev->cntl_mutex); in resp_worker()
1407 list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) { in resp_worker()
1416 mutex_unlock(&qdev->cntl_mutex); in resp_worker()
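
Note: lines 1400-1416 show the response worker walking cntl_xfer_list under cntl_mutex to match a reply to its waiter. A sketch; the wire_msg and xfer_queue_elem layouts, the completion, and the unmatched-response handling are assumptions.

static void resp_worker(struct work_struct *work)
{
	struct resp_work *resp = container_of(work, struct resp_work, work);
	struct qaic_device *qdev = resp->qdev;
	struct wire_msg *msg = resp->buf; /* struct names assumed */
	struct xfer_queue_elem *elem, *i;
	bool found = false;

	mutex_lock(&qdev->cntl_mutex);
	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
		if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
			found = true;
			list_del_init(&elem->list);
			elem->buf = msg; /* hand the response to the waiter */
			complete_all(&elem->xfer_done);
			break;
		}
	}
	mutex_unlock(&qdev->cntl_mutex);

	if (!found)
		kfree(msg); /* stale response; its waiter timed out or died */
	kfree(resp);
}
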
1448 struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev); in qaic_mhi_dl_xfer_cb() local
1464 resp->qdev = qdev; in qaic_mhi_dl_xfer_cb()
1466 queue_work(qdev->cntl_wq, &resp->work); in qaic_mhi_dl_xfer_cb()
1469 int qaic_control_open(struct qaic_device *qdev) in qaic_control_open() argument
1471 if (!qdev->cntl_ch) in qaic_control_open()
1474 qdev->cntl_lost_buf = false; in qaic_control_open()
1487 qdev->gen_crc = gen_crc; in qaic_control_open()
1488 qdev->valid_crc = valid_crc_stub; in qaic_control_open()
1490 return mhi_prepare_for_transfer(qdev->cntl_ch); in qaic_control_open()
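
Note: lines 1474-1490, read against lines 1385-1388, imply a two-phase CRC handshake: open installs the real generator but a permissive validator, and get_cntl_version() later installs the real validator (line 1385) or, presumably for firmware without CRC support, a stub generator (line 1388). A sketch of the open side; the branch conditions in get_cntl_version() are not in the listing and remain assumptions.

int qaic_control_open(struct qaic_device *qdev)
{
	if (!qdev->cntl_ch)
		return -ENODEV;

	qdev->cntl_lost_buf = false;
	/* Generate CRCs from the first message, but accept anything until
	 * the firmware's control-protocol version is known.
	 */
	qdev->gen_crc = gen_crc;
	qdev->valid_crc = valid_crc_stub;

	return mhi_prepare_for_transfer(qdev->cntl_ch);
}
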
1493 void qaic_control_close(struct qaic_device *qdev) in qaic_control_close() argument
1495 mhi_unprepare_from_transfer(qdev->cntl_ch); in qaic_control_close()
1498 void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr) in qaic_release_usr() argument
1522 mutex_lock(&qdev->cntl_mutex); in qaic_release_usr()
1525 msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++); in qaic_release_usr()
1530 msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers)); in qaic_release_usr()
1540 rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true); in qaic_release_usr()
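
Note: lines 1522-1540 show the release path reusing the normal send machinery but with ignore_signal set to true at line 1540: a task being torn down must not be able to interrupt its own cleanup handshake with a pending signal. A fragment; the response disposal is an assumption.

	mutex_lock(&qdev->cntl_mutex);
	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));

	/* msg_xfer() drops cntl_mutex; the final true suppresses signals. */
	rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
	if (!IS_ERR(rsp))
		kfree(rsp); /* assumed: release has no payload worth decoding */
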
1546 void wake_all_cntl(struct qaic_device *qdev) in wake_all_cntl() argument
1551 mutex_lock(&qdev->cntl_mutex); in wake_all_cntl()
1552 list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) { in wake_all_cntl()
1556 mutex_unlock(&qdev->cntl_mutex); in wake_all_cntl()
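
Note: lines 1546-1556 show the abort path completing every outstanding control transfer, e.g. when the device resets. A sketch; leaving elem->buf NULL as the "no response" signal to the msg_xfer() waiter is an assumption consistent with the skeleton above.

void wake_all_cntl(struct qaic_device *qdev)
{
	struct xfer_queue_elem *elem, *i;

	mutex_lock(&qdev->cntl_mutex);
	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
		/* elem->buf stays NULL, which the waiter is assumed to read
		 * as "no response; device went away".
		 */
		list_del_init(&elem->list);
		complete_all(&elem->xfer_done);
	}
	mutex_unlock(&qdev->cntl_mutex);
}
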