Lines matching refs:drvdata — every reference to the drvdata argument/local in the request-manager functions below; each match is shown as source line number, matched line, and enclosing function.
86 void cc_req_mgr_fini(struct cc_drvdata *drvdata) in cc_req_mgr_fini() argument
88 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in cc_req_mgr_fini()
89 struct device *dev = drvdata_to_dev(drvdata); in cc_req_mgr_fini()
110 drvdata->request_mgr_handle = NULL; in cc_req_mgr_fini()
113 int cc_req_mgr_init(struct cc_drvdata *drvdata) in cc_req_mgr_init() argument
116 struct device *dev = drvdata_to_dev(drvdata); in cc_req_mgr_init()
125 drvdata->request_mgr_handle = req_mgr_h; in cc_req_mgr_init()
143 (unsigned long)drvdata); in cc_req_mgr_init()
145 req_mgr_h->hw_queue_size = cc_ioread(drvdata, in cc_req_mgr_init()
175 set_queue_last_ind(drvdata, &req_mgr_h->compl_desc); in cc_req_mgr_init()
180 cc_req_mgr_fini(drvdata); in cc_req_mgr_init()
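Taken together, the cc_req_mgr_fini() and cc_req_mgr_init() matches above show how the request-manager state is owned: it is allocated at init time, parked in drvdata->request_mgr_handle, and torn down through the same pointer (the error path at line 180 reuses the fini routine for cleanup). A minimal sketch of that pattern, not the driver's exact code; the tasklet, completion-descriptor and register setup the real init performs (lines 143-175) is elided, which is why the sketch's fini tolerates a missing handle:

int cc_req_mgr_init_sketch(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;

	/* Request-manager state lives for the lifetime of the device. */
	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h)
		return -ENOMEM;

	drvdata->request_mgr_handle = req_mgr_h;
	return 0;
}

void cc_req_mgr_fini_sketch(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return;	/* nothing was allocated */

	dev_dbg(dev, "releasing request manager state\n");
	kfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}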
184 static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[], in enqueue_seq() argument
188 void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0); in enqueue_seq()
189 struct device *dev = drvdata_to_dev(drvdata); in enqueue_seq()
224 static int cc_queues_status(struct cc_drvdata *drvdata, in cc_queues_status() argument
229 struct device *dev = drvdata_to_dev(drvdata); in cc_queues_status()
248 cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT)); in cc_queues_status()
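The body match at line 248 shows where cc_queues_status() learns whether the HW descriptor queue has room: it reads the DSCRPTR_QUEUE_CONTENT register. A hedged sketch of that poll; the software-queue check the real function also performs, the iteration limit, and the cached q_free_slots field are assumptions:

static int cc_queues_status_sketch(struct cc_drvdata *drvdata,
				   struct cc_req_mgr_handle *req_mgr_h,
				   unsigned int total_seq_len)
{
	unsigned int poll;

	/* Bounded poll of the HW queue occupancy register. */
	for (poll = 0; poll < 100; poll++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots >= total_seq_len)
			return 0;	/* enough room for the whole sequence */
	}

	return -ENOSPC;			/* caller backs off or backlogs */
}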
278 static void cc_do_send_request(struct cc_drvdata *drvdata, in cc_do_send_request() argument
283 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in cc_do_send_request()
286 struct device *dev = drvdata_to_dev(drvdata); in cc_do_send_request()
310 enqueue_seq(drvdata, desc, len); in cc_do_send_request()
313 enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1); in cc_do_send_request()
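enqueue_seq() (line 188) pushes descriptors through the DSCRPTR_QUEUE_WORD0 register window, and cc_do_send_request() uses it twice: once for the caller's sequence (line 310) and once more for the pre-built compl_desc when an explicit completion indication is wanted (line 313). A sketch of the push, assuming a cc_hw_desc word[] array and a fixed per-descriptor word count, neither of which is visible in the matches:

static void enqueue_seq_sketch(struct cc_drvdata *drvdata,
			       struct cc_hw_desc seq[], unsigned int seq_len)
{
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	unsigned int i, w;

	/* Every word of every descriptor is written to the same register
	 * offset; the engine consumes successive writes as one stream.
	 */
	for (i = 0; i < seq_len; i++)
		for (w = 0; w < HW_DESC_SIZE_WORDS; w++)
			writel_relaxed(seq[i].word[w], reg);
}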
330 static void cc_enqueue_backlog(struct cc_drvdata *drvdata, in cc_enqueue_backlog() argument
333 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_enqueue_backlog()
334 struct device *dev = drvdata_to_dev(drvdata); in cc_enqueue_backlog()
344 static void cc_proc_backlog(struct cc_drvdata *drvdata) in cc_proc_backlog() argument
346 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_proc_backlog()
350 struct device *dev = drvdata_to_dev(drvdata); in cc_proc_backlog()
376 rc = cc_queues_status(drvdata, mgr, bli->len); in cc_proc_backlog()
387 cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len, in cc_proc_backlog()
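cc_enqueue_backlog() and cc_proc_backlog() implement the software backlog: requests that found no room are parked and replayed once the hardware drains. A sketch of the replay loop behind the matches at lines 376 and 387; the list head, the locking, the struct cc_bl_item layout and the notification given to backlogged callers are assumptions or elided:

static void cc_proc_backlog_sketch(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli, *tmp;

	list_for_each_entry_safe(bli, tmp, &mgr->backlog, list) {
		/* Stop at the first item that still does not fit; the next
		 * completion interrupt triggers another pass.
		 */
		if (cc_queues_status(drvdata, mgr, bli->len))
			return;

		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
				   false);
		list_del(&bli->list);
		kfree(bli);
	}
}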
401 int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, in cc_send_request() argument
406 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_send_request()
407 struct device *dev = drvdata_to_dev(drvdata); in cc_send_request()
419 rc = cc_queues_status(drvdata, mgr, len); in cc_send_request()
439 cc_enqueue_backlog(drvdata, bli); in cc_send_request()
444 cc_do_send_request(drvdata, cc_req, desc, len, false); in cc_send_request()
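cc_send_request() is the asynchronous entry point: check for space (line 419), fall back to the backlog when the caller allows it (line 439), otherwise push the sequence without an extra completion descriptor (line 444, last argument false). A sketch of that decision; the -EBUSY/-EINPROGRESS return convention, the GFP flags and the cc_bl_item layout are assumptions:

int cc_send_request_sketch(struct cc_drvdata *drvdata,
			   struct cc_crypto_req *cc_req,
			   struct cc_hw_desc *desc, unsigned int len,
			   bool backlog_ok)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	int rc;

	rc = cc_queues_status(drvdata, mgr, len);
	if (rc == -ENOSPC && backlog_ok) {
		struct cc_bl_item *bli = kzalloc(sizeof(*bli), GFP_ATOMIC);

		if (!bli)
			return -ENOMEM;

		/* Park a copy of the request and its descriptors for later. */
		bli->creq = *cc_req;
		memcpy(bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}
	if (rc)
		return rc;

	cc_do_send_request(drvdata, cc_req, desc, len, false);
	return -EINPROGRESS;	/* completion is reported via callback */
}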
452 int cc_send_sync_request(struct cc_drvdata *drvdata, in cc_send_sync_request() argument
457 struct device *dev = drvdata_to_dev(drvdata); in cc_send_sync_request()
458 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_send_sync_request()
472 rc = cc_queues_status(drvdata, mgr, len + 1); in cc_send_sync_request()
478 wait_for_completion_interruptible(&drvdata->hw_queue_avail); in cc_send_sync_request()
479 reinit_completion(&drvdata->hw_queue_avail); in cc_send_sync_request()
482 cc_do_send_request(drvdata, cc_req, desc, len, true); in cc_send_sync_request()
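cc_send_sync_request() is the blocking variant: it asks for len + 1 slots (the extra one is for the completion descriptor it appends) and, while there is no room, sleeps on drvdata->hw_queue_avail and re-arms it (lines 472-482). A sketch of that loop; the hw_lock the real function holds around the status check and the final wait for the request's own completion are elided:

int cc_send_sync_request_sketch(struct cc_drvdata *drvdata,
				struct cc_crypto_req *cc_req,
				struct cc_hw_desc *desc, unsigned int len)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	/* Sleep until the completion path signals free queue space. */
	while (cc_queues_status(drvdata, mgr, len + 1)) {
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	cc_do_send_request(drvdata, cc_req, desc, len, true);
	return 0;
}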
501 int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc, in send_request_init() argument
504 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in send_request_init()
510 rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len); in send_request_init()
514 set_queue_last_ind(drvdata, &desc[(len - 1)]); in send_request_init()
522 enqueue_seq(drvdata, desc, len); in send_request_init()
526 cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT)); in send_request_init()
531 void complete_request(struct cc_drvdata *drvdata) in complete_request() argument
534 drvdata->request_mgr_handle; in complete_request()
536 complete(&drvdata->hw_queue_avail); in complete_request()
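complete_request() is the producer side of that handshake: when descriptors retire it signals hw_queue_avail (line 536), releasing any waiter in cc_send_sync_request(), and then defers the actual completion processing. Whether that deferral is a tasklet or the workqueue behind comp_work_handler() below is a build-time choice; the comptask field in this sketch is an assumption:

void complete_request_sketch(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	/* Wake a synchronous sender blocked on queue space. */
	complete(&drvdata->hw_queue_avail);

	/* Hand completion processing to the bottom half. */
	tasklet_schedule(&request_mgr_handle->comptask);
}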
548 struct cc_drvdata *drvdata = in comp_work_handler() local
551 comp_handler((unsigned long)drvdata); in comp_work_handler()
555 static void proc_completions(struct cc_drvdata *drvdata) in proc_completions() argument
558 struct device *dev = drvdata_to_dev(drvdata); in proc_completions()
560 drvdata->request_mgr_handle; in proc_completions()
588 rc = (drvdata->irq & mask ? -EPERM : 0); in proc_completions()
590 drvdata->irq, rc); in proc_completions()
606 static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata) in cc_axi_comp_count() argument
609 cc_ioread(drvdata, drvdata->axim_mon_offset)); in cc_axi_comp_count()
615 struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg; in comp_handler() local
617 drvdata->request_mgr_handle; in comp_handler()
618 struct device *dev = drvdata_to_dev(drvdata); in comp_handler()
622 irq = (drvdata->irq & drvdata->comp_mask); in comp_handler()
627 cc_iowrite(drvdata, CC_REG(HOST_ICR), irq); in comp_handler()
631 request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata); in comp_handler()
638 drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR)); in comp_handler()
639 irq = (drvdata->irq & drvdata->comp_mask); in comp_handler()
640 proc_completions(drvdata); in comp_handler()
646 cc_axi_comp_count(drvdata); in comp_handler()
649 cc_iowrite(drvdata, CC_REG(HOST_ICR), irq); in comp_handler()
651 request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata); in comp_handler()
657 cc_iowrite(drvdata, CC_REG(HOST_IMR), in comp_handler()
658 cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask); in comp_handler()
660 cc_proc_backlog(drvdata); in comp_handler()
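comp_handler() is that bottom half. The matches at lines 615-660 outline its cycle: acknowledge the completion interrupt (HOST_ICR), count retired AXI transactions, process completed requests until the counter drains, unmask the interrupt (HOST_IMR), and finally give the backlog a pass. A condensed sketch of that cycle; the real handler also re-reads HOST_IRR in an inner loop to close races, and proc_completions() is assumed to consume axi_completed as it retires requests:

void comp_handler_sketch(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	u32 irq = drvdata->irq & drvdata->comp_mask;

	/* Acknowledge the completion interrupt before draining counters. */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
	while (request_mgr_handle->axi_completed) {
		proc_completions(drvdata);
		request_mgr_handle->axi_completed +=
				cc_axi_comp_count(drvdata);
	}

	/* Unmask completions again, then retry anything that was backlogged. */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);

	cc_proc_backlog(drvdata);
}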