Lines Matching refs:ccp

22 struct ccp_device *ccp = cmd_q->ccp; in ccp_alloc_ksb() local
25 mutex_lock(&ccp->sb_mutex); in ccp_alloc_ksb()
27 start = (u32)bitmap_find_next_zero_area(ccp->sb, in ccp_alloc_ksb()
28 ccp->sb_count, in ccp_alloc_ksb()
29 ccp->sb_start, in ccp_alloc_ksb()
31 if (start <= ccp->sb_count) { in ccp_alloc_ksb()
32 bitmap_set(ccp->sb, start, count); in ccp_alloc_ksb()
34 mutex_unlock(&ccp->sb_mutex); in ccp_alloc_ksb()
38 ccp->sb_avail = 0; in ccp_alloc_ksb()
40 mutex_unlock(&ccp->sb_mutex); in ccp_alloc_ksb()
43 if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) in ccp_alloc_ksb()
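
The matches above trace the key-storage-block allocator: under ccp->sb_mutex the driver searches the sb bitmap for a contiguous free run, claims it with bitmap_set(), and otherwise clears sb_avail and sleeps on sb_queue until a free wakes it. Below is a minimal user-space analogue of that allocate-or-wait loop, using pthreads in place of the kernel mutex and wait queue; every name in it is illustrative, not the driver's API.

    #include <pthread.h>
    #include <stdint.h>

    #define SB_COUNT 64                         /* illustrative pool size */

    static pthread_mutex_t sb_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  sb_queue = PTHREAD_COND_INITIALIZER;
    static uint8_t sb[SB_COUNT];                /* 0 = free, 1 = in use */
    static int sb_avail = 1;

    /* Find 'count' adjacent free slots; return the start index or -1.
     * Plays the role of bitmap_find_next_zero_area(). */
    static int find_zero_area(unsigned int count)
    {
        for (unsigned int s = 0; s + count <= SB_COUNT; s++) {
            unsigned int i;

            for (i = 0; i < count && !sb[s + i]; i++)
                ;
            if (i == count)
                return (int)s;
        }
        return -1;
    }

    /* Allocate-or-wait, mirroring the shape of ccp_alloc_ksb(). */
    static int alloc_ksb(unsigned int count)
    {
        for (;;) {
            pthread_mutex_lock(&sb_mutex);
            int start = find_zero_area(count);
            if (start >= 0) {
                for (unsigned int i = 0; i < count; i++)
                    sb[start + i] = 1;          /* bitmap_set() */
                pthread_mutex_unlock(&sb_mutex);
                return start;
            }
            sb_avail = 0;                       /* nothing free right now */
            while (!sb_avail)                   /* wait_event_interruptible() */
                pthread_cond_wait(&sb_queue, &sb_mutex);
            pthread_mutex_unlock(&sb_mutex);
        }
    }
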
53 struct ccp_device *ccp = cmd_q->ccp; in ccp_free_ksb() local
58 mutex_lock(&ccp->sb_mutex); in ccp_free_ksb()
60 bitmap_clear(ccp->sb, start - KSB_START, count); in ccp_free_ksb()
62 ccp->sb_avail = 1; in ccp_free_ksb()
64 mutex_unlock(&ccp->sb_mutex); in ccp_free_ksb()
66 wake_up_interruptible_all(&ccp->sb_queue); in ccp_free_ksb()
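
The free path is the mirror image: clear the bits (the driver subtracts KSB_START because sb_key/sb_ctx indices were handed out register-relative), flag availability, and wake every sleeper so each can retry its search. Continuing the same illustrative user-space sketch:

    /* Release 'count' slots starting at 'start', waking any waiters;
     * mirrors the shape of ccp_free_ksb(). */
    static void free_ksb(int start, unsigned int count)
    {
        pthread_mutex_lock(&sb_mutex);
        for (unsigned int i = 0; i < count; i++)
            sb[start + i] = 0;                  /* bitmap_clear() */
        sb_avail = 1;
        pthread_mutex_unlock(&sb_mutex);
        pthread_cond_broadcast(&sb_queue);      /* wake_up_interruptible_all() */
    }
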
77 struct ccp_device *ccp = cmd_q->ccp; in ccp_do_cmd() local
101 cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR; in ccp_do_cmd()
103 mutex_lock(&ccp->req_mutex); in ccp_do_cmd()
111 iowrite32(cr0, ccp->io_regs + CMD_REQ0); in ccp_do_cmd()
113 mutex_unlock(&ccp->req_mutex); in ccp_do_cmd()
124 ccp_log_error(cmd_q->ccp, in ccp_do_cmd()
127 iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); in ccp_do_cmd()
137 iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); in ccp_do_cmd()
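
ccp_do_cmd() serializes submission to the hardware: with ccp->req_mutex held, the command words go to the CMD_REQ1..CMD_REQx registers (cr_addr starts one CMD_REQ_INCR past CMD_REQ0), and a final write of cr0 to CMD_REQ0 starts the job; on a hardware error the matches show the job being torn down through DEL_CMD_Q_JOB. A hedged sketch of that write-under-lock shape, with placeholder offsets and an mmio_write() stand-in for iowrite32():

    #include <pthread.h>
    #include <stdint.h>

    #define CMD_REQ0     0x000                  /* placeholder offsets */
    #define CMD_REQ_INCR 0x004

    static pthread_mutex_t req_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for iowrite32(); a real driver writes mapped MMIO. */
    static void mmio_write(volatile uint32_t *regs, uint32_t off, uint32_t val)
    {
        regs[off / 4] = val;
    }

    /* Write CMD_REQ1..CMD_REQx, then kick off the job via CMD_REQ0. */
    static void submit_cmd(volatile uint32_t *regs, const uint32_t *cmd,
                           unsigned int count, uint32_t cr0)
    {
        pthread_mutex_lock(&req_mutex);
        for (unsigned int i = 0; i < count; i++)
            mmio_write(regs, CMD_REQ0 + CMD_REQ_INCR * (i + 1), cmd[i]);
        __sync_synchronize();                   /* ensure ordering; a write
                                                 * barrier belongs here in a
                                                 * real driver */
        mmio_write(regs, CMD_REQ0, cr0);        /* writing REQ0 starts the job */
        pthread_mutex_unlock(&req_mutex);
    }
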
314 static void ccp_disable_queue_interrupts(struct ccp_device *ccp) in ccp_disable_queue_interrupts() argument
316 iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); in ccp_disable_queue_interrupts()
319 static void ccp_enable_queue_interrupts(struct ccp_device *ccp) in ccp_enable_queue_interrupts() argument
321 iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG); in ccp_enable_queue_interrupts()
326 struct ccp_device *ccp = (struct ccp_device *)data; in ccp_irq_bh() local
331 status = ioread32(ccp->io_regs + IRQ_STATUS_REG); in ccp_irq_bh()
333 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_irq_bh()
334 cmd_q = &ccp->cmd_q[i]; in ccp_irq_bh()
349 iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG); in ccp_irq_bh()
353 ccp_enable_queue_interrupts(ccp); in ccp_irq_bh()
358 struct ccp_device *ccp = (struct ccp_device *)data; in ccp_irq_handler() local
360 ccp_disable_queue_interrupts(ccp); in ccp_irq_handler()
361 if (ccp->use_tasklet) in ccp_irq_handler()
362 tasklet_schedule(&ccp->irq_tasklet); in ccp_irq_handler()
364 ccp_irq_bh((unsigned long)ccp); in ccp_irq_handler()
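
ccp_irq_handler() is a split handler: the hard-IRQ half only masks the queue interrupts and, when ccp->use_tasklet is set, defers the real work to ccp->irq_tasklet, otherwise it calls ccp_irq_bh() inline. The bottom half reads IRQ_STATUS_REG, walks the command queues, acknowledges the serviced bits, and unmasks. A compact sketch of that control flow; the types and helpers here are illustrative only:

    #include <stdbool.h>
    #include <stdint.h>

    struct dev {
        bool use_tasklet;
        uint32_t qim;                           /* queue interrupt mask */
        volatile uint32_t *irq_mask_reg;
        volatile uint32_t *irq_status_reg;
    };

    static void irq_bh(struct dev *d)           /* bottom half: real work */
    {
        uint32_t status = *d->irq_status_reg;
        uint32_t handled = status & d->qim;     /* per-queue int_ok/int_err */

        /* ... dispatch per-queue completions here ... */
        *d->irq_status_reg = handled;           /* ack what was serviced */
        *d->irq_mask_reg = d->qim;              /* re-enable queue interrupts */
    }

    /* Stand-in for tasklet_schedule(); real deferral runs later in
     * softirq context rather than immediately. */
    static void schedule_bh(struct dev *d) { irq_bh(d); }

    static void irq_handler(struct dev *d)      /* hard-IRQ half: minimal */
    {
        *d->irq_mask_reg = 0;                   /* mask queue interrupts */
        if (d->use_tasklet)
            schedule_bh(d);
        else
            irq_bh(d);                          /* run inline, no tasklet */
    }
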
369 static int ccp_init(struct ccp_device *ccp) in ccp_init() argument
371 struct device *dev = ccp->dev; in ccp_init()
379 ccp->qim = 0; in ccp_init()
380 qmr = ioread32(ccp->io_regs + Q_MASK_REG); in ccp_init()
381 for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { in ccp_init()
387 ccp->name, i); in ccp_init()
397 cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; in ccp_init()
398 ccp->cmd_q_count++; in ccp_init()
400 cmd_q->ccp = ccp; in ccp_init()
405 cmd_q->sb_key = KSB_START + ccp->sb_start++; in ccp_init()
406 cmd_q->sb_ctx = KSB_START + ccp->sb_start++; in ccp_init()
407 ccp->sb_count -= 2; in ccp_init()
412 cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE + in ccp_init()
414 cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE + in ccp_init()
424 ccp->qim |= cmd_q->int_ok | cmd_q->int_err; in ccp_init()
428 iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + in ccp_init()
434 if (ccp->cmd_q_count == 0) { in ccp_init()
439 dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); in ccp_init()
442 ccp_disable_queue_interrupts(ccp); in ccp_init()
443 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_init()
444 cmd_q = &ccp->cmd_q[i]; in ccp_init()
449 iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG); in ccp_init()
452 ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp); in ccp_init()
459 if (ccp->use_tasklet) in ccp_init()
460 tasklet_init(&ccp->irq_tasklet, ccp_irq_bh, in ccp_init()
461 (unsigned long)ccp); in ccp_init()
465 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_init()
468 cmd_q = &ccp->cmd_q[i]; in ccp_init()
471 "%s-q%u", ccp->name, cmd_q->id); in ccp_init()
484 ccp_enable_queue_interrupts(ccp); in ccp_init()
487 ccp_add_device(ccp); in ccp_init()
489 ret = ccp_register_rng(ccp); in ccp_init()
494 ret = ccp_dmaengine_register(ccp); in ccp_init()
501 ccp_unregister_rng(ccp); in ccp_init()
504 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_init()
505 if (ccp->cmd_q[i].kthread) in ccp_init()
506 kthread_stop(ccp->cmd_q[i].kthread); in ccp_init()
508 sp_free_ccp_irq(ccp->sp, ccp); in ccp_init()
511 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_init()
512 dma_pool_destroy(ccp->cmd_q[i].dma_pool); in ccp_init()
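
The tail of ccp_init() is the kernel's goto-unwind idiom: each step that can fail jumps to a label that tears down only what was already built, in reverse order; the matches show the rng registration undone, the per-queue kthreads stopped, the IRQ released, and the dma pools destroyed. A stripped-down sketch of that shape, with illustrative step names standing in for the driver's calls:

    /* Illustrative stubs; in the driver these are dma_pool_create(),
     * sp_request_ccp_irq(), kthread_run(), and friends. */
    static int  setup_queues(void)     { return 0; }
    static int  request_irq_line(void) { return 0; }
    static int  start_kthreads(void)   { return 0; }
    static void free_irq_line(void)    { }
    static void destroy_pools(void)    { }

    static int init_device(void)
    {
        int ret;

        ret = setup_queues();
        if (ret)
            return ret;

        ret = request_irq_line();
        if (ret)
            goto e_pool;                        /* undo queue setup only */

        ret = start_kthreads();
        if (ret)
            goto e_irq;                         /* also release the IRQ */

        return 0;

    e_irq:
        free_irq_line();                        /* unwind in reverse order */
    e_pool:
        destroy_pools();
        return ret;
    }
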
517 static void ccp_destroy(struct ccp_device *ccp) in ccp_destroy() argument
524 ccp_dmaengine_unregister(ccp); in ccp_destroy()
527 ccp_unregister_rng(ccp); in ccp_destroy()
530 ccp_del_device(ccp); in ccp_destroy()
533 ccp_disable_queue_interrupts(ccp); in ccp_destroy()
534 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_destroy()
535 cmd_q = &ccp->cmd_q[i]; in ccp_destroy()
540 iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG); in ccp_destroy()
543 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_destroy()
544 if (ccp->cmd_q[i].kthread) in ccp_destroy()
545 kthread_stop(ccp->cmd_q[i].kthread); in ccp_destroy()
547 sp_free_ccp_irq(ccp->sp, ccp); in ccp_destroy()
549 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_destroy()
550 dma_pool_destroy(ccp->cmd_q[i].dma_pool); in ccp_destroy()
553 while (!list_empty(&ccp->cmd)) { in ccp_destroy()
555 cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); in ccp_destroy()
559 while (!list_empty(&ccp->backlog)) { in ccp_destroy()
561 cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); in ccp_destroy()
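
ccp_destroy() tears things down in roughly the reverse of ccp_init() and finishes by draining the pending and backlog lists: detach the head entry and complete it, repeating until both lists are empty, so no submitted command is silently dropped. A minimal list-drain sketch; the node layout and the -ENODEV completion status are assumptions of mine, not shown in the matches above:

    #include <errno.h>
    #include <stddef.h>

    struct cmd {
        struct cmd *next;
        void (*callback)(void *data, int err);
        void *data;
    };

    /* Pop-and-complete until empty, as the driver does with
     * list_first_entry()/list_del() on ccp->cmd and ccp->backlog.
     * Entries belong to their submitters, so nothing is freed here. */
    static void drain(struct cmd **head)
    {
        while (*head != NULL) {
            struct cmd *c = *head;              /* detach the head entry */

            *head = c->next;
            c->callback(c->data, -ENODEV);      /* assumed error status */
        }
    }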