Lines Matching +full:keystone +full:-irq
drivers/soc/ti/knav_qmss_acc.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Keystone accumulator queue manager
5 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
11 #include <linux/dma-mapping.h>
21 (range->queue_base_inst + (q << kdev->inst_shift))
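/* maps a queue offset within the range to its queue instance: queue_base_inst plus the offset scaled by the per-instance stride (1 << kdev->inst_shift) */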
26 struct knav_device *kdev = range->kdev; in __knav_acc_notify()
30 range_base = kdev->base_id + range->queue_base; in __knav_acc_notify()
32 if (range->flags & RANGE_MULTI_QUEUE) { in __knav_acc_notify()
33 for (queue = 0; queue < range->num_queues; queue++) { in __knav_acc_notify()
36 if (inst->notify_needed) { in __knav_acc_notify()
37 inst->notify_needed = 0; in __knav_acc_notify()
38 dev_dbg(kdev->dev, "acc-irq: notifying %d\n", in __knav_acc_notify()
44 queue = acc->channel - range->acc_info.start_channel; in __knav_acc_notify()
46 dev_dbg(kdev->dev, "acc-irq: notifying %d\n", in __knav_acc_notify()
56 struct knav_pdsp_info *pdsp = range->acc_info.pdsp; in knav_acc_set_notify()
57 struct knav_device *kdev = range->kdev; in knav_acc_set_notify()
61 * when enabling, we need to re-trigger an interrupt if we in knav_acc_set_notify()
64 if (!enabled || atomic_read(&kq->desc_count) <= 0) in knav_acc_set_notify()
67 kq->notify_needed = 1; in knav_acc_set_notify()
68 atomic_inc(&kq->acc->retrigger_count); in knav_acc_set_notify()
69 mask = BIT(kq->acc->channel % 32); in knav_acc_set_notify()
70 offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel); in knav_acc_set_notify()
71 dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n", in knav_acc_set_notify()
72 kq->acc->name); in knav_acc_set_notify()
73 writel_relaxed(mask, pdsp->intd + offset); in knav_acc_set_notify()
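/* re-trigger: writing the channel's bit into the INTD status register raises the accumulator interrupt again so the descriptors already pending are notified */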
77 static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
91 info = &range->acc_info; in knav_acc_int_handler()
92 kdev = range->kdev; in knav_acc_int_handler()
93 pdsp = range->acc_info.pdsp; in knav_acc_int_handler()
94 acc = range->acc; in knav_acc_int_handler()
96 range_base = kdev->base_id + range->queue_base; in knav_acc_int_handler()
97 if ((range->flags & RANGE_MULTI_QUEUE) == 0) { in knav_acc_int_handler()
98 for (queue = 0; queue < range->num_irqs; queue++) in knav_acc_int_handler()
99 if (range->irqs[queue].irq == irq) in knav_acc_int_handler()
105 channel = acc->channel; in knav_acc_int_handler()
106 list_dma = acc->list_dma[acc->list_index]; in knav_acc_int_handler()
107 list_cpu = acc->list_cpu[acc->list_index]; in knav_acc_int_handler()
108 dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, dma %pad\n", in knav_acc_int_handler()
109 channel, acc->list_index, list_cpu, &list_dma); in knav_acc_int_handler()
110 if (atomic_read(&acc->retrigger_count)) { in knav_acc_int_handler()
111 atomic_dec(&acc->retrigger_count); in knav_acc_int_handler()
113 writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); in knav_acc_int_handler()
116 pdsp->intd + ACC_INTD_OFFSET_EOI); in knav_acc_int_handler()
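/* the retriggered interrupt is then acknowledged through the INTD count and EOI registers */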
121 notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); in knav_acc_int_handler()
123 dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size, in knav_acc_int_handler()
126 for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32)); in knav_acc_int_handler()
129 dev_dbg(kdev->dev, in knav_acc_int_handler()
130 "acc-irq: list %d, entry @%p, %08x\n", in knav_acc_int_handler()
131 acc->list_index, list, list[0]); in knav_acc_int_handler()
133 dev_dbg(kdev->dev, in knav_acc_int_handler()
134 "acc-irq: list %d, entry @%p, %08x %08x\n", in knav_acc_int_handler()
135 acc->list_index, list, list[0], list[1]); in knav_acc_int_handler()
137 dev_dbg(kdev->dev, in knav_acc_int_handler()
138 "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n", in knav_acc_int_handler()
139 acc->list_index, list, list[0], list[1], in knav_acc_int_handler()
147 if (range->flags & RANGE_MULTI_QUEUE) { in knav_acc_int_handler()
150 queue >= range_base + range->num_queues) { in knav_acc_int_handler()
151 dev_err(kdev->dev, in knav_acc_int_handler()
152 "bad queue %d, expecting %d-%d\n", in knav_acc_int_handler()
154 range_base + range->num_queues); in knav_acc_int_handler()
157 queue -= range_base; in knav_acc_int_handler()
162 if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) { in knav_acc_int_handler()
163 atomic_dec(&kq->desc_count); in knav_acc_int_handler()
164 dev_err(kdev->dev, in knav_acc_int_handler()
165 "acc-irq: queue %d full, entry dropped\n", in knav_acc_int_handler()
170 idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK; in knav_acc_int_handler()
171 kq->descs[idx] = val; in knav_acc_int_handler()
172 kq->notify_needed = 1; in knav_acc_int_handler()
173 dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n", in knav_acc_int_handler()
178 memset(list_cpu, 0, info->list_size); in knav_acc_int_handler()
179 dma_sync_single_for_device(kdev->dev, list_dma, info->list_size, in knav_acc_int_handler()
183 acc->list_index ^= 1; in knav_acc_int_handler()
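/* flip to the other half of the ping-pong list buffer for the next accumulator interrupt */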
186 writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); in knav_acc_int_handler()
190 pdsp->intd + ACC_INTD_OFFSET_EOI); in knav_acc_int_handler()
198 struct knav_device *kdev = range->kdev; in knav_range_setup_acc_irq()
201 int ret = 0, irq; in knav_range_setup_acc_irq()
204 if (range->flags & RANGE_MULTI_QUEUE) { in knav_range_setup_acc_irq()
205 acc = range->acc; in knav_range_setup_acc_irq()
206 irq = range->irqs[0].irq; in knav_range_setup_acc_irq()
207 cpu_mask = range->irqs[0].cpu_mask; in knav_range_setup_acc_irq()
209 acc = range->acc + queue; in knav_range_setup_acc_irq()
210 irq = range->irqs[queue].irq; in knav_range_setup_acc_irq()
211 cpu_mask = range->irqs[queue].cpu_mask; in knav_range_setup_acc_irq()
214 old = acc->open_mask; in knav_range_setup_acc_irq()
219 acc->open_mask = new; in knav_range_setup_acc_irq()
221 dev_dbg(kdev->dev, in knav_range_setup_acc_irq()
222 "setup-acc-irq: open mask old %08x, new %08x, channel %s\n", in knav_range_setup_acc_irq()
223 old, new, acc->name); in knav_range_setup_acc_irq()
229 dev_dbg(kdev->dev, in knav_range_setup_acc_irq()
230 "setup-acc-irq: requesting %s for channel %s\n", in knav_range_setup_acc_irq()
231 acc->name, acc->name); in knav_range_setup_acc_irq()
232 ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, in knav_range_setup_acc_irq()
235 ret = irq_set_affinity_hint(irq, cpu_mask); in knav_range_setup_acc_irq()
237 dev_warn(range->kdev->dev, in knav_range_setup_acc_irq()
238 "Failed to set IRQ affinity\n"); in knav_range_setup_acc_irq()
245 dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n", in knav_range_setup_acc_irq()
246 acc->name, acc->name); in knav_range_setup_acc_irq()
247 ret = irq_set_affinity_hint(irq, NULL); in knav_range_setup_acc_irq()
249 dev_warn(range->kdev->dev, in knav_range_setup_acc_irq()
250 "Failed to set IRQ affinity\n"); in knav_range_setup_acc_irq()
251 free_irq(irq, range); in knav_range_setup_acc_irq()
282 dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n", in knav_acc_write()
283 cmd->command, cmd->queue_mask, cmd->list_dma, in knav_acc_write()
284 cmd->queue_num, cmd->timer_config); in knav_acc_write()
286 writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config); in knav_acc_write()
287 writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num); in knav_acc_write()
288 writel_relaxed(cmd->list_dma, &pdsp->acc_command->list_dma); in knav_acc_write()
289 writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask); in knav_acc_write()
290 writel_relaxed(cmd->command, &pdsp->acc_command->command); in knav_acc_write()
294 result = readl_relaxed(&pdsp->acc_command->command); in knav_acc_write()
305 struct knav_acc_info *info = &range->acc_info; in knav_acc_setup_cmd()
310 if (range->flags & RANGE_MULTI_QUEUE) { in knav_acc_setup_cmd()
311 acc = range->acc; in knav_acc_setup_cmd()
312 queue_base = range->queue_base; in knav_acc_setup_cmd()
313 queue_mask = BIT(range->num_queues) - 1; in knav_acc_setup_cmd()
315 acc = range->acc + queue; in knav_acc_setup_cmd()
316 queue_base = range->queue_base + queue; in knav_acc_setup_cmd()
321 cmd->command = acc->channel; in knav_acc_setup_cmd()
322 cmd->queue_mask = queue_mask; in knav_acc_setup_cmd()
323 cmd->list_dma = (u32)acc->list_dma[0]; in knav_acc_setup_cmd()
324 cmd->queue_num = info->list_entries << 16; in knav_acc_setup_cmd()
325 cmd->queue_num |= queue_base; in knav_acc_setup_cmd()
327 cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18; in knav_acc_setup_cmd()
328 if (range->flags & RANGE_MULTI_QUEUE) in knav_acc_setup_cmd()
329 cmd->timer_config |= ACC_CFG_MULTI_QUEUE; in knav_acc_setup_cmd()
330 cmd->timer_config |= info->pacing_mode << 16; in knav_acc_setup_cmd()
331 cmd->timer_config |= info->timer_count; in knav_acc_setup_cmd()
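/* the command block packs the channel number, queue bitmask, list DMA address, list-entry count/base queue, and pacing parameters; knav_acc_write() then posts it to the PDSP command registers */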
342 acc = range->acc + queue; in knav_acc_stop()
346 result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd); in knav_acc_stop()
348 dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n", in knav_acc_stop()
349 acc->name, knav_acc_result_str(result)); in knav_acc_stop()
360 acc = range->acc + queue; in knav_acc_start()
364 result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd); in knav_acc_start()
366 dev_dbg(kdev->dev, "started acc channel %s, result %s\n", in knav_acc_start()
367 acc->name, knav_acc_result_str(result)); in knav_acc_start()
374 struct knav_device *kdev = range->kdev; in knav_acc_init_range()
379 for (queue = 0; queue < range->num_queues; queue++) { in knav_acc_init_range()
380 acc = range->acc + queue; in knav_acc_init_range()
383 acc->list_index = 0; in knav_acc_init_range()
387 return -EIO; in knav_acc_init_range()
389 if (range->flags & RANGE_MULTI_QUEUE) in knav_acc_init_range()
398 unsigned id = kq->id - range->queue_base; in knav_acc_init_queue()
400 kq->descs = devm_kcalloc(range->kdev->dev, in knav_acc_init_queue()
402 if (!kq->descs) in knav_acc_init_queue()
403 return -ENOMEM; in knav_acc_init_queue()
405 kq->acc = range->acc; in knav_acc_init_queue()
406 if ((range->flags & RANGE_MULTI_QUEUE) == 0) in knav_acc_init_queue()
407 kq->acc += id; in knav_acc_init_queue()
414 unsigned id = inst->id - range->queue_base; in knav_acc_open_queue()
422 unsigned id = inst->id - range->queue_base; in knav_acc_close_queue()
429 struct knav_device *kdev = range->kdev; in knav_acc_free_range()
434 info = &range->acc_info; in knav_acc_free_range()
436 if (range->flags & RANGE_MULTI_QUEUE) in knav_acc_free_range()
439 channels = range->num_queues; in knav_acc_free_range()
442 acc = range->acc + channel; in knav_acc_free_range()
443 if (!acc->list_cpu[0]) in knav_acc_free_range()
445 dma_unmap_single(kdev->dev, acc->list_dma[0], in knav_acc_free_range()
446 info->mem_size, DMA_BIDIRECTIONAL); in knav_acc_free_range()
447 free_pages_exact(acc->list_cpu[0], info->mem_size); in knav_acc_free_range()
449 devm_kfree(range->kdev->dev, range->acc); in knav_acc_free_range()
484 range->flags |= RANGE_HAS_ACCUMULATOR; in knav_init_acc_range()
485 info = &range->acc_info; in knav_init_acc_range()
491 info->pdsp_id = config[0]; in knav_init_acc_range()
492 info->start_channel = config[1]; in knav_init_acc_range()
493 info->list_entries = config[2]; in knav_init_acc_range()
494 info->pacing_mode = config[3]; in knav_init_acc_range()
495 info->timer_count = config[4] / ACC_DEFAULT_PERIOD; in knav_init_acc_range()
497 if (info->start_channel > ACC_MAX_CHANNEL) { in knav_init_acc_range()
498 dev_err(kdev->dev, "channel %d invalid for range %s\n", in knav_init_acc_range()
499 info->start_channel, range->name); in knav_init_acc_range()
500 return -EINVAL; in knav_init_acc_range()
503 if (info->pacing_mode > 3) { in knav_init_acc_range()
504 dev_err(kdev->dev, "pacing mode %d invalid for range %s\n", in knav_init_acc_range()
505 info->pacing_mode, range->name); in knav_init_acc_range()
506 return -EINVAL; in knav_init_acc_range()
509 pdsp = knav_find_pdsp(kdev, info->pdsp_id); in knav_init_acc_range()
511 dev_err(kdev->dev, "pdsp id %d not found for range %s\n", in knav_init_acc_range()
512 info->pdsp_id, range->name); in knav_init_acc_range()
513 return -EINVAL; in knav_init_acc_range()
516 if (!pdsp->started) { in knav_init_acc_range()
517 dev_err(kdev->dev, "pdsp id %d not started for range %s\n", in knav_init_acc_range()
518 info->pdsp_id, range->name); in knav_init_acc_range()
519 return -ENODEV; in knav_init_acc_range()
522 info->pdsp = pdsp; in knav_init_acc_range()
523 channels = range->num_queues; in knav_init_acc_range()
524 if (of_property_read_bool(node, "multi-queue")) { in knav_init_acc_range()
525 range->flags |= RANGE_MULTI_QUEUE; in knav_init_acc_range()
527 if (range->queue_base & (32 - 1)) { in knav_init_acc_range()
528 dev_err(kdev->dev, in knav_init_acc_range()
529 "misaligned multi-queue accumulator range %s\n", in knav_init_acc_range()
530 range->name); in knav_init_acc_range()
531 return -EINVAL; in knav_init_acc_range()
533 if (range->num_queues > 32) { in knav_init_acc_range()
534 dev_err(kdev->dev, in knav_init_acc_range()
536 range->name); in knav_init_acc_range()
537 return -EINVAL; in knav_init_acc_range()
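/* a multi-queue accumulator channel monitors a contiguous, 32-aligned block of at most 32 queues selected by the queue mask */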
542 list_size = info->list_entries; in knav_init_acc_range()
544 info->list_size = list_size; in knav_init_acc_range()
546 info->mem_size = mem_size; in knav_init_acc_range()
547 range->acc = devm_kcalloc(kdev->dev, channels, sizeof(*range->acc), in knav_init_acc_range()
549 if (!range->acc) in knav_init_acc_range()
550 return -ENOMEM; in knav_init_acc_range()
553 acc = range->acc + channel; in knav_init_acc_range()
554 acc->channel = info->start_channel + channel; in knav_init_acc_range()
559 return -ENOMEM; in knav_init_acc_range()
561 list_dma = dma_map_single(kdev->dev, list_mem, mem_size, in knav_init_acc_range()
563 if (dma_mapping_error(kdev->dev, list_dma)) { in knav_init_acc_range()
565 return -ENOMEM; in knav_init_acc_range()
569 dma_sync_single_for_device(kdev->dev, list_dma, mem_size, in knav_init_acc_range()
571 scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d", in knav_init_acc_range()
572 acc->channel); in knav_init_acc_range()
573 acc->list_cpu[0] = list_mem; in knav_init_acc_range()
574 acc->list_cpu[1] = list_mem + list_size; in knav_init_acc_range()
575 acc->list_dma[0] = list_dma; in knav_init_acc_range()
576 acc->list_dma[1] = list_dma + list_size; in knav_init_acc_range()
577 dev_dbg(kdev->dev, "%s: channel %d, dma %pad, virt %8p\n", in knav_init_acc_range()
578 acc->name, acc->channel, &list_dma, list_mem); in knav_init_acc_range()
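/* both ping-pong list halves share the single DMA mapping set up above; the second half starts list_size bytes into the buffer */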
581 range->ops = &knav_acc_range_ops; in knav_init_acc_range()