// SPDX-License-Identifier: GPL-2.0-only
/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);
#define knav_dev_lock_held() \
	lockdep_is_held(&knav_dev_lock)

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
 * There are no status and vbusm push registers on this version of
 * QMSS. The push registers are the same as the pop registers, so all
 * indices above 1 are redefined below.
 */
#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
#define KNAV_L_QUEUE_REGION_REG_INDEX	2
#define KNAV_L_QUEUE_PUSH_REG_INDEX	3

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list,	\
				knav_dev_lock_held())

#define for_each_instance(idx, inst, kdev)			\
	for (idx = 0, inst = kdev->instances;			\
	     idx < (kdev)->num_queues_in_use;			\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

/* All firmware file names end up here. List the firmware file names below,
 * newest first, followed by older ones. The array is searched from the
 * start until a firmware file is found.
 */
static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};

static bool device_ready;
bool knav_qmss_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
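/*
 * Illustrative usage sketch, not part of this driver: a client (for example
 * a NETCP-style Ethernet driver) would typically defer its own probe until
 * the QMSS layer reports ready. The probe function below is hypothetical.
 *
 *	static int my_client_probe(struct platform_device *pdev)
 *	{
 *		if (!knav_qmss_device_ready())
 *			return -EPROBE_DEFER;
 *		...
 *		return 0;
 *	}
 */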

/**
 * knav_queue_notify: qmss queue notifier call
 *
 * @inst:	- qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		this_cpu_inc(qh->stats->notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
			  struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (range->irqs[queue].cpu_mask) {
			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		   (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		   !(inst->range->flags &
		     (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->stats = alloc_percpu(struct knav_queue_stats);
	if (!qh->stats) {
		ret = -ENOMEM;
		goto err;
	}

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret)
			goto err;
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;

err:
	if (qh->stats)
		free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
	return ERR_PTR(ret);
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						  unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		goto unlock_ret;
	}

unlock_ret:
	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				   struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			      struct knav_queue_inst *inst,
			      bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
			      struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

static const struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};


static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					   struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;
	int cpu = 0;
	int pushes = 0;
	int pops = 0;
	int push_errors = 0;
	int pop_errors = 0;
	int notifies = 0;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		for_each_possible_cpu(cpu) {
			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
			pops += per_cpu_ptr(qh->stats, cpu)->pops;
			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
		}

		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
			   qh,
			   pushes,
			   pops,
			   knav_queue_get_count(qh),
			   notifies,
			   push_errors,
			   pop_errors);
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);

static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
					u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}


static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

/**
 * knav_queue_open() - open a hardware queue
 * @name:	- name to give the queue handle
 * @id:		- desired queue number if any or specifies the type
 *		  of queue
 * @flags:	- the following flags are applicable to queues:
 *		  KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *		  exclusive by default.
 *		  Subsequent attempts to open a shared queue should
 *		  also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
		      unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);
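/*
 * Illustrative usage sketch, not part of this driver: opening a
 * general-purpose queue by type and an explicit queue by number. The
 * handle names and the queue number are hypothetical.
 *
 *	void *txq, *rxq;
 *
 *	txq = knav_queue_open("my-txq", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(txq))
 *		return PTR_ERR(txq);
 *
 *	// a specific queue number, shared with another user
 *	rxq = knav_queue_open("my-rxq", 8704, KNAV_QUEUE_SHARED);
 *	if (IS_ERR(rxq)) {
 *		knav_queue_close(txq);
 *		return PTR_ERR(rxq);
 *	}
 */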

/**
 * knav_queue_close() - close a hardware queue handle
 * @qhandle:	- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

/**
 * knav_queue_device_control() - Perform control operations on a queue
 * @qhandle:	- queue handle
 * @cmd:	- control commands
 * @arg:	- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);
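/*
 * Illustrative usage sketch, not part of this driver: installing a
 * notification callback on an opened queue handle and later disabling it.
 * my_rx_notify() and my_priv are hypothetical client-side names.
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_rx_notify,
 *		.fn_arg	= my_priv,
 *	};
 *
 *	ret = knav_queue_device_control(rxq, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 *	...
 *	knav_queue_device_control(rxq, KNAV_QUEUE_DISABLE_NOTIFY, 0);
 */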


/**
 * knav_queue_push() - push data (or descriptor) to the tail of a queue
 * @qhandle:	- hardware queue handle
 * @dma:	- DMA data to push
 * @size:	- size of data to push
 * @flags:	- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
		    unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	this_cpu_inc(qh->stats->pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);

/**
 * knav_queue_pop() - pop data (or descriptor) from the head of a queue
 * @qhandle:	- hardware queue handle
 * @size:	- (optional) size of the data popped
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	this_cpu_inc(qh->stats->pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);
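/*
 * Illustrative usage sketch, not part of this driver: pushing a mapped
 * descriptor to a queue and popping it back. The dma/dma_sz values are
 * assumed to come from knav_pool_desc_map() further below.
 *
 *	knav_queue_push(txq, dma, dma_sz, 0);
 *	...
 *	dma = knav_queue_pop(rxq, &dma_sz);
 *	if (!dma)
 *		return;		// queue was empty
 */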

/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;
		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}


/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;
	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;
	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);

/**
 * knav_pool_create() - Create a pool of descriptors
 * @name:	- name to give the pool handle
 * @num_desc:	- numbers of descriptors in the pool
 * @region_id:	- QMSS region id from which the descriptors are to be
 *		  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
		       int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi = NULL, *iter;
	struct list_head *node;
	unsigned last_offset;
	int ret;

	if (!kdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* The region maintains a sorted (by region offset) list of pools;
	 * use the first free slot which is large enough to accommodate
	 * the request.
	 */
	last_offset = 0;
	node = &region->pools;
	list_for_each_entry(iter, &region->pools, region_inst) {
		if ((iter->region_offset - last_offset) >= num_desc) {
			pi = iter;
			break;
		}
		last_offset = iter->region_offset + iter->num_desc;
	}

	if (pi) {
		node = &pi->region_inst;
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);
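/*
 * Illustrative usage sketch, not part of this driver: carving a pool of
 * descriptors out of a region and releasing it again. The pool name and
 * region id are hypothetical.
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("my-tx-pool", 1024, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 *
 *	dev_dbg(dev, "%d free descriptors\n", knav_pool_count(pool));
 *	...
 *	knav_pool_destroy(pool);
 */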

/**
 * knav_pool_destroy() - Free a pool of descriptors
 * @ph:		- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);


/**
 * knav_pool_desc_get() - Get a descriptor from the pool
 * @ph:		- pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put() - return a descriptor to the pool
 * @ph:		- pool handle
 * @desc:	- virtual address
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);
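/*
 * Illustrative usage sketch, not part of this driver: a simple get/put
 * cycle on a pool created with knav_pool_create() above.
 *
 *	void *desc = knav_pool_desc_get(pool);
 *
 *	if (IS_ERR(desc))
 *		return -ENOMEM;		// pool is exhausted
 *	...fill in the descriptor...
 *	knav_pool_desc_put(pool, desc);
 */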

/**
 * knav_pool_desc_map() - Map descriptor for DMA transfer
 * @ph:		- pool handle
 * @desc:	- address of descriptor to map
 * @size:	- size of descriptor to map
 * @dma:	- DMA address return pointer
 * @dma_sz:	- adjusted return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
		       dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;
	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
 * @ph:		- pool handle
 * @dma:	- DMA address of descriptor to unmap
 * @dma_sz:	- size of descriptor to unmap
 *
 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
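/*
 * Illustrative usage sketch, not part of this driver: map a descriptor
 * before handing it to hardware and unmap it on completion. dma and dma_sz
 * are the values produced by knav_pool_desc_map().
 *
 *	ret = knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
 *	if (ret)
 *		return ret;
 *	knav_queue_push(txq, dma, dma_sz, 0);
 *	...on completion...
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 */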

/**
 * knav_pool_count() - Get the number of descriptors in pool.
 * @ph:		- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;
	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);

static void knav_queue_setup_region(struct knav_device *kdev,
				    struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
						GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
			       DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
				    struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct device_node *regions __free(device_node) =
			of_get_child_by_name(node, "descriptor-regions");
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	if (!regions)
		return dev_err_probe(dev, -ENODEV,
				     "descriptor-regions not specified\n");

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions))
		return dev_err_probe(dev, -ENODEV,
				     "no valid region information found\n");

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
			     const char *name,
			     struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40 bits in hardware, we treat them
	 * as 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used. For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
							  8 * block->size, &block->dma,
							  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}
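/*
 * Illustrative device-tree fragment (an assumption pieced together from the
 * parsing above, not a binding reference): the two cells are <base entries>,
 * and a zero base asks the driver to allocate the link RAM itself.
 *
 *	linkram0 = <0x00080000 0x4000>;	// internal link RAM, 16K entries
 *	linkram1 = <0x0 0x10000>;	// allocated by the driver
 */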

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
		if (kdev->version == QMSS_66AK2G)
			writel_relaxed(block->size,
				       &qmgr->reg_config->link_ram_size0);
		else
			writel_relaxed(block->size - 1,
				       &qmgr->reg_config->link_ram_size0);
		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
				  struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
			unsigned long mask;
			int bit;

			range->irqs[i].cpu_mask = devm_kzalloc(dev,
							       cpumask_size(), GFP_KERNEL);
			if (!range->irqs[i].cpu_mask)
				return -ENOMEM;

			mask = (oirq.args[2] & 0x0000ff00) >> 8;
			for_each_set_bit(bit, &mask, BITS_PER_LONG)
				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
		}
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_property_read_bool(node, "qalloc-by-id"))
		range->flags |= RANGE_RESERVED;

	if (of_property_present(node, "accumulator")) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
				  struct device_node *node)
{
	struct device_node *queue_pools __free(device_node) =
			of_get_child_by_name(node, "queue-pools");
	struct device_node *type, *range;

	if (!queue_pools)
		return dev_err_probe(kdev->dev, -ENODEV,
				     "queue-pools not specified\n");

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			/* return value ignored, we init the rest... */
			knav_setup_queue_range(kdev, range);
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges))
		return dev_err_probe(kdev->dev, -ENODEV,
				     "no valid queue range found\n");
	return 0;
}
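/*
 * Illustrative "queue-pools" device-tree fragment (an assumption pieced
 * together from the properties parsed above, not a binding reference):
 * each range child carries a "qrange" (<base count>), optional interrupts
 * whose third cell encodes a CPU mask in bits 8-15, "qalloc-by-id" for
 * reserved ranges, and an "accumulator" property for PDSP-accumulated
 * ranges.
 *
 *	queue-pools {
 *		qpend {
 *			qpend-0 {
 *				qrange = <658 8>;
 *				interrupts = <0 40 0xf04>;
 *			};
 *		};
 *		general-purpose {
 *			gp-0 {
 *				qrange = <4000 64>;
 *			};
 *		};
 *	};
 */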

static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}

static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	return regs;
}

static int knav_queue_init_qmgrs(struct knav_device *kdev,
				 struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct device_node *qmgrs __free(device_node) =
			of_get_child_by_name(node, "qmgrs");
	struct knav_qmgr_info *qmgr;
	struct device_node *child;
	u32 temp[2];
	int ret;

	if (!qmgrs)
		return dev_err_probe(dev, -ENODEV,
				     "queue manager info not specified\n");

	for_each_child_of_node(qmgrs, child) {
		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
		if (!qmgr) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating qmgr\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32_array(child, "managed-queues",
						 temp, 2);
		if (!ret) {
			qmgr->start_queue = temp[0];
			qmgr->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid qmgr queue range\n");
			devm_kfree(dev, qmgr);
			continue;
		}

		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
			 qmgr->start_queue, qmgr->num_queues);

		qmgr->reg_peek =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PEEK_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_status =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_STATUS_REG_INDEX);
		}

		qmgr->reg_config =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
					   KNAV_QUEUE_CONFIG_REG_INDEX);
		qmgr->reg_region =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_REGION_REG_INDEX :
					   KNAV_QUEUE_REGION_REG_INDEX);

		qmgr->reg_push =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_PUSH_REG_INDEX :
					   KNAV_QUEUE_PUSH_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_pop =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_POP_REG_INDEX);
		}

		if (IS_ERR(qmgr->reg_peek) ||
		    ((kdev->version == QMSS) &&
		     (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
		    IS_ERR(qmgr->reg_push)) {
			dev_err(dev, "failed to map qmgr regs\n");
			if (kdev->version == QMSS) {
				if (!IS_ERR(qmgr->reg_status))
					devm_iounmap(dev, qmgr->reg_status);
				if (!IS_ERR(qmgr->reg_pop))
					devm_iounmap(dev, qmgr->reg_pop);
			}
			if (!IS_ERR(qmgr->reg_peek))
				devm_iounmap(dev, qmgr->reg_peek);
			if (!IS_ERR(qmgr->reg_config))
				devm_iounmap(dev, qmgr->reg_config);
			if (!IS_ERR(qmgr->reg_region))
				devm_iounmap(dev, qmgr->reg_region);
			if (!IS_ERR(qmgr->reg_push))
				devm_iounmap(dev, qmgr->reg_push);
			devm_kfree(dev, qmgr);
			continue;
		}

		/* Use same push register for pop as well */
		if (kdev->version == QMSS_66AK2G)
			qmgr->reg_pop = qmgr->reg_push;

		list_add_tail(&qmgr->list, &kdev->qmgrs);
		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
			 qmgr->start_queue, qmgr->num_queues,
			 qmgr->reg_peek, qmgr->reg_status,
			 qmgr->reg_config, qmgr->reg_region,
			 qmgr->reg_push, qmgr->reg_pop);
	}
	return 0;
}

static int knav_queue_init_pdsps(struct knav_device *kdev,
				 struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd);
	}
	return 0;
}

static int knav_queue_stop_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	pdsp->loaded = false;
	pdsp->started = false;
	return 0;
}

static int knav_queue_load_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	bool found = false;
	u32 *fwdata;

	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
		if (knav_acc_firmwares[i]) {
			ret = request_firmware_direct(&fw,
						      knav_acc_firmwares[i],
						      kdev->dev);
			if (!ret) {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
		return -ENODEV;
	}

	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
		 knav_acc_firmwares[i]);

	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

static int knav_queue_start_pdsp(struct knav_device *kdev,
				 struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/* now load them all. We return success even if a pdsp is not loaded,
	 * as the acc channels are optional and depend on firmware being
	 * available in the system. We set the loaded and started flags, and
	 * when the acc range is initialized, the range is set up only if its
	 * pdsp has actually been started.
	 */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (!ret)
			pdsp->loaded = true;
	}

	for_each_pdsp(kdev, pdsp) {
		if (pdsp->loaded) {
			ret = knav_queue_start_pdsp(kdev, pdsp);
			if (!ret)
				pdsp->started = true;
		}
	}
	return 0;
}

static int knav_queue_setup_pdsps(struct knav_device *kdev,
				  struct device_node *node)
{
	struct device_node *pdsps __free(device_node) =
			of_get_child_by_name(node, "pdsps");

	if (pdsps) {
		int ret;

		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (ret)
			return ret;

		ret = knav_queue_start_pdsps(kdev);
		if (ret)
			return ret;
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
				 struct knav_range_info *range,
				 struct knav_queue_inst *inst,
				 unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];
	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
						    knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id keystone_qmss_of_match[] = {
	{
		.compatible = "ti,keystone-navigator-qmss",
	},
	{
		.compatible = "ti,66ak2g-navss-qm",
		.data	= (void *)QMSS_66AK2G,
	},
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	if (device_get_match_data(dev))
		kdev->version = QMSS_66AK2G;

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id    = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	ret = knav_queue_init_qmgrs(kdev, node);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	ret = knav_queue_setup_pdsps(kdev, node);
	if (ret)
		goto err;

	/* get usable queue range values from device tree */
	ret = knav_setup_queue_pools(kdev, node);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already, so we just
		 * live within our means
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	ret = knav_queue_setup_regions(kdev, node);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_fops);
	device_ready = true;
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static void knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static struct platform_driver keystone_qmss_driver = {
	.probe		= knav_queue_probe,
	.remove_new	= knav_queue_remove,
	.driver		= {
		.name	= "keystone-navigator-qmss",
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");