xref: /linux/drivers/soc/ti/knav_qmss_queue.c (revision 6b222f28999c481a6531c7b5fbbf95f877875d23)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Keystone Queue Manager subsystem driver
4  *
5  * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
6  * Authors:	Sandeep Nair <sandeep_n@ti.com>
7  *		Cyril Chemparathy <cyril@ti.com>
8  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
9  */
10 
11 #include <linux/debugfs.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/firmware.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_irq.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/property.h>
23 #include <linux/slab.h>
24 #include <linux/soc/ti/knav_qmss.h>
25 
26 #include "knav_qmss.h"
27 
28 static struct knav_device *kdev;
29 static DEFINE_MUTEX(knav_dev_lock);
30 #define knav_dev_lock_held() \
31 	lockdep_is_held(&knav_dev_lock)
32 
33 /* Queue manager register indices in DTS */
34 #define KNAV_QUEUE_PEEK_REG_INDEX	0
35 #define KNAV_QUEUE_STATUS_REG_INDEX	1
36 #define KNAV_QUEUE_CONFIG_REG_INDEX	2
37 #define KNAV_QUEUE_REGION_REG_INDEX	3
38 #define KNAV_QUEUE_PUSH_REG_INDEX	4
39 #define KNAV_QUEUE_POP_REG_INDEX	5
40 
41 /* Queue manager register indices in DTS for QMSS in K2G NAVSS.
42  * There are no status and vbusm push registers on this version
43  * of QMSS. Push registers are the same as pop registers, so all
44  * indices above 1 are redefined.
45  */
46 #define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
47 #define KNAV_L_QUEUE_REGION_REG_INDEX	2
48 #define KNAV_L_QUEUE_PUSH_REG_INDEX	3
49 
50 /* PDSP register indices in DTS */
51 #define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
52 #define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
53 #define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
54 #define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3
55 
56 #define knav_queue_idx_to_inst(kdev, idx)			\
57 	(kdev->instances + (idx << kdev->inst_shift))
58 
59 #define for_each_handle_rcu(qh, inst)				\
60 	list_for_each_entry_rcu(qh, &inst->handles, list,	\
61 				knav_dev_lock_held())
62 
63 #define for_each_instance(idx, inst, kdev)		\
64 	for (idx = 0, inst = kdev->instances;		\
65 	     idx < (kdev)->num_queues_in_use;			\
66 	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
67 
68 /* All firmware file names are listed here, newest followed by older
69  * ones. The search is done from the start of the array until a
70  * firmware file is found.
71  */
72 static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
73 
74 static bool device_ready;
75 bool knav_qmss_device_ready(void)
76 {
77 	return device_ready;
78 }
79 EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
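/*
 * Example (illustrative sketch, not part of this driver): a dependent
 * driver may poll for QMSS readiness from its own probe and defer until
 * the queue subsystem is up.
 *
 *	if (!knav_qmss_device_ready())
 *		return -EPROBE_DEFER;
 */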
80 
81 /**
82  * knav_queue_notify() - qmss queue notifier call
83  *
84  * @inst:		- qmss queue instance (e.g. an accumulator queue)
85  */
86 void knav_queue_notify(struct knav_queue_inst *inst)
87 {
88 	struct knav_queue *qh;
89 
90 	if (!inst)
91 		return;
92 
93 	rcu_read_lock();
94 	for_each_handle_rcu(qh, inst) {
95 		if (atomic_read(&qh->notifier_enabled) <= 0)
96 			continue;
97 		if (WARN_ON(!qh->notifier_fn))
98 			continue;
99 		this_cpu_inc(qh->stats->notifies);
100 		qh->notifier_fn(qh->notifier_fn_arg);
101 	}
102 	rcu_read_unlock();
103 }
104 EXPORT_SYMBOL_GPL(knav_queue_notify);
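/*
 * Example (illustrative sketch): a client notifier callback of the kind
 * invoked by knav_queue_notify() above; it is installed through
 * KNAV_QUEUE_SET_NOTIFIER and called with the fn_arg from the config.
 * The names "my_queue_notify" and "struct my_ctx" are hypothetical.
 *
 *	static void my_queue_notify(void *arg)
 *	{
 *		struct my_ctx *ctx = arg;
 *
 *		(schedule NAPI or a worker here to drain the queue)
 *	}
 */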
105 
106 static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
107 {
108 	struct knav_queue_inst *inst = _instdata;
109 
110 	knav_queue_notify(inst);
111 	return IRQ_HANDLED;
112 }
113 
114 static int knav_queue_setup_irq(struct knav_range_info *range,
115 			  struct knav_queue_inst *inst)
116 {
117 	unsigned queue = inst->id - range->queue_base;
118 	int ret = 0, irq;
119 
120 	if (range->flags & RANGE_HAS_IRQ) {
121 		irq = range->irqs[queue].irq;
122 		ret = request_irq(irq, knav_queue_int_handler, IRQF_NO_AUTOEN,
123 				  inst->irq_name, inst);
124 		if (ret)
125 			return ret;
126 		if (range->irqs[queue].cpu_mask) {
127 			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
128 			if (ret) {
129 				dev_warn(range->kdev->dev,
130 					 "Failed to set IRQ affinity\n");
131 				return ret;
132 			}
133 		}
134 	}
135 	return ret;
136 }
137 
138 static void knav_queue_free_irq(struct knav_queue_inst *inst)
139 {
140 	struct knav_range_info *range = inst->range;
141 	unsigned queue = inst->id - inst->range->queue_base;
142 	int irq;
143 
144 	if (range->flags & RANGE_HAS_IRQ) {
145 		irq = range->irqs[queue].irq;
146 		irq_set_affinity_hint(irq, NULL);
147 		free_irq(irq, inst);
148 	}
149 }
150 
151 static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
152 {
153 	return !list_empty(&inst->handles);
154 }
155 
156 static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
157 {
158 	return inst->range->flags & RANGE_RESERVED;
159 }
160 
161 static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
162 {
163 	struct knav_queue *tmp;
164 
165 	rcu_read_lock();
166 	for_each_handle_rcu(tmp, inst) {
167 		if (tmp->flags & KNAV_QUEUE_SHARED) {
168 			rcu_read_unlock();
169 			return true;
170 		}
171 	}
172 	rcu_read_unlock();
173 	return false;
174 }
175 
176 static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
177 						unsigned type)
178 {
179 	if ((type == KNAV_QUEUE_QPEND) &&
180 	    (inst->range->flags & RANGE_HAS_IRQ)) {
181 		return true;
182 	} else if ((type == KNAV_QUEUE_ACC) &&
183 		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
184 		return true;
185 	} else if ((type == KNAV_QUEUE_GP) &&
186 		!(inst->range->flags &
187 			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
188 		return true;
189 	}
190 	return false;
191 }
192 
193 static inline struct knav_queue_inst *
194 knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
195 {
196 	struct knav_queue_inst *inst;
197 	int idx;
198 
199 	for_each_instance(idx, inst, kdev) {
200 		if (inst->id == id)
201 			return inst;
202 	}
203 	return NULL;
204 }
205 
206 static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
207 {
208 	if (kdev->base_id <= id &&
209 	    kdev->base_id + kdev->num_queues > id) {
210 		id -= kdev->base_id;
211 		return knav_queue_match_id_to_inst(kdev, id);
212 	}
213 	return NULL;
214 }
215 
216 static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
217 				      const char *name, unsigned flags)
218 {
219 	struct knav_queue *qh;
220 	unsigned id;
221 	int ret = 0;
222 
223 	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
224 	if (!qh)
225 		return ERR_PTR(-ENOMEM);
226 
227 	qh->stats = alloc_percpu(struct knav_queue_stats);
228 	if (!qh->stats) {
229 		ret = -ENOMEM;
230 		goto err;
231 	}
232 
233 	qh->flags = flags;
234 	qh->inst = inst;
235 	id = inst->id - inst->qmgr->start_queue;
236 	qh->reg_push = &inst->qmgr->reg_push[id];
237 	qh->reg_pop = &inst->qmgr->reg_pop[id];
238 	qh->reg_peek = &inst->qmgr->reg_peek[id];
239 
240 	/* first opener? */
241 	if (!knav_queue_is_busy(inst)) {
242 		struct knav_range_info *range = inst->range;
243 
244 		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
245 		if (range->ops && range->ops->open_queue)
246 			ret = range->ops->open_queue(range, inst, flags);
247 
248 		if (ret)
249 			goto err;
250 	}
251 	list_add_tail_rcu(&qh->list, &inst->handles);
252 	return qh;
253 
254 err:
255 	free_percpu(qh->stats);
256 	devm_kfree(inst->kdev->dev, qh);
257 	return ERR_PTR(ret);
258 }
259 
260 static struct knav_queue *
261 knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
262 {
263 	struct knav_queue_inst *inst;
264 	struct knav_queue *qh;
265 
266 	mutex_lock(&knav_dev_lock);
267 
268 	qh = ERR_PTR(-ENODEV);
269 	inst = knav_queue_find_by_id(id);
270 	if (!inst)
271 		goto unlock_ret;
272 
273 	qh = ERR_PTR(-EEXIST);
274 	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
275 		goto unlock_ret;
276 
277 	qh = ERR_PTR(-EBUSY);
278 	if ((flags & KNAV_QUEUE_SHARED) &&
279 	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
280 		goto unlock_ret;
281 
282 	qh = __knav_queue_open(inst, name, flags);
283 
284 unlock_ret:
285 	mutex_unlock(&knav_dev_lock);
286 
287 	return qh;
288 }
289 
290 static struct knav_queue *knav_queue_open_by_type(const char *name,
291 						unsigned type, unsigned flags)
292 {
293 	struct knav_queue_inst *inst;
294 	struct knav_queue *qh = ERR_PTR(-EINVAL);
295 	int idx;
296 
297 	mutex_lock(&knav_dev_lock);
298 
299 	for_each_instance(idx, inst, kdev) {
300 		if (knav_queue_is_reserved(inst))
301 			continue;
302 		if (!knav_queue_match_type(inst, type))
303 			continue;
304 		if (knav_queue_is_busy(inst))
305 			continue;
306 		qh = __knav_queue_open(inst, name, flags);
307 		goto unlock_ret;
308 	}
309 
310 unlock_ret:
311 	mutex_unlock(&knav_dev_lock);
312 	return qh;
313 }
314 
315 static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
316 {
317 	struct knav_range_info *range = inst->range;
318 
319 	if (range->ops && range->ops->set_notify)
320 		range->ops->set_notify(range, inst, enabled);
321 }
322 
323 static int knav_queue_enable_notifier(struct knav_queue *qh)
324 {
325 	struct knav_queue_inst *inst = qh->inst;
326 	bool first;
327 
328 	if (WARN_ON(!qh->notifier_fn))
329 		return -EINVAL;
330 
331 	/* Adjust the per handle notifier count */
332 	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
333 	if (!first)
334 		return 0; /* nothing to do */
335 
336 	/* Now adjust the per instance notifier count */
337 	first = (atomic_inc_return(&inst->num_notifiers) == 1);
338 	if (first)
339 		knav_queue_set_notify(inst, true);
340 
341 	return 0;
342 }
343 
344 static int knav_queue_disable_notifier(struct knav_queue *qh)
345 {
346 	struct knav_queue_inst *inst = qh->inst;
347 	bool last;
348 
349 	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
350 	if (!last)
351 		return 0; /* nothing to do */
352 
353 	last = (atomic_dec_return(&inst->num_notifiers) == 0);
354 	if (last)
355 		knav_queue_set_notify(inst, false);
356 
357 	return 0;
358 }
359 
360 static int knav_queue_set_notifier(struct knav_queue *qh,
361 				struct knav_queue_notify_config *cfg)
362 {
363 	knav_queue_notify_fn old_fn = qh->notifier_fn;
364 
365 	if (!cfg)
366 		return -EINVAL;
367 
368 	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
369 		return -ENOTSUPP;
370 
371 	if (!cfg->fn && old_fn)
372 		knav_queue_disable_notifier(qh);
373 
374 	qh->notifier_fn = cfg->fn;
375 	qh->notifier_fn_arg = cfg->fn_arg;
376 
377 	if (cfg->fn && !old_fn)
378 		knav_queue_enable_notifier(qh);
379 
380 	return 0;
381 }
382 
383 static int knav_gp_set_notify(struct knav_range_info *range,
384 			       struct knav_queue_inst *inst,
385 			       bool enabled)
386 {
387 	unsigned queue;
388 
389 	if (range->flags & RANGE_HAS_IRQ) {
390 		queue = inst->id - range->queue_base;
391 		if (enabled)
392 			enable_irq(range->irqs[queue].irq);
393 		else
394 			disable_irq_nosync(range->irqs[queue].irq);
395 	}
396 	return 0;
397 }
398 
399 static int knav_gp_open_queue(struct knav_range_info *range,
400 				struct knav_queue_inst *inst, unsigned flags)
401 {
402 	return knav_queue_setup_irq(range, inst);
403 }
404 
405 static int knav_gp_close_queue(struct knav_range_info *range,
406 				struct knav_queue_inst *inst)
407 {
408 	knav_queue_free_irq(inst);
409 	return 0;
410 }
411 
412 static const struct knav_range_ops knav_gp_range_ops = {
413 	.set_notify	= knav_gp_set_notify,
414 	.open_queue	= knav_gp_open_queue,
415 	.close_queue	= knav_gp_close_queue,
416 };
417 
418 
419 static int knav_queue_get_count(void *qhandle)
420 {
421 	struct knav_queue *qh = qhandle;
422 	struct knav_queue_inst *inst = qh->inst;
423 
424 	return readl_relaxed(&qh->reg_peek[0].entry_count) +
425 		atomic_read(&inst->desc_count);
426 }
427 
428 static void knav_queue_debug_show_instance(struct seq_file *s,
429 					struct knav_queue_inst *inst)
430 {
431 	struct knav_device *kdev = inst->kdev;
432 	struct knav_queue *qh;
433 	int cpu = 0;
434 	int pushes = 0;
435 	int pops = 0;
436 	int push_errors = 0;
437 	int pop_errors = 0;
438 	int notifies = 0;
439 
440 	if (!knav_queue_is_busy(inst))
441 		return;
442 
443 	seq_printf(s, "\tqueue id %d (%s)\n",
444 		   kdev->base_id + inst->id, inst->name);
445 	for_each_handle_rcu(qh, inst) {
446 		for_each_possible_cpu(cpu) {
447 			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
448 			pops += per_cpu_ptr(qh->stats, cpu)->pops;
449 			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
450 			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
451 			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
452 		}
453 
454 		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
455 				qh,
456 				pushes,
457 				pops,
458 				knav_queue_get_count(qh),
459 				notifies,
460 				push_errors,
461 				pop_errors);
462 	}
463 }
464 
465 static int knav_queue_debug_show(struct seq_file *s, void *v)
466 {
467 	struct knav_queue_inst *inst;
468 	int idx;
469 
470 	mutex_lock(&knav_dev_lock);
471 	seq_printf(s, "%s: %u-%u\n",
472 		   dev_name(kdev->dev), kdev->base_id,
473 		   kdev->base_id + kdev->num_queues - 1);
474 	for_each_instance(idx, inst, kdev)
475 		knav_queue_debug_show_instance(s, inst);
476 	mutex_unlock(&knav_dev_lock);
477 
478 	return 0;
479 }
480 
481 DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
482 
483 static inline int knav_queue_pdsp_wait(u32 __iomem *addr, unsigned timeout,
484 					u32 flags)
485 {
486 	unsigned long end;
487 	u32 val = 0;
488 
489 	end = jiffies + msecs_to_jiffies(timeout);
490 	while (time_after(end, jiffies)) {
491 		val = readl_relaxed(addr);
492 		if (flags)
493 			val &= flags;
494 		if (!val)
495 			break;
496 		cpu_relax();
497 	}
498 	return val ? -ETIMEDOUT : 0;
499 }
500 
501 
502 static int knav_queue_flush(struct knav_queue *qh)
503 {
504 	struct knav_queue_inst *inst = qh->inst;
505 	unsigned id = inst->id - inst->qmgr->start_queue;
506 
507 	atomic_set(&inst->desc_count, 0);
508 	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
509 	return 0;
510 }
511 
512 /**
513  * knav_queue_open()	- open a hardware queue
514  * @name:		- name to give the queue handle
515  * @id:			- desired queue number if any or specifies the type
516  *			  of queue
517  * @flags:		- the following flags are applicable to queues:
518  *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
519  *			     exclusive by default.
520  *			     Subsequent attempts to open a shared queue should
521  *			     also have this flag.
522  *
523  * Returns a handle to the open hardware queue if successful. Use IS_ERR()
524  * to check the returned value for error codes.
525  */
526 void *knav_queue_open(const char *name, unsigned id,
527 					unsigned flags)
528 {
529 	struct knav_queue *qh = ERR_PTR(-EINVAL);
530 
531 	switch (id) {
532 	case KNAV_QUEUE_QPEND:
533 	case KNAV_QUEUE_ACC:
534 	case KNAV_QUEUE_GP:
535 		qh = knav_queue_open_by_type(name, id, flags);
536 		break;
537 
538 	default:
539 		qh = knav_queue_open_by_id(name, id, flags);
540 		break;
541 	}
542 	return qh;
543 }
544 EXPORT_SYMBOL_GPL(knav_queue_open);
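/*
 * Example usage (illustrative sketch): opening a general-purpose queue
 * by type and closing it again; the queue name is hypothetical.
 *
 *	void *qh;
 *
 *	qh = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(qh))
 *		return PTR_ERR(qh);
 *	...
 *	knav_queue_close(qh);
 */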
545 
546 /**
547  * knav_queue_close()	- close a hardware queue handle
548  * @qhandle:		- handle to close
549  */
550 void knav_queue_close(void *qhandle)
551 {
552 	struct knav_queue *qh = qhandle;
553 	struct knav_queue_inst *inst = qh->inst;
554 
555 	while (atomic_read(&qh->notifier_enabled) > 0)
556 		knav_queue_disable_notifier(qh);
557 
558 	mutex_lock(&knav_dev_lock);
559 	list_del_rcu(&qh->list);
560 	mutex_unlock(&knav_dev_lock);
561 	synchronize_rcu();
562 	if (!knav_queue_is_busy(inst)) {
563 		struct knav_range_info *range = inst->range;
564 
565 		if (range->ops && range->ops->close_queue)
566 			range->ops->close_queue(range, inst);
567 	}
568 	free_percpu(qh->stats);
569 	devm_kfree(inst->kdev->dev, qh);
570 }
571 EXPORT_SYMBOL_GPL(knav_queue_close);
572 
573 /**
574  * knav_queue_device_control()	- Perform control operations on a queue
575  * @qhandle:			- queue handle
576  * @cmd:			- control commands
577  * @arg:			- command argument
578  *
579  * Returns 0 on success, errno otherwise.
580  */
581 int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
582 				unsigned long arg)
583 {
584 	struct knav_queue *qh = qhandle;
585 	struct knav_queue_notify_config *cfg;
586 	int ret;
587 
588 	switch ((int)cmd) {
589 	case KNAV_QUEUE_GET_ID:
590 		ret = qh->inst->kdev->base_id + qh->inst->id;
591 		break;
592 
593 	case KNAV_QUEUE_FLUSH:
594 		ret = knav_queue_flush(qh);
595 		break;
596 
597 	case KNAV_QUEUE_SET_NOTIFIER:
598 		cfg = (void *)arg;
599 		ret = knav_queue_set_notifier(qh, cfg);
600 		break;
601 
602 	case KNAV_QUEUE_ENABLE_NOTIFY:
603 		ret = knav_queue_enable_notifier(qh);
604 		break;
605 
606 	case KNAV_QUEUE_DISABLE_NOTIFY:
607 		ret = knav_queue_disable_notifier(qh);
608 		break;
609 
610 	case KNAV_QUEUE_GET_COUNT:
611 		ret = knav_queue_get_count(qh);
612 		break;
613 
614 	default:
615 		ret = -ENOTSUPP;
616 		break;
617 	}
618 	return ret;
619 }
620 EXPORT_SYMBOL_GPL(knav_queue_device_control);
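/*
 * Example usage (illustrative sketch): installing a notifier on an open
 * queue handle; "my_queue_notify" and "my_ctx" are hypothetical.
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_queue_notify,
 *		.fn_arg	= my_ctx,
 *	};
 *	int ret;
 *
 *	ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */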
621 
622 
623 
624 /**
625  * knav_queue_push()	- push data (or descriptor) to the tail of a queue
626  * @qhandle:		- hardware queue handle
627  * @dma:		- DMA data to push
628  * @size:		- size of data to push
629  * @flags:		- can be used to pass additional information
630  *
631  * Returns 0 on success, errno otherwise.
632  */
633 int knav_queue_push(void *qhandle, dma_addr_t dma,
634 					unsigned size, unsigned flags)
635 {
636 	struct knav_queue *qh = qhandle;
637 	u32 val;
638 
639 	val = (u32)dma | ((size / 16) - 1);
640 	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
641 
642 	this_cpu_inc(qh->stats->pushes);
643 	return 0;
644 }
645 EXPORT_SYMBOL_GPL(knav_queue_push);
646 
647 /**
648  * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
649  * @qhandle:		- hardware queue handle
650  * @size:		- (optional) size of the data popped.
651  *
652  * Returns a DMA address on success, 0 on failure.
653  */
654 dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
655 {
656 	struct knav_queue *qh = qhandle;
657 	struct knav_queue_inst *inst = qh->inst;
658 	dma_addr_t dma;
659 	u32 val, idx;
660 
661 	/* is this an accumulator-backed queue? */
662 	if (inst->descs) {
663 		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
664 			atomic_inc(&inst->desc_count);
665 			return 0;
666 		}
667 		idx  = atomic_inc_return(&inst->desc_head);
668 		idx &= ACC_DESCS_MASK;
669 		val = inst->descs[idx];
670 	} else {
671 		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
672 		if (unlikely(!val))
673 			return 0;
674 	}
675 
676 	dma = val & DESC_PTR_MASK;
677 	if (size)
678 		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
679 
680 	this_cpu_inc(qh->stats->pops);
681 	return dma;
682 }
683 EXPORT_SYMBOL_GPL(knav_queue_pop);
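/*
 * Example usage (illustrative sketch): pushing a mapped descriptor and
 * popping one back. Sizes are encoded in 16-byte units, so they should
 * be multiples of 16; the "dma" value below would come from
 * knav_pool_desc_map() or a similar mapping.
 *
 *	unsigned size;
 *
 *	knav_queue_push(qh, dma, 64, 0);
 *	...
 *	dma = knav_queue_pop(qh, &size);
 *	if (!dma)
 *		return -ENOENT;
 */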
684 
685 /* carve out descriptors and push into queue */
686 static void kdesc_fill_pool(struct knav_pool *pool)
687 {
688 	struct knav_region *region;
689 	int i;
690 
691 	region = pool->region;
692 	pool->desc_size = region->desc_size;
693 	for (i = 0; i < pool->num_desc; i++) {
694 		int index = pool->region_offset + i;
695 		dma_addr_t dma_addr;
696 		unsigned dma_size;
697 		dma_addr = region->dma_start + (region->desc_size * index);
698 		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
699 		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
700 					   DMA_TO_DEVICE);
701 		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
702 	}
703 }
704 
705 /* pop out descriptors and close the queue */
706 static void kdesc_empty_pool(struct knav_pool *pool)
707 {
708 	dma_addr_t dma;
709 	unsigned size;
710 	void *desc;
711 	int i;
712 
713 	if (!pool->queue)
714 		return;
715 
716 	for (i = 0;; i++) {
717 		dma = knav_queue_pop(pool->queue, &size);
718 		if (!dma)
719 			break;
720 		desc = knav_pool_desc_dma_to_virt(pool, dma);
721 		if (!desc) {
722 			dev_dbg(pool->kdev->dev,
723 				"couldn't unmap desc, continuing\n");
724 		}
725 	}
726 	WARN_ON(i != pool->num_desc);
727 	knav_queue_close(pool->queue);
728 }
729 
730 
731 /* Get the DMA address of a descriptor */
732 dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
733 {
734 	struct knav_pool *pool = ph;
735 	return pool->region->dma_start + (virt - pool->region->virt_start);
736 }
737 EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
738 
739 void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
740 {
741 	struct knav_pool *pool = ph;
742 	return pool->region->virt_start + (dma - pool->region->dma_start);
743 }
744 EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
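/*
 * Example (illustrative sketch): the two helpers above are exact
 * inverses for any descriptor within the pool's backing region.
 *
 *	dma_addr_t dma = knav_pool_desc_virt_to_dma(pool, desc);
 *	void *virt = knav_pool_desc_dma_to_virt(pool, dma);
 *
 *	WARN_ON(virt != desc);
 */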
745 
746 /**
747  * knav_pool_create()	- Create a pool of descriptors
748  * @name:		- name to give the pool handle
749  * @num_desc:		- number of descriptors in the pool
750  * @region_id:		- QMSS region id from which the descriptors are to be
751  *			  allocated.
752  *
753  * Returns a pool handle on success.
754  * Use IS_ERR_OR_NULL() to identify error values on return.
755  */
756 void *knav_pool_create(const char *name,
757 					int num_desc, int region_id)
758 {
759 	struct knav_region *reg_itr, *region = NULL;
760 	struct knav_pool *pool, *pi = NULL, *iter;
761 	struct list_head *node;
762 	unsigned last_offset;
763 	int ret;
764 
765 	if (!kdev)
766 		return ERR_PTR(-EPROBE_DEFER);
767 
768 	if (!kdev->dev)
769 		return ERR_PTR(-ENODEV);
770 
771 	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
772 	if (!pool) {
773 		dev_err(kdev->dev, "out of memory allocating pool\n");
774 		return ERR_PTR(-ENOMEM);
775 	}
776 
777 	for_each_region(kdev, reg_itr) {
778 		if (reg_itr->id != region_id)
779 			continue;
780 		region = reg_itr;
781 		break;
782 	}
783 
784 	if (!region) {
785 		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
786 		ret = -EINVAL;
787 		goto err;
788 	}
789 
790 	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
791 	if (IS_ERR(pool->queue)) {
792 		dev_err(kdev->dev,
793 			"failed to open queue for pool(%s), error %ld\n",
794 			name, PTR_ERR(pool->queue));
795 		ret = PTR_ERR(pool->queue);
796 		goto err;
797 	}
798 
799 	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
800 	pool->kdev = kdev;
801 	pool->dev = kdev->dev;
802 
803 	mutex_lock(&knav_dev_lock);
804 
805 	if (num_desc > (region->num_desc - region->used_desc)) {
806 		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
807 			region_id, name);
808 		ret = -ENOMEM;
809 		goto err_unlock;
810 	}
811 
812 	/* Region maintains a sorted (by region offset) list of pools;
813 	 * use the first free slot which is large enough to accommodate
814 	 * the request.
815 	 */
816 	last_offset = 0;
817 	node = &region->pools;
818 	list_for_each_entry(iter, &region->pools, region_inst) {
819 		if ((iter->region_offset - last_offset) >= num_desc) {
820 			pi = iter;
821 			break;
822 		}
823 		last_offset = iter->region_offset + iter->num_desc;
824 	}
825 
826 	if (pi) {
827 		node = &pi->region_inst;
828 		pool->region = region;
829 		pool->num_desc = num_desc;
830 		pool->region_offset = last_offset;
831 		region->used_desc += num_desc;
832 		list_add_tail(&pool->list, &kdev->pools);
833 		list_add_tail(&pool->region_inst, node);
834 	} else {
835 		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
836 			name, region_id);
837 		ret = -ENOMEM;
838 		goto err_unlock;
839 	}
840 
841 	mutex_unlock(&knav_dev_lock);
842 	kdesc_fill_pool(pool);
843 	return pool;
844 
845 err_unlock:
846 	mutex_unlock(&knav_dev_lock);
847 err:
848 	kfree(pool->name);
849 	devm_kfree(kdev->dev, pool);
850 	return ERR_PTR(ret);
851 }
852 EXPORT_SYMBOL_GPL(knav_pool_create);
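/*
 * Example usage (illustrative sketch): creating a pool of 1024
 * descriptors from region 12 and tearing it down again. The pool name
 * and region id are hypothetical and must match the device tree.
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("my-pool", 1024, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return pool ? PTR_ERR(pool) : -ENOMEM;
 *	...
 *	knav_pool_destroy(pool);
 */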
853 
854 /**
855  * knav_pool_destroy()	- Free a pool of descriptors
856  * @ph:		- pool handle
857  */
858 void knav_pool_destroy(void *ph)
859 {
860 	struct knav_pool *pool = ph;
861 
862 	if (!pool)
863 		return;
864 
865 	if (!pool->region)
866 		return;
867 
868 	kdesc_empty_pool(pool);
869 	mutex_lock(&knav_dev_lock);
870 
871 	pool->region->used_desc -= pool->num_desc;
872 	list_del(&pool->region_inst);
873 	list_del(&pool->list);
874 
875 	mutex_unlock(&knav_dev_lock);
876 	kfree(pool->name);
877 	devm_kfree(kdev->dev, pool);
878 }
879 EXPORT_SYMBOL_GPL(knav_pool_destroy);
880 
881 
882 /**
883  * knav_pool_desc_get()	- Get a descriptor from the pool
884  * @ph:		- pool handle
885  *
886  * Returns a descriptor from the pool on success, ERR_PTR(-ENOMEM) if empty.
887  */
888 void *knav_pool_desc_get(void *ph)
889 {
890 	struct knav_pool *pool = ph;
891 	dma_addr_t dma;
892 	unsigned size;
893 	void *data;
894 
895 	dma = knav_queue_pop(pool->queue, &size);
896 	if (unlikely(!dma))
897 		return ERR_PTR(-ENOMEM);
898 	data = knav_pool_desc_dma_to_virt(pool, dma);
899 	return data;
900 }
901 EXPORT_SYMBOL_GPL(knav_pool_desc_get);
902 
903 /**
904  * knav_pool_desc_put()	- return a descriptor to the pool
905  * @ph:		- pool handle
906  * @desc:	- virtual address
907  */
908 void knav_pool_desc_put(void *ph, void *desc)
909 {
910 	struct knav_pool *pool = ph;
911 	dma_addr_t dma;
912 	dma = knav_pool_desc_virt_to_dma(pool, desc);
913 	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
914 }
915 EXPORT_SYMBOL_GPL(knav_pool_desc_put);
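/*
 * Example usage (illustrative sketch): borrowing a descriptor from the
 * pool and returning it unused.
 *
 *	void *desc;
 *
 *	desc = knav_pool_desc_get(pool);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *	...
 *	knav_pool_desc_put(pool, desc);
 */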
916 
917 /**
918  * knav_pool_desc_map()	- Map descriptor for DMA transfer
919  * @ph:				- pool handle
920  * @desc:			- address of descriptor to map
921  * @size:			- size of descriptor to map
922  * @dma:			- DMA address return pointer
923  * @dma_sz:			- adjusted (cache-line aligned) size return pointer
924  *
925  * Returns 0 on success, errno otherwise.
926  */
927 int knav_pool_desc_map(void *ph, void *desc, unsigned size,
928 					dma_addr_t *dma, unsigned *dma_sz)
929 {
930 	struct knav_pool *pool = ph;
931 	*dma = knav_pool_desc_virt_to_dma(pool, desc);
932 	size = min(size, pool->region->desc_size);
933 	size = ALIGN(size, SMP_CACHE_BYTES);
934 	*dma_sz = size;
935 	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
936 
937 	/* Ensure the descriptor write reaches memory */
938 	__iowmb();
939 
940 	return 0;
941 }
942 EXPORT_SYMBOL_GPL(knav_pool_desc_map);
943 
944 /**
945  * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
946  * @ph:				- pool handle
947  * @dma:			- DMA address of descriptor to unmap
948  * @dma_sz:			- size of descriptor to unmap
949  *
950  * Returns the descriptor address on success. Use IS_ERR_OR_NULL() to identify
951  * error values on return.
952  */
953 void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
954 {
955 	struct knav_pool *pool = ph;
956 	unsigned desc_sz;
957 	void *desc;
958 
959 	desc_sz = min(dma_sz, pool->region->desc_size);
960 	desc = knav_pool_desc_dma_to_virt(pool, dma);
961 	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
962 	prefetch(desc);
963 	return desc;
964 }
965 EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
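/*
 * Example usage (illustrative sketch): a typical transmit-side sequence
 * where the CPU fills a descriptor, maps it for the device, and unmaps
 * it on completion; "desc" and its type are hypothetical.
 *
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *	int ret;
 *
 *	ret = knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
 *	if (ret)
 *		return ret;
 *	(hand dma/dma_sz to the hardware queue, e.g. knav_queue_push())
 *	...
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 */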
966 
967 /**
968  * knav_pool_count()	- Get the number of descriptors in pool.
969  * @ph:			- pool handle
970  * Returns number of elements in the pool.
971  */
972 int knav_pool_count(void *ph)
973 {
974 	struct knav_pool *pool = ph;
975 	return knav_queue_get_count(pool->queue);
976 }
977 EXPORT_SYMBOL_GPL(knav_pool_count);
978 
979 static void knav_queue_setup_region(struct knav_device *kdev,
980 					struct knav_region *region)
981 {
982 	unsigned hw_num_desc, hw_desc_size, size;
983 	struct knav_reg_region __iomem  *regs;
984 	struct knav_qmgr_info *qmgr;
985 	struct knav_pool *pool;
986 	int id = region->id;
987 	struct page *page;
988 
989 	/* unused region? */
990 	if (!region->num_desc) {
991 		dev_warn(kdev->dev, "unused region %s\n", region->name);
992 		return;
993 	}
994 
995 	/* get hardware descriptor value */
996 	hw_num_desc = ilog2(region->num_desc - 1) + 1;
997 
998 	/* did we force fit ourselves into nothingness? */
999 	if (region->num_desc < 32) {
1000 		region->num_desc = 0;
1001 		dev_warn(kdev->dev, "too few descriptors in region %s\n",
1002 			 region->name);
1003 		return;
1004 	}
1005 
1006 	size = region->num_desc * region->desc_size;
1007 	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
1008 						GFP_DMA32);
1009 	if (!region->virt_start) {
1010 		region->num_desc = 0;
1011 		dev_err(kdev->dev, "memory alloc failed for region %s\n",
1012 			region->name);
1013 		return;
1014 	}
1015 	region->virt_end = region->virt_start + size;
1016 	page = virt_to_page(region->virt_start);
1017 
1018 	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
1019 					 DMA_BIDIRECTIONAL);
1020 	if (dma_mapping_error(kdev->dev, region->dma_start)) {
1021 		dev_err(kdev->dev, "dma map failed for region %s\n",
1022 			region->name);
1023 		goto fail;
1024 	}
1025 	region->dma_end = region->dma_start + size;
1026 
1027 	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1028 	if (!pool) {
1029 		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1030 		goto fail;
1031 	}
1032 	pool->num_desc = 0;
1033 	pool->region_offset = region->num_desc;
1034 	list_add(&pool->region_inst, &region->pools);
1035 
1036 	dev_dbg(kdev->dev,
1037 		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
1038 		region->name, id, region->desc_size, region->num_desc,
1039 		region->link_index, &region->dma_start, &region->dma_end,
1040 		region->virt_start, region->virt_end);
1041 
1042 	hw_desc_size = (region->desc_size / 16) - 1;
1043 	hw_num_desc -= 5;
1044 
1045 	for_each_qmgr(kdev, qmgr) {
1046 		regs = qmgr->reg_region + id;
1047 		writel_relaxed((u32)region->dma_start, &regs->base);
1048 		writel_relaxed(region->link_index, &regs->start_index);
1049 		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
1050 			       &regs->size_count);
1051 	}
1052 	return;
1053 
1054 fail:
1055 	if (region->dma_start)
1056 		dma_unmap_page(kdev->dev, region->dma_start, size,
1057 				DMA_BIDIRECTIONAL);
1058 	if (region->virt_start)
1059 		free_pages_exact(region->virt_start, size);
1060 	region->num_desc = 0;
1061 	return;
1062 }
1063 
1064 static const char *knav_queue_find_name(struct device_node *node)
1065 {
1066 	const char *name;
1067 
1068 	if (of_property_read_string(node, "label", &name) < 0)
1069 		name = node->name;
1070 	if (!name)
1071 		name = "unknown";
1072 	return name;
1073 }
1074 
1075 static int knav_queue_setup_regions(struct knav_device *kdev,
1076 				    struct device_node *node)
1077 {
1078 	struct device *dev = kdev->dev;
1079 	struct device_node *regions __free(device_node) =
1080 			of_get_child_by_name(node, "descriptor-regions");
1081 	struct knav_region *region;
1082 	u32 temp[2];
1083 	int ret;
1084 
1085 	if (!regions)
1086 		return dev_err_probe(dev, -ENODEV,
1087 				     "descriptor-regions not specified\n");
1088 
1089 	for_each_child_of_node_scoped(regions, child) {
1090 		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
1091 		if (!region)
1092 			return -ENOMEM;
1093 
1094 		region->name = knav_queue_find_name(child);
1095 		of_property_read_u32(child, "id", &region->id);
1096 		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
1097 		if (!ret) {
1098 			region->num_desc  = temp[0];
1099 			region->desc_size = temp[1];
1100 		} else {
1101 			dev_err(dev, "invalid region info %s\n", region->name);
1102 			devm_kfree(dev, region);
1103 			continue;
1104 		}
1105 
1106 		ret = of_property_read_u32(child, "link-index",
1107 					   &region->link_index);
1108 		if (ret) {
1109 			dev_err(dev, "link index not found for %s\n",
1110 				region->name);
1111 			devm_kfree(dev, region);
1112 			continue;
1113 		}
1114 
1115 		INIT_LIST_HEAD(&region->pools);
1116 		list_add_tail(&region->list, &kdev->regions);
1117 	}
1118 	if (list_empty(&kdev->regions))
1119 		return dev_err_probe(dev, -ENODEV,
1120 				     "no valid region information found\n");
1121 
1122 	/* Next, we run through the regions and set things up */
1123 	for_each_region(kdev, region)
1124 		knav_queue_setup_region(kdev, region);
1125 
1126 	return 0;
1127 }
1128 
1129 static int knav_get_link_ram(struct knav_device *kdev,
1130 				       const char *name,
1131 				       struct knav_link_ram_block *block)
1132 {
1133 	struct platform_device *pdev = to_platform_device(kdev->dev);
1134 	struct device_node *node = pdev->dev.of_node;
1135 	u32 temp[2];
1136 
1137 	/*
1138 	 * Note: link ram resources are specified in "entry" sized units. In
1139 	 * reality, although entries are ~40 bits in hardware, we treat them as
1140 	 * 64-bit entities here.
1141 	 *
1142 	 * For example, to specify the internal link ram for Keystone-I class
1143 	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
1144 	 *
1145 	 * This gets a bit weird when other link rams are used.  For example,
1146 	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
1147 	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
1148 	 * which accounts for 64-bits per entry, for 16K entries.
1149 	 */
1150 	if (!of_property_read_u32_array(node, name, temp, 2)) {
1151 		if (temp[0]) {
1152 			/*
1153 			 * queue_base specified => using internal or onchip
1154 			 * link ram WARNING - we do not "reserve" this block
1155 			 */
1156 			block->dma = (dma_addr_t)temp[0];
1157 			block->virt = NULL;
1158 			block->size = temp[1];
1159 		} else {
1160 			block->size = temp[1];
1161 			/* queue_base not specified => allocate requested size */
1162 			block->virt = dmam_alloc_coherent(kdev->dev,
1163 						  8 * block->size, &block->dma,
1164 						  GFP_KERNEL);
1165 			if (!block->virt) {
1166 				dev_err(kdev->dev, "failed to alloc linkram\n");
1167 				return -ENOMEM;
1168 			}
1169 		}
1170 	} else {
1171 		return -ENODEV;
1172 	}
1173 	return 0;
1174 }
1175 
1176 static int knav_queue_setup_link_ram(struct knav_device *kdev)
1177 {
1178 	struct knav_link_ram_block *block;
1179 	struct knav_qmgr_info *qmgr;
1180 
1181 	for_each_qmgr(kdev, qmgr) {
1182 		block = &kdev->link_rams[0];
1183 		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
1184 			&block->dma, block->virt, block->size);
1185 		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
1186 		if (kdev->version == QMSS_66AK2G)
1187 			writel_relaxed(block->size,
1188 				       &qmgr->reg_config->link_ram_size0);
1189 		else
1190 			writel_relaxed(block->size - 1,
1191 				       &qmgr->reg_config->link_ram_size0);
1192 		block++;
1193 		if (!block->size)
1194 			continue;
1195 
1196 		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
1197 			&block->dma, block->virt, block->size);
1198 		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
1199 	}
1200 
1201 	return 0;
1202 }
1203 
1204 static int knav_setup_queue_range(struct knav_device *kdev,
1205 					struct device_node *node)
1206 {
1207 	struct device *dev = kdev->dev;
1208 	struct knav_range_info *range;
1209 	struct knav_qmgr_info *qmgr;
1210 	u32 temp[2], start, end, id, index;
1211 	int ret, i;
1212 
1213 	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
1214 	if (!range) {
1215 		dev_err(dev, "out of memory allocating range\n");
1216 		return -ENOMEM;
1217 	}
1218 
1219 	range->kdev = kdev;
1220 	range->name = knav_queue_find_name(node);
1221 	ret = of_property_read_u32_array(node, "qrange", temp, 2);
1222 	if (!ret) {
1223 		range->queue_base = temp[0] - kdev->base_id;
1224 		range->num_queues = temp[1];
1225 	} else {
1226 		dev_err(dev, "invalid queue range %s\n", range->name);
1227 		devm_kfree(dev, range);
1228 		return -EINVAL;
1229 	}
1230 
1231 	for (i = 0; i < RANGE_MAX_IRQS; i++) {
1232 		struct of_phandle_args oirq;
1233 
1234 		if (of_irq_parse_one(node, i, &oirq))
1235 			break;
1236 
1237 		range->irqs[i].irq = irq_create_of_mapping(&oirq);
1238 		if (range->irqs[i].irq == IRQ_NONE)
1239 			break;
1240 
1241 		range->num_irqs++;
1242 
1243 		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
1244 			unsigned long mask;
1245 			int bit;
1246 
1247 			range->irqs[i].cpu_mask = devm_kzalloc(dev,
1248 							       cpumask_size(), GFP_KERNEL);
1249 			if (!range->irqs[i].cpu_mask)
1250 				return -ENOMEM;
1251 
1252 			mask = (oirq.args[2] & 0x0000ff00) >> 8;
1253 			for_each_set_bit(bit, &mask, BITS_PER_LONG)
1254 				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
1255 		}
1256 	}
1257 
1258 	range->num_irqs = min(range->num_irqs, range->num_queues);
1259 	if (range->num_irqs)
1260 		range->flags |= RANGE_HAS_IRQ;
1261 
1262 	if (of_property_read_bool(node, "qalloc-by-id"))
1263 		range->flags |= RANGE_RESERVED;
1264 
1265 	if (of_property_present(node, "accumulator")) {
1266 		ret = knav_init_acc_range(kdev, node, range);
1267 		if (ret < 0) {
1268 			devm_kfree(dev, range);
1269 			return ret;
1270 		}
1271 	} else {
1272 		range->ops = &knav_gp_range_ops;
1273 	}
1274 
1275 	/* set threshold to 1, and flush out the queues */
1276 	for_each_qmgr(kdev, qmgr) {
1277 		start = max(qmgr->start_queue, range->queue_base);
1278 		end   = min(qmgr->start_queue + qmgr->num_queues,
1279 			    range->queue_base + range->num_queues);
1280 		for (id = start; id < end; id++) {
1281 			index = id - qmgr->start_queue;
1282 			writel_relaxed(THRESH_GTE | 1,
1283 				       &qmgr->reg_peek[index].ptr_size_thresh);
1284 			writel_relaxed(0,
1285 				       &qmgr->reg_push[index].ptr_size_thresh);
1286 		}
1287 	}
1288 
1289 	list_add_tail(&range->list, &kdev->queue_ranges);
1290 	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
1291 		range->name, range->queue_base,
1292 		range->queue_base + range->num_queues - 1,
1293 		range->num_irqs,
1294 		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
1295 		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
1296 		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
1297 	kdev->num_queues_in_use += range->num_queues;
1298 	return 0;
1299 }
1300 
1301 static int knav_setup_queue_pools(struct knav_device *kdev,
1302 				  struct device_node *node)
1303 {
1304 	struct device_node *queue_pools __free(device_node) =
1305 			of_get_child_by_name(node, "queue-pools");
1306 	struct device_node *type, *range;
1307 
1308 	if (!queue_pools)
1309 		return dev_err_probe(kdev->dev, -ENODEV,
1310 				     "queue-pools not specified\n");
1311 
1312 	for_each_child_of_node(queue_pools, type) {
1313 		for_each_child_of_node(type, range) {
1314 			/* return value ignored, we init the rest... */
1315 			knav_setup_queue_range(kdev, range);
1316 		}
1317 	}
1318 
1319 	/* ... and barf if they all failed! */
1320 	if (list_empty(&kdev->queue_ranges))
1321 		return dev_err_probe(kdev->dev, -ENODEV,
1322 				     "no valid queue range found\n");
1323 	return 0;
1324 }
1325 
1326 static void knav_free_queue_range(struct knav_device *kdev,
1327 				  struct knav_range_info *range)
1328 {
1329 	if (range->ops && range->ops->free_range)
1330 		range->ops->free_range(range);
1331 	list_del(&range->list);
1332 	devm_kfree(kdev->dev, range);
1333 }
1334 
1335 static void knav_free_queue_ranges(struct knav_device *kdev)
1336 {
1337 	struct knav_range_info *range;
1338 
1339 	for (;;) {
1340 		range = first_queue_range(kdev);
1341 		if (!range)
1342 			break;
1343 		knav_free_queue_range(kdev, range);
1344 	}
1345 }
1346 
1347 static void knav_queue_free_regions(struct knav_device *kdev)
1348 {
1349 	struct knav_region *region;
1350 	struct knav_pool *pool, *tmp;
1351 	unsigned size;
1352 
1353 	for (;;) {
1354 		region = first_region(kdev);
1355 		if (!region)
1356 			break;
1357 		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1358 			knav_pool_destroy(pool);
1359 
1360 		size = region->virt_end - region->virt_start;
1361 		if (size)
1362 			free_pages_exact(region->virt_start, size);
1363 		list_del(&region->list);
1364 		devm_kfree(kdev->dev, region);
1365 	}
1366 }
1367 
1368 static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1369 					struct device_node *node, int index)
1370 {
1371 	struct resource res;
1372 	void __iomem *regs;
1373 	int ret;
1374 
1375 	ret = of_address_to_resource(node, index, &res);
1376 	if (ret) {
1377 		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
1378 			node, index);
1379 		return ERR_PTR(ret);
1380 	}
1381 
1382 	regs = devm_ioremap_resource(kdev->dev, &res);
1383 	if (IS_ERR(regs))
1384 		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
1385 			index, node);
1386 	return regs;
1387 }
1388 
1389 static int knav_queue_init_qmgrs(struct knav_device *kdev,
1390 				 struct device_node *node)
1391 {
1392 	struct device *dev = kdev->dev;
1393 	struct device_node *qmgrs __free(device_node) =
1394 			of_get_child_by_name(node, "qmgrs");
1395 	struct knav_qmgr_info *qmgr;
1396 	u32 temp[2];
1397 	int ret;
1398 
1399 	if (!qmgrs)
1400 		return dev_err_probe(dev, -ENODEV,
1401 				     "queue manager info not specified\n");
1402 
1403 	for_each_child_of_node_scoped(qmgrs, child) {
1404 		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1405 		if (!qmgr)
1406 			return -ENOMEM;
1407 
1408 		ret = of_property_read_u32_array(child, "managed-queues",
1409 						 temp, 2);
1410 		if (!ret) {
1411 			qmgr->start_queue = temp[0];
1412 			qmgr->num_queues = temp[1];
1413 		} else {
1414 			dev_err(dev, "invalid qmgr queue range\n");
1415 			devm_kfree(dev, qmgr);
1416 			continue;
1417 		}
1418 
1419 		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1420 			 qmgr->start_queue, qmgr->num_queues);
1421 
1422 		qmgr->reg_peek =
1423 			knav_queue_map_reg(kdev, child,
1424 					   KNAV_QUEUE_PEEK_REG_INDEX);
1425 
1426 		if (kdev->version == QMSS) {
1427 			qmgr->reg_status =
1428 				knav_queue_map_reg(kdev, child,
1429 						   KNAV_QUEUE_STATUS_REG_INDEX);
1430 		}
1431 
1432 		qmgr->reg_config =
1433 			knav_queue_map_reg(kdev, child,
1434 					   (kdev->version == QMSS_66AK2G) ?
1435 					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
1436 					   KNAV_QUEUE_CONFIG_REG_INDEX);
1437 		qmgr->reg_region =
1438 			knav_queue_map_reg(kdev, child,
1439 					   (kdev->version == QMSS_66AK2G) ?
1440 					   KNAV_L_QUEUE_REGION_REG_INDEX :
1441 					   KNAV_QUEUE_REGION_REG_INDEX);
1442 
1443 		qmgr->reg_push =
1444 			knav_queue_map_reg(kdev, child,
1445 					   (kdev->version == QMSS_66AK2G) ?
1446 					    KNAV_L_QUEUE_PUSH_REG_INDEX :
1447 					    KNAV_QUEUE_PUSH_REG_INDEX);
1448 
1449 		if (kdev->version == QMSS) {
1450 			qmgr->reg_pop =
1451 				knav_queue_map_reg(kdev, child,
1452 						   KNAV_QUEUE_POP_REG_INDEX);
1453 		}
1454 
1455 		if (IS_ERR(qmgr->reg_peek) ||
1456 		    ((kdev->version == QMSS) &&
1457 		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
1458 		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1459 		    IS_ERR(qmgr->reg_push)) {
1460 			dev_err(dev, "failed to map qmgr regs\n");
1461 			if (kdev->version == QMSS) {
1462 				if (!IS_ERR(qmgr->reg_status))
1463 					devm_iounmap(dev, qmgr->reg_status);
1464 				if (!IS_ERR(qmgr->reg_pop))
1465 					devm_iounmap(dev, qmgr->reg_pop);
1466 			}
1467 			if (!IS_ERR(qmgr->reg_peek))
1468 				devm_iounmap(dev, qmgr->reg_peek);
1469 			if (!IS_ERR(qmgr->reg_config))
1470 				devm_iounmap(dev, qmgr->reg_config);
1471 			if (!IS_ERR(qmgr->reg_region))
1472 				devm_iounmap(dev, qmgr->reg_region);
1473 			if (!IS_ERR(qmgr->reg_push))
1474 				devm_iounmap(dev, qmgr->reg_push);
1475 			devm_kfree(dev, qmgr);
1476 			continue;
1477 		}
1478 
1479 		/* Use the same push register for pop as well */
1480 		if (kdev->version == QMSS_66AK2G)
1481 			qmgr->reg_pop = qmgr->reg_push;
1482 
1483 		list_add_tail(&qmgr->list, &kdev->qmgrs);
1484 		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1485 			 qmgr->start_queue, qmgr->num_queues,
1486 			 qmgr->reg_peek, qmgr->reg_status,
1487 			 qmgr->reg_config, qmgr->reg_region,
1488 			 qmgr->reg_push, qmgr->reg_pop);
1489 	}
1490 	return 0;
1491 }
1492 
1493 static int knav_queue_init_pdsps(struct knav_device *kdev,
1494 					struct device_node *pdsps)
1495 {
1496 	struct device *dev = kdev->dev;
1497 	struct knav_pdsp_info *pdsp;
1498 
1499 	for_each_child_of_node_scoped(pdsps, child) {
1500 		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1501 		if (!pdsp)
1502 			return -ENOMEM;
1503 
1504 		pdsp->name = knav_queue_find_name(child);
1505 		pdsp->iram =
1506 			knav_queue_map_reg(kdev, child,
1507 					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1508 		pdsp->regs =
1509 			knav_queue_map_reg(kdev, child,
1510 					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1511 		pdsp->intd =
1512 			knav_queue_map_reg(kdev, child,
1513 					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1514 		pdsp->command =
1515 			knav_queue_map_reg(kdev, child,
1516 					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1517 
1518 		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1519 		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1520 			dev_err(dev, "failed to map pdsp %s regs\n",
1521 				pdsp->name);
1522 			if (!IS_ERR(pdsp->command))
1523 				devm_iounmap(dev, pdsp->command);
1524 			if (!IS_ERR(pdsp->iram))
1525 				devm_iounmap(dev, pdsp->iram);
1526 			if (!IS_ERR(pdsp->regs))
1527 				devm_iounmap(dev, pdsp->regs);
1528 			if (!IS_ERR(pdsp->intd))
1529 				devm_iounmap(dev, pdsp->intd);
1530 			devm_kfree(dev, pdsp);
1531 			continue;
1532 		}
1533 		of_property_read_u32(child, "id", &pdsp->id);
1534 		list_add_tail(&pdsp->list, &kdev->pdsps);
1535 		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1536 			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1537 			pdsp->intd);
1538 	}
1539 	return 0;
1540 }
1541 
1542 static int knav_queue_stop_pdsp(struct knav_device *kdev,
1543 			  struct knav_pdsp_info *pdsp)
1544 {
1545 	u32 val, timeout = 1000;
1546 	int ret;
1547 
1548 	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1549 	writel_relaxed(val, &pdsp->regs->control);
1550 	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1551 					PDSP_CTRL_RUNNING);
1552 	if (ret < 0) {
1553 		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1554 		return ret;
1555 	}
1556 	pdsp->loaded = false;
1557 	pdsp->started = false;
1558 	return 0;
1559 }
1560 
1561 static int knav_queue_load_pdsp(struct knav_device *kdev,
1562 			  struct knav_pdsp_info *pdsp)
1563 {
1564 	int i, ret, fwlen;
1565 	const struct firmware *fw;
1566 	bool found = false;
1567 	u32 *fwdata;
1568 
1569 	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1570 		if (knav_acc_firmwares[i]) {
1571 			ret = request_firmware_direct(&fw,
1572 						      knav_acc_firmwares[i],
1573 						      kdev->dev);
1574 			if (!ret) {
1575 				found = true;
1576 				break;
1577 			}
1578 		}
1579 	}
1580 
1581 	if (!found) {
1582 		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1583 		return -ENODEV;
1584 	}
1585 
1586 	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1587 		 knav_acc_firmwares[i]);
1588 
1589 	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1590 	/* download the firmware */
1591 	fwdata = (u32 *)fw->data;
1592 	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
1593 	for (i = 0; i < fwlen; i++)
1594 		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1595 
1596 	release_firmware(fw);
1597 	return 0;
1598 }
1599 
1600 static int knav_queue_start_pdsp(struct knav_device *kdev,
1601 			   struct knav_pdsp_info *pdsp)
1602 {
1603 	u32 val, timeout = 1000;
1604 	int ret;
1605 
1606 	/* write a command for sync */
1607 	writel_relaxed(0xffffffff, pdsp->command);
1608 	while (readl_relaxed(pdsp->command) != 0xffffffff)
1609 		cpu_relax();
1610 
1611 	/* soft reset the PDSP */
1612 	val  = readl_relaxed(&pdsp->regs->control);
1613 	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1614 	writel_relaxed(val, &pdsp->regs->control);
1615 
1616 	/* enable pdsp */
1617 	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1618 	writel_relaxed(val, &pdsp->regs->control);
1619 
1620 	/* wait for command register to clear */
1621 	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1622 	if (ret < 0) {
1623 		dev_err(kdev->dev,
1624 			"timed out on pdsp %s command register wait\n",
1625 			pdsp->name);
1626 		return ret;
1627 	}
1628 	return 0;
1629 }
1630 
1631 static void knav_queue_stop_pdsps(struct knav_device *kdev)
1632 {
1633 	struct knav_pdsp_info *pdsp;
1634 
1635 	/* disable all pdsps */
1636 	for_each_pdsp(kdev, pdsp)
1637 		knav_queue_stop_pdsp(kdev, pdsp);
1638 }
1639 
1640 static int knav_queue_start_pdsps(struct knav_device *kdev)
1641 {
1642 	struct knav_pdsp_info *pdsp;
1643 	int ret;
1644 
1645 	knav_queue_stop_pdsps(kdev);
1646 	/* Now load them all. We return success even if a pdsp fails to
1647 	 * load, as acc channels are optional and depend on firmware
1648 	 * availability in the system. We set the loaded and started
1649 	 * flags, and when initializing the acc range we check them and
1650 	 * init the range only if the pdsp is started.
1651 	 */
1652 	for_each_pdsp(kdev, pdsp) {
1653 		ret = knav_queue_load_pdsp(kdev, pdsp);
1654 		if (!ret)
1655 			pdsp->loaded = true;
1656 	}
1657 
1658 	for_each_pdsp(kdev, pdsp) {
1659 		if (pdsp->loaded) {
1660 			ret = knav_queue_start_pdsp(kdev, pdsp);
1661 			if (!ret)
1662 				pdsp->started = true;
1663 		}
1664 	}
1665 	return 0;
1666 }
1667 
1668 static int knav_queue_setup_pdsps(struct knav_device *kdev,
1669 				  struct device_node *node)
1670 {
1671 	struct device_node *pdsps __free(device_node) =
1672 			of_get_child_by_name(node, "pdsps");
1673 
1674 	if (pdsps) {
1675 		int ret;
1676 
1677 		ret = knav_queue_init_pdsps(kdev, pdsps);
1678 		if (ret)
1679 			return ret;
1680 
1681 		ret = knav_queue_start_pdsps(kdev);
1682 		if (ret)
1683 			return ret;
1684 	}
1685 	return 0;
1686 }
1687 
1688 static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1689 {
1690 	struct knav_qmgr_info *qmgr;
1691 
1692 	for_each_qmgr(kdev, qmgr) {
1693 		if ((id >= qmgr->start_queue) &&
1694 		    (id < qmgr->start_queue + qmgr->num_queues))
1695 			return qmgr;
1696 	}
1697 	return NULL;
1698 }
1699 
1700 static int knav_queue_init_queue(struct knav_device *kdev,
1701 					struct knav_range_info *range,
1702 					struct knav_queue_inst *inst,
1703 					unsigned id)
1704 {
1705 	char irq_name[KNAV_NAME_SIZE];
1706 	inst->qmgr = knav_find_qmgr(id);
1707 	if (!inst->qmgr)
1708 		return -1;
1709 
1710 	INIT_LIST_HEAD(&inst->handles);
1711 	inst->kdev = kdev;
1712 	inst->range = range;
1713 	inst->irq_num = -1;
1714 	inst->id = id;
1715 	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
1716 	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1717 
1718 	if (range->ops && range->ops->init_queue)
1719 		return range->ops->init_queue(range, inst);
1720 	else
1721 		return 0;
1722 }
1723 
1724 static int knav_queue_init_queues(struct knav_device *kdev)
1725 {
1726 	struct knav_range_info *range;
1727 	int size, id, base_idx;
1728 	int idx = 0, ret = 0;
1729 
1730 	/* how much do we need for instance data? */
1731 	size = sizeof(struct knav_queue_inst);
1732 
1733 	/* round this up to a power of 2 to keep the index-to-instance
1734 	 * arithmetic fast.
1735 	 */
1736 	kdev->inst_shift = order_base_2(size);
1737 	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1738 	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1739 	if (!kdev->instances)
1740 		return -ENOMEM;
1741 
1742 	for_each_queue_range(kdev, range) {
1743 		if (range->ops && range->ops->init_range)
1744 			range->ops->init_range(range);
1745 		base_idx = idx;
1746 		for (id = range->queue_base;
1747 		     id < range->queue_base + range->num_queues; id++, idx++) {
1748 			ret = knav_queue_init_queue(kdev, range,
1749 					knav_queue_idx_to_inst(kdev, idx), id);
1750 			if (ret < 0)
1751 				return ret;
1752 		}
1753 		range->queue_base_inst =
1754 			knav_queue_idx_to_inst(kdev, base_idx);
1755 	}
1756 	return 0;
1757 }
1758 
1759 /* Match table for of_platform binding */
1760 static const struct of_device_id keystone_qmss_of_match[] = {
1761 	{
1762 		.compatible = "ti,keystone-navigator-qmss",
1763 	},
1764 	{
1765 		.compatible = "ti,66ak2g-navss-qm",
1766 		.data	= (void *)QMSS_66AK2G,
1767 	},
1768 	{},
1769 };
1770 MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1771 
1772 static int knav_queue_probe(struct platform_device *pdev)
1773 {
1774 	struct device_node *node = pdev->dev.of_node;
1775 	struct device *dev = &pdev->dev;
1776 	u32 temp[2];
1777 	int ret;
1778 
1779 	if (!node) {
1780 		dev_err(dev, "device tree info unavailable\n");
1781 		return -ENODEV;
1782 	}
1783 
1784 	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
1785 	if (!kdev) {
1786 		dev_err(dev, "memory allocation failed\n");
1787 		return -ENOMEM;
1788 	}
1789 
1790 	if (device_get_match_data(dev))
1791 		kdev->version = QMSS_66AK2G;
1792 
1793 	platform_set_drvdata(pdev, kdev);
1794 	kdev->dev = dev;
1795 	INIT_LIST_HEAD(&kdev->queue_ranges);
1796 	INIT_LIST_HEAD(&kdev->qmgrs);
1797 	INIT_LIST_HEAD(&kdev->pools);
1798 	INIT_LIST_HEAD(&kdev->regions);
1799 	INIT_LIST_HEAD(&kdev->pdsps);
1800 
1801 	pm_runtime_enable(&pdev->dev);
1802 	ret = pm_runtime_resume_and_get(&pdev->dev);
1803 	if (ret < 0) {
1804 		pm_runtime_disable(&pdev->dev);
1805 		dev_err(dev, "Failed to enable QMSS\n");
1806 		return ret;
1807 	}
1808 
1809 	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1810 		dev_err(dev, "queue-range not specified\n");
1811 		ret = -ENODEV;
1812 		goto err;
1813 	}
1814 	kdev->base_id    = temp[0];
1815 	kdev->num_queues = temp[1];
1816 
1817 	/* Initialize queue managers using device tree configuration */
1818 	ret = knav_queue_init_qmgrs(kdev, node);
1819 	if (ret)
1820 		goto err;
1821 
1822 	/* get pdsp configuration values from device tree */
1823 	ret = knav_queue_setup_pdsps(kdev, node);
1824 	if (ret)
1825 		goto err;
1826 
1827 	/* get usable queue range values from device tree */
1828 	ret = knav_setup_queue_pools(kdev, node);
1829 	if (ret)
1830 		goto err;
1831 
1832 	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1833 	if (ret) {
1834 		dev_err(kdev->dev, "could not setup linking ram\n");
1835 		goto err;
1836 	}
1837 
1838 	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1839 	if (ret) {
1840 		/*
1841 		 * nothing really, we have one linking ram already, so we just
1842 		 * live within our means
1843 		 */
1844 	}
1845 
1846 	ret = knav_queue_setup_link_ram(kdev);
1847 	if (ret)
1848 		goto err;
1849 
1850 	ret = knav_queue_setup_regions(kdev, node);
1851 	if (ret)
1852 		goto err;
1853 
1854 	ret = knav_queue_init_queues(kdev);
1855 	if (ret < 0) {
1856 		dev_err(dev, "hwqueue initialization failed\n");
1857 		goto err;
1858 	}
1859 
1860 	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1861 			    &knav_queue_debug_fops);
1862 	device_ready = true;
1863 	return 0;
1864 
1865 err:
1866 	knav_queue_stop_pdsps(kdev);
1867 	knav_queue_free_regions(kdev);
1868 	knav_free_queue_ranges(kdev);
1869 	pm_runtime_put_sync(&pdev->dev);
1870 	pm_runtime_disable(&pdev->dev);
1871 	return ret;
1872 }
1873 
1874 static void knav_queue_remove(struct platform_device *pdev)
1875 {
1876 	/* TODO: Free resources */
1877 	pm_runtime_put_sync(&pdev->dev);
1878 	pm_runtime_disable(&pdev->dev);
1879 }
1880 
1881 static struct platform_driver keystone_qmss_driver = {
1882 	.probe		= knav_queue_probe,
1883 	.remove		= knav_queue_remove,
1884 	.driver		= {
1885 		.name	= "keystone-navigator-qmss",
1886 		.of_match_table = keystone_qmss_of_match,
1887 	},
1888 };
1889 module_platform_driver(keystone_qmss_driver);
1890 
1891 MODULE_LICENSE("GPL v2");
1892 MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1893 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1894 MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
1895