// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 *
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
};

struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;    /* unaligned value from kzalloc() */
	unsigned int idx;      /* position of the next-to-be-returned entry */
	struct qbman_swp *swp; /* portal used to issue VDQCR */
	struct device *dev;    /* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;

	/*
	 * If cpu == DPAA2_IO_ANY_CPU (-1), choose the current cpu, with no
	 * guarantees about potentially being migrated away.
	 */
	if (unlikely(cpu < 0))
		cpu = smp_processor_id();

	/* If a specific cpu was requested, pick it up immediately */
	return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to the
 *                             given cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
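
/*
 * Usage sketch (illustrative, not part of this driver): a caller that
 * prefers the service affine to its current cpu but can fall back to any
 * available one might do the following; smp_processor_id() is only stable
 * here if the caller runs with migration disabled.
 *
 *	struct dpaa2_io *io;
 *
 *	io = dpaa2_io_service_select(smp_processor_id());
 *	if (!io)
 *		io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
 *	if (!io)
 *		return -ENODEV;
 */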

/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
	obj->swp = qbman_swp_init(&obj->swp_desc);

	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	return obj;
}
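
/*
 * Creation sketch (illustrative; the real setup lives in the probe path of
 * the companion dpio-driver.c): the caller fills a dpaa2_io_desc from the
 * MC DPIO attributes and the mapped portal regions before calling
 * dpaa2_io_create(). "next_cpu", "cena_va", "cinh_va" and "dpio_attrs" are
 * hypothetical placeholders.
 *
 *	struct dpaa2_io_desc desc = {
 *		.receives_notifications = 1,
 *		.cpu = next_cpu,
 *		.regs_cena = cena_va,
 *		.regs_cinh = cinh_va,
 *		.dpio_id = dpio_attrs.id,
 *		.qman_version = dpio_attrs.qbman_version,
 *	};
 *	struct dpaa2_io *io;
 *
 *	io = dpaa2_io_create(&desc);
 *	if (!io)
 *		return -ENOMEM;
 */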

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
	kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max >= DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}
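
/*
 * Wiring sketch (illustrative; the actual IRQ registration is done by the
 * companion dpio-driver.c): dpaa2_io_irq() is meant to be called from the
 * handler registered for the DPIO object's MSI. "struct dpio_priv" and its
 * "io" member are hypothetical here.
 *
 *	static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
 *	{
 *		struct dpio_priv *priv = arg;
 *
 *		return dpaa2_io_irq(priv->io);
 *	}
 */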

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d:   the given DPIO service.
 * @ctx: the notification context.
 *
 * The caller should issue the MC command to attach a DPAA2 object to
 * a DPIO only after this function completes successfully.  That way:
 *    (a) the DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller);
 *    (b) the DPIO service can provide back to the caller the 'dpio_id' and
 *        'qman64' parameters that it should pass along in the MC command
 *        in order for the object to be configured to produce the right
 *        notification fields to the DPIO service.
 *
 * Return 0 for success, or -ENODEV for failure.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
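
/*
 * Registration sketch (illustrative): a driver fills the context with its
 * callback and traffic-source id, registers it, and only then attaches the
 * object through MC using the dpio_id/qman64 values filled in by this
 * function. "my_fqdan_cb" and "fqid" are hypothetical.
 *
 *	static void my_fqdan_cb(struct dpaa2_io_notification_ctx *ctx);
 *
 *	ctx->cb = my_fqdan_cb;
 *	ctx->is_cdan = 0;
 *	ctx->id = fqid;
 *	ctx->desired_cpu = DPAA2_IO_ANY_CPU;
 *	err = dpaa2_io_service_register(NULL, ctx);
 *	if (err)
 *		return err;
 *	... then pass ctx->dpio_id and ctx->qman64 in the MC "attach" command ...
 */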

/**
 * dpaa2_io_service_deregister() - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
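
/*
 * Teardown sketch (illustrative): the ordering mirrors registration. The
 * producer is detached via MC first, so no new notification can arrive
 * while the context is being unlinked.
 *
 *	... MC command detaching the object from the DPIO ...
 *	dpaa2_io_service_deregister(NULL, ctx);
 */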

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once an FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed"; i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is
 * what this function achieves.
 *
 * Return 0 for success.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (unlikely(!d))
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
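
/*
 * Rearm sketch (illustrative of how a polling driver might finish a
 * dequeue cycle): rearming is simply retried while the portal reports
 * -EBUSY.
 *
 *	do {
 *		err = dpaa2_io_service_rearm(NULL, ctx);
 *		cpu_relax();
 *	} while (err == -EBUSY);
 */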

/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
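
/*
 * Pull sketch (illustrative): one pull command is outstanding per store;
 * its results are then harvested with dpaa2_io_store_next() (see below).
 * "ch_id" and "store" are hypothetical.
 *
 *	err = dpaa2_io_service_pull_channel(NULL, ch_id, store);
 *	if (err)
 *		return err;
 *	... poll dpaa2_io_store_next(store, &is_last) for the results ...
 */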

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
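
/*
 * Enqueue sketch (illustrative): the enqueue ring may be momentarily full,
 * so callers typically retry on -EBUSY a bounded number of times. "priv",
 * "queue_bin", "fd" and MAX_ENQ_RETRIES are hypothetical.
 *
 *	int retries = 0;
 *
 *	do {
 *		err = dpaa2_io_service_enqueue_qd(NULL, priv->qdid, 0,
 *						  queue_bin, &fd);
 *	} while (err == -EBUSY && ++retries < MAX_ENQ_RETRIES);
 */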

/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u32 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
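
/*
 * Release sketch (illustrative): QBMan releases buffers in small bursts
 * (at most seven per command), so seeding a pool is done in a loop.
 * "addr[]" holds DMA addresses of freshly mapped buffers (hypothetical).
 *
 *	u64 addr[7];
 *
 *	... fill addr[] with dma_map_single() results ...
 *	err = dpaa2_io_service_release(NULL, bpid, addr, 7);
 *	if (err)
 *		... unmap and free the buffers ...
 */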

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the number of buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise the number
 * of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u32 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
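
/*
 * Drain sketch (illustrative): acquire returns fewer buffers than asked
 * for once the pool runs dry, so draining loops until it returns zero.
 *
 *	u64 addr[7];
 *	int ret, i;
 *
 *	do {
 *		ret = dpaa2_io_service_acquire(NULL, bpid, addr,
 *					       ARRAY_SIZE(addr));
 *		if (ret < 0)
 *			break;
 *		for (i = 0; i < ret; i++)
 *			... unmap and free addr[i] ...
 *	} while (ret);
 */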

/*
 * 'Stores' are reusable memory blocks for holding dequeue results and for
 * assisting with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue results.
 * @max_frames: the maximum number of dequeue results to hold; must be <= 16.
 * @dev:        the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return a pointer to the dpaa2_io_store struct for successfully created
 * storage memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 16))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
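
/*
 * Lifecycle sketch (illustrative): one store per channel is typical. The
 * store must outlive any pull command issued against it, and is freed with
 * dpaa2_io_store_destroy().
 *
 *	store = dpaa2_io_store_create(16, dev);
 *	if (!store)
 *		return -ENOMEM;
 *	... issue pulls and consume the results ...
 *	dpaa2_io_store_destroy(store);
 */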

/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            results.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If we get an empty dequeue result to terminate a zero-results
		 * vdqcr, return NULL to the caller rather than expecting the
		 * caller to check non-NULL results every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
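
/*
 * Consumption sketch (illustrative of how a driver might drain a store):
 * NULL with is_last == 0 means the hardware has not yet written the next
 * result, so the loop spins until the pull command completes.
 *
 *	int is_last = 0;
 *
 *	do {
 *		dq = dpaa2_io_store_next(store, &is_last);
 *		if (!dq)
 *			continue;
 *		fd = dpaa2_dq_fd(dq);
 *		... process the frame descriptor ...
 *	} while (!is_last);
 */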