/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

#define IRQNAME		"BMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */

/* Portal register assists */

/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x0000
#define BM_REG_RCR_CI_CINH	0x0004
#define BM_REG_RCR_ITR		0x0008
#define BM_REG_CFG		0x0100
#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
#define BM_REG_ISR		0x0e00
#define BM_REG_IER		0x0e04
#define BM_REG_ISDR		0x0e08
#define BM_REG_IIR		0x0e0c

/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100

/*
 * Portal modes.
 *   Enum types:
 *     pmode == production mode
 *     cmode == consumption mode
 *   Enum values use 3-letter codes. The first letter matches the portal mode;
 *   the remaining two letters indicate:
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};
enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};
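
/*
 * A note on the valid-bit ("vb") production mode used in this file:
 * software publishes an RCR entry by writing its verb byte with the
 * valid-bit set to the ring's current "colour", and toggles that colour
 * on every ring wrap (see rcr_inc() and bm_rcr_pvb_commit() below), so
 * hardware can recognise new entries without an out-of-band
 * producer-index write.
 */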


/* --- Portal structures --- */

#define BM_RCR_SIZE		8

/* Release Command */
struct bm_rcr_entry {
	union {
		struct {
			u8 _ncw_verb; /* writes to this are non-coherent */
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
};
#define BM_RCR_VERB_VBIT		0x80
#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values: */
#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */

struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};

/* MC (Management Command) command */
struct bm_mc_command {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 bpid; /* used by acquire command */
	u8 __reserved[62];
};
#define BM_MCC_VERB_VBIT		0x80
#define BM_MCC_VERB_CMD_MASK		0x70	/* the verb contains one of: */
#define BM_MCC_VERB_CMD_ACQUIRE		0x10
#define BM_MCC_VERB_CMD_QUERY		0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */

/* MC result, Acquire and Query Response */
union bm_mc_result {
	struct {
		u8 verb;
		u8 bpid;
		u8 __reserved[62];
	};
	struct bm_buffer bufs[8];
};
#define BM_MCR_VERB_VBIT		0x80
#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
#define BM_MCR_VERB_CMD_ERR_ECC		0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
#define BM_MCR_TIMEOUT			10000 /* us */

struct bm_mc {
	struct bm_mc_command *cr;
	union bm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};

struct bm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};

struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}

static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}

/* Cache-enabled portal access */
static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}

struct bman_portal {
	struct bm_portal p;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* probing-time config params for cpu-affine portals */
	const struct bm_portal_config *config;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);

static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}

/*
 * This object type refers to a pool; it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, e.g. if different users
 * of the pool are operating via different portals.
 */
struct bman_pool {
	/* index of the buffer pool to encapsulate (0-63) */
	u32 bpid;
	/* Used for hash-table admin when using depletion notifications. */
	struct bman_portal *portal;
	struct bman_pool *next;
};

static u32 poll_portal_slow(struct bman_portal *p, u32 is);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct bman_portal *p = ptr;
	struct bm_portal *portal = &p->p;
	u32 clear = p->irq_sources;
	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	clear |= poll_portal_slow(p, is);
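	/* ISR bits are write-1-to-clear; ack everything we just handled */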
	bm_out(portal, BM_REG_ISR, clear);
	return IRQ_HANDLED;
}

/* --- RCR API --- */

#define RCR_SHIFT	ilog2(sizeof(struct bm_rcr_entry))
#define RCR_CARRY	(uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~RCR_CARRY;

	return (struct bm_rcr_entry *)addr;
}

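/*
 * Worked example: each RCR entry is 64 bytes, so RCR_SHIFT is 6 and
 * RCR_CARRY is 8 << 6 = 0x200. With the 512-byte ring suitably aligned,
 * a cursor incremented off the end differs from the ring base only in
 * that carry bit, so clearing it wraps the pointer back to the base.
 */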
#ifdef CONFIG_FSL_DPAA_CHECKING
/* Bit-wise logic to convert a ring pointer to a ring index */
static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
#endif

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void rcr_inc(struct bm_rcr *rcr)
{
	/* increment to the next RCR pointer and handle overflow and 'vbit' */
	struct bm_rcr_entry *partial = rcr->cursor + 1;

	rcr->cursor = rcr_carryclear(partial);
	if (partial != rcr->cursor)
		rcr->vbit ^= BM_RCR_VERB_VBIT;
}

static int bm_rcr_get_avail(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return rcr->available;
}

static int bm_rcr_get_fill(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return BM_RCR_SIZE - 1 - rcr->available;
}

static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
	struct bm_rcr *rcr = &portal->rcr;

	rcr->ithresh = ithresh;
	bm_out(portal, BM_REG_RCR_ITR, ithresh);
}

static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}

static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}
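/*
 * Example of the cyclic difference above: on the 8-entry ring, old_ci == 6
 * and a freshly read ci of 2 yields dpaa_cyc_diff(8, 6, 2) == 4, i.e.
 * hardware has consumed four more entries, which become available again.
 */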

static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(!rcr->busy);
	if (!rcr->available)
		return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 1;
#endif
	dpaa_zero(rcr->cursor);
	return rcr->cursor;
}

static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);
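	/*
	 * Order the entry body before the verb write: a matching valid-bit
	 * in the verb is what hands the entry to hardware, so the verb must
	 * be observed last.
	 */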
	dma_wmb();
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	dpaa_flush(rcursor);
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}

static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
		       enum bm_rcr_cmode cmode)
{
	struct bm_rcr *rcr = &portal->rcr;
	u32 cfg;
	u8 pi;

	rcr->ring = portal->addr.ce + BM_CL_RCR;
	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	rcr->cursor = rcr->ring + pi;
	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
		BM_RCR_VERB_VBIT : 0;
	rcr->available = BM_RCR_SIZE - 1
		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
	rcr->pmode = pmode;
	rcr->cmode = cmode;
#endif
	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
		| (pmode & 0x3); /* BCSP_CFG::RPM */
	bm_out(portal, BM_REG_CFG, cfg);
	return 0;
}

static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_rcr *rcr = &portal->rcr;
	int i;

	DPAA_ASSERT(!rcr->busy);

	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr_ptr2idx(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");

	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr->ci)
		pr_crit("missing existing RCR completions\n");
	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
#endif
}

/* --- Management command API --- */
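/*
 * Usage sketch (this is exactly the sequence bman_acquire() below uses):
 *
 *	mcc = bm_mc_start(portal);
 *	mcc->bpid = bpid;
 *	bm_mc_commit(portal, BM_MCC_VERB_CMD_ACQUIRE | num);
 *	if (!bm_mc_result_timeout(portal, &mcr))
 *		... timed out ...
 *
 * Responses alternate between the RR0 and RR1 registers; rridx and vbit
 * track which response register and valid-bit colour to expect next.
 */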
static int bm_mc_init(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + BM_CL_CR;
	mc->rr = portal->addr.ce + BM_CL_RR0;
	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
		    0 : 1;
	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return 0;
}

static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
	if (mc->state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_hw;
#endif
}

static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return rr;
}

static inline int bm_mc_result_timeout(struct bm_portal *portal,
				       union bm_mc_result **mcr)
{
	int timeout = BM_MCR_TIMEOUT;

	do {
		*mcr = bm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}
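/*
 * Note: with BM_MCR_TIMEOUT at 10000 and udelay(1) per iteration, the poll
 * above busy-waits for up to roughly 10 ms before giving up; the non-zero
 * return value is the number of iterations left, i.e. "true" on success.
 */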

/* Disable all BSCN interrupts for the portal */
static void bm_isr_bscn_disable(struct bm_portal *portal)
{
	bm_out(portal, BM_REG_SCN(0), 0);
	bm_out(portal, BM_REG_SCN(1), 0);
}

static int bman_create_portal(struct bman_portal *portal,
			      const struct bm_portal_config *c)
{
	struct bm_portal *p;
	int ret;

	p = &portal->p;
	/*
	 * Prep the low-level portal struct with the mapped addresses from
	 * the config; everything that follows depends on it. "config" is
	 * kept mainly for later (de)reference.
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/*
	 * Default to all BPIDs disabled; we enable as required at
	 * run-time.
	 */
	bm_isr_bscn_disable(p);

	/* Write-to-clear any stale interrupt status bits */
	bm_out(p, BM_REG_ISDR, 0xffffffff);
	portal->irq_sources = 0;
	bm_out(p, BM_REG_IER, 0);
	bm_out(p, BM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = c;

	bm_out(p, BM_REG_ISDR, 0);
	bm_out(p, BM_REG_IIR, 0);

	return 0;

fail_rcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	bm_mc_finish(p);
fail_mc:
	bm_rcr_finish(p);
fail_rcr:
	return -EIO;
}

struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
{
	struct bman_portal *portal;
	int err;

	portal = &per_cpu(bman_affine_portal, c->cpu);
	err = bman_create_portal(portal, c);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);

	return portal;
}

static u32 poll_portal_slow(struct bman_portal *p, u32 is)
{
	u32 ret = is;

	if (is & BM_PIRQ_RCRI) {
		bm_rcr_cce_update(&p->p);
		bm_rcr_set_ithresh(&p->p, 0);
		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
		is &= ~BM_PIRQ_RCRI;
	}

	/* There should be no status register bits left undefined */
	DPAA_ASSERT(!is);
	return ret;
}

int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
	bm_out(&p->p, BM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
	return 0;
}

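/*
 * Drain a pool by acquiring one buffer at a time until an acquire reports
 * zero buffers, at which point the BPID can safely be returned to the
 * allocator; any buffer pointers still in the pool are simply discarded.
 */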
static int bm_shutdown_pool(u32 bpid)
{
	struct bm_mc_command *bm_cmd;
	union bm_mc_result *bm_res;

	while (1) {
		struct bman_portal *p = get_affine_portal();
		/* Acquire buffers until empty */
		bm_cmd = bm_mc_start(&p->p);
		bm_cmd->bpid = bpid;
		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
			put_affine_portal();
			pr_crit("BMan acquire command timed out\n");
			return -ETIMEDOUT;
		}
		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
			put_affine_portal();
			/* Pool is empty */
			return 0;
		}
		put_affine_portal();
	}

	return 0;
}

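/*
 * BPID allocation. The genalloc pool below is assumed to be seeded (at
 * probe time, outside this file) at offset DPAA_GENALLOC_OFF, so that a
 * valid BPID 0 is never confused with gen_pool_alloc()'s 0-on-failure;
 * the offset is masked off or OR'd back in when converting between pool
 * addresses and BPIDs.
 */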
struct gen_pool *bm_bpalloc;

static int bm_alloc_bpid_range(u32 *result, u32 count)
{
	unsigned long addr;

	addr = gen_pool_alloc(bm_bpalloc, count);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

static int bm_release_bpid(u32 bpid)
{
	int ret;

	ret = bm_shutdown_pool(bpid);
	if (ret) {
		pr_debug("BPID %d leaked\n", bpid);
		return ret;
	}

	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
	return 0;
}

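/*
 * Exported pool API. A minimal usage sketch (buf_addr is a hypothetical,
 * caller-provided DMA address; error handling elided):
 *
 *	struct bman_pool *pool = bman_new_pool();
 *	struct bm_buffer buf;
 *
 *	bm_buffer_set64(&buf, buf_addr);
 *	bman_release(pool, &buf, 1);		// seed the pool
 *	...
 *	if (bman_acquire(pool, &buf, 1) == 1)
 *		...				// got the buffer back
 *	bman_free_pool(pool);
 */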
struct bman_pool *bman_new_pool(void)
{
	struct bman_pool *pool = NULL;
	u32 bpid;

	if (bm_alloc_bpid_range(&bpid, 1))
		return NULL;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto err;

	pool->bpid = bpid;

	return pool;
err:
	bm_release_bpid(bpid);
	kfree(pool);
	return NULL;
}
EXPORT_SYMBOL(bman_new_pool);

void bman_free_pool(struct bman_pool *pool)
{
	bm_release_bpid(pool->bpid);

	kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);

int bman_get_bpid(const struct bman_pool *pool)
{
	return pool->bpid;
}
EXPORT_SYMBOL(bman_get_bpid);

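/*
 * Refresh our view of the RCR consumer index when the ring looks almost
 * full: while at least one entry is still available, a read-only prefetch
 * of the CI cacheline is enough (the real update can happen on a later
 * attempt); once the ring is exhausted we must do a full update to make
 * progress.
 */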
static void update_rcr_ci(struct bman_portal *p, int avail)
{
	if (avail)
		bm_rcr_cce_prefetch(&p->p);
	else
		bm_rcr_cce_update(&p->p);
}

int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

	do {
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		local_irq_restore(irqflags);
		put_affine_portal();
		if (likely(r))
			break;

		udelay(1);
	} while (--timeout);

	if (unlikely(!timeout))
		return -ETIMEDOUT;

	p = get_affine_portal();
	local_irq_save(irqflags);
	/*
	 * Set the first entry field-by-field rather than memcpy()ing it:
	 * its first byte aliases the verb, and writing the verb before the
	 * entry is complete can trigger badness with the valid-bit.
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(bman_release);

int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p = get_affine_portal();
	struct bm_mc_command *mcc;
	union bm_mc_result *mcr;
	int ret;

	DPAA_ASSERT(num > 0 && num <= 8);

	mcc = bm_mc_start(&p->p);
	mcc->bpid = pool->bpid;
	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
	if (!bm_mc_result_timeout(&p->p, &mcr)) {
		put_affine_portal();
		pr_crit("BMan Acquire Timeout\n");
		return -ETIMEDOUT;
	}
	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
	if (bufs)
		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

	put_affine_portal();
	if (ret != num)
		ret = -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(bman_acquire);

const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal)
{
	return portal->config;
}
798