xref: /freebsd/sys/contrib/ncsw/Peripherals/BM/bman_low.c (revision d9f0ce31900a48d1a2bfc1c8c86f79d1e831451a)
1 /******************************************************************************
2 
 3  © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
4  All rights reserved.
5 
6  This is proprietary source code of Freescale Semiconductor Inc.,
7  and its use is subject to the NetComm Device Drivers EULA.
8  The copyright notice above does not evidence any actual or intended
9  publication of such source code.
10 
11  ALTERNATIVELY, redistribution and use in source and binary forms, with
12  or without modification, are permitted provided that the following
13  conditions are met:
14      * Redistributions of source code must retain the above copyright
15        notice, this list of conditions and the following disclaimer.
16      * Redistributions in binary form must reproduce the above copyright
17        notice, this list of conditions and the following disclaimer in the
18        documentation and/or other materials provided with the distribution.
19      * Neither the name of Freescale Semiconductor nor the
20        names of its contributors may be used to endorse or promote products
21        derived from this software without specific prior written permission.
22 
23  THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26  DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34 
35  **************************************************************************/
36 /******************************************************************************
37  @File          bman_low.c
38 
39  @Description   BM low-level implementation
40 *//***************************************************************************/
41 #include "std_ext.h"
42 #include "core_ext.h"
43 #include "xx_ext.h"
44 #include "error_ext.h"
45 
46 #include "bman_private.h"
47 
48 
49 /***************************/
50 /* Portal register assists */
51 /***************************/
52 
53 /* Cache-inhibited register offsets */
54 #define REG_RCR_PI_CINH     (void *)0x0000
55 #define REG_RCR_CI_CINH     (void *)0x0004
56 #define REG_RCR_ITR         (void *)0x0008
57 #define REG_CFG             (void *)0x0100
58 #define REG_SCN(n)          ((void *)(0x0200 + ((n) << 2)))
59 #define REG_ISR             (void *)0x0e00
60 #define REG_IER             (void *)0x0e04
61 #define REG_ISDR            (void *)0x0e08
62 #define REG_IIR             (void *)0x0e0c
63 
64 /* Cache-enabled register offsets */
65 #define CL_CR               (void *)0x0000
66 #define CL_RR0              (void *)0x0100
67 #define CL_RR1              (void *)0x0140
68 #define CL_RCR              (void *)0x1000
69 #define CL_RCR_PI_CENA      (void *)0x3000
70 #define CL_RCR_CI_CENA      (void *)0x3100
71 
72 /* The h/w design requires mappings to be size-aligned so that "add"s can be
73  * reduced to "or"s. The primitives below do the same for s/w. */
74 
/* Add two pointers' integer values; used to apply a register offset to a
 * size-aligned portal base address. */
static __inline__ void *ptr_ADD(void *a, void *b)
{
    uintptr_t sum = (uintptr_t)a + (uintptr_t)b;
    return (void *)sum;
}
79 
80 /* Bitwise-OR two pointers */
static __inline__ void *ptr_OR(void *a, void *b)
{
    /* With size-aligned mappings, OR-ing an offset is equivalent to adding
     * it (see the note above these primitives). */
    uintptr_t combined = (uintptr_t)a | (uintptr_t)b;
    return (void *)combined;
}
85 
86 /* Cache-inhibited register access */
87 static __inline__ uint32_t __bm_in(struct bm_addr *bm, void *offset)
88 {
89     uint32_t    *tmp = (uint32_t *)ptr_ADD(bm->addr_ci, offset);
90     return GET_UINT32(*tmp);
91 }
92 static __inline__ void __bm_out(struct bm_addr *bm, void *offset, uint32_t val)
93 {
94     uint32_t    *tmp = (uint32_t *)ptr_ADD(bm->addr_ci, offset);
95     WRITE_UINT32(*tmp, val);
96 }
97 #define bm_in(reg)        __bm_in(&portal->addr, REG_##reg)
98 #define bm_out(reg, val)    __bm_out(&portal->addr, REG_##reg, val)
99 
100 /* Convert 'n' cachelines to a pointer value for bitwise OR */
101 #define bm_cl(n)        (void *)((n) << 6)
102 
103 /* Cache-enabled (index) register access */
104 static __inline__ void __bm_cl_touch_ro(struct bm_addr *bm, void *offset)
105 {
106     dcbt_ro(ptr_ADD(bm->addr_ce, offset));
107 }
108 static __inline__ void __bm_cl_touch_rw(struct bm_addr *bm, void *offset)
109 {
110     dcbt_rw(ptr_ADD(bm->addr_ce, offset));
111 }
112 static __inline__ uint32_t __bm_cl_in(struct bm_addr *bm, void *offset)
113 {
114     uint32_t    *tmp = (uint32_t *)ptr_ADD(bm->addr_ce, offset);
115     return GET_UINT32(*tmp);
116 }
117 static __inline__ void __bm_cl_out(struct bm_addr *bm, void *offset, uint32_t val)
118 {
119     uint32_t    *tmp = (uint32_t *)ptr_ADD(bm->addr_ce, offset);
120     WRITE_UINT32(*tmp, val);
121     dcbf(tmp);
122 }
123 static __inline__ void __bm_cl_invalidate(struct bm_addr *bm, void *offset)
124 {
125     dcbi(ptr_ADD(bm->addr_ce, offset));
126 }
127 #define bm_cl_touch_ro(reg)    __bm_cl_touch_ro(&portal->addr, CL_##reg##_CENA)
128 #define bm_cl_touch_rw(reg)    __bm_cl_touch_rw(&portal->addr, CL_##reg##_CENA)
129 #define bm_cl_in(reg)        __bm_cl_in(&portal->addr, CL_##reg##_CENA)
130 #define bm_cl_out(reg, val)    __bm_cl_out(&portal->addr, CL_##reg##_CENA, val)
131 #define bm_cl_invalidate(reg) __bm_cl_invalidate(&portal->addr, CL_##reg##_CENA)
132 
133 /* Cyclic helper for rings. TODO: once we are able to do fine-grain perf
134  * analysis, look at using the "extra" bit in the ring index registers to avoid
135  * cyclic issues. */
static __inline__ uint8_t cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
{
    /* Distance from 'first' (included) to 'last' (excluded), wrapping
     * around a ring of 'ringsize' entries when last precedes first. */
    unsigned int span = (first <= last) ? (unsigned int)(last - first)
                                        : (unsigned int)(ringsize + last - first);
    return (uint8_t)span;
}
143 
144 /* --------------- */
145 /* --- RCR API --- */
146 
147 /* It's safer to code in terms of the 'rcr' object than the 'portal' object,
148  * because the latter runs the risk of copy-n-paste errors from other code where
149  * we could manipulate some other structure within 'portal'. */
150 /* #define RCR_API_START()      register struct bm_rcr *rcr = &portal->rcr */
151 
152 /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
153 #define RCR_CARRYCLEAR(p) \
154     (void *)((uintptr_t)(p) & (~(uintptr_t)(BM_RCR_SIZE << 6)))
155 
156 /* Bit-wise logic to convert a ring pointer to a ring index */
157 static __inline__ uint8_t RCR_PTR2IDX(struct bm_rcr_entry *e)
158 {
159     return (uint8_t)(((uint32_t)e >> 6) & (BM_RCR_SIZE - 1));
160 }
161 
162 /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
163 static __inline__ void RCR_INC(struct bm_rcr *rcr)
164 {
165     /* NB: this is odd-looking, but experiments show that it generates
166      * fast code with essentially no branching overheads. We increment to
167      * the next RCR pointer and handle overflow and 'vbit'. */
168     struct bm_rcr_entry *partial = rcr->cursor + 1;
169     rcr->cursor = RCR_CARRYCLEAR(partial);
170     if (partial != rcr->cursor)
171         rcr->vbit ^= BM_RCR_VERB_VBIT;
172 }
173 
/* Initialize the RCR portion of a portal. Recovers the current ring state
 * (consumer index, producer index, valid-bit phase, free-entry count and
 * interrupt threshold) from the portal registers, then programs the
 * requested produce mode into the low bits of the portal CFG register.
 * 'cmode' is only recorded for consistency checking. Always returns 0. */
t_Error bm_rcr_init(struct bm_portal *portal,
                    e_BmPortalProduceMode pmode,
                    e_BmPortalRcrConsumeMode cmode)
{
    register struct bm_rcr *rcr = &portal->rcr;
    uint32_t cfg;
    uint8_t pi;

    rcr->ring = ptr_ADD(portal->addr.addr_ce, CL_RCR);
    rcr->ci = (uint8_t)(bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1));
    pi = (uint8_t)(bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1));
    rcr->cursor = rcr->ring + pi;
    /* The PI register's BM_RCR_SIZE bit carries the current valid-bit
     * phase of the ring. */
    rcr->vbit = (uint8_t)((bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ?  BM_RCR_VERB_VBIT : 0);
    /* One slot is held back, so capacity is BM_RCR_SIZE - 1. */
    rcr->available = (uint8_t)(BM_RCR_SIZE - 1 - cyc_diff(BM_RCR_SIZE, rcr->ci, pi));
    rcr->ithresh = (uint8_t)bm_in(RCR_ITR);
#ifdef BM_CHECKING
    rcr->busy = 0;
    rcr->pmode = pmode;
    rcr->cmode = cmode;
#else
    UNUSED(cmode);
#endif /* BM_CHECKING */
    cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
    bm_out(CFG, cfg);
    return 0;
}
200 
/* Tear down the RCR portion of a portal. Emits warnings (but proceeds) if
 * the hardware indices disagree with the software shadow state, which
 * would mean entries are being lost or left unconsumed. */
void bm_rcr_finish(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;
    /* Re-read the hardware producer/consumer indices for comparison. */
    uint8_t pi = (uint8_t)(bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1));
    uint8_t ci = (uint8_t)(bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1));
    ASSERT_COND(!rcr->busy);
    /* Cursor ahead of hardware PI => entries written but never committed. */
    if (pi != RCR_PTR2IDX(rcr->cursor))
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("losing uncommitted RCR entries"));
    /* Hardware CI moved past our shadow => completions never collected. */
    if (ci != rcr->ci)
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("missing existing RCR completions"));
    if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("RCR destroyed unquiesced"));
}
214 
/* Begin composing one RCR entry. Returns a pointer to a zeroed ring slot
 * for the caller to fill in, or NULL if no slots are available. Must be
 * paired with bm_rcr_abort() or one of the commit variants. */
struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;
    ASSERT_COND(!rcr->busy);
    if (!rcr->available)
        return NULL;
#ifdef BM_CHECKING
    rcr->busy = 1;
#endif /* BM_CHECKING */
    /* Zero the cacheline so the caller starts from a clean entry. */
    dcbz_64(rcr->cursor);
    return rcr->cursor;
}
227 
/* Cancel an entry obtained from bm_rcr_start() without committing it.
 * Only clears the software 'busy' flag; nothing reaches hardware. */
void bm_rcr_abort(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;
    ASSERT_COND(rcr->busy);
#ifdef BM_CHECKING
    rcr->busy = 0;
#else
    UNUSED(rcr);
#endif /* BM_CHECKING */
}
238 
/* Finalize the in-progress entry (write its verb with the valid-bit and
 * flush it to the ring) without publishing a new producer index, then
 * advance to and return the next zeroed slot. Returns NULL if only one
 * slot remains. Not usable in PVB produce mode, where the valid-bit
 * itself is what publishes an entry. */
struct bm_rcr_entry *bm_rcr_pend_and_next(struct bm_portal *portal, uint8_t myverb)
{
    register struct bm_rcr *rcr = &portal->rcr;
    ASSERT_COND(rcr->busy);
    ASSERT_COND(rcr->pmode != e_BmPortalPVB);
    if (rcr->available == 1)
        return NULL;
    /* The verb byte is written last; it completes the entry. */
    rcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | rcr->vbit);
    dcbf_64(rcr->cursor);
    RCR_INC(rcr);
    rcr->available--;
    dcbz_64(rcr->cursor);
    return rcr->cursor;
}
253 
/* Commit the in-progress entry in cache-inhibited (PCI) produce mode:
 * write the verb, advance the cursor, then publish the new producer index
 * through the cache-inhibited PI register. */
void bm_rcr_pci_commit(struct bm_portal *portal, uint8_t myverb)
{
    register struct bm_rcr *rcr = &portal->rcr;
    ASSERT_COND(rcr->busy);
    ASSERT_COND(rcr->pmode == e_BmPortalPCI);
    rcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | rcr->vbit);
    RCR_INC(rcr);
    rcr->available--;
    /* Full sync: the entry must be visible before hardware sees the PI. */
    hwsync();
    bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
#ifdef BM_CHECKING
    rcr->busy = 0;
#endif /* BM_CHECKING */
}
268 
/* Prepare for a PCE-mode commit: invalidate any stale copy of the
 * cache-enabled PI line, then prefetch it for writing. */
void bm_rcr_pce_prefetch(struct bm_portal *portal)
{
    ASSERT_COND(((struct bm_rcr *)&portal->rcr)->pmode == e_BmPortalPCE);
    bm_cl_invalidate(RCR_PI);
    bm_cl_touch_rw(RCR_PI);
}
275 
/* Commit the in-progress entry in cache-enabled (PCE) produce mode: write
 * the verb, advance the cursor, then publish the new producer index
 * through the cache-enabled PI register. */
void bm_rcr_pce_commit(struct bm_portal *portal, uint8_t myverb)
{
    register struct bm_rcr *rcr = &portal->rcr;
    ASSERT_COND(rcr->busy);
    ASSERT_COND(rcr->pmode == e_BmPortalPCE);
    rcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | rcr->vbit);
    RCR_INC(rcr);
    rcr->available--;
    /* Order the entry's stores before the PI update becomes visible. */
    lwsync();
    bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
#ifdef BM_CHECKING
    rcr->busy = 0;
#endif /* BM_CHECKING */
}
290 
/* Commit the in-progress entry in valid-bit (PVB) produce mode. No PI
 * register is written: flushing the verb with the flipped valid-bit is
 * itself what tells hardware a new entry exists. */
void bm_rcr_pvb_commit(struct bm_portal *portal, uint8_t myverb)
{
    register struct bm_rcr *rcr = &portal->rcr;
    struct bm_rcr_entry *rcursor;
    ASSERT_COND(rcr->busy);
    ASSERT_COND(rcr->pmode == e_BmPortalPVB);
    /* Order all earlier stores to the entry before the verb write below,
     * since the verb is the publication point in this mode. */
    lwsync();
    rcursor = rcr->cursor;
    rcursor->__dont_write_directly__verb = (uint8_t)(myverb | rcr->vbit);
    dcbf_64(rcursor);
    RCR_INC(rcr);
    rcr->available--;
#ifdef BM_CHECKING
    rcr->busy = 0;
#endif /* BM_CHECKING */
}
307 
308 
/* CCI consume mode: poll the cache-inhibited CI register, credit any
 * newly consumed entries back to 'available', and return how many were
 * consumed since the previous update. */
uint8_t bm_rcr_cci_update(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;
    uint8_t diff, old_ci = rcr->ci;
    ASSERT_COND(rcr->cmode == e_BmPortalRcrCCI);
    rcr->ci = (uint8_t)(bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1));
    diff = cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
    rcr->available += diff;
    return diff;
}
319 
320 
/* Prefetch the cache-enabled CI line ahead of a CCE-mode update. */
void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
    ASSERT_COND(((struct bm_rcr *)&portal->rcr)->cmode == e_BmPortalRcrCCE);
    bm_cl_touch_ro(RCR_CI);
}
326 
327 
/* CCE consume mode: read CI through the cache-enabled register, then
 * invalidate the line so the next poll fetches fresh data. Credits newly
 * consumed entries to 'available' and returns the count. */
uint8_t bm_rcr_cce_update(struct bm_portal *portal)
{
    register struct bm_rcr *rcr = &portal->rcr;
    uint8_t diff, old_ci = rcr->ci;
    ASSERT_COND(rcr->cmode == e_BmPortalRcrCCE);
    rcr->ci = (uint8_t)(bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1));
    bm_cl_invalidate(RCR_CI);
    diff = cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
    rcr->available += diff;
    return diff;
}
339 
340 
341 uint8_t bm_rcr_get_ithresh(struct bm_portal *portal)
342 {
343     register struct bm_rcr *rcr = &portal->rcr;
344     return rcr->ithresh;
345 }
346 
347 
348 void bm_rcr_set_ithresh(struct bm_portal *portal, uint8_t ithresh)
349 {
350     register struct bm_rcr *rcr = &portal->rcr;
351     rcr->ithresh = ithresh;
352     bm_out(RCR_ITR, ithresh);
353 }
354 
355 
356 uint8_t bm_rcr_get_avail(struct bm_portal *portal)
357 {
358     register struct bm_rcr *rcr = &portal->rcr;
359     return rcr->available;
360 }
361 
362 
363 uint8_t bm_rcr_get_fill(struct bm_portal *portal)
364 {
365     register struct bm_rcr *rcr = &portal->rcr;
366     return (uint8_t)(BM_RCR_SIZE - 1 - rcr->available);
367 }
368 
369 
370 /* ------------------------------ */
371 /* --- Management command API --- */
372 
373 /* It's safer to code in terms of the 'mc' object than the 'portal' object,
374  * because the latter runs the risk of copy-n-paste errors from other code where
375  * we could manipulate some other structure within 'portal'. */
376 /* #define MC_API_START()      register struct bm_mc *mc = &portal->mc */
377 
378 
/* Initialize the management-command (MC) interface of a portal: map the
 * command register and the pair of response registers, and recover which
 * response slot / valid-bit phase is currently in use from the verb left
 * in the command register. Always returns 0. */
t_Error bm_mc_init(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;
    mc->cr = ptr_ADD(portal->addr.addr_ce, CL_CR);
    mc->rr = ptr_ADD(portal->addr.addr_ce, CL_RR0);
    /* If the CR verb still carries the valid-bit, the next response lands
     * in RR0; otherwise in RR1. */
    mc->rridx = (uint8_t)((mc->cr->__dont_write_directly__verb & BM_MCC_VERB_VBIT) ?
            0 : 1);
    mc->vbit = (uint8_t)(mc->rridx ? BM_MCC_VERB_VBIT : 0);
#ifdef BM_CHECKING
    mc->state = mc_idle;
#endif /* BM_CHECKING */
    return 0;
}
392 
393 
/* Tear down the MC interface; warn if a command is still in flight. */
void bm_mc_finish(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;
    ASSERT_COND(mc->state == mc_idle);
#ifdef BM_CHECKING
    if (mc->state != mc_idle)
        REPORT_ERROR(WARNING, E_INVALID_STATE, ("Losing incomplete MC command"));
#else
    UNUSED(mc);
#endif /* BM_CHECKING */
}
405 
406 
/* Begin composing a management command. Returns the zeroed command
 * register for the caller to fill in before bm_mc_commit(). */
struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;
    ASSERT_COND(mc->state == mc_idle);
#ifdef BM_CHECKING
    mc->state = mc_user;
#endif /* BM_CHECKING */
    /* Zero the command cacheline before handing it to the caller. */
    dcbz_64(mc->cr);
    return mc->cr;
}
417 
418 
/* Cancel a command obtained from bm_mc_start() without submitting it.
 * Only clears the software state; nothing reaches hardware. */
void bm_mc_abort(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;
    ASSERT_COND(mc->state == mc_user);
#ifdef BM_CHECKING
    mc->state = mc_idle;
#else
    UNUSED(mc);
#endif /* BM_CHECKING */
}
429 
430 
/* Submit the command composed since bm_mc_start(): order prior stores,
 * write the verb (with the current valid-bit), flush the command line to
 * hardware, and start prefetching the response register it will use. */
void bm_mc_commit(struct bm_portal *portal, uint8_t myverb)
{
    register struct bm_mc *mc = &portal->mc;
    ASSERT_COND(mc->state == mc_user);
    /* The verb write below publishes the command; order everything the
     * caller wrote into 'cr' before it. */
    lwsync();
    mc->cr->__dont_write_directly__verb = (uint8_t)(myverb | mc->vbit);
    dcbf_64(mc->cr);
    dcbit_ro(mc->rr + mc->rridx);
#ifdef BM_CHECKING
    mc->state = mc_hw;
#endif /* BM_CHECKING */
}
443 
444 
/* Poll for the response to a committed command. Returns NULL while the
 * response has not arrived; once it has, flips to the other response
 * register and valid-bit phase for the next command and returns the
 * response. */
struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
    register struct bm_mc *mc = &portal->mc;
    struct bm_mc_result *rr = mc->rr + mc->rridx;
    ASSERT_COND(mc->state == mc_hw);
    /* The inactive response register's verb byte always returns zero until
     * its command is submitted and completed. This includes the valid-bit,
     * in case you were wondering... */
    if (!rr->verb) {
        /* Not ready: re-arm the prefetch so the caller's retry is cheap. */
        dcbit_ro(rr);
        return NULL;
    }
    mc->rridx ^= 1;
    mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef BM_CHECKING
    mc->state = mc_idle;
#endif /* BM_CHECKING */
    return rr;
}
464 
465 /* ------------------------------------- */
466 /* --- Portal interrupt register API --- */
467 
468 #define SCN_REG(bpid) REG_SCN((bpid) / 32)
469 #define SCN_BIT(bpid) (0x80000000 >> (bpid & 31))
470 void bm_isr_bscn_mask(struct bm_portal *portal, uint8_t bpid, int enable)
471 {
472     uint32_t val;
473     ASSERT_COND(bpid < BM_MAX_NUM_OF_POOLS);
474     /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
475     val = __bm_in(&portal->addr, SCN_REG(bpid));
476     if (enable)
477         val |= SCN_BIT(bpid);
478     else
479         val &= ~SCN_BIT(bpid);
480     __bm_out(&portal->addr, SCN_REG(bpid), val);
481 }
482 
483 
/* Read one of the portal interrupt registers, selected by 'n' as a word
 * index from REG_ISR (the 0x0e00 group: ISR/IER/ISDR/IIR). */
uint32_t __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
{
    return __bm_in(&portal->addr, PTR_MOVE(REG_ISR, (n << 2)));
}
488 
489 
/* Write one of the portal interrupt registers, selected by 'n' as a word
 * index from REG_ISR (the 0x0e00 group: ISR/IER/ISDR/IIR). */
void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n, uint32_t val)
{
    __bm_out(&portal->addr, PTR_MOVE(REG_ISR, (n << 2)), val);
}
494 
495