1 /*-
2 * Copyright (c) 2011-2012 Semihalf.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/kernel.h>
30 #include <sys/bus.h>
31 #include <sys/lock.h>
32 #include <sys/module.h>
33 #include <sys/mutex.h>
34 #include <sys/proc.h>
35 #include <sys/pcpu.h>
36 #include <sys/rman.h>
37 #include <sys/sched.h>
38
39 #include <machine/tlb.h>
40
41 #include "bman.h"
42
43 static struct bman_softc *bman_sc;
44
45 extern t_Handle bman_portal_setup(struct bman_softc *bsc);
46
47 static void
bman_exception(t_Handle h_App,e_BmExceptions exception)48 bman_exception(t_Handle h_App, e_BmExceptions exception)
49 {
50 struct bman_softc *sc;
51 const char *message;
52
53 sc = h_App;
54
55 switch (exception) {
56 case e_BM_EX_INVALID_COMMAND:
57 message = "Invalid Command Verb";
58 break;
59 case e_BM_EX_FBPR_THRESHOLD:
60 message = "FBPR pool exhaused. Consider increasing "
61 "BMAN_MAX_BUFFERS";
62 break;
63 case e_BM_EX_SINGLE_ECC:
64 message = "Single bit ECC error";
65 break;
66 case e_BM_EX_MULTI_ECC:
67 message = "Multi bit ECC error";
68 break;
69 default:
70 message = "Unknown error";
71 }
72
73 device_printf(sc->sc_dev, "BMAN Exception: %s.\n", message);
74 }
75
/*
 * Attach the BMAN device: map its CCSR register window, hook up the error
 * interrupt, and configure/initialize the hardware through the vendor
 * BM_* API.  Returns 0 on success or ENXIO on any failure (after tearing
 * down whatever was set up via bman_detach()).
 */
int
bman_attach(device_t dev)
{
	struct bman_softc *sc;
	t_BmRevisionInfo rev;
	t_Error error;
	t_BmParam bp;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	/* Publish the softc for the file-scope bman_* API below. */
	bman_sc = sc;

	/* Check if MallocSmart allocator is ready */
	if (XX_MallocSmartInit() != E_OK)
		return (ENXIO);

	/* Allocate resources */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY,
	    &sc->sc_rrid, BMAN_CCSR_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ,
	    &sc->sc_irid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires == NULL)
		goto err;

	/*
	 * Initialize BMAN.  The vendor layer receives the raw bus handle
	 * and IRQ resource; bman_exception() is the error callback and
	 * gets the softc back through h_App.
	 */
	memset(&bp, 0, sizeof(bp));
	bp.guestId = NCSW_MASTER_ID;
	bp.baseAddress = rman_get_bushandle(sc->sc_rres);
	bp.totalNumOfBuffers = BMAN_MAX_BUFFERS;
	bp.f_Exception = bman_exception;
	bp.h_App = sc;
	bp.errIrq = (uintptr_t)sc->sc_ires;
	bp.partBpidBase = 0;
	bp.partNumOfPools = BM_MAX_NUM_OF_POOLS;

	/* Config must precede the threshold tweak and BM_Init below. */
	sc->sc_bh = BM_Config(&bp);
	if (sc->sc_bh == NULL)
		goto err;

	/*
	 * Warn if there is less than 5% free FPBR's in pool.
	 * NOTE(review): the /8 presumably reflects buffers-per-FBPR entry,
	 * making this 5% of the FBPR count — confirm against the BMan
	 * reference manual.
	 */
	error = BM_ConfigFbprThreshold(sc->sc_bh, (BMAN_MAX_BUFFERS / 8) / 20);
	if (error != E_OK)
		goto err;

	error = BM_Init(sc->sc_bh);
	if (error != E_OK)
		goto err;

	error = BM_GetRevision(sc->sc_bh, &rev);
	if (error != E_OK)
		goto err;

	device_printf(dev, "Hardware version: %d.%d.\n",
	    rev.majorRev, rev.minorRev);

	return (0);

err:
	/* bman_detach() tolerates partially-initialized state. */
	bman_detach(dev);
	return (ENXIO);
}
142
143 int
bman_detach(device_t dev)144 bman_detach(device_t dev)
145 {
146 struct bman_softc *sc;
147
148 sc = device_get_softc(dev);
149
150 if (sc->sc_bh != NULL)
151 BM_Free(sc->sc_bh);
152
153 if (sc->sc_ires != NULL)
154 bus_release_resource(dev, SYS_RES_IRQ,
155 sc->sc_irid, sc->sc_ires);
156
157 if (sc->sc_rres != NULL)
158 bus_release_resource(dev, SYS_RES_MEMORY,
159 sc->sc_rrid, sc->sc_rres);
160
161 return (0);
162 }
163
/*
 * Suspend method stub: no driver state is saved across suspend.
 */
int
bman_suspend(device_t dev)
{

	return (0);
}
170
/*
 * Resume method stub: nothing is restored; hardware state is assumed to
 * persist — TODO confirm across a real suspend/resume cycle.
 */
int
bman_resume(device_t dev)
{

	return (0);
}
177
/*
 * Shutdown method stub: the device needs no quiescing at system shutdown.
 */
int
bman_shutdown(device_t dev)
{

	return (0);
}
184
185 /*
186 * BMAN API
187 */
188
/*
 * Create and initialize a BMAN buffer pool.
 *
 * On success returns the vendor pool handle and stores the assigned
 * buffer-pool id through *bpid; on failure returns NULL (freeing any
 * half-configured pool).  The whole sequence runs with the thread pinned
 * to one CPU so that the per-CPU portal obtained from
 * bman_portal_setup() stays valid throughout; the creating CPU is
 * recorded in sc_bpool_cpu[] so bman_pool_destroy() can bind back to it.
 *
 * dep_* thresholds are only programmed when f_Depletion is non-NULL;
 * stockpiling is only configured when min/maxBuffers is nonzero.
 */
t_Handle
bman_pool_create(uint8_t *bpid, uint16_t bufferSize, uint16_t maxBuffers,
    uint16_t minBuffers, uint16_t allocBuffers, t_GetBufFunction *f_GetBuf,
    t_PutBufFunction *f_PutBuf, uint32_t dep_sw_entry, uint32_t dep_sw_exit,
    uint32_t dep_hw_entry, uint32_t dep_hw_exit,
    t_BmDepletionCallback *f_Depletion, t_Handle h_BufferPool,
    t_PhysToVirt *f_PhysToVirt, t_VirtToPhys *f_VirtToPhys)
{
	uint32_t thresholds[MAX_DEPLETION_THRESHOLDS];
	struct bman_softc *sc;
	t_Handle pool, portal;
	t_BmPoolParam bpp;
	int error;

	sc = bman_sc;
	pool = NULL;

	/* Stay on one CPU for the lifetime of the per-CPU portal. */
	sched_pin();

	portal = bman_portal_setup(sc);
	if (portal == NULL)
		goto err;

	memset(&bpp, 0, sizeof(bpp));
	bpp.h_Bm = sc->sc_bh;
	bpp.h_BmPortal = portal;
	bpp.h_App = h_BufferPool;
	bpp.numOfBuffers = allocBuffers;

	bpp.bufferPoolInfo.h_BufferPool = h_BufferPool;
	bpp.bufferPoolInfo.f_GetBuf = f_GetBuf;
	bpp.bufferPoolInfo.f_PutBuf = f_PutBuf;
	bpp.bufferPoolInfo.f_PhysToVirt = f_PhysToVirt;
	bpp.bufferPoolInfo.f_VirtToPhys = f_VirtToPhys;
	bpp.bufferPoolInfo.bufferSize = bufferSize;

	pool = BM_POOL_Config(&bpp);
	if (pool == NULL)
		goto err;

	/*
	 * Buffer context must be disabled on FreeBSD
	 * as it could cause memory corruption.
	 */
	BM_POOL_ConfigBuffContextMode(pool, 0);

	/* Optional software stockpile between the caller and hardware. */
	if (minBuffers != 0 || maxBuffers != 0) {
		error = BM_POOL_ConfigStockpile(pool, maxBuffers, minBuffers);
		if (error != E_OK)
			goto err;
	}

	/* Optional depletion notification thresholds. */
	if (f_Depletion != NULL) {
		thresholds[BM_POOL_DEP_THRESH_SW_ENTRY] = dep_sw_entry;
		thresholds[BM_POOL_DEP_THRESH_SW_EXIT] = dep_sw_exit;
		thresholds[BM_POOL_DEP_THRESH_HW_ENTRY] = dep_hw_entry;
		thresholds[BM_POOL_DEP_THRESH_HW_EXIT] = dep_hw_exit;
		error = BM_POOL_ConfigDepletion(pool, f_Depletion, thresholds);
		if (error != E_OK)
			goto err;
	}

	error = BM_POOL_Init(pool);
	if (error != E_OK)
		goto err;

	/* Record which CPU created the pool; destroy must run there too. */
	*bpid = BM_POOL_GetId(pool);
	sc->sc_bpool_cpu[*bpid] = PCPU_GET(cpuid);

	sched_unpin();

	return (pool);

err:
	if (pool != NULL)
		BM_POOL_Free(pool);

	sched_unpin();

	return (NULL);
}
270
271 int
bman_pool_destroy(t_Handle pool)272 bman_pool_destroy(t_Handle pool)
273 {
274 struct bman_softc *sc;
275
276 sc = bman_sc;
277 thread_lock(curthread);
278 sched_bind(curthread, sc->sc_bpool_cpu[BM_POOL_GetId(pool)]);
279 thread_unlock(curthread);
280
281 BM_POOL_Free(pool);
282
283 thread_lock(curthread);
284 sched_unbind(curthread);
285 thread_unlock(curthread);
286
287 return (0);
288 }
289
290 int
bman_pool_fill(t_Handle pool,uint16_t nbufs)291 bman_pool_fill(t_Handle pool, uint16_t nbufs)
292 {
293 struct bman_softc *sc;
294 t_Handle portal;
295 int error;
296
297 sc = bman_sc;
298 sched_pin();
299
300 portal = bman_portal_setup(sc);
301 if (portal == NULL) {
302 sched_unpin();
303 return (EIO);
304 }
305
306 error = BM_POOL_FillBufs(pool, portal, nbufs);
307
308 sched_unpin();
309
310 return ((error == E_OK) ? 0 : EIO);
311 }
312
313 void *
bman_get_buffer(t_Handle pool)314 bman_get_buffer(t_Handle pool)
315 {
316 struct bman_softc *sc;
317 t_Handle portal;
318 void *buffer;
319
320 sc = bman_sc;
321 sched_pin();
322
323 portal = bman_portal_setup(sc);
324 if (portal == NULL) {
325 sched_unpin();
326 return (NULL);
327 }
328
329 buffer = BM_POOL_GetBuf(pool, portal);
330
331 sched_unpin();
332
333 return (buffer);
334 }
335
336 int
bman_put_buffer(t_Handle pool,void * buffer)337 bman_put_buffer(t_Handle pool, void *buffer)
338 {
339 struct bman_softc *sc;
340 t_Handle portal;
341 int error;
342
343 sc = bman_sc;
344 sched_pin();
345
346 portal = bman_portal_setup(sc);
347 if (portal == NULL) {
348 sched_unpin();
349 return (EIO);
350 }
351
352 error = BM_POOL_PutBuf(pool, portal, buffer);
353
354 sched_unpin();
355
356 return ((error == E_OK) ? 0 : EIO);
357 }
358
359 uint32_t
bman_count(t_Handle pool)360 bman_count(t_Handle pool)
361 {
362
363 return (BM_POOL_GetCounter(pool, e_BM_POOL_COUNTERS_CONTENT));
364 }
365