/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2012 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright (c) 2011
 *	Ben Gray <ben.r.gray@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/intr.h>

#include <machine/bus.h>
#include <machine/pl310.h>
#ifdef PLATFORM
#include <machine/platformvar.h>
#endif

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#ifdef PLATFORM
#include "platform_pl310_if.h"
#endif

/*
 * Set the hw.pl310.enabled tunable to 0 if you need to disable the PL310
 * for debugging purposes.
 * Spec:
 * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0246e/DDI0246E_l2c310_r3p1_trm.pdf
 */

/*
 * Hardcode the errata workarounds for now.
 * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246b/pr01s02s02.html
 */
#define PL310_ERRATA_588369
#define PL310_ERRATA_753970
#define PL310_ERRATA_727915

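/*
 * Serialize access to the controller's cache maintenance registers.
 */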
#define PL310_LOCK(sc) do {		\
	mtx_lock_spin(&(sc)->sc_mtx);	\
} while (0)

#define PL310_UNLOCK(sc) do {		\
	mtx_unlock_spin(&(sc)->sc_mtx);	\
} while (0)

static int pl310_enabled = 1;
TUNABLE_INT("hw.pl310.enabled", &pl310_enabled);

static uint32_t g_l2cache_way_mask;

static const uint32_t g_l2cache_line_size = 32;
static const uint32_t g_l2cache_align_mask = (32 - 1);

static uint32_t g_l2cache_size;
static uint32_t g_way_size;
static uint32_t g_ways_assoc;

static struct pl310_softc *pl310_softc;

static struct ofw_compat_data compat_data[] = {
	{"arm,pl310", true}, /* Non-standard, FreeBSD. */
	{"arm,pl310-cache", true},
	{NULL, false}
};

#ifdef PLATFORM
static void
platform_pl310_init(struct pl310_softc *sc)
{

	PLATFORM_PL310_INIT(platform_obj(), sc);
}

static void
platform_pl310_write_ctrl(struct pl310_softc *sc, uint32_t val)
{

	PLATFORM_PL310_WRITE_CTRL(platform_obj(), sc, val);
}

static void
platform_pl310_write_debug(struct pl310_softc *sc, uint32_t val)
{

	PLATFORM_PL310_WRITE_DEBUG(platform_obj(), sc, val);
}
#endif

static void
pl310_print_config(struct pl310_softc *sc)
{
	uint32_t aux, prefetch;
	const char *dis = "disabled";
	const char *ena = "enabled";

	aux = pl310_read4(sc, PL310_AUX_CTRL);
	prefetch = pl310_read4(sc, PL310_PREFETCH_CTRL);

	device_printf(sc->sc_dev, "Early BRESP response: %s\n",
	    (aux & AUX_CTRL_EARLY_BRESP) ? ena : dis);
	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
	    (aux & AUX_CTRL_INSTR_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Data prefetch: %s\n",
	    (aux & AUX_CTRL_DATA_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Non-secure interrupt control: %s\n",
	    (aux & AUX_CTRL_NS_INT_CTRL) ? ena : dis);
	device_printf(sc->sc_dev, "Non-secure lockdown: %s\n",
	    (aux & AUX_CTRL_NS_LOCKDOWN) ? ena : dis);
	device_printf(sc->sc_dev, "Share override: %s\n",
	    (aux & AUX_CTRL_SHARE_OVERRIDE) ? ena : dis);

	device_printf(sc->sc_dev, "Double linefill: %s\n",
	    (prefetch & PREFETCH_CTRL_DL) ? ena : dis);
	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
	    (prefetch & PREFETCH_CTRL_INSTR_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Data prefetch: %s\n",
	    (prefetch & PREFETCH_CTRL_DATA_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Double linefill on WRAP request: %s\n",
	    (prefetch & PREFETCH_CTRL_DL_ON_WRAP) ? ena : dis);
	device_printf(sc->sc_dev, "Prefetch drop: %s\n",
	    (prefetch & PREFETCH_CTRL_PREFETCH_DROP) ? ena : dis);
	device_printf(sc->sc_dev, "Incr double Linefill: %s\n",
	    (prefetch & PREFETCH_CTRL_INCR_DL) ? ena : dis);
	device_printf(sc->sc_dev, "Not same ID on exclusive sequence: %s\n",
	    (prefetch & PREFETCH_CTRL_NOTSAMEID) ? ena : dis);
	device_printf(sc->sc_dev, "Prefetch offset: %d\n",
	    (prefetch & PREFETCH_CTRL_OFFSET_MASK));
}

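/*
 * Program the latencies for the tag or data RAM (which_reg selects
 * PL310_TAG_RAM_CTRL or PL310_DATA_RAM_CTRL).  The read, write and setup
 * latencies are given in cycles (1..8); a value of 0 leaves the
 * corresponding field unchanged.
 */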
void
pl310_set_ram_latency(struct pl310_softc *sc, uint32_t which_reg,
    uint32_t read, uint32_t write, uint32_t setup)
{
	uint32_t v;

	KASSERT(which_reg == PL310_TAG_RAM_CTRL ||
	    which_reg == PL310_DATA_RAM_CTRL,
	    ("bad pl310 ram latency register address"));

	v = pl310_read4(sc, which_reg);
	if (setup != 0) {
		KASSERT(setup <= 8, ("bad pl310 setup latency: %d", setup));
		v &= ~RAM_CTRL_SETUP_MASK;
		v |= (setup - 1) << RAM_CTRL_SETUP_SHIFT;
	}
	if (read != 0) {
		KASSERT(read <= 8, ("bad pl310 read latency: %d", read));
		v &= ~RAM_CTRL_READ_MASK;
		v |= (read - 1) << RAM_CTRL_READ_SHIFT;
	}
	if (write != 0) {
		KASSERT(write <= 8, ("bad pl310 write latency: %d", write));
		v &= ~RAM_CTRL_WRITE_MASK;
		v |= (write - 1) << RAM_CTRL_WRITE_SHIFT;
	}
	pl310_write4(sc, which_reg, v);
}

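/*
 * Interrupt filter.  The interrupt is only set up when the controller is
 * kept disabled for debugging, so any event counter interrupt means the
 * L2 cache saw activity it should not have.
 */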
static int
pl310_filter(void *arg)
{
	struct pl310_softc *sc = arg;
	uint32_t intr;

	intr = pl310_read4(sc, PL310_INTR_MASK);

	if (!sc->sc_enabled && (intr & INTR_MASK_ECNTR)) {
		/*
		 * This is for debugging purposes, so be blunt about it:
		 * we disable the PL310 only when something fishy is going
		 * on and we need to be sure the L2 cache is 100% disabled.
		 */
		panic("pl310: caches disabled but cache event detected\n");
	}

	return (FILTER_HANDLED);
}

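/*
 * Spin until the background maintenance operation signalled by mask in
 * the given register has completed.
 */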
static __inline void
pl310_wait_background_op(uint32_t off, uint32_t mask)
{

	while (pl310_read4(pl310_softc, off) & mask)
		continue;
}

/**
 * pl310_cache_sync - performs a cache sync operation
 *
 * According to the TRM:
 *
 * "Before writing to any other register you must perform an explicit
 *  Cache Sync operation. This is particularly important when the cache is
 *  enabled and changes to how the cache allocates new lines are to be made."
 */
static __inline void
pl310_cache_sync(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	/* Do not sync outer cache on IO coherent platform */
	if (pl310_softc->sc_io_coherent)
		return;

#ifdef PL310_ERRATA_753970
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		/* Write uncached PL310 register */
		pl310_write4(pl310_softc, 0x740, 0xffffffff);
	else
#endif
		pl310_write4(pl310_softc, PL310_CACHE_SYNC, 0xffffffff);
}

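/*
 * Write back and invalidate the entire L2 cache, applying the errata
 * workarounds for the affected RTL releases.
 */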
static void
pl310_wbinv_all(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
#ifdef PL310_ERRATA_727915
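	/*
	 * Errata 727915 workaround: on r2p0, clean and invalidate every
	 * line by way/index instead of using the background by-way
	 * operation; on r3p0, disable write-back and linefills through
	 * the debug register around the by-way operation.
	 */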
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r2p0) {
		int i, j;

		for (i = 0; i < g_ways_assoc; i++) {
			for (j = 0; j < g_way_size / g_l2cache_line_size; j++) {
				pl310_write4(pl310_softc,
				    PL310_CLEAN_INV_LINE_IDX,
				    (i << 28 | j << 5));
			}
		}
		pl310_cache_sync();
		PL310_UNLOCK(pl310_softc);
		return;
	}
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	pl310_write4(pl310_softc, PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_wait_background_op(PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_cache_sync();
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 0);
#endif
	PL310_UNLOCK(pl310_softc);
}

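/*
 * Write back and invalidate the L2 cache lines covering the physical
 * range [start, start + size); the range is first rounded out to whole
 * cache lines.
 */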
static void
pl310_wbinv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}

#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision >= CACHE_ID_RELEASE_r2p0 &&
	    pl310_softc->sc_rtl_revision < CACHE_ID_RELEASE_r3p1)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	while (size > 0) {
#ifdef PL310_ERRATA_588369
		if (pl310_softc->sc_rtl_revision <= CACHE_ID_RELEASE_r1p0) {
			/*
			 * Errata 588369 says that clean + inv may keep the
			 * cache line if it was clean; the recommended
			 * workaround is to clean then invalidate the cache
			 * line, with write-back and cache linefill disabled.
			 */
			pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
			pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		} else
#endif
			pl310_write4(pl310_softc, PL310_CLEAN_INV_LINE_PA,
			    start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision >= CACHE_ID_RELEASE_r2p0 &&
	    pl310_softc->sc_rtl_revision < CACHE_ID_RELEASE_r3p1)
		platform_pl310_write_debug(pl310_softc, 0);
#endif

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

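/*
 * Write back (clean) the L2 cache lines covering the given physical
 * range, rounded out to whole cache lines.
 */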
static void
pl310_wb_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}

	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}

	while (size > 0) {
		pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

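/*
 * Invalidate the L2 cache lines covering the given physical range,
 * rounded out to whole cache lines.
 */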
static void
pl310_inv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}
	while (size > 0) {
		pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

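/*
 * Drain the PL310 write buffers by issuing a cache sync operation.
 */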
static void
pl310_drain_writebuf(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

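/*
 * Decode the cache geometry from the auxiliary control register: the way
 * size field encodes 2^(n + 13) bytes and the associativity bit selects
 * 8 or 16 ways.
 */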
static void
pl310_set_way_sizes(struct pl310_softc *sc)
{
	uint32_t aux_value;

	aux_value = pl310_read4(sc, PL310_AUX_CTRL);
	g_way_size = (aux_value & AUX_CTRL_WAY_SIZE_MASK) >>
	    AUX_CTRL_WAY_SIZE_SHIFT;
	g_way_size = 1 << (g_way_size + 13);
	if (aux_value & (1 << AUX_CTRL_ASSOCIATIVITY_SHIFT))
		g_ways_assoc = 16;
	else
		g_ways_assoc = 8;
	g_l2cache_way_mask = (1 << g_ways_assoc) - 1;
	g_l2cache_size = g_way_size * g_ways_assoc;
}

/*
 * Set up interrupt handling. This is done only if the cache controller is
 * disabled, for debugging. We set counters so when a cache event happens we'll
 * get interrupted and be warned that something is wrong, because no cache
 * events should happen if we're disabled.
 */
static void
pl310_config_intr(void *arg)
{
	struct pl310_softc *sc;

	sc = arg;

	/* Activate the interrupt */
	bus_setup_intr(sc->sc_dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    pl310_filter, NULL, sc, &sc->sc_irq_h);

	/* Cache Line Eviction for Counter 0 */
	pl310_write4(sc, PL310_EVENT_COUNTER0_CONF,
	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_CO);
	/* Data Read Request for Counter 1 */
	pl310_write4(sc, PL310_EVENT_COUNTER1_CONF,
	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_DRREQ);

	/* Enable and clear pending interrupts */
	pl310_write4(sc, PL310_INTR_CLEAR, INTR_MASK_ECNTR);
	pl310_write4(sc, PL310_INTR_MASK, INTR_MASK_ALL);

	/* Enable counters and reset C0 and C1 */
	pl310_write4(sc, PL310_EVENT_COUNTER_CTRL,
	    EVENT_COUNTER_CTRL_ENABLED |
	    EVENT_COUNTER_CTRL_C0_RESET |
	    EVENT_COUNTER_CTRL_C1_RESET);

	config_intrhook_disestablish(sc->sc_ich);
	free(sc->sc_ich, M_DEVBUF);
	sc->sc_ich = NULL;
}

static int
pl310_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);
	device_set_desc(dev, "PL310 L2 cache controller");
	return (0);
}

static int
pl310_attach(device_t dev)
{
	struct pl310_softc *sc = device_get_softc(dev);
	int rid;
	uint32_t cache_id, debug_ctrl;
	phandle_t node;

	sc->sc_dev = dev;
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Allocate an IRQ resource */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "cannot allocate IRQ, not using interrupt\n");
	}

	pl310_softc = sc;
	mtx_init(&sc->sc_mtx, "pl310lock", NULL, MTX_SPIN);

	cache_id = pl310_read4(sc, PL310_CACHE_ID);
	sc->sc_rtl_revision = (cache_id >> CACHE_ID_RELEASE_SHIFT) &
	    CACHE_ID_RELEASE_MASK;
	device_printf(dev, "Part number: 0x%x, release: 0x%x\n",
	    (cache_id >> CACHE_ID_PARTNUM_SHIFT) & CACHE_ID_PARTNUM_MASK,
	    (cache_id >> CACHE_ID_RELEASE_SHIFT) & CACHE_ID_RELEASE_MASK);

	/*
	 * Test for the "arm,io-coherent" property and disable sync operations
	 * if the platform is I/O coherent. Outer sync operations are not
	 * needed on a coherent platform and may be harmful in certain
	 * situations.
	 */
	node = ofw_bus_get_node(dev);
	if (OF_hasprop(node, "arm,io-coherent"))
		sc->sc_io_coherent = true;

	/*
	 * If the L2 cache is already enabled then something has violated the
	 * rules, because caches are supposed to be off at kernel entry. The
	 * cache must be disabled to write the configuration registers without
	 * triggering an access error (SLVERR), but there's no documented safe
	 * procedure for disabling the L2 cache in the manual. So we'll try to
	 * invent one:
	 *  - Use the debug register to force write-through mode and prevent
	 *    linefills (allocation of new lines on read); now anything we do
	 *    will not cause new data to come into the L2 cache.
	 *  - Writeback and invalidate the current contents.
	 *  - Disable the controller.
	 *  - Restore the original debug settings.
	 */
	if (pl310_read4(sc, PL310_CTRL) & CTRL_ENABLED) {
		device_printf(dev, "Warning: L2 Cache should not already be "
		    "active; trying to de-activate and re-initialize...\n");
		sc->sc_enabled = 1;
		debug_ctrl = pl310_read4(sc, PL310_DEBUG_CTRL);
		platform_pl310_write_debug(sc, debug_ctrl |
		    DEBUG_CTRL_DISABLE_WRITEBACK | DEBUG_CTRL_DISABLE_LINEFILL);
		pl310_set_way_sizes(sc);
		pl310_wbinv_all();
		platform_pl310_write_ctrl(sc, CTRL_DISABLED);
		platform_pl310_write_debug(sc, debug_ctrl);
	}
	sc->sc_enabled = pl310_enabled;

	if (sc->sc_enabled) {
		platform_pl310_init(sc);
		pl310_set_way_sizes(sc); /* platform init might change these */
		pl310_write4(pl310_softc, PL310_INV_WAY, 0xffff);
		pl310_wait_background_op(PL310_INV_WAY, 0xffff);
		platform_pl310_write_ctrl(sc, CTRL_ENABLED);
		device_printf(dev, "L2 Cache enabled: %uKB/%dB %d ways\n",
		    (g_l2cache_size / 1024), g_l2cache_line_size, g_ways_assoc);
		if (bootverbose)
			pl310_print_config(sc);
	} else {
		if (sc->sc_irq_res != NULL) {
			sc->sc_ich = malloc(sizeof(*sc->sc_ich), M_DEVBUF,
			    M_WAITOK);
			sc->sc_ich->ich_func = pl310_config_intr;
			sc->sc_ich->ich_arg = sc;
			if (config_intrhook_establish(sc->sc_ich) != 0) {
				device_printf(dev,
				    "config_intrhook_establish failed\n");
				free(sc->sc_ich, M_DEVBUF);
				return (ENXIO);
			}
		}

		device_printf(dev, "L2 Cache disabled\n");
	}

	/* Set the L2 functions in the set of cpufuncs */
	cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
	cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
	cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
	cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
	cpufuncs.cf_l2cache_drain_writebuf = pl310_drain_writebuf;

	return (0);
}

static device_method_t pl310_methods[] = {
	DEVMETHOD(device_probe, pl310_probe),
	DEVMETHOD(device_attach, pl310_attach),
	DEVMETHOD_END
};

static driver_t pl310_driver = {
	"l2cache",
	pl310_methods,
	sizeof(struct pl310_softc),
};

EARLY_DRIVER_MODULE(pl310, simplebus, pl310_driver, 0, 0,
    BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);