/*
 * SMP support for R-Mobile / SH-Mobile
 *
 * Copyright (C) 2010  Magnus Damm
 * Copyright (C) 2010  Takashi Yoshii
 *
 * Based on vexpress, Copyright (c) 2003 ARM Limited, All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>

	__CPUINIT

/* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks!
 *
 * The secondary kernel init calls v7_flush_dcache_all before it enables
 * the L1; however, the L1 comes out of reset in an undefined state, so
 * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
 * of cache lines with uninitialized data and uninitialized tags to get
 * written out to memory, which does really unpleasant things to the main
 * processor.  We fix this by performing an invalidate, rather than a
 * clean + invalidate, before jumping into the kernel.
 *
 * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
 * to be called for both secondary core startup and primary core resume
 * procedures.  Ideally, it should be moved into arch/arm/mm/cache-v7.S.
 */
ENTRY(v7_invalidate_l1)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0	@ invalidate I cache
	mcr	p15, 2, r0, c0, c0, 0	@ CSSELR: select level 1 data cache
	mrc	p15, 1, r0, c0, c0, 0	@ CCSIDR: read its size/geometry

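	@ CCSIDR layout: bits [27:13] = NumSets - 1, bits [12:3] = NumWays - 1,
	@ bits [2:0] = log2(words per cache line) - 2.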
	ldr	r1, =0x7fff
	and	r2, r1, r0, lsr #13	@ NumSets - 1

	ldr	r1, =0x3ff

	and	r3, r1, r0, lsr #3	@ NumWays - 1
	add	r2, r2, #1		@ NumSets

	and	r0, r0, #0x7		@ LineSize field
	add	r0, r0, #4	@ SetShift

	clz	r1, r3		@ WayShift
	add	r4, r3, #1	@ NumWays
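	@ Walk every set and way of the L1 D-cache, invalidating each line
	@ by set/way.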
1:	sub	r2, r2, #1	@ NumSets--
	mov	r3, r4		@ Temp = NumWays
2:	subs	r3, r3, #1	@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6	@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	mcr	p15, 0, r5, c7, c6, 2	@ DCISW: invalidate D-cache line by set/way
	bgt	2b		@ flags are still those of the subs above
	cmp	r2, #0
	bgt	1b
	dsb				@ ensure the invalidates have completed
	isb
	mov	pc, lr
ENDPROC(v7_invalidate_l1)

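/*
 * Secondary-core entry point: invalidate the L1 (its contents are
 * undefined out of reset) before falling through to the common ARM
 * secondary_startup code.
 */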
ENTRY(shmobile_invalidate_start)
	bl	v7_invalidate_l1
	b	secondary_startup
ENDPROC(shmobile_invalidate_start)

/*
 * Reset vector for secondary CPUs.
 * This will be mapped at address 0 by the SBAR register.
 * We need a _long_ jump to the physical address.
 */
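/*
 * The literal below holds the link-time (virtual) address of
 * shmobile_invalidate_start converted to a physical address
 * (- PAGE_OFFSET + PLAT_PHYS_OFFSET), so the jump works while
 * the MMU is still off.
 */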
	.align  12			@ 2^12 = 4KiB alignment
ENTRY(shmobile_secondary_vector)
	ldr     pc, 1f
1:	.long   shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET
ENDPROC(shmobile_secondary_vector)