xref: /linux/arch/powerpc/kvm/book3s_64_slb.S (revision 148f9bb87745ed45f7a11b2cbd3bc0f017d5d257)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
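
/*
 * UNBOLT_SLB_ENTRY(num): if bolted entry 'num' in the SLB shadow save area
 * is valid, clear its SLB_ESID_V bit so the entry is ignored while the
 * guest's SLB contents are in use.  Only the valid bit is cleared; the ESID
 * itself stays in the save area so REBOLT_SLB_ENTRY() can reinstate the
 * entry on exit.
 */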
#define UNBOLT_SLB_ENTRY(num) \
	ld	r9, SHADOW_SLB_ESID(num)(r12); \
	/* Invalid? Skip. */; \
	rldicl. r0, r9, 37, 63; \
	beq	slb_entry_skip_ ## num; \
	xoris	r9, r9, SLB_ESID_V@h; \
	std	r9, SHADOW_SLB_ESID(num)(r12); \
  slb_entry_skip_ ## num:

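/*
 * REBOLT_SLB_ENTRY(num): if bolted entry 'num' in the shadow save area has a
 * non-zero ESID word, set SLB_ESID_V again, write the entry back into the
 * hardware SLB with slbmte and store the revalidated ESID back into the
 * save area.
 */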
#define REBOLT_SLB_ENTRY(num) \
	ld	r10, SHADOW_SLB_ESID(num)(r11); \
	cmpdi	r10, 0; \
	beq	slb_exit_skip_ ## num; \
	oris	r10, r10, SLB_ESID_V@h; \
	ld	r9, SHADOW_SLB_VSID(num)(r11); \
	slbmte	r9, r10; \
	std	r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

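/*
 * LOAD_GUEST_SEGMENTS: unbolt the host's shadow SLB entries, flush the
 * hardware SLB and then load the guest's SLB entries from the shadow vcpu.
 * A rough, illustrative C-style sketch only; flush_slb() stands for the
 * slbmte/slbia sequence below, and svcpu->slb / svcpu->slb_max /
 * shadow->save_area are shorthand for the SVCPU_* and SLBSHADOW_* offsets
 * used in the assembly, not necessarily real struct members:
 *
 *	for (i = 0; i < SLB_NUM_BOLTED; i++)
 *		shadow->save_area[i].esid &= ~SLB_ESID_V;
 *	flush_slb();
 *	for (slbe = svcpu->slb; slbe < svcpu->slb + svcpu->slb_max; slbe++)
 *		if (slbe->esid & SLB_ESID_V)
 *			slbmte(slbe->vsid, slbe->esid);
 */
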
.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRs = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

	/* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

	ld	r12, PACA_SLBSHADOWPTR(r13)

	/* Remove bolted entries */
	UNBOLT_SLB_ENTRY(0)
	UNBOLT_SLB_ENTRY(1)
	UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

	/* Flush SLB */
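	/* slbia does not touch SLB entry 0, so invalidate it explicitly by
	 * writing an all-zero entry before wiping the remaining entries. */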

	li	r10, 0
	slbmte	r10, r10
	slbia

	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3
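	/* r12 = r3 + SVCPU_SLB + 16 * slb_max, i.e. the end pointer for the
	 * copy loop below (each shadow SLB entry is 16 bytes: ESID, VSID) */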

	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, SVCPU_SLB
	add	r11, r11, r3

slb_loop_enter:

	ld	r10, 0(r11)

	rldicl. r0, r10, 37, 63
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

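/*
 * LOAD_HOST_SEGMENTS: drop the guest SLB entries and put the host's bolted
 * entries, which LOAD_GUEST_SEGMENTS marked invalid in the shadow save area,
 * back into both the save area and the hardware SLB.  A rough, illustrative
 * C-style sketch only; shadow->save_area is shorthand for the SLBSHADOW_*
 * offsets used in the assembly:
 *
 *	slbia(); isync();
 *	for (i = 0; i < SLB_NUM_BOLTED; i++) {
 *		if (!shadow->save_area[i].esid)
 *			continue;
 *		shadow->save_area[i].esid |= SLB_ESID_V;
 *		slbmte(shadow->save_area[i].vsid, shadow->save_area[i].esid);
 *	}
 */
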
.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore bolted entries from the shadow, fixing them up along the way */

	/* We don't store anything in entry 0, so we don't need to take care of it */
	slbia
	isync
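	/* slbia has wiped every guest entry except entry 0, which was never
	 * used for guest state (see above); now reinstall the host's bolted
	 * translations from the shadow save area. */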

#if SLB_NUM_BOLTED == 3

	ld	r11, PACA_SLBSHADOWPTR(r13)

	REBOLT_SLB_ENTRY(0)
	REBOLT_SLB_ENTRY(1)
	REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

slb_do_exit:

.endm