/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#define SHADOW_SLB_ENTRY_LEN	0x10
#define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * (x))
#define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * (x)) + 8)
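
/*
 * Layout note: each shadow SLB entry is 16 bytes, the ESID doubleword
 * at offset 0 and the VSID doubleword at offset 8; this is the
 * (RB, RS) operand pair that the slbmte instructions below consume.
 */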

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~(IR|DR), i.e. instruction and data relocation off
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRs = free, except R4 and R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as 0 entries big */

	ld	r11, PACA_SLBSHADOWPTR(r13)
	li	r8, 0
	stb	r8, 3(r11)
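
	/*
	 * The stb above zeroes byte 3 of the shadow buffer header, i.e.
	 * the least-significant byte of its big-endian 32-bit entry count
	 * (the 'persistent' field of struct slb_shadow), so that the
	 * hypervisor does not reinstall the host's bolted entries behind
	 * our back while guest entries occupy the SLB.
	 */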

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)

	/* Flush SLB */

	li	r10, 0
	slbmte	r10, r10
	slbia
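
	/*
	 * slbia does not necessarily invalidate SLB entry 0, so the
	 * slbmte with zero operands above overwrites slot 0 with an
	 * invalid entry first; slbia then clears the remaining slots.
	 */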

	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3
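
	/* r12 = one entry past the last shadow SLB entry to load */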

	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11 += slb_entry) */
	li	r11, SVCPU_SLB
	add	r11, r11, r3

slb_loop_enter:

	ld	r10, 0(r11)

	andis.	r9, r10, SLB_ESID_V@h
	beq	slb_loop_enter_skip
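
	/*
	 * andis. tests the SLB_ESID_V (valid) bit via the @h (high
	 * halfword) form of the constant; invalid shadow slots are
	 * skipped.
	 */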

	ld	r9, 8(r11)
	slbmte	r9, r10
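
	/* slbmte RS, RB: r9 carries the VSID word, r10 the ESID word */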

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Remove all SLB entries that are in use. */

	li	r0, 0
	slbmte	r0, r0
	slbia
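
	/* Same idiom as on entry: invalidate slot 0 explicitly, then
	 * slbia the remaining entries. */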

	/* Restore bolted entries from the shadow */

	ld	r11, PACA_SLBSHADOWPTR(r13)

BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */

	li	r8, SLB_NUM_BOLTED
	stb	r8, 3(r11)
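
	/*
	 * Restores the entry count byte zeroed on entry, so the
	 * hypervisor may again maintain the host's bolted SLB entries
	 * from the shadow buffer.
	 */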

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)

	/* Manually load all entries from shadow SLB */

	li	r8, SLBSHADOW_SAVEAREA
	li	r7, SLBSHADOW_SAVEAREA + 8
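
	/*
	 * r8 indexes the esid fields and r7 the vsid fields of the
	 * shadow save area; LDX_BE loads the big-endian PAPR data
	 * correctly on both big- and little-endian hosts.
	 */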

	.rept	SLB_NUM_BOLTED
	LDX_BE	r10, r11, r8
	cmpdi	r10, 0
	beq	1f
	LDX_BE	r9, r11, r7
	slbmte	r9, r10
1:	addi	r7, r7, SHADOW_SLB_ENTRY_LEN
	addi	r8, r8, SHADOW_SLB_ENTRY_LEN
	.endr
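
	/* The .rept above unrolls one load/test/slbmte per bolted entry;
	 * slots with a zero ESID word are skipped via the branch to 1f. */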

	isync
	sync
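
	/* Make sure the SLB updates above have taken effect before we
	 * continue in host context */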

slb_do_exit:

.endm
