/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_TRAP_BLOCK_H
#define _SPARC_TRAP_BLOCK_H

#include <asm/hypervisor.h>
#include <asm/asi.h>

#ifndef __ASSEMBLY__

/* Trap handling code needs to get at a few critical values upon
 * trap entry and to process TSB misses.  These cannot be in the
 * per_cpu() area as we really need to lock them into the TLB and
 * thus make them part of the main kernel image.  As a result we
 * try to make this as small as possible.
 *
 * This is padded out and aligned to 64 bytes to avoid false sharing
 * on SMP.
 */

/* If you modify the size of this structure, please update
 * TRAP_BLOCK_SZ_SHIFT below.
 */
struct thread_info;
struct trap_per_cpu {
/* D-cache line 1: Basic thread information, cpu and device mondo queues */
	struct thread_info	*thread;
	unsigned long		pgd_paddr;
	unsigned long		cpu_mondo_pa;
	unsigned long		dev_mondo_pa;

/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
	unsigned long		resum_mondo_pa;
	unsigned long		resum_kernel_buf_pa;
	unsigned long		nonresum_mondo_pa;
	unsigned long		nonresum_kernel_buf_pa;

/* D-cache lines 3, 4, 5, and 6: Hypervisor Fault Status */
	struct hv_fault_status	fault_info;

/* D-cache line 7: Physical addresses of CPU send mondo block and CPU list.  */
	unsigned long		cpu_mondo_block_pa;
	unsigned long		cpu_list_pa;
	unsigned long		tsb_huge;
	unsigned long		tsb_huge_temp;

/* D-cache line 8: IRQ work list, and keep trap_block a power-of-2 in size.  */
	unsigned long		irq_worklist_pa;
	unsigned int		cpu_mondo_qmask;
	unsigned int		dev_mondo_qmask;
	unsigned int		resum_qmask;
	unsigned int		nonresum_qmask;
	unsigned long		__per_cpu_base;
} __attribute__((aligned(64)));
extern struct trap_per_cpu trap_block[NR_CPUS];
void init_cur_cpu_trap(struct thread_info *);
void setup_tba(void);
extern int ncpus_probed;
extern u64 cpu_mondo_counter[NR_CPUS];

unsigned long real_hard_smp_processor_id(void);

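/* Boot-time instruction patching table for cpuid probing.  Each entry
 * records the address of a default four-instruction sequence (emitted
 * by __GET_CPUID below) plus replacement sequences for the other CPU
 * types; early setup code overwrites the default with the variant
 * matching the processor it is actually running on.
 */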
struct cpuid_patch_entry {
	unsigned int	addr;
	unsigned int	cheetah_safari[4];
	unsigned int	cheetah_jbus[4];
	unsigned int	starfire[4];
	unsigned int	sun4v[4];
};
extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;

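/* Single- and double-instruction patch tables.  Each entry names a
 * code address and the instruction(s) written over it when the kernel
 * detects the corresponding platform feature (sun4v hypervisor, fast
 * window control, SPARC M7, and so on).
 */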
struct sun4v_1insn_patch_entry {
	unsigned int	addr;
	unsigned int	insn;
};
extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
	__sun4v_1insn_patch_end;
extern struct sun4v_1insn_patch_entry __fast_win_ctrl_1insn_patch,
	__fast_win_ctrl_1insn_patch_end;
extern struct sun4v_1insn_patch_entry __sun_m7_1insn_patch,
	__sun_m7_1insn_patch_end;

struct sun4v_2insn_patch_entry {
	unsigned int	addr;
	unsigned int	insns[2];
};
extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
	__sun4v_2insn_patch_end;
extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch,
	__sun_m7_2insn_patch_end;


#endif /* !(__ASSEMBLY__) */

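/* Assembly-visible byte offsets into struct trap_per_cpu.  These are
 * maintained by hand and must stay in sync with the structure layout
 * above; a hypothetical compile-time cross-check (not part of this
 * header) could look like
 *
 *	BUILD_BUG_ON(offsetof(struct trap_per_cpu, pgd_paddr) !=
 *		     TRAP_PER_CPU_PGD_PADDR);
 */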
#define TRAP_PER_CPU_THREAD		0x00
#define TRAP_PER_CPU_PGD_PADDR		0x08
#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
#define TRAP_PER_CPU_FAULT_INFO		0x40
#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
#define TRAP_PER_CPU_TSB_HUGE		0xd0
#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
#define TRAP_PER_CPU_RESUM_QMASK	0xf0
#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
#define TRAP_PER_CPU_PER_CPU_BASE	0xf8

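/* sizeof(struct trap_per_cpu) is 0x100 bytes (the last member sits at
 * offset 0xf8 and is an 8-byte long), so shifting a cpu number left by
 * 8 yields its byte offset within the trap_block[] array.
 */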
#define TRAP_BLOCK_SZ_SHIFT		8

#include <asm/scratchpad.h>

#define __GET_CPUID(REG)				\
	/* Spitfire implementation (default). */	\
661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
	srlx		REG, 17, REG;			\
	and		REG, 0x1f, REG;			\
	nop;						\
	.section	.cpuid_patch, "ax";		\
	/* Instruction location. */			\
	.word		661b;				\
	/* Cheetah Safari implementation. */		\
	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
	srlx		REG, 17, REG;			\
	and		REG, 0x3ff, REG;		\
	nop;						\
	/* Cheetah JBUS implementation. */		\
	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
	srlx		REG, 17, REG;			\
	and		REG, 0x1f, REG;			\
	nop;						\
	/* Starfire implementation. */			\
	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
	sllx		REG, 9, REG;			\
	or		REG, 0xd0, REG;			\
	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
	/* sun4v implementation. */			\
	mov		SCRATCHPAD_CPUID, REG;		\
	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
	nop;						\
	nop;						\
	.previous;

#ifdef CONFIG_SMP

#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	__GET_CPUID(TMP)			\
	sethi	%hi(trap_block), DEST;		\
	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
	or	DEST, %lo(trap_block), DEST;	\
	add	DEST, TMP, DEST;		\

/* Clobbers TMP, loads current address space PGD phys address into DEST.  */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;

/* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;

/* Clobbers TMP, loads DEST with current thread info pointer.  */
#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;

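/* For illustration, trap entry code would typically establish the
 * thread register with something like TRAP_LOAD_THREAD_REG(%g6, %g1),
 * leaving the current thread_info pointer in %g6 and clobbering %g1
 * (a sketch of a call site; the real ones live in the trap entry
 * assembly).
 */
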
/* Given the current thread info pointer in THR, load the per-cpu
 * area base of the current processor into DEST.  REG1, REG2, and REG3
 * may be clobbered (the current implementation only touches REG1 and
 * REG2).
 *
 * You absolutely cannot use DEST as a temporary in this code.  The
 * reason is that traps can happen during execution, and return from
 * trap will load the fully resolved DEST per-cpu base.  This can
 * corrupt the calculations done by the macro mid-stream.
 */
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
	lduh	[THR + TI_CPU], REG1;			\
	sethi	%hi(trap_block), REG2;			\
	sllx	REG1, TRAP_BLOCK_SZ_SHIFT, REG1;	\
	or	REG2, %lo(trap_block), REG2;		\
	add	REG2, REG1, REG2;			\
	ldx	[REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST;

#else

#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	sethi	%hi(trap_block), DEST;		\
	or	DEST, %lo(trap_block), DEST;	\

/* Uniprocessor versions, we know the cpuid is zero.  */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP)		\
	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;

/* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)	\
	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;

#define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;

/* No per-cpu areas on uniprocessor, so no need to load DEST.  */
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)

#endif /* !(CONFIG_SMP) */

#endif /* _SPARC_TRAP_BLOCK_H */