/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/asm/processor.h
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __ASM_SPARC64_PROCESSOR_H
#define __ASM_SPARC64_PROCESSOR_H

#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/page.h>

/*
 * The user lives in his very own context and cannot reference us. Note
 * that TASK_SIZE is a misnomer; it really gives the maximum user virtual
 * address that the kernel will allocate out.
 *
 * XXX No longer using virtual page tables, kill this upper limit...
 */
#define VA_BITS		44
#ifndef __ASSEMBLY__
#define VPTE_SIZE	(1UL << (VA_BITS - PAGE_SHIFT + 3))
#else
#define VPTE_SIZE	(1 << (VA_BITS - PAGE_SHIFT + 3))
#endif
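/* Worked out (assuming the usual 8KB pages, i.e. PAGE_SHIFT == 13):
 * VPTE_SIZE = 1UL << (44 - 13 + 3) = 1UL << 34, i.e. 16GB of virtual
 * space once reserved for the virtual page tables.
 */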

#define TASK_SIZE_OF(tsk) \
	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
	 (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
#define TASK_SIZE \
	(test_thread_flag(TIF_32BIT) ? \
	 (1UL << 32UL) : ((unsigned long)-VPTE_SIZE))
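/* For 64-bit tasks this evaluates to -VPTE_SIZE as an unsigned long,
 * i.e. 0xfffffffc00000000 with the 16GB VPTE_SIZE above; 32-bit tasks
 * are capped at the 4GB boundary (1UL << 32UL).
 */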
#ifdef __KERNEL__

#define STACK_TOP32	((1UL << 32UL) - PAGE_SIZE)
#define STACK_TOP64	(0x0000080000000000UL - (1UL << 32UL))
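/* STACK_TOP64 works out to (1UL << 43) - (1UL << 32): the 64-bit user
 * stack tops out 4GB below the 8TB point of the virtual address space.
 */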

#define STACK_TOP	(test_thread_flag(TIF_32BIT) ? \
			 STACK_TOP32 : STACK_TOP64)

#define STACK_TOP_MAX	STACK_TOP64

#endif

#ifndef __ASSEMBLY__

/* The Sparc processor specific thread struct. */
/* XXX This should die, everything can go into thread_info now. */
struct thread_struct {
#ifdef CONFIG_DEBUG_SPINLOCK
	/* How many spinlocks are held by this thread.
	 * Used with spinlock debugging to catch tasks
	 * sleeping illegally with locks held.
	 */
	int smp_lock_count;
	unsigned int smp_lock_pc;
#else
	int dummy; /* f'in gcc bug... */
#endif
};

#endif /* !(__ASSEMBLY__) */

#ifndef CONFIG_DEBUG_SPINLOCK
#define INIT_THREAD { \
	0, \
}
#else /* CONFIG_DEBUG_SPINLOCK */
#define INIT_THREAD { \
	/* smp_lock_count, smp_lock_pc, */ \
	0, 0, \
}
#endif /* !(CONFIG_DEBUG_SPINLOCK) */

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/fpumacro.h>

struct task_struct;

/* On uniprocessor, processes see TSO semantics even when running in RMO. */
#ifdef CONFIG_SMP
#define TSTATE_INITIAL_MM	TSTATE_TSO
#else
#define TSTATE_INITIAL_MM	TSTATE_RMO
#endif
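/* TSO (Total Store Order) and RMO (Relaxed Memory Order) are SPARC-V9
 * memory models; RMO permits more aggressive hardware reordering, but
 * with no second CPU around to observe it, a UP kernel can run user
 * threads in RMO while they still effectively see TSO.
 */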

/* Do necessary setup to start up a newly executed thread. */
#define start_thread(regs, pc, sp) \
do { \
	unsigned long __asi = ASI_PNF; \
	regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
	regs->tpc = ((pc & (~3)) - 4); \
	regs->tnpc = regs->tpc + 4; \
	regs->y = 0; \
	set_thread_wstate(1 << 3); \
	if (current_thread_info()->utraps) { \
		if (*(current_thread_info()->utraps) < 2) \
			kfree(current_thread_info()->utraps); \
		else \
			(*(current_thread_info()->utraps))--; \
		current_thread_info()->utraps = NULL; \
	} \
	__asm__ __volatile__( \
	"stx %%g0, [%0 + %2 + 0x00]\n\t" \
	"stx %%g0, [%0 + %2 + 0x08]\n\t" \
	"stx %%g0, [%0 + %2 + 0x10]\n\t" \
	"stx %%g0, [%0 + %2 + 0x18]\n\t" \
	"stx %%g0, [%0 + %2 + 0x20]\n\t" \
	"stx %%g0, [%0 + %2 + 0x28]\n\t" \
	"stx %%g0, [%0 + %2 + 0x30]\n\t" \
	"stx %%g0, [%0 + %2 + 0x38]\n\t" \
	"stx %%g0, [%0 + %2 + 0x40]\n\t" \
	"stx %%g0, [%0 + %2 + 0x48]\n\t" \
	"stx %%g0, [%0 + %2 + 0x50]\n\t" \
	"stx %%g0, [%0 + %2 + 0x58]\n\t" \
	"stx %%g0, [%0 + %2 + 0x60]\n\t" \
	"stx %%g0, [%0 + %2 + 0x68]\n\t" \
	"stx %1, [%0 + %2 + 0x70]\n\t" \
	"stx %%g0, [%0 + %2 + 0x78]\n\t" \
	"wrpr %%g0, (1 << 3), %%wstate\n\t" \
	: \
	: "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
	  "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
	fprs_write(0); \
	current_thread_info()->xfsr[0] = 0; \
	current_thread_info()->fpsaved[0] = 0; \
	regs->tstate &= ~TSTATE_PEF; \
} while (0)
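/* A note on the inline assembly above: the sixteen stx instructions
 * clear the saved user registers in pt_regs->u_regs[].  Offset 0x70 is
 * slot 14 (%sp), which instead receives the new stack pointer, pulled
 * down by one register window and by STACK_BIAS (2047), the fixed bias
 * the 64-bit SPARC ABI applies to %sp and %fp.
 */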

#define start_thread32(regs, pc, sp) \
do { \
	unsigned long __asi = ASI_PNF; \
	pc &= 0x00000000ffffffffUL; \
	sp &= 0x00000000ffffffffUL; \
	regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
	regs->tpc = ((pc & (~3)) - 4); \
	regs->tnpc = regs->tpc + 4; \
	regs->y = 0; \
	set_thread_wstate(2 << 3); \
	if (current_thread_info()->utraps) { \
		if (*(current_thread_info()->utraps) < 2) \
			kfree(current_thread_info()->utraps); \
		else \
			(*(current_thread_info()->utraps))--; \
		current_thread_info()->utraps = NULL; \
	} \
	__asm__ __volatile__( \
	"stx %%g0, [%0 + %2 + 0x00]\n\t" \
	"stx %%g0, [%0 + %2 + 0x08]\n\t" \
	"stx %%g0, [%0 + %2 + 0x10]\n\t" \
	"stx %%g0, [%0 + %2 + 0x18]\n\t" \
	"stx %%g0, [%0 + %2 + 0x20]\n\t" \
	"stx %%g0, [%0 + %2 + 0x28]\n\t" \
	"stx %%g0, [%0 + %2 + 0x30]\n\t" \
	"stx %%g0, [%0 + %2 + 0x38]\n\t" \
	"stx %%g0, [%0 + %2 + 0x40]\n\t" \
	"stx %%g0, [%0 + %2 + 0x48]\n\t" \
	"stx %%g0, [%0 + %2 + 0x50]\n\t" \
	"stx %%g0, [%0 + %2 + 0x58]\n\t" \
	"stx %%g0, [%0 + %2 + 0x60]\n\t" \
	"stx %%g0, [%0 + %2 + 0x68]\n\t" \
	"stx %1, [%0 + %2 + 0x70]\n\t" \
	"stx %%g0, [%0 + %2 + 0x78]\n\t" \
	"wrpr %%g0, (2 << 3), %%wstate\n\t" \
	: \
	: "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
	  "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
	fprs_write(0); \
	current_thread_info()->xfsr[0] = 0; \
	current_thread_info()->fpsaved[0] = 0; \
	regs->tstate &= ~TSTATE_PEF; \
} while (0)
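/* The 32-bit variant differs in a few ways: TSTATE_AM tells the CPU to
 * mask all generated addresses to 32 bits, the wstate value (2 << 3)
 * selects the compat window spill/fill handling, the saved window is a
 * struct reg_window32, and no STACK_BIAS is subtracted, since only the
 * 64-bit ABI biases %sp.
 */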

unsigned long __get_wchan(struct task_struct *task);

#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])

/* Please see the commentary in asm/backoff.h for a description of
 * what these instructions are doing and how they have been chosen.
 * To make a long story short, we are trying to yield the current cpu
 * strand during busy loops.
 */
#ifdef BUILD_VDSO
#define cpu_relax()	asm volatile("\n99:\n\t" \
				     "rd %%ccr, %%g0\n\t" \
				     "rd %%ccr, %%g0\n\t" \
				     "rd %%ccr, %%g0\n\t" \
				     ::: "memory")
#else /* ! BUILD_VDSO */
#define cpu_relax()	asm volatile("\n99:\n\t" \
				     "rd %%ccr, %%g0\n\t" \
				     "rd %%ccr, %%g0\n\t" \
				     "rd %%ccr, %%g0\n\t" \
				     ".section .pause_3insn_patch,\"ax\"\n\t" \
				     ".word 99b\n\t" \
				     "wr %%g0, 128, %%asr27\n\t" \
				     "nop\n\t" \
				     "nop\n\t" \
				     ".previous" \
				     ::: "memory")
#endif
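/* If the boot CPU advertises the hardware pause instruction (SPARC-T4
 * and later), the kernel patches the three ccr reads recorded in the
 * .pause_3insn_patch section with the "wr %g0, 128, %asr27" sequence
 * embedded above, a true hardware pause of up to 128 cycles.
 * A typical busy-wait looks like:
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 */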

/* Prefetch support.  This is tuned for UltraSPARC-III and later.
 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
 * a shallower prefetch queue than later chips.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW

static inline void prefetch(const void *x)
{
	/* We do not use the read prefetch mnemonic because that
	 * prefetches into the prefetch-cache which is only accessible
	 * by floating point operations in UltraSPARC-III and later.
	 * By contrast, "#one_write" prefetches into the L2 cache
	 * in shared state.
	 */
	__asm__ __volatile__("prefetch [%0], #one_write"
			     : /* no outputs */
			     : "r" (x));
}

static inline void prefetchw(const void *x)
{
	/* The optimal prefetch to use for writes is
	 * "#n_writes".  This brings the cacheline into the
	 * L2 cache in "owned" state.
	 */
	__asm__ __volatile__("prefetch [%0], #n_writes"
			     : /* no outputs */
			     : "r" (x));
}
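/* Illustrative only: a write loop might prefetch a line or two ahead
 * of the store stream, e.g.
 *
 *	for (i = 0; i < n; i++) {
 *		prefetchw(&dst[i + 8]);
 *		dst[i] = f(src[i]);
 *	}
 *
 * where f() and the 8-element lookahead are hypothetical, untuned
 * choices; prefetches of out-of-range addresses are simply dropped.
 */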

#define HAVE_ARCH_PICK_MMAP_LAYOUT

int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap);

#endif /* !(__ASSEMBLY__) */

#endif /* !(__ASM_SPARC64_PROCESSOR_H) */