/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>

#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/hw_breakpoint.h>
#include <asm/loongarch.h>
#include <asm/vdso/processor.h>
#include <uapi/asm/ptrace.h>
#include <uapi/asm/sigcontext.h>

#ifdef CONFIG_32BIT

#define TASK_SIZE	0x80000000UL
#define TASK_SIZE_MIN	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE

#define TASK_IS_32BIT_ADDR 1

#endif

#ifdef CONFIG_64BIT

#define TASK_SIZE32	0x100000000UL
#define TASK_SIZE64	(0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))

#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_SIZE_MIN	TASK_SIZE32
#define STACK_TOP_MAX	TASK_SIZE64

#define TASK_SIZE_OF(tsk)						\
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif
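
/*
 * Illustration only (hypothetical caller): on a 64-bit kernel the user
 * address-space limit is evaluated per task: 4 GiB for a TIF_32BIT_ADDR
 * task, otherwise 1UL << min(cpu_vabits, VA_BITS). A typical range
 * check would look like:
 *
 *	if (addr + len > TASK_SIZE)
 *		return -ENOMEM;
 */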

#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

unsigned long stack_top(void);
#define STACK_TOP stack_top()

/*
 * This decides where the kernel will search for a free chunk of
 * virtual address space during mmap().
 */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)

#define FPU_REG_WIDTH		256
#define FPU_ALIGN		__attribute__((aligned(32)))

union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};
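
/*
 * Note: each entry is 256 bits wide so that one fpureg can hold the
 * widest register state: the scalar FP value lives in val64[0], an
 * LSX (128-bit) vector spans val64[0..1], and an LASX (256-bit)
 * vector spans val64[0..3].
 */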

#define FPR_IDX(width, idx)	(idx)

#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx)	\
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static inline void set_fpr##width(union fpureg *fpr, unsigned int idx,	\
				  u##width val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
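
/*
 * Usage sketch (illustrative, not a new API): the generated accessors
 * read or write one 32-/64-bit lane of a saved register; lane 0 of
 * val64 holds the scalar FP value.
 *
 *	union fpureg *fpr = &current->thread.fpu.fpr[n];
 *	u64 lo = get_fpr64(fpr, 0);
 *	set_fpr64(fpr, 0, lo);
 */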

struct loongarch_fpu {
	uint64_t	fcc;	/* 8 condition flag registers (fcc0-fcc7), one byte each */
	uint32_t	fcsr;
	uint32_t	ftop;
	union fpureg	fpr[NUM_FPU_REGS];
};
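
/*
 * Sketch: since the eight 8-bit condition flags are packed into 'fcc'
 * one per byte, flag n can be extracted as
 *
 *	u8 flag = (fpu->fcc >> (8 * n)) & 0xff;
 */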

/* Context for the Loongson Binary Translation (LBT) extension */
struct loongarch_lbt {
	/* Scratch registers */
	unsigned long scr0;
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;
	/* Eflags register */
	unsigned long eflags;
};

#define INIT_CPUMASK { \
	{0,} \
}

/* Must cover FPU_ALIGN so the embedded FPU context stays 32-byte aligned */
#define ARCH_MIN_TASKALIGN	32

struct loongarch_vdso_info;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Main processor registers. */
	unsigned long reg01, reg03, reg22; /* ra sp fp */
	unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
	unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */

	/* __schedule() return address / call frame address */
	unsigned long sched_ra;
	unsigned long sched_cfa;

	/* CSR registers */
	unsigned long csr_prmd;
	unsigned long csr_crmd;
	unsigned long csr_euen;
	unsigned long csr_ecfg;
	unsigned long csr_badvaddr;	/* Last user fault */

	/* Other stuff associated with the thread. */
	unsigned long trap_nr;
	unsigned long error_code;
	unsigned long single_step; /* Used by PTRACE_SINGLESTEP */
	struct loongarch_vdso_info *vdso;

	/*
	 * FPU & vector registers; they must come last in the inherited
	 * context because they are conditionally copied at fork().
	 */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt; /* Also conditionally copied */

	/* Hardware breakpoints pinned to this task. */
	struct perf_event *hbp_break[LOONGARCH_MAX_BRP];
	struct perf_event *hbp_watch[LOONGARCH_MAX_WRP];
};

#define thread_saved_ra(tsk)	(tsk->thread.sched_ra)
#define thread_saved_fp(tsk)	(tsk->thread.sched_cfa)

#define INIT_THREAD  {						\
	/*							\
	 * Main processor registers				\
	 */							\
	.reg01			= 0,				\
	.reg03			= 0,				\
	.reg22			= 0,				\
	.reg23			= 0,				\
	.reg24			= 0,				\
	.reg25			= 0,				\
	.reg26			= 0,				\
	.reg27			= 0,				\
	.reg28			= 0,				\
	.reg29			= 0,				\
	.reg30			= 0,				\
	.reg31			= 0,				\
	.sched_ra		= 0,				\
	.sched_cfa		= 0,				\
	.csr_crmd		= 0,				\
	.csr_prmd		= 0,				\
	.csr_euen		= 0,				\
	.csr_ecfg		= 0,				\
	.csr_badvaddr		= 0,				\
	/*							\
	 * Other stuff associated with the process		\
	 */							\
	.trap_nr		= 0,				\
	.error_code		= 0,				\
	/*							\
	 * FPU & vector registers				\
	 */							\
	.fpu			= {				\
		.fcc		= 0,				\
		.fcsr		= 0,				\
		.ftop		= 0,				\
		.fpr		= {{{0,},},},			\
	},							\
	.hbp_break		= {0},				\
	.hbp_watch		= {0},				\
}

struct task_struct;

enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};

extern unsigned long		boot_option_idle_override;
/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

unsigned long __get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)
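
/*
 * Usage sketch (illustrative; procfs reads the user PC/SP this way):
 * the macros above index the user register frame saved at the top of
 * the task's kernel stack.
 *
 *	unsigned long pc = KSTK_EIP(tsk);	user PC, from csr_era
 *	unsigned long sp = KSTK_ESP(tsk);	user stack pointer, $r3
 */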

/*
 * The empty asm clobbers $1 (ra), forcing the compiler to preserve the
 * return address rather than assume it is still live in ra, which keeps
 * __builtin_return_address(0) reliable.
 */
#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})

#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
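
/*
 * Note: __builtin_prefetch(addr, rw, locality) takes rw = 0 (read) or
 * 1 (write) and a temporal-locality hint 0..3; both variants above use
 * locality 1, i.e. the data has a small degree of temporal locality.
 */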

#endif

#endif /* _ASM_PROCESSOR_H */