// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/loongarch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/unwind.h>
#include <asm/vdso.h>

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Idle related variables and functions
 */

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long crmd;
	unsigned long prmd;
	unsigned long euen;

	/* New thread loses kernel privileges. */
	crmd = regs->csr_crmd & ~(PLV_MASK);
	crmd |= PLV_USER;
	regs->csr_crmd = crmd;

	prmd = regs->csr_prmd & ~(PLV_MASK);
	prmd |= PLV_USER;
	regs->csr_prmd = prmd;

	euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
	regs->csr_euen = euen;
	lose_fpu(0);

	clear_thread_flag(TIF_LSX_CTX_LIVE);
	clear_thread_flag(TIF_LASX_CTX_LIVE);
	clear_used_math();
	regs->csr_era = pc;
	regs->regs[3] = sp;
}
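
/*
 * A note on the CSR bits touched above, descriptive only: PLV_MASK covers
 * the privilege-level field of CSR.CRMD/CSR.PRMD and PLV_USER selects PLV3,
 * so the net effect of the first two blocks is
 *
 *	regs->csr_crmd = (regs->csr_crmd & ~PLV_MASK) | PLV_USER;
 *
 * i.e. the exec()'d image resumes in user mode. Clearing CSR_EUEN_FPEN
 * leaves the FPU disabled: the first FP instruction of the new image traps,
 * and the FP context is then set up lazily rather than eagerly on every exec.
 */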

void exit_thread(struct task_struct *tsk)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_fpu_owner())
		save_fp(current);

	preempt_enable();

	if (used_math())
		memcpy(dst, src, sizeof(struct task_struct));
	else
		memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));

	return 0;
}
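
/*
 * A note on the copy above, descriptive only: the FP/SIMD save area
 * (thread.fpu.fpr) sits at the tail of struct task_struct, so when the
 * parent has never used the FPU the memcpy() can stop at
 * offsetof(struct task_struct, thread.fpu.fpr) and skip the large register
 * file. The preempt_disable()/preempt_enable() pair prevents the parent from
 * being scheduled out between the is_fpu_owner() check and save_fp(), which
 * could otherwise move or clobber the live FPU contents mid-save.
 */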

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long childksp;
	unsigned long tls = args->tls;
	unsigned long usp = args->stack;
	unsigned long clone_flags = args->flags;
	struct pt_regs *childregs, *regs = current_pt_regs();

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.sched_cfa = 0;
	p->thread.csr_euen = 0;
	p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
	p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
	p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
	if (unlikely(args->fn)) {
		/* kernel thread */
		p->thread.reg03 = childksp;
		p->thread.reg23 = (unsigned long)args->fn;
		p->thread.reg24 = (unsigned long)args->fn_arg;
		p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
		p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->csr_euen = p->thread.csr_euen;
		childregs->csr_crmd = p->thread.csr_crmd;
		childregs->csr_prmd = p->thread.csr_prmd;
		childregs->csr_ecfg = p->thread.csr_ecfg;
		goto out;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[4] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[3] = usp;

	p->thread.reg03 = (unsigned long) childregs;
	p->thread.reg01 = (unsigned long) ret_from_fork;
	p->thread.sched_ra = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->csr_euen = 0;

	if (clone_flags & CLONE_SETTLS)
		childregs->regs[2] = tls;

out:
	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDSIMD);
	clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
	clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);

	return 0;
}
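
/*
 * A descriptive sketch of the two paths above, using standard LoongArch ABI
 * register names (r1 = ra, r2 = tp, r3 = sp, r4 = a0, r23/r24 = s0/s1):
 *
 *   - kernel thread: the first switch_to() "returns" to
 *     ret_from_kernel_thread, which calls args->fn(args->fn_arg) using the
 *     values staged in s0/s1 on the fresh kernel stack;
 *   - user thread: the child inherits a copy of the parent's pt_regs with
 *     a0 zeroed (fork() returns 0 in the child), sp/tp rewritten when the
 *     caller supplied them, and leaves the kernel through ret_from_fork.
 */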

unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
	struct unwind_state state;

	if (!try_get_task_stack(task))
		return 0;

	for (unwind_start(&state, task, NULL);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		pc = unwind_get_return_address(&state);
		if (!pc)
			break;
		if (in_sched_functions(pc))
			continue;
		break;
	}

	put_task_stack(task);

	return pc;
}

bool in_irq_stack(unsigned long stack, struct stack_info *info)
{
	unsigned long nextsp;
	unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
	unsigned long end = begin + IRQ_STACK_START;

	if (stack < begin || stack >= end)
		return false;

	nextsp = *(unsigned long *)end;
	if (nextsp & (SZREG - 1))
		return false;

	info->begin = begin;
	info->end = end;
	info->next_sp = nextsp;
	info->type = STACK_TYPE_IRQ;

	return true;
}

bool in_task_stack(unsigned long stack, struct task_struct *task,
			struct stack_info *info)
{
	unsigned long begin = (unsigned long)task_stack_page(task);
	unsigned long end = begin + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->begin = begin;
	info->end = end;
	info->next_sp = 0;
	info->type = STACK_TYPE_TASK;

	return true;
}

int get_stack_info(unsigned long stack, struct task_struct *task,
		   struct stack_info *info)
{
	task = task ? : current;

	if (!stack || stack & (SZREG - 1))
		goto unknown;

	if (in_task_stack(stack, task, info))
		return 0;

	if (task != current)
		goto unknown;

	if (in_irq_stack(stack, info))
		return 0;

unknown:
	info->type = STACK_TYPE_UNKNOWN;
	return -EINVAL;
}

unsigned long stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	/* Space for the VDSO & data page */
	top -= PAGE_ALIGN(current->thread.vdso->size);
	top -= PAGE_SIZE;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and a 16-byte boundary for the 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);

	return sp & STACK_ALIGN;
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		csd->func = handle_backtrace;
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

#ifdef CONFIG_64BIT
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
		uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
	}

	uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
	uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
	uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
	uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
	uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
	uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
	uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
	uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
#endif /* CONFIG_64BIT */
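
/*
 * Usage sketch for loongarch_dump_regs64(), illustrative only (the expected
 * caller is the ELF core-dump path, e.g. the ELF_CORE_COPY_REGS() hook in
 * asm/elf.h):
 *
 *	u64 uregs[ELF_NGREG];
 *
 *	loongarch_dump_regs64(uregs, task_pt_regs(task));
 *
 * The LOONGARCH_EF_* indices fix the layout of the user-visible array:
 * r1..r31 first (r0 is hard-wired zero, so the loop skips it), followed by
 * orig_a0 and the exception-related CSRs.
 */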