#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
        .x86_tss = {
                .sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
        },
#ifdef CONFIG_X86_32
        /*
         * Note that the .io_bitmap member must be extra-big. This is because
         * the CPU will access an additional byte beyond the end of the IO
         * permission bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },

        .SYSENTER_stack_canary = STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);

/*
 * This gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
        dst->thread.vm86 = NULL;
#endif

        return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;
        unsigned long *bp = t->io_bitmap_ptr;
        struct fpu *fpu = &t->fpu;

        if (bp) {
                struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }

        free_vm86(t);

        fpu__drop(fpu);
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

        fpu__clear(&tsk->thread.fpu);
}

static void hard_disable_TSC(void)
{
        cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}
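
/*
 * A minimal userspace sketch of how this is driven via prctl(2):
 * PR_SET_TSC lands in set_tsc_mode() below and PR_GET_TSC in
 * get_tsc_mode().
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *	// RDTSC in this task now raises SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);
 */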

static void hard_enable_TSC(void)
{
        cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
        local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
        play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
        x86_idle();
}

/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        safe_halt();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
        bool ret = !!x86_idle;

        x86_idle = default_idle;

        return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();
        mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

        for (;;)
                halt();
}
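
/*
 * How the idle routines below are wired up, assuming the generic idle
 * loop in kernel/sched/idle.c: when no cpuidle driver handles the CPU,
 * default_idle_call() ends up in arch_cpu_idle() above, which jumps
 * through the x86_idle function pointer that select_idle_routine()
 * picks at boot (amd_e400_idle, mwait_idle or default_idle).
 */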

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
        /*
         * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
         * gets set after static_cpu_has() places have been converted via
         * alternatives.
         */
        if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
                default_idle();
                return;
        }

        tick_broadcast_enter();

        default_idle();

        /*
         * The switch back from broadcast mode needs to be called with
         * interrupts disabled.
         */
        local_irq_disable();
        tick_broadcast_exit();
        local_irq_enable();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_INTEL)
                return 0;

        if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
                return 0;

        return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
        if (!current_set_polling_and_test()) {
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
                        mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
                        mb(); /* quirk */
                }

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
        }
        __current_clr_polling();
}

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
                pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
        if (x86_idle || boot_option_idle_override == IDLE_POLL)
                return;

        if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
                pr_info("using AMD E400 aware idle routine\n");
                x86_idle = amd_e400_idle;
        } else if (prefer_mwait_c1_over_halt(c)) {
                pr_info("using mwait in idle threads\n");
                x86_idle = mwait_idle;
        } else
                x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
        if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
                pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
                local_irq_disable();
                tick_broadcast_force();
                local_irq_enable();
        }
}
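
/*
 * Note on the rdmsr() macro used below: it splits the 64-bit MSR value
 * into two 32-bit halves, so after
 *
 *	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 *
 * the full register value is ((u64)hi << 32) | lo; the
 * K8_INTP_C1E_ACTIVE_MASK check only needs the low half.
 */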

void __init arch_post_acpi_subsys_init(void)
{
        u32 lo, hi;

        if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
                return;

        /*
         * AMD E400 detection needs to happen after ACPI has been enabled. If
         * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
         * MSR_K8_INT_PENDING_MSG.
         */
        rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
        if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
                return;

        boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                mark_tsc_unstable("TSC halt in AMD C1E");
        pr_info("System has AMD C1E enabled\n");
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                pr_info("using polling idle threads\n");
                boot_option_idle_override = IDLE_POLL;
                cpu_idle_poll_ctrl(true);
        } else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option idle=halt is used, halt is
                 * forced for CPU idle. In that case the C2/C3 CPU
                 * states won't be used.
                 * To let the CPU idle driver continue to load, don't
                 * touch boot_option_idle_override.
                 */
                x86_idle = default_idle;
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * When the boot option idle=nomwait is used, mwait
                 * is disabled for the C2/C3 CPU states. In that case
                 * it doesn't touch the variable
                 * boot_option_idle_override.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
                return -1;

        return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        return randomize_page(mm->brk, 0x02000000);
}
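
/*
 * Worked example for arch_align_stack() above: get_random_int() % 8192
 * moves the stack down by up to 8 KiB - 1 bytes, and the final & ~0xf
 * rounds down to a 16-byte boundary, leaving 8192 / 16 = 512 possible
 * starting offsets.
 */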

/*
 * Return the saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct inactive_task_frame *frame =
                (struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
        return READ_ONCE_NOCHECK(frame->ret_addr);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long start, bottom, top, sp, fp, ip, ret = 0;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        if (!try_get_task_stack(p))
                return 0;

        start = (unsigned long)task_stack_page(p);
        if (!start)
                goto out;

        /*
         * Layout of the stack page:
         *
         * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
         * PADDING
         * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
         * stack
         * ----------- bottom = start
         *
         * The task's stack pointer points at the location where the
         * frame pointer is stored. The data on the stack is:
         * ... IP FP ... IP FP
         *
         * We need to read FP and IP, so we need to adjust the upper
         * bound by another unsigned long.
         */
        top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
        top -= 2 * sizeof(unsigned long);
        bottom = start;

        sp = READ_ONCE(p->thread.sp);
        if (sp < bottom || sp > top)
                goto out;

        fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
        do {
                if (fp < bottom || fp > top)
                        goto out;
                ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
                if (!in_sched_functions(ip)) {
                        ret = ip;
                        goto out;
                }
                fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
        } while (count++ < 16 && p->state != TASK_RUNNING);

out:
        put_task_stack(p);
        return ret;
}
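
/*
 * Usage note: the address returned by get_wchan() is what backs
 * /proc/<pid>/wchan, e.g.
 *
 *	$ cat /proc/<pid>/wchan
 *
 * which proc resolves to the name of the kernel function the blocked
 * task is sleeping in.
 */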