/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>
#include <asm/shstk.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
79 */ 80 81 struct cpuinfo_topology { 82 // Real APIC ID read from the local APIC 83 u32 apicid; 84 // The initial APIC ID provided by CPUID 85 u32 initial_apicid; 86 87 // Physical package ID 88 u32 pkg_id; 89 90 // Physical die ID on AMD, Relative on Intel 91 u32 die_id; 92 93 // Compute unit ID - AMD specific 94 u32 cu_id; 95 96 // Core ID relative to the package 97 u32 core_id; 98 99 // Logical ID mappings 100 u32 logical_pkg_id; 101 u32 logical_die_id; 102 103 // AMD Node ID and Nodes per Package info 104 u32 amd_node_id; 105 106 // Cache level topology IDs 107 u32 llc_id; 108 u32 l2c_id; 109 }; 110 111 struct cpuinfo_x86 { 112 __u8 x86; /* CPU family */ 113 __u8 x86_vendor; /* CPU vendor */ 114 __u8 x86_model; 115 __u8 x86_stepping; 116 #ifdef CONFIG_X86_64 117 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ 118 int x86_tlbsize; 119 #endif 120 #ifdef CONFIG_X86_VMX_FEATURE_NAMES 121 __u32 vmx_capability[NVMXINTS]; 122 #endif 123 __u8 x86_virt_bits; 124 __u8 x86_phys_bits; 125 /* Max extended CPUID function supported: */ 126 __u32 extended_cpuid_level; 127 /* Maximum supported CPUID level, -1=no CPUID: */ 128 int cpuid_level; 129 /* 130 * Align to size of unsigned long because the x86_capability array 131 * is passed to bitops which require the alignment. Use unnamed 132 * union to enforce the array is aligned to size of unsigned long. 133 */ 134 union { 135 __u32 x86_capability[NCAPINTS + NBUGINTS]; 136 unsigned long x86_capability_alignment; 137 }; 138 char x86_vendor_id[16]; 139 char x86_model_id[64]; 140 struct cpuinfo_topology topo; 141 /* in KB - valid for CPUS which support this call: */ 142 unsigned int x86_cache_size; 143 int x86_cache_alignment; /* In bytes */ 144 /* Cache QoS architectural values, valid only on the BSP: */ 145 int x86_cache_max_rmid; /* max index */ 146 int x86_cache_occ_scale; /* scale to bytes */ 147 int x86_cache_mbm_width_offset; 148 int x86_power; 149 unsigned long loops_per_jiffy; 150 /* protected processor identification number */ 151 u64 ppin; 152 u16 x86_clflush_size; 153 /* number of cores as seen by the OS: */ 154 u16 booted_cores; 155 /* Index into per_cpu list: */ 156 u16 cpu_index; 157 /* Is SMT active on this core? 
	bool			smt_active;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_HYGON	9
#define X86_VENDOR_ZHAOXIN	10
#define X86_VENDOR_VORTEX	11
#define X86_VENDOR_NUM		12

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

/*
 * Upper PFN limit used by the L1TF mitigation: half of the CPU's
 * cacheable physical address space, expressed in pages.
 */
static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}

extern void early_cpu_init(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0. We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS. When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
255 */ 256 unsigned short ss1; /* MSR_IA32_SYSENTER_CS */ 257 258 unsigned short __ss1h; 259 unsigned long sp2; 260 unsigned short ss2, __ss2h; 261 unsigned long __cr3; 262 unsigned long ip; 263 unsigned long flags; 264 unsigned long ax; 265 unsigned long cx; 266 unsigned long dx; 267 unsigned long bx; 268 unsigned long sp; 269 unsigned long bp; 270 unsigned long si; 271 unsigned long di; 272 unsigned short es, __esh; 273 unsigned short cs, __csh; 274 unsigned short ss, __ssh; 275 unsigned short ds, __dsh; 276 unsigned short fs, __fsh; 277 unsigned short gs, __gsh; 278 unsigned short ldt, __ldth; 279 unsigned short trace; 280 unsigned short io_bitmap_base; 281 282 } __attribute__((packed)); 283 #else 284 struct x86_hw_tss { 285 u32 reserved1; 286 u64 sp0; 287 u64 sp1; 288 289 /* 290 * Since Linux does not use ring 2, the 'sp2' slot is unused by 291 * hardware. entry_SYSCALL_64 uses it as scratch space to stash 292 * the user RSP value. 293 */ 294 u64 sp2; 295 296 u64 reserved2; 297 u64 ist[7]; 298 u32 reserved3; 299 u32 reserved4; 300 u16 reserved5; 301 u16 io_bitmap_base; 302 303 } __attribute__((packed)); 304 #endif 305 306 /* 307 * IO-bitmap sizes: 308 */ 309 #define IO_BITMAP_BITS 65536 310 #define IO_BITMAP_BYTES (IO_BITMAP_BITS / BITS_PER_BYTE) 311 #define IO_BITMAP_LONGS (IO_BITMAP_BYTES / sizeof(long)) 312 313 #define IO_BITMAP_OFFSET_VALID_MAP \ 314 (offsetof(struct tss_struct, io_bitmap.bitmap) - \ 315 offsetof(struct tss_struct, x86_tss)) 316 317 #define IO_BITMAP_OFFSET_VALID_ALL \ 318 (offsetof(struct tss_struct, io_bitmap.mapall) - \ 319 offsetof(struct tss_struct, x86_tss)) 320 321 #ifdef CONFIG_X86_IOPL_IOPERM 322 /* 323 * sizeof(unsigned long) coming from an extra "long" at the end of the 324 * iobitmap. The limit is inclusive, i.e. the last valid byte. 325 */ 326 # define __KERNEL_TSS_LIMIT \ 327 (IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \ 328 sizeof(unsigned long) - 1) 329 #else 330 # define __KERNEL_TSS_LIMIT \ 331 (offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1) 332 #endif 333 334 /* Base offset outside of TSS_LIMIT so unpriviledged IO causes #GP */ 335 #define IO_BITMAP_OFFSET_INVALID (__KERNEL_TSS_LIMIT + 1) 336 337 struct entry_stack { 338 char stack[PAGE_SIZE]; 339 }; 340 341 struct entry_stack_page { 342 struct entry_stack stack; 343 } __aligned(PAGE_SIZE); 344 345 /* 346 * All IO bitmap related data stored in the TSS: 347 */ 348 struct x86_io_bitmap { 349 /* The sequence number of the last active bitmap. */ 350 u64 prev_sequence; 351 352 /* 353 * Store the dirty size of the last io bitmap offender. The next 354 * one will have to do the cleanup as the switch out to a non io 355 * bitmap user will just set x86_tss.io_bitmap_base to a value 356 * outside of the TSS limit. So for sane tasks there is no need to 357 * actually touch the io_bitmap at all. 358 */ 359 unsigned int prev_max; 360 361 /* 362 * The extra 1 is there because the CPU will access an 363 * additional byte beyond the end of the IO permission 364 * bitmap. The extra byte must be all 1 bits, and must 365 * be within the limit. 366 */ 367 unsigned long bitmap[IO_BITMAP_LONGS + 1]; 368 369 /* 370 * Special I/O bitmap to emulate IOPL(3). All bytes zero, 371 * except the additional byte at the end. 372 */ 373 unsigned long mapall[IO_BITMAP_LONGS + 1]; 374 }; 375 376 struct tss_struct { 377 /* 378 * The fixed hardware portion. This must not cross a page boundary 379 * at risk of violating the SDM's advice and potentially triggering 380 * errata. 
381 */ 382 struct x86_hw_tss x86_tss; 383 384 struct x86_io_bitmap io_bitmap; 385 } __aligned(PAGE_SIZE); 386 387 DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw); 388 389 /* Per CPU interrupt stacks */ 390 struct irq_stack { 391 char stack[IRQ_STACK_SIZE]; 392 } __aligned(IRQ_STACK_SIZE); 393 394 #ifdef CONFIG_X86_64 395 struct fixed_percpu_data { 396 /* 397 * GCC hardcodes the stack canary as %gs:40. Since the 398 * irq_stack is the object at %gs:0, we reserve the bottom 399 * 48 bytes of the irq stack for the canary. 400 * 401 * Once we are willing to require -mstack-protector-guard-symbol= 402 * support for x86_64 stackprotector, we can get rid of this. 403 */ 404 char gs_base[40]; 405 unsigned long stack_canary; 406 }; 407 408 DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible; 409 DECLARE_INIT_PER_CPU(fixed_percpu_data); 410 411 static inline unsigned long cpu_kernelmode_gs_base(int cpu) 412 { 413 return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu); 414 } 415 416 extern asmlinkage void entry_SYSCALL32_ignore(void); 417 418 /* Save actual FS/GS selectors and bases to current->thread */ 419 void current_save_fsgs(void); 420 #else /* X86_64 */ 421 #ifdef CONFIG_STACKPROTECTOR 422 DECLARE_PER_CPU(unsigned long, __stack_chk_guard); 423 #endif 424 #endif /* !X86_64 */ 425 426 struct perf_event; 427 428 struct thread_struct { 429 /* Cached TLS descriptors: */ 430 struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; 431 #ifdef CONFIG_X86_32 432 unsigned long sp0; 433 #endif 434 unsigned long sp; 435 #ifdef CONFIG_X86_32 436 unsigned long sysenter_cs; 437 #else 438 unsigned short es; 439 unsigned short ds; 440 unsigned short fsindex; 441 unsigned short gsindex; 442 #endif 443 444 #ifdef CONFIG_X86_64 445 unsigned long fsbase; 446 unsigned long gsbase; 447 #else 448 /* 449 * XXX: this could presumably be unsigned short. Alternatively, 450 * 32-bit kernels could be taught to use fsindex instead. 451 */ 452 unsigned long fs; 453 unsigned long gs; 454 #endif 455 456 /* Save middle states of ptrace breakpoints */ 457 struct perf_event *ptrace_bps[HBP_NUM]; 458 /* Debug status used for traps, single steps, etc... */ 459 unsigned long virtual_dr6; 460 /* Keep track of the exact dr7 value set by the user */ 461 unsigned long ptrace_dr7; 462 /* Fault info: */ 463 unsigned long cr2; 464 unsigned long trap_nr; 465 unsigned long error_code; 466 #ifdef CONFIG_VM86 467 /* Virtual 86 mode info */ 468 struct vm86 *vm86; 469 #endif 470 /* IO permissions: */ 471 struct io_bitmap *io_bitmap; 472 473 /* 474 * IOPL. Privilege level dependent I/O permission which is 475 * emulated via the I/O bitmap to prevent user space from disabling 476 * interrupts. 477 */ 478 unsigned long iopl_emul; 479 480 unsigned int iopl_warn:1; 481 unsigned int sig_on_uaccess_err:1; 482 483 /* 484 * Protection Keys Register for Userspace. Loaded immediately on 485 * context switch. Store it in thread_struct to avoid a lookup in 486 * the tasks's FPU xstate buffer. This value is only valid when a 487 * task is scheduled out. For 'current' the authoritative source of 488 * PKRU is the hardware itself. 489 */ 490 u32 pkru; 491 492 #ifdef CONFIG_X86_USER_SHADOW_STACK 493 unsigned long features; 494 unsigned long features_locked; 495 496 struct thread_shstk shstk; 497 #endif 498 499 /* Floating point and extended processor state */ 500 struct fpu fpu; 501 /* 502 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at 503 * the end. 
504 */ 505 }; 506 507 extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size); 508 509 static inline void arch_thread_struct_whitelist(unsigned long *offset, 510 unsigned long *size) 511 { 512 fpu_thread_struct_whitelist(offset, size); 513 } 514 515 static inline void 516 native_load_sp0(unsigned long sp0) 517 { 518 this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); 519 } 520 521 static __always_inline void native_swapgs(void) 522 { 523 #ifdef CONFIG_X86_64 524 asm volatile("swapgs" ::: "memory"); 525 #endif 526 } 527 528 static __always_inline unsigned long current_top_of_stack(void) 529 { 530 /* 531 * We can't read directly from tss.sp0: sp0 on x86_32 is special in 532 * and around vm86 mode and sp0 on x86_64 is special because of the 533 * entry trampoline. 534 */ 535 return this_cpu_read_stable(pcpu_hot.top_of_stack); 536 } 537 538 static __always_inline bool on_thread_stack(void) 539 { 540 return (unsigned long)(current_top_of_stack() - 541 current_stack_pointer) < THREAD_SIZE; 542 } 543 544 #ifdef CONFIG_PARAVIRT_XXL 545 #include <asm/paravirt.h> 546 #else 547 548 static inline void load_sp0(unsigned long sp0) 549 { 550 native_load_sp0(sp0); 551 } 552 553 #endif /* CONFIG_PARAVIRT_XXL */ 554 555 unsigned long __get_wchan(struct task_struct *p); 556 557 extern void select_idle_routine(const struct cpuinfo_x86 *c); 558 extern void amd_e400_c1e_apic_setup(void); 559 560 extern unsigned long boot_option_idle_override; 561 562 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, 563 IDLE_POLL}; 564 565 extern void enable_sep_cpu(void); 566 567 568 /* Defined in head.S */ 569 extern struct desc_ptr early_gdt_descr; 570 571 extern void switch_gdt_and_percpu_base(int); 572 extern void load_direct_gdt(int); 573 extern void load_fixmap_gdt(int); 574 extern void cpu_init(void); 575 extern void cpu_init_exception_handling(void); 576 extern void cr4_init(void); 577 578 static inline unsigned long get_debugctlmsr(void) 579 { 580 unsigned long debugctlmsr = 0; 581 582 #ifndef CONFIG_X86_DEBUGCTLMSR 583 if (boot_cpu_data.x86 < 6) 584 return 0; 585 #endif 586 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 587 588 return debugctlmsr; 589 } 590 591 static inline void update_debugctlmsr(unsigned long debugctlmsr) 592 { 593 #ifndef CONFIG_X86_DEBUGCTLMSR 594 if (boot_cpu_data.x86 < 6) 595 return; 596 #endif 597 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); 598 } 599 600 extern void set_task_blockstep(struct task_struct *task, bool on); 601 602 /* Boot loader type from the setup header: */ 603 extern int bootloader_type; 604 extern int bootloader_version; 605 606 extern char ignore_fpu_irq; 607 608 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 609 #define ARCH_HAS_PREFETCHW 610 611 #ifdef CONFIG_X86_32 612 # define BASE_PREFETCH "" 613 # define ARCH_HAS_PREFETCH 614 #else 615 # define BASE_PREFETCH "prefetcht0 %P1" 616 #endif 617 618 /* 619 * Prefetch instructions for Pentium III (+) and AMD Athlon (+) 620 * 621 * It's not worth to care about 3dnow prefetches for the K6 622 * because they are microcoded there and very slow. 623 */ 624 static inline void prefetch(const void *x) 625 { 626 alternative_input(BASE_PREFETCH, "prefetchnta %P1", 627 X86_FEATURE_XMM, 628 "m" (*(const char *)x)); 629 } 630 631 /* 632 * 3dnow prefetch to get an exclusive cache line. 
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#ifdef CONFIG_X86_32
#define INIT_THREAD {							\
	.sp0			= TOP_OF_INIT_STACK,			\
	.sysenter_cs		= __KERNEL_CS,				\
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
extern unsigned long __end_init_task[];

#define INIT_THREAD {							\
	.sp	= (unsigned long)&__end_init_task -			\
		  TOP_OF_KERNEL_STACK_PADDING -				\
		  sizeof(struct pt_regs),				\
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
			 unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

static inline u32 per_cpu_llc_id(unsigned int cpu)
{
	return per_cpu(cpu_info.topo.llc_id, cpu);
}

static inline u32 per_cpu_l2c_id(unsigned int cpu)
{
	return per_cpu(cpu_info.topo.l2c_id, cpu);
}

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_highest_perf(void);
extern void amd_clear_divider(void);
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_highest_perf(void)		{ return 0; }
static inline void amd_clear_divider(void)		{ }
static inline void amd_check_microcode(void)		{ }
#endif

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(struct cpuinfo_x86 *prev_info);
void store_cpu_caps(struct cpuinfo_x86 *info);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

extern bool gds_ucode_mitigated(void);

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions. WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE. The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are fully serializing instructions themselves and
 * do not require this barrier. This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
}

#endif /* _ASM_X86_PROCESSOR_H */