generic.c, before (45fa71f19a2d73f157d6892a8d677a738a0414fd) | generic.c, after (d5f66d5d10611978c3a93cc94a811d74e0cf6cbc) |
---|---|
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong 4 * because MTRRs can span up to 40 bits (36bits on most modern x86) 5 */ 6 7#include <linux/export.h> 8#include <linux/init.h> 9#include <linux/io.h> 10#include <linux/mm.h> 11 12#include <asm/processor-flags.h> | 1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong 4 * because MTRRs can span up to 40 bits (36bits on most modern x86) 5 */ 6 7#include <linux/export.h> 8#include <linux/init.h> 9#include <linux/io.h> 10#include <linux/mm.h> 11 12#include <asm/processor-flags.h> |
| 13#include <asm/cacheinfo.h> |
13#include <asm/cpufeature.h> 14#include <asm/tlbflush.h> 15#include <asm/mtrr.h> 16#include <asm/msr.h> 17#include <asm/memtype.h> 18 19#include "mtrr.h" 20 --- 370 unchanged lines hidden --- 391 continue; 392 } 393 /* new segments: gap or different type */ 394 print_fixed_last(); 395 update_fixed_last(base, base + step, *types); 396 } 397} 398 | 14#include <asm/cpufeature.h> 15#include <asm/tlbflush.h> 16#include <asm/mtrr.h> 17#include <asm/msr.h> 18#include <asm/memtype.h> 19 20#include "mtrr.h" 21 --- 370 unchanged lines hidden --- 392 continue; 393 } 394 /* new segments: gap or different type */ 395 print_fixed_last(); 396 update_fixed_last(base, base + step, *types); 397 } 398} 399 |
399static void prepare_set(void); 400static void post_set(void); 401 | |
402static void __init print_mtrr_state(void) 403{ 404 unsigned int i; 405 int high_width; 406 407 pr_debug("MTRR default type: %s\n", 408 mtrr_attrib_to_str(mtrr_state.def_type)); 409 if (mtrr_state.have_fixed) { --- 35 unchanged lines hidden --- 445} 446 447/* PAT setup for BP. We need to go through sync steps here */ 448void __init mtrr_bp_pat_init(void) 449{ 450 unsigned long flags; 451 452 local_irq_save(flags); | 400static void __init print_mtrr_state(void) 401{ 402 unsigned int i; 403 int high_width; 404 405 pr_debug("MTRR default type: %s\n", 406 mtrr_attrib_to_str(mtrr_state.def_type)); 407 if (mtrr_state.have_fixed) { --- 35 unchanged lines hidden --- 443} 444 445/* PAT setup for BP. We need to go through sync steps here */ 446void __init mtrr_bp_pat_init(void) 447{ 448 unsigned long flags; 449 450 local_irq_save(flags); |
453 prepare_set(); | 451 cache_disable(); |
454 455 pat_init(); 456 | 452 453 pat_init(); 454 |
457 post_set(); | 455 cache_enable(); |
458 local_irq_restore(flags); 459} 460 461/* Grab all of the MTRR state for this CPU into *state */ 462bool __init get_mtrr_state(void) 463{ 464 struct mtrr_var_range *vrs; 465 unsigned lo, dummy; --- 216 unchanged lines hidden --- 682static u32 deftype_lo, deftype_hi; 683 684/** 685 * set_mtrr_state - Set the MTRR state for this CPU. 686 * 687 * NOTE: The CPU must already be in a safe state for MTRR changes, including 688 * measures that only a single CPU can be active in set_mtrr_state() in 689 * order to not be subject to races for usage of deftype_lo. This is | 456 local_irq_restore(flags); 457} 458 459/* Grab all of the MTRR state for this CPU into *state */ 460bool __init get_mtrr_state(void) 461{ 462 struct mtrr_var_range *vrs; 463 unsigned lo, dummy; --- 216 unchanged lines hidden --- 680static u32 deftype_lo, deftype_hi; 681 682/** 683 * set_mtrr_state - Set the MTRR state for this CPU. 684 * 685 * NOTE: The CPU must already be in a safe state for MTRR changes, including 686 * measures that only a single CPU can be active in set_mtrr_state() in 687 * order to not be subject to races for usage of deftype_lo. This is |
690 * accomplished by taking set_atomicity_lock. | 688 * accomplished by taking cache_disable_lock. |
691 * RETURNS: 0 if no changes made, else a mask indicating what was changed. 692 */ 693static unsigned long set_mtrr_state(void) 694{ 695 unsigned long change_mask = 0; 696 unsigned int i; 697 698 for (i = 0; i < num_var_ranges; i++) { --- 14 unchanged lines hidden (view full) --- 713 deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | 714 (mtrr_state.enabled << 10); 715 change_mask |= MTRR_CHANGE_MASK_DEFTYPE; 716 } 717 718 return change_mask; 719} 720 | 689 * RETURNS: 0 if no changes made, else a mask indicating what was changed. 690 */ 691static unsigned long set_mtrr_state(void) 692{ 693 unsigned long change_mask = 0; 694 unsigned int i; 695 696 for (i = 0; i < num_var_ranges; i++) { --- 14 unchanged lines hidden (view full) --- 711 deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | 712 (mtrr_state.enabled << 10); 713 change_mask |= MTRR_CHANGE_MASK_DEFTYPE; 714 } 715 716 return change_mask; 717} 718 |
721 722static unsigned long cr4; 723static DEFINE_RAW_SPINLOCK(set_atomicity_lock); 724 | |
725/* | 719/* 720 * Disable and enable caches. Needed for changing MTRRs and the PAT MSR. 721 * |
726 * Since we are disabling the cache don't allow any interrupts, 727 * they would run extremely slow and would only increase the pain. 728 * 729 * The caller must ensure that local interrupts are disabled and | 722 * Since we are disabling the cache don't allow any interrupts, 723 * they would run extremely slow and would only increase the pain. 724 * 725 * The caller must ensure that local interrupts are disabled and |
730 * are reenabled after post_set() has been called. | 726 * are reenabled after cache_enable() has been called. |
731 */ | 727 */ |
732static void prepare_set(void) __acquires(set_atomicity_lock) | 728static unsigned long saved_cr4; 729static DEFINE_RAW_SPINLOCK(cache_disable_lock); 730 731void cache_disable(void) __acquires(cache_disable_lock) |
733{ 734 unsigned long cr0; 735 736 /* 737 * Note that this is not ideal 738 * since the cache is only flushed/disabled for this CPU while the 739 * MTRRs are changed, but changing this requires more invasive 740 * changes to the way the kernel boots 741 */ 742 | 732{ 733 unsigned long cr0; 734 735 /* 736 * Note that this is not ideal 737 * since the cache is only flushed/disabled for this CPU while the 738 * MTRRs are changed, but changing this requires more invasive 739 * changes to the way the kernel boots 740 */ 741 |
743 raw_spin_lock(&set_atomicity_lock); | 742 raw_spin_lock(&cache_disable_lock); |
744 745 /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ 746 cr0 = read_cr0() | X86_CR0_CD; 747 write_cr0(cr0); 748 749 /* 750 * Cache flushing is the most time-consuming step when programming 751 * the MTRRs. Fortunately, as per the Intel Software Development 752 * Manual, we can skip it if the processor supports cache self- 753 * snooping. 754 */ 755 if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) 756 wbinvd(); 757 758 /* Save value of CR4 and clear Page Global Enable (bit 7) */ 759 if (boot_cpu_has(X86_FEATURE_PGE)) { | 743 744 /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ 745 cr0 = read_cr0() | X86_CR0_CD; 746 write_cr0(cr0); 747 748 /* 749 * Cache flushing is the most time-consuming step when programming 750 * the MTRRs. Fortunately, as per the Intel Software Development 751 * Manual, we can skip it if the processor supports cache self- 752 * snooping. 753 */ 754 if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) 755 wbinvd(); 756 757 /* Save value of CR4 and clear Page Global Enable (bit 7) */ 758 if (boot_cpu_has(X86_FEATURE_PGE)) { |
760 cr4 = __read_cr4(); 761 __write_cr4(cr4 & ~X86_CR4_PGE); | 759 saved_cr4 = __read_cr4(); 760 __write_cr4(saved_cr4 & ~X86_CR4_PGE); |
762 } 763 764 /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ 765 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); 766 flush_tlb_local(); 767 768 /* Save MTRR state */ 769 rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); 770 771 /* Disable MTRRs, and set the default type to uncached */ 772 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); 773 774 /* Again, only flush caches if we have to. */ 775 if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) 776 wbinvd(); 777} 778 | 761 } 762 763 /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ 764 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); 765 flush_tlb_local(); 766 767 /* Save MTRR state */ 768 rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); 769 770 /* Disable MTRRs, and set the default type to uncached */ 771 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); 772 773 /* Again, only flush caches if we have to. */ 774 if (!static_cpu_has(X86_FEATURE_SELFSNOOP)) 775 wbinvd(); 776} 777 |
779static void post_set(void) __releases(set_atomicity_lock) | 778void cache_enable(void) __releases(cache_disable_lock) |
780{ 781 /* Flush TLBs (no need to flush caches - they are disabled) */ 782 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); 783 flush_tlb_local(); 784 785 /* Intel (P6) standard MTRRs */ 786 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); 787 788 /* Enable caches */ 789 write_cr0(read_cr0() & ~X86_CR0_CD); 790 791 /* Restore value of CR4 */ 792 if (boot_cpu_has(X86_FEATURE_PGE)) | 779{ 780 /* Flush TLBs (no need to flush caches - they are disabled) */ 781 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); 782 flush_tlb_local(); 783 784 /* Intel (P6) standard MTRRs */ 785 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); 786 787 /* Enable caches */ 788 write_cr0(read_cr0() & ~X86_CR0_CD); 789 790 /* Restore value of CR4 */ 791 if (boot_cpu_has(X86_FEATURE_PGE)) |
793 __write_cr4(cr4); 794 raw_spin_unlock(&set_atomicity_lock); | 792 __write_cr4(saved_cr4); 793 raw_spin_unlock(&cache_disable_lock); |
795} 796 797static void generic_set_all(void) 798{ 799 unsigned long mask, count; 800 unsigned long flags; 801 802 local_irq_save(flags); | 794} 795 796static void generic_set_all(void) 797{ 798 unsigned long mask, count; 799 unsigned long flags; 800 801 local_irq_save(flags); |
803 prepare_set(); | 802 cache_disable(); |
804 805 /* Actually set the state */ 806 mask = set_mtrr_state(); 807 808 /* also set PAT */ 809 pat_init(); 810 | 803 804 /* Actually set the state */ 805 mask = set_mtrr_state(); 806 807 /* also set PAT */ 808 pat_init(); 809 |
811 post_set(); | 810 cache_enable(); |
812 local_irq_restore(flags); 813 814 /* Use the atomic bitops to update the global mask */ 815 for (count = 0; count < sizeof(mask) * 8; ++count) { 816 if (mask & 0x01) 817 set_bit(count, &smp_changes_mask); 818 mask >>= 1; 819 } --- 14 unchanged lines hidden (view full) --- 834 unsigned long size, mtrr_type type) 835{ 836 unsigned long flags; 837 struct mtrr_var_range *vr; 838 839 vr = &mtrr_state.var_ranges[reg]; 840 841 local_irq_save(flags); | 811 local_irq_restore(flags); 812 813 /* Use the atomic bitops to update the global mask */ 814 for (count = 0; count < sizeof(mask) * 8; ++count) { 815 if (mask & 0x01) 816 set_bit(count, &smp_changes_mask); 817 mask >>= 1; 818 } --- 14 unchanged lines hidden (view full) --- 833 unsigned long size, mtrr_type type) 834{ 835 unsigned long flags; 836 struct mtrr_var_range *vr; 837 838 vr = &mtrr_state.var_ranges[reg]; 839 840 local_irq_save(flags); |
842 prepare_set(); | 841 cache_disable(); |
843 844 if (size == 0) { 845 /* 846 * The invalid bit is kept in the mask, so we simply 847 * clear the relevant mask register to disable a range. 848 */ 849 mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); 850 memset(vr, 0, sizeof(struct mtrr_var_range)); 851 } else { 852 vr->base_lo = base << PAGE_SHIFT | type; 853 vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); 854 vr->mask_lo = -size << PAGE_SHIFT | 0x800; 855 vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); 856 857 mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi); 858 mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi); 859 } 860 | 842 843 if (size == 0) { 844 /* 845 * The invalid bit is kept in the mask, so we simply 846 * clear the relevant mask register to disable a range. 847 */ 848 mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); 849 memset(vr, 0, sizeof(struct mtrr_var_range)); 850 } else { 851 vr->base_lo = base << PAGE_SHIFT | type; 852 vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); 853 vr->mask_lo = -size << PAGE_SHIFT | 0x800; 854 vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); 855 856 mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi); 857 mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi); 858 } 859 |
861 post_set(); | 860 cache_enable(); |
862 local_irq_restore(flags); 863} 864 865int generic_validate_add_page(unsigned long base, unsigned long size, 866 unsigned int type) 867{ 868 unsigned long lbase, last; 869 --- 57 unchanged lines hidden --- | 861 local_irq_restore(flags); 862} 863 864int generic_validate_add_page(unsigned long base, unsigned long size, 865 unsigned int type) 866{ 867 unsigned long lbase, last; 868 --- 57 unchanged lines hidden --- |
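For orientation, both call sites visible above (mtrr_bp_pat_init() and generic_set_all()) follow the same convention on the new side: disable interrupts, call cache_disable(), reprogram the MTRRs and/or PAT, then call cache_enable() and re-enable interrupts. Below is a minimal sketch of that pattern, assuming the cache_disable()/cache_enable() declarations that the newly added <asm/cacheinfo.h> include provides; the helper name example_reprogram() is hypothetical.

```c
/*
 * Sketch of the caller pattern used on the new side of the diff.
 * cache_disable()/cache_enable() are the renamed prepare_set()/post_set();
 * example_reprogram() is a made-up name for illustration only.
 */
#include <linux/irqflags.h>
#include <asm/cacheinfo.h>

static void example_reprogram(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* interrupts stay off while caches are disabled */
	cache_disable();	/* CD=1, optional wbinvd, PGE cleared, MTRRs disabled */

	/* ... write MTRR and/or PAT MSRs here ... */

	cache_enable();		/* MTRRdefType restored, CD=0, CR4 restored */
	local_irq_restore(flags);
}
```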
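The variable-range programming in generic_set_mtrr() above encodes base and mask words before writing MTRRphysBase/MTRRphysMask. A standalone (userspace, not kernel) illustration of the low 32 bits of that encoding follows; the example range, 256 MiB of write-combining memory at 3 GiB, is an assumption chosen purely for the arithmetic, and the *_hi words are omitted because they also depend on size_and_mask, which is derived from the CPU's physical address width.

```c
/* Illustrative only: mirrors the base_lo/mask_lo arithmetic of generic_set_mtrr(). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	 12
#define MTRR_TYPE_WRCOMB 1	/* write-combining memory type encoding */

int main(void)
{
	uint64_t base = 0xC0000000u >> PAGE_SHIFT;	/* base in pages: 3 GiB */
	uint64_t size = 0x10000000u >> PAGE_SHIFT;	/* size in pages: 256 MiB */

	/* base_lo = physical base | memory type */
	uint32_t base_lo = (uint32_t)(base << PAGE_SHIFT) | MTRR_TYPE_WRCOMB;
	/* mask_lo = two's-complement size mask | valid bit (bit 11, 0x800) */
	uint32_t mask_lo = (uint32_t)(-size << PAGE_SHIFT) | 0x800;

	/* Prints base_lo=0xc0000001 mask_lo=0xf0000800 */
	printf("base_lo=%#x mask_lo=%#x\n", base_lo, mask_lo);
	return 0;
}
```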
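Likewise, the deftype_lo update in set_mtrr_state() above repacks the low word of MSR_MTRRdefType: the low byte holds the default memory type (only bits 2:0 are architecturally used), bit 10 the fixed-range enable and bit 11 the global MTRR enable, which is why the code masks with ~0xcff before OR-ing the new values in. A tiny sketch with assumed sample values:

```c
/* Illustrative only: the MSR_MTRRdefType low-word packing from set_mtrr_state(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t deftype_lo = 0x00000000;	/* assumed value previously read via rdmsr() */
	uint32_t def_type = 6;			/* write-back default type (assumed) */
	uint32_t enabled = 3;			/* bit 0: fixed ranges on, bit 1: MTRRs on */

	/* ~0xcff clears the type byte plus bits 10 (FE) and 11 (E) before re-packing */
	deftype_lo = (deftype_lo & ~0xcffu) | def_type | (enabled << 10);

	printf("deftype_lo=%#x\n", deftype_lo);	/* prints 0xc06 */
	return 0;
}
```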