acpi_cpu.c (old revision 51dd214c84efceda87c2ac10d34b7e3ee5b6c28f) | acpi_cpu.c (new revision b57a73f8e758fddf66180287569506e099f55607) |
---|---|
1/*- 2 * Copyright (c) 2003-2005 Nate Lawson (SDG) 3 * Copyright (c) 2001 Michael Smith 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: --- 33 unchanged lines hidden (view full) --- 42#include <sys/sbuf.h> 43#include <sys/smp.h> 44 45#include <dev/pci/pcivar.h> 46#include <machine/atomic.h> 47#include <machine/bus.h> 48#if defined(__amd64__) || defined(__i386__) 49#include <machine/clock.h> | 1/*- 2 * Copyright (c) 2003-2005 Nate Lawson (SDG) 3 * Copyright (c) 2001 Michael Smith 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: --- 33 unchanged lines hidden (view full) --- 42#include <sys/sbuf.h> 43#include <sys/smp.h> 44 45#include <dev/pci/pcivar.h> 46#include <machine/atomic.h> 47#include <machine/bus.h> 48#if defined(__amd64__) || defined(__i386__) 49#include <machine/clock.h> |
50#include <machine/specialreg.h> 51#include <machine/md_var.h> |
|
50#endif 51#include <sys/rman.h> 52 53#include <contrib/dev/acpica/include/acpi.h> 54#include <contrib/dev/acpica/include/accommon.h> 55 56#include <dev/acpica/acpivar.h> 57 --- 7 unchanged lines hidden (view full) --- 65 66struct acpi_cx { 67 struct resource *p_lvlx; /* Register to read to enter state. */ 68 uint32_t type; /* C1-3 (C4 and up treated as C3). */ 69 uint32_t trans_lat; /* Transition latency (usec). */ 70 uint32_t power; /* Power consumed (mW). */ 71 int res_type; /* Resource type for p_lvlx. */ 72 int res_rid; /* Resource ID for p_lvlx. */ | 52#endif 53#include <sys/rman.h> 54 55#include <contrib/dev/acpica/include/acpi.h> 56#include <contrib/dev/acpica/include/accommon.h> 57 58#include <dev/acpica/acpivar.h> 59 --- 7 unchanged lines hidden (view full) --- 67 68struct acpi_cx { 69 struct resource *p_lvlx; /* Register to read to enter state. */ 70 uint32_t type; /* C1-3 (C4 and up treated as C3). */ 71 uint32_t trans_lat; /* Transition latency (usec). */ 72 uint32_t power; /* Power consumed (mW). */ 73 int res_type; /* Resource type for p_lvlx. */ 74 int res_rid; /* Resource ID for p_lvlx. */ |
75 bool do_mwait; 76 uint32_t mwait_hint; 77 bool mwait_hw_coord; 78 bool mwait_bm_avoidance; |
|
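The four new `struct acpi_cx` fields cache the MWAIT parameters decoded from `_CST` so the idle path can dispatch without touching ACPI objects again. The `mwait_hint` word is what eventually lands in EAX for the MWAIT instruction; under Intel's conventional hint encoding, bits 7:4 select the target C-state minus one and bits 3:0 a sub-state. A minimal, self-contained sketch of that decoding (illustrative only; the driver itself treats the hint as opaque):

```c
/*
 * Hedged sketch: decode the conventional Intel MWAIT hint layout
 * (EAX[7:4] = target C-state - 1, EAX[3:0] = sub-state).
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t hint = 0x20;	/* example hint from a _CST FFH entry */

	printf("C%u sub-state %u\n",
	    (unsigned)((hint >> 4) & 0xf) + 1, (unsigned)(hint & 0xf));
	return (0);
}
```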
73}; 74#define MAX_CX_STATES 8 75 76struct acpi_cpu_softc { 77 device_t cpu_dev; 78 ACPI_HANDLE cpu_handle; 79 struct pcpu *cpu_pcpu; 80 uint32_t cpu_acpi_id; /* ACPI processor id */ --- 42 unchanged lines hidden (view full) --- 123#define PCI_REVISION_4M 3 124#define PIIX4_DEVACTB_REG 0x58 125#define PIIX4_BRLD_EN_IRQ0 (1<<0) 126#define PIIX4_BRLD_EN_IRQ (1<<1) 127#define PIIX4_BRLD_EN_IRQ8 (1<<5) 128#define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8) 129#define PIIX4_PCNTRL_BST_EN (1<<10) 130 | 79}; 80#define MAX_CX_STATES 8 81 82struct acpi_cpu_softc { 83 device_t cpu_dev; 84 ACPI_HANDLE cpu_handle; 85 struct pcpu *cpu_pcpu; 86 uint32_t cpu_acpi_id; /* ACPI processor id */ --- 42 unchanged lines hidden (view full) --- 129#define PCI_REVISION_4M 3 130#define PIIX4_DEVACTB_REG 0x58 131#define PIIX4_BRLD_EN_IRQ0 (1<<0) 132#define PIIX4_BRLD_EN_IRQ (1<<1) 133#define PIIX4_BRLD_EN_IRQ8 (1<<5) 134#define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8) 135#define PIIX4_PCNTRL_BST_EN (1<<10) 136 |
137#define CST_FFH_VENDOR_INTEL 1 138#define CST_FFH_INTEL_CL_C1IO 1 139#define CST_FFH_INTEL_CL_MWAIT 2 140#define CST_FFH_MWAIT_HW_COORD 0x0001 141#define CST_FFH_MWAIT_BM_AVOID 0x0002 142 |
|
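These constants spell out Intel's vendor-specific Functional Fixed Hardware (FFH) convention for `_CST` registers: vendor code 1 identifies Intel, the class selects "C1 I/O then halt" (1) or native MWAIT (2), and for the MWAIT class the register address carries the raw hint while the access-size field doubles as a flag word (bit 0: hardware coordination, bit 1: bus-master avoidance). A self-contained sketch of that mapping; the struct here is a simplified stand-in for the real GAS record that `acpi_PkgFFH_IntelCpu()` unpacks, so its field names are hypothetical:

```c
/* Hedged sketch of the Intel FFH _CST field mapping. */
#include <stdint.h>
#include <stdio.h>

struct ffh_reg {			/* simplified stand-in for a GAS */
	uint8_t		vendor;		/* CST_FFH_VENDOR_INTEL == 1 */
	uint8_t		class;		/* C1IO == 1, MWAIT == 2 */
	uint64_t	address;	/* MWAIT hint, or I/O port for C1IO */
	uint8_t		accsize;	/* flag bits for the MWAIT class */
};

int
main(void)
{
	struct ffh_reg r = { 1, 2, 0x20, 0x3 };	/* assumed sample values */

	if (r.vendor == 1 && r.class == 2)
		printf("mwait hint %#jx hwc=%d bma=%d\n",
		    (uintmax_t)r.address, r.accsize & 0x1,
		    (r.accsize & 0x2) != 0);
	return (0);
}
```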
131/* Allow users to ignore processor orders in MADT. */ 132static int cpu_unordered; 133SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN, 134 &cpu_unordered, 0, 135 "Do not use the MADT to match ACPI Processor objects to CPUs."); 136 137/* Knob to disable acpi_cpu devices */ 138bool acpi_cpu_disabled = false; --- 35 unchanged lines hidden (view full) --- 174static void acpi_cpu_idle(sbintime_t sbt); 175static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context); 176static int acpi_cpu_quirks(void); 177static int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS); 178static int acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS); 179static int acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc); 180static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); 181static int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); | 143/* Allow users to ignore processor orders in MADT. */ 144static int cpu_unordered; 145SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN, 146 &cpu_unordered, 0, 147 "Do not use the MADT to match ACPI Processor objects to CPUs."); 148 149/* Knob to disable acpi_cpu devices */ 150bool acpi_cpu_disabled = false; --- 35 unchanged lines hidden (view full) --- 186static void acpi_cpu_idle(sbintime_t sbt); 187static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context); 188static int acpi_cpu_quirks(void); 189static int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS); 190static int acpi_cpu_usage_counters_sysctl(SYSCTL_HANDLER_ARGS); 191static int acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc); 192static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); 193static int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); |
194#if defined(__i386__) || defined(__amd64__) 195static int acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS); 196#endif |
|
182 183static device_method_t acpi_cpu_methods[] = { 184 /* Device interface */ 185 DEVMETHOD(device_probe, acpi_cpu_probe), 186 DEVMETHOD(device_attach, acpi_cpu_attach), 187 DEVMETHOD(device_detach, bus_generic_detach), 188 DEVMETHOD(device_shutdown, acpi_cpu_shutdown), 189 DEVMETHOD(device_suspend, acpi_cpu_suspend), --- 153 unchanged lines hidden (view full) --- 343 } 344 345 /* 346 * Before calling any CPU methods, collect child driver feature hints 347 * and notify ACPI of them. We support unified SMP power control 348 * so advertise this ourselves. Note this is not the same as independent 349 * SMP control where each CPU can have different settings. 350 */ | 197 198static device_method_t acpi_cpu_methods[] = { 199 /* Device interface */ 200 DEVMETHOD(device_probe, acpi_cpu_probe), 201 DEVMETHOD(device_attach, acpi_cpu_attach), 202 DEVMETHOD(device_detach, bus_generic_detach), 203 DEVMETHOD(device_shutdown, acpi_cpu_shutdown), 204 DEVMETHOD(device_suspend, acpi_cpu_suspend), --- 153 unchanged lines hidden (view full) --- 358 } 359 360 /* 361 * Before calling any CPU methods, collect child driver feature hints 362 * and notify ACPI of them. We support unified SMP power control 363 * so advertise this ourselves. Note this is not the same as independent 364 * SMP control where each CPU can have different settings. 365 */ |
351 sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3; | 366 sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3 | 367 ACPI_CAP_C1_IO_HALT; 368 369#if defined(__i386__) || defined(__amd64__) 370 /* 371 * Ask for MWAIT modes if not disabled and interrupts work 372 * reasonably with MWAIT. 373 */ 374 if (!acpi_disabled("mwait") && cpu_mwait_usable()) 375 sc->cpu_features |= ACPI_CAP_SMP_C1_NATIVE | ACPI_CAP_SMP_C3_NATIVE; 376#endif 377 |
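`cpu_mwait_usable()` comes from the newly included `<machine/md_var.h>`: the native-MWAIT capability bits are offered to the platform only when MWAIT is present and can be woken by interrupts. A hedged user-space sketch of the kind of CPUID test such a predicate performs (the real machine-dependent implementation may check more than this):

```c
/* Hedged sketch of a cpu_mwait_usable()-style CPUID check. */
#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool
mwait_usable_sketch(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.1:ECX bit 3 - MONITOR/MWAIT instructions present. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx) || (ecx & (1u << 3)) == 0)
		return (false);
	/*
	 * CPUID.5:ECX bit 1 - MWAIT treats interrupts as break events
	 * even while they are masked, which an idle loop relies on.
	 */
	if (!__get_cpuid(5, &eax, &ebx, &ecx, &edx))
		return (false);
	return ((ecx & (1u << 1)) != 0);
}

int
main(void)
{
	printf("MWAIT usable: %s\n", mwait_usable_sketch() ? "yes" : "no");
	return (0);
}
```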
352 if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) { 353 for (i = 0; i < drv_count; i++) { 354 if (ACPI_GET_FEATURES(drivers[i], &features) == 0) 355 sc->cpu_features |= features; 356 } 357 free(drivers, M_TEMP); 358 } 359 --- 355 unchanged lines hidden (view full) --- 715 cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency; 716 cx_ptr++; 717 sc->cpu_cx_count++; 718 cpu_deepest_sleep = 3; 719 } 720 } 721} 722 | 378 if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) { 379 for (i = 0; i < drv_count; i++) { 380 if (ACPI_GET_FEATURES(drivers[i], &features) == 0) 381 sc->cpu_features |= features; 382 } 383 free(drivers, M_TEMP); 384 } 385 --- 355 unchanged lines hidden (view full) --- 741 cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency; 742 cx_ptr++; 743 sc->cpu_cx_count++; 744 cpu_deepest_sleep = 3; 745 } 746 } 747} 748 |
749static void 750acpi_cpu_cx_cst_mwait(struct acpi_cx *cx_ptr, uint64_t address, int accsize) 751{ 752 753 cx_ptr->do_mwait = true; 754 cx_ptr->mwait_hint = address & 0xffffffff; 755 cx_ptr->mwait_hw_coord = (accsize & CST_FFH_MWAIT_HW_COORD) != 0; 756 cx_ptr->mwait_bm_avoidance = (accsize & CST_FFH_MWAIT_BM_AVOID) != 0; 757} 758 759static void 760acpi_cpu_cx_cst_free_plvlx(device_t cpu_dev, struct acpi_cx *cx_ptr) 761{ 762 763 if (cx_ptr->p_lvlx == NULL) 764 return; 765 bus_release_resource(cpu_dev, cx_ptr->res_type, cx_ptr->res_rid, 766 cx_ptr->p_lvlx); 767 cx_ptr->p_lvlx = NULL; 768} 769 |
|
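Factoring out `acpi_cpu_cx_cst_free_plvlx()` matters because the notify handler can re-run `_CST` parsing at any time: each pass must release an I/O resource left over from a previous pass before deciding anew between I/O-then-halt, MWAIT, and plain HLT. `acpi_cpu_cx_cst_mwait()` just records the hint and the two flag bits for later use by the idle path.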
723/* 724 * Parse a _CST package and set up its Cx states. Since the _CST object 725 * can change dynamically, our notify handler may call this function 726 * to clean up and probe the new _CST package. 727 */ 728static int 729acpi_cpu_cx_cst(struct acpi_cpu_softc *sc) 730{ 731 struct acpi_cx *cx_ptr; 732 ACPI_STATUS status; 733 ACPI_BUFFER buf; 734 ACPI_OBJECT *top; 735 ACPI_OBJECT *pkg; 736 uint32_t count; | 770/* 771 * Parse a _CST package and set up its Cx states. Since the _CST object 772 * can change dynamically, our notify handler may call this function 773 * to clean up and probe the new _CST package. 774 */ 775static int 776acpi_cpu_cx_cst(struct acpi_cpu_softc *sc) 777{ 778 struct acpi_cx *cx_ptr; 779 ACPI_STATUS status; 780 ACPI_BUFFER buf; 781 ACPI_OBJECT *top; 782 ACPI_OBJECT *pkg; 783 uint32_t count; |
737 int i; | 784 uint64_t address; 785 int i, vendor, class, accsize; |
738 739 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 740 741 buf.Pointer = NULL; 742 buf.Length = ACPI_ALLOCATE_BUFFER; 743 status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf); 744 if (ACPI_FAILURE(status)) 745 return (ENXIO); --- 39 unchanged lines hidden (view full) --- 785 786 device_printf(sc->cpu_dev, "skipping invalid Cx state package\n"); 787 continue; 788 } 789 790 /* Validate the state to see if we should use it. */ 791 switch (cx_ptr->type) { 792 case ACPI_STATE_C1: | 786 787 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 788 789 buf.Pointer = NULL; 790 buf.Length = ACPI_ALLOCATE_BUFFER; 791 status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf); 792 if (ACPI_FAILURE(status)) 793 return (ENXIO); --- 39 unchanged lines hidden (view full) --- 833 834 device_printf(sc->cpu_dev, "skipping invalid Cx state package\n"); 835 continue; 836 } 837 838 /* Validate the state to see if we should use it. */ 839 switch (cx_ptr->type) { 840 case ACPI_STATE_C1: |
841 acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr); 842#if defined(__i386__) || defined(__amd64__) 843 if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address, 844 &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) { 845 if (class == CST_FFH_INTEL_CL_C1IO) { 846 /* C1 I/O then Halt */ 847 cx_ptr->res_rid = sc->cpu_cx_count; 848 bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT, 849 cx_ptr->res_rid, address, 1); 850 cx_ptr->p_lvlx = bus_alloc_resource_any(sc->cpu_dev, 851 SYS_RES_IOPORT, &cx_ptr->res_rid, RF_ACTIVE | 852 RF_SHAREABLE); 853 if (cx_ptr->p_lvlx == NULL) { 854 bus_delete_resource(sc->cpu_dev, SYS_RES_IOPORT, 855 cx_ptr->res_rid); 856 device_printf(sc->cpu_dev, 857 "C1 I/O failed to allocate port %d, " 858 "degrading to C1 Halt", (int)address); 859 } 860 } else if (class == CST_FFH_INTEL_CL_MWAIT) { 861 acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize); 862 } 863 } 864#endif |
|
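For C1 the parser now recognizes both Intel FFH classes: class 1 names an I/O port whose read requests chipset-assisted C1 entry ahead of the halt, and if that port cannot be allocated the state quietly degrades to a plain C1 HLT, as the printf says; class 2 marks the state for native MWAIT. Entries that are not Intel FFH keep the default HLT behavior.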
793 if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) { 794 /* This is the first C1 state. Use the reserved slot. */ 795 sc->cpu_cx_states[0] = *cx_ptr; 796 } else { 797 sc->cpu_non_c2 = sc->cpu_cx_count; 798 sc->cpu_non_c3 = sc->cpu_cx_count; 799 cx_ptr++; 800 sc->cpu_cx_count++; --- 12 unchanged lines hidden (view full) --- 813 device_get_unit(sc->cpu_dev), i)); 814 continue; 815 } else 816 cpu_deepest_sleep = 3; 817 break; 818 } 819 820 /* Free up any previous register. */ | 865 if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) { 866 /* This is the first C1 state. Use the reserved slot. */ 867 sc->cpu_cx_states[0] = *cx_ptr; 868 } else { 869 sc->cpu_non_c2 = sc->cpu_cx_count; 870 sc->cpu_non_c3 = sc->cpu_cx_count; 871 cx_ptr++; 872 sc->cpu_cx_count++; --- 12 unchanged lines hidden (view full) --- 885 device_get_unit(sc->cpu_dev), i)); 886 continue; 887 } else 888 cpu_deepest_sleep = 3; 889 break; 890 } 891 892 /* Free up any previous register. */ |
821 if (cx_ptr->p_lvlx != NULL) { 822 bus_release_resource(sc->cpu_dev, cx_ptr->res_type, cx_ptr->res_rid, 823 cx_ptr->p_lvlx); 824 cx_ptr->p_lvlx = NULL; 825 } | 893 acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr); |
826 827 /* Allocate the control register for C2 or C3. */ | 894 895 /* Allocate the control register for C2 or C3. */ |
828 cx_ptr->res_rid = sc->cpu_cx_count; 829 acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->res_rid, 830 &cx_ptr->p_lvlx, RF_SHAREABLE); 831 if (cx_ptr->p_lvlx) { | 896#if defined(__i386__) || defined(__amd64__) 897 if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address, 898 &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL && 899 class == CST_FFH_INTEL_CL_MWAIT) { 900 /* Native C State Instruction use (mwait) */ 901 acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize); |
832 ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 902 ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
833 "acpi_cpu%d: Got C%d - %d latency\n", 834 device_get_unit(sc->cpu_dev), cx_ptr->type, 835 cx_ptr->trans_lat)); | 903 "acpi_cpu%d: Got C%d/mwait - %d latency\n", 904 device_get_unit(sc->cpu_dev), cx_ptr->type, cx_ptr->trans_lat)); |
836 cx_ptr++; 837 sc->cpu_cx_count++; | 905 cx_ptr++; 906 sc->cpu_cx_count++; |
907 } else 908#endif 909 { 910 cx_ptr->res_rid = sc->cpu_cx_count; 911 acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, 912 &cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE); 913 if (cx_ptr->p_lvlx) { 914 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 915 "acpi_cpu%d: Got C%d - %d latency\n", 916 device_get_unit(sc->cpu_dev), cx_ptr->type, 917 cx_ptr->trans_lat)); 918 cx_ptr++; 919 sc->cpu_cx_count++; 920 } |
|
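For C2 and deeper states only the MWAIT FFH class is honored; everything else falls through to the old path, which maps the package's generic address via `acpi_PkgGas()` and enters the state with an I/O register read. Non-Intel and non-FFH tables therefore behave exactly as before.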
838 } 839 } 840 AcpiOsFree(buf.Pointer); 841 842 /* If C1 state was not found, we need one now. */ 843 cx_ptr = sc->cpu_cx_states; 844 if (cx_ptr->type == ACPI_STATE_C0) { 845 cx_ptr->type = ACPI_STATE_C1; --- 105 unchanged lines hidden (view full) --- 951 OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD, 952 (void *)sc, 0, acpi_cpu_usage_sysctl, "A", 953 "percent usage for each Cx state"); 954 SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, 955 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 956 OID_AUTO, "cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD, 957 (void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A", 958 "Cx sleep state counters"); | 921 } 922 } 923 AcpiOsFree(buf.Pointer); 924 925 /* If C1 state was not found, we need one now. */ 926 cx_ptr = sc->cpu_cx_states; 927 if (cx_ptr->type == ACPI_STATE_C0) { 928 cx_ptr->type = ACPI_STATE_C1; --- 105 unchanged lines hidden (view full) --- 1034 OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD, 1035 (void *)sc, 0, acpi_cpu_usage_sysctl, "A", 1036 "percent usage for each Cx state"); 1037 SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, 1038 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 1039 OID_AUTO, "cx_usage_counters", CTLTYPE_STRING | CTLFLAG_RD, 1040 (void *)sc, 0, acpi_cpu_usage_counters_sysctl, "A", 1041 "Cx sleep state counters"); |
1042#if defined(__i386__) || defined(__amd64__) 1043 SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, 1044 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 1045 OID_AUTO, "cx_method", CTLTYPE_STRING | CTLFLAG_RD, 1046 (void *)sc, 0, acpi_cpu_method_sysctl, "A", 1047 "Cx entrance methods"); 1048#endif |
|
959 960 /* Signal platform that we can handle _CST notification. */ 961 if (!cpu_cx_generic && cpu_cst_cnt != 0) { 962 ACPI_LOCK(acpi); 963 AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8); 964 ACPI_UNLOCK(acpi); 965 } 966} --- 71 unchanged lines hidden --- 1038 /* 1039 * Execute HLT (or equivalent) and wait for an interrupt. We can't 1040 * precisely calculate the time spent in C1 since the place we wake up 1041 * is an ISR. Assume we slept no more than half of quantum, unless 1042 * we are called inside critical section, delaying context switch. 1043 */ 1044 if (cx_next->type == ACPI_STATE_C1) { 1045 cputicks = cpu_ticks(); | 1049 1050 /* Signal platform that we can handle _CST notification. */ 1051 if (!cpu_cx_generic && cpu_cst_cnt != 0) { 1052 ACPI_LOCK(acpi); 1053 AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8); 1054 ACPI_UNLOCK(acpi); 1055 } 1056} --- 71 unchanged lines hidden --- 1128 /* 1129 * Execute HLT (or equivalent) and wait for an interrupt. We can't 1130 * precisely calculate the time spent in C1 since the place we wake up 1131 * is an ISR. Assume we slept no more than half of quantum, unless 1132 * we are called inside critical section, delaying context switch. 1133 */ 1134 if (cx_next->type == ACPI_STATE_C1) { 1135 cputicks = cpu_ticks(); |
1046 acpi_cpu_c1(); | 1136 if (cx_next->p_lvlx != NULL) { 1137 /* C1 I/O then Halt */ 1138 CPU_GET_REG(cx_next->p_lvlx, 1); 1139 } 1140 if (cx_next->do_mwait) 1141 acpi_cpu_idle_mwait(cx_next->mwait_hint); 1142 else 1143 acpi_cpu_c1(); |
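The C1 path can now compose two mechanisms: an optional I/O read for the "C1 I/O then halt" chipset hook, followed by either MWAIT or HLT. A self-contained sketch of that dispatch order, with stubs standing in for the machine-dependent primitives (`inl`, `mwait`, `hlt` here are placeholders, not the driver's real calls):

```c
/* Hedged sketch of the new C1 entry dispatch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void io_read_stub(void)     { printf("inl(p_lvlx)\n"); }
static void mwait_stub(uint32_t h) { printf("mwait(%#x)\n", h); }
static void hlt_stub(void)         { printf("hlt\n"); }

static void
enter_c1_sketch(bool has_io_reg, bool do_mwait, uint32_t hint)
{
	if (has_io_reg)
		io_read_stub();		/* chipset-assisted C1 entry */
	if (do_mwait)
		mwait_stub(hint);	/* native MWAIT with cached hint */
	else
		hlt_stub();		/* classic HLT fallback */
}

int
main(void)
{
	enter_c1_sketch(true, false, 0);	/* "C1 I/O then halt" case */
	return (0);
}
```

The unchanged `(ticks << 20) / cpu_tickrate()` conversion that follows approximates microseconds by shifting by 2^20 instead of multiplying by 10^6, overestimating the sleep time by roughly 4.9 percent.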
1047 end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate(); 1048 if (curthread->td_critnest == 0) 1049 end_time = min(end_time, 500000 / hz); 1050 sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4; 1051 return; 1052 } 1053 1054 /* 1055 * For C3, disable bus master arbitration and enable bus master wake 1056 * if BM control is available, otherwise flush the CPU cache. 1057 */ | 1144 end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate(); 1145 if (curthread->td_critnest == 0) 1146 end_time = min(end_time, 500000 / hz); 1147 sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4; 1148 return; 1149 } 1150 1151 /* 1152 * For C3, disable bus master arbitration and enable bus master wake 1153 * if BM control is available, otherwise flush the CPU cache. 1154 */ |
1058 if (cx_next->type == ACPI_STATE_C3) { | 1155 if (cx_next->type == ACPI_STATE_C3 || cx_next->mwait_bm_avoidance) { |
1059 if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { 1060 AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); 1061 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); 1062 } else 1063 ACPI_FLUSH_CPU_CACHE(); 1064 } 1065 1066 /* --- 4 unchanged lines hidden (view full) --- 1071 */ 1072 if (cx_next->type == ACPI_STATE_C3) { 1073 AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock); 1074 cputicks = 0; 1075 } else { 1076 start_time = 0; 1077 cputicks = cpu_ticks(); 1078 } | 1156 if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { 1157 AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); 1158 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); 1159 } else 1160 ACPI_FLUSH_CPU_CACHE(); 1161 } 1162 1163 /* --- 4 unchanged lines hidden (view full) --- 1168 */ 1169 if (cx_next->type == ACPI_STATE_C3) { 1170 AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock); 1171 cputicks = 0; 1172 } else { 1173 start_time = 0; 1174 cputicks = cpu_ticks(); 1175 } |
1079 CPU_GET_REG(cx_next->p_lvlx, 1); | 1176 if (cx_next->do_mwait) 1177 acpi_cpu_idle_mwait(cx_next->mwait_hint); 1178 else 1179 CPU_GET_REG(cx_next->p_lvlx, 1); |
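Deeper states dispatch the same way: MWAIT with the cached hint when `do_mwait` is set, otherwise the traditional read of the P_LVLx register. Correspondingly, both the bus-master disable above and the re-enable below now fire not only for C3 but for any MWAIT state flagged with bus-master avoidance, since such states need the same arbitration handling as C3 (with the cache-flush fallback when BM control is unavailable).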
1080 1081 /* 1082 * Read the end time twice. Since it may take an arbitrary time 1083 * to enter the idle state, the first read may be executed before 1084 * the processor has stopped. Doing it again provides enough 1085 * margin that we are certain to have a correct value. 1086 */ 1087 AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock); 1088 if (cx_next->type == ACPI_STATE_C3) { 1089 AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock); 1090 end_time = acpi_TimerDelta(end_time, start_time); 1091 } else 1092 end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate(); 1093 1094 /* Enable bus master arbitration and disable bus master wakeup. */ | 1180 1181 /* 1182 * Read the end time twice. Since it may take an arbitrary time 1183 * to enter the idle state, the first read may be executed before 1184 * the processor has stopped. Doing it again provides enough 1185 * margin that we are certain to have a correct value. 1186 */ 1187 AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock); 1188 if (cx_next->type == ACPI_STATE_C3) { 1189 AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock); 1190 end_time = acpi_TimerDelta(end_time, start_time); 1191 } else 1192 end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate(); 1193 1194 /* Enable bus master arbitration and disable bus master wakeup. */ |
1095 if (cx_next->type == ACPI_STATE_C3 && 1096 (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { | 1195 if ((cx_next->type == ACPI_STATE_C3 || cx_next->mwait_bm_avoidance) && 1196 (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { |
1097 AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0); 1098 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); 1099 } 1100 ACPI_ENABLE_IRQS(); 1101 1102 sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4; 1103} 1104 --- 176 unchanged lines hidden (view full) --- 1281 sbuf_trim(&sb); 1282 sbuf_finish(&sb); 1283 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 1284 sbuf_delete(&sb); 1285 1286 return (0); 1287} 1288 | 1197 AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0); 1198 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); 1199 } 1200 ACPI_ENABLE_IRQS(); 1201 1202 sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4; 1203} 1204 --- 176 unchanged lines hidden (view full) --- 1381 sbuf_trim(&sb); 1382 sbuf_finish(&sb); 1383 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 1384 sbuf_delete(&sb); 1385 1386 return (0); 1387} 1388 |
1389#if defined(__i386__) || defined(__amd64__) |
|
1289static int | 1390static int |
1391acpi_cpu_method_sysctl(SYSCTL_HANDLER_ARGS) 1392{ 1393 struct acpi_cpu_softc *sc; 1394 struct acpi_cx *cx; 1395 struct sbuf sb; 1396 char buf[128]; 1397 int i; 1398 1399 sc = (struct acpi_cpu_softc *)arg1; 1400 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 1401 for (i = 0; i < sc->cpu_cx_count; i++) { 1402 cx = &sc->cpu_cx_states[i]; 1403 sbuf_printf(&sb, "C%d/", i + 1); 1404 if (cx->do_mwait) { 1405 sbuf_cat(&sb, "mwait"); 1406 if (cx->mwait_hw_coord) 1407 sbuf_cat(&sb, "/hwc"); 1408 if (cx->mwait_bm_avoidance) 1409 sbuf_cat(&sb, "/bma"); 1410 } else if (cx->type == ACPI_STATE_C1) { 1411 sbuf_cat(&sb, "hlt"); 1412 } else { 1413 sbuf_cat(&sb, "io"); 1414 } 1415 if (cx->type == ACPI_STATE_C1 && cx->p_lvlx != NULL) 1416 sbuf_cat(&sb, "/iohlt"); 1417 sbuf_putc(&sb, ' '); 1418 } 1419 sbuf_trim(&sb); 1420 sbuf_finish(&sb); 1421 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 1422 sbuf_delete(&sb); 1423 return (0); 1424} 1425#endif 1426 1427static int |
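The new read-only `cx_method` sysctl makes the chosen entry method visible per state. On a machine where every state came from FFH MWAIT entries with hardware coordination and the deepest one requests bus-master avoidance, the handler would print something like `C1/mwait/hwc C2/mwait/hwc C3/mwait/hwc/bma` (illustrative output, not captured from real hardware).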
|
1290acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc) 1291{ 1292 int i; 1293 1294 ACPI_SERIAL_ASSERT(cpu); 1295 sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1); 1296 1297 /* If not disabling, cache the new lowest non-C3 state. */ --- 76 unchanged lines hidden --- | 1428acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc) 1429{ 1430 int i; 1431 1432 ACPI_SERIAL_ASSERT(cpu); 1433 sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1); 1434 1435 /* If not disabling, cache the new lowest non-C3 state. */ --- 76 unchanged lines hidden --- |