processor_idle.c: diff between 851462444d421c223965b12b836bef63da61b57f (old) and a474a515497ef3566cfc17a2cab3d54d6d50ff1c (new)
 /*
  * processor_idle - idle state submodule to the ACPI processor driver
  *
  * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
  * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *			- Added processor hotplug support

--- 721 unchanged lines hidden (view full) ---

  * @drv: cpuidle driver containing cpuidle state info
  * @index: index of target state
  *
  * This is equivalent to the HALT instruction.
  */
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
-	ktime_t kt1, kt2;
-	s64 idle_time;
 	struct acpi_processor *pr;
 	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 
 	pr = __this_cpu_read(processors);
-	dev->last_residency = 0;
 
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	local_irq_disable();
-
-
 	lapic_timer_state_broadcast(pr, cx, 1);
-	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
-	kt2 = ktime_get_real();
-	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
-	/* Update device last_residency*/
-	dev->last_residency = (int)idle_time;
-
-	local_irq_enable();
 	lapic_timer_state_broadcast(pr, cx, 0);
 
 	return index;
 }
 
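With this change acpi_idle_enter_c1() no longer toggles interrupts or measures its own residency: both jobs move into the cpuidle core, enabled by the .en_core_tk_irqen = 1 flag added to acpi_idle_driver at the bottom of this diff. For reference, a minimal sketch of the core-side wrapper that takes over, modeled on cpuidle_wrap_enter() in drivers/cpuidle/cpuidle.c of this era (treat the exact names and signature as an approximation, not as part of this patch):

	/* Sketch of the core-side time keeping that replaces the per-driver
	 * ktime_get_real() pairs; modeled on cpuidle_wrap_enter() from
	 * drivers/cpuidle/cpuidle.c around this kernel version.
	 */
	int cpuidle_wrap_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index,
			       int (*enter)(struct cpuidle_device *dev,
					    struct cpuidle_driver *drv, int index))
	{
		ktime_t time_start, time_end;
		s64 diff;

		time_start = ktime_get();	/* irqs are already off here */

		index = enter(dev, drv, index);	/* e.g. acpi_idle_enter_c1() */

		time_end = ktime_get();

		local_irq_enable();		/* the core, not the driver, re-enables irqs */

		diff = ktime_to_us(ktime_sub(time_end, time_start));
		if (diff > INT_MAX)
			diff = INT_MAX;

		dev->last_residency = (int) diff;	/* one accounting site for all drivers */

		return index;
	}

The driver callback is thus invoked with interrupts already disabled and returns with them still disabled; the core re-enables them and fills in dev->last_residency once, for every driver that sets the flag.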
 
 /**
  * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)

--- 30 unchanged lines hidden (view full) ---

  * @index: the index of suggested state
  */
 static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
 	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
-	ktime_t kt1, kt2;
-	s64 idle_time_ns;
-	s64 idle_time;
 
 	pr = __this_cpu_read(processors);
-	dev->last_residency = 0;
 
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	local_irq_disable();
-
-
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
 		 * TS_POLLING-cleared state must be visible before we test
 		 * NEED_RESCHED:
 		 */
 		smp_mb();
 
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
-			local_irq_enable();
 			return -EINVAL;
 		}
 	}
 
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
 	lapic_timer_state_broadcast(pr, cx, 1);
 
 	if (cx->type == ACPI_STATE_C3)
 		ACPI_FLUSH_CPU_CACHE();
 
-	kt1 = ktime_get_real();
 	/* Tell the scheduler that we are going deep-idle: */
 	sched_clock_idle_sleep_event();
 	acpi_idle_do_entry(cx);
-	kt2 = ktime_get_real();
-	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
-	idle_time = idle_time_ns;
-	do_div(idle_time, NSEC_PER_USEC);
 
-	/* Update device last_residency*/
-	dev->last_residency = (int)idle_time;
+	sched_clock_idle_wakeup_event(0);
 
-	/* Tell the scheduler how much we idled: */
-	sched_clock_idle_wakeup_event(idle_time_ns);
-
-	local_irq_enable();
 	if (cx->entry_method != ACPI_CSTATE_FFH)
 		current_thread_info()->status |= TS_POLLING;
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	return index;
 }
 
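Note the asymmetry this leaves in acpi_idle_enter_simple(): sched_clock_idle_sleep_event() is still called before entry, but the wakeup side now passes 0 because the driver no longer knows how long it idled. That appears harmless in kernels of this vintage, where the delta_ns argument is ignored; roughly (an approximation of kernel/sched/clock.c, not a verbatim copy):

	/* Approximation: delta_ns is unused here; the call merely
	 * resynchronizes the per-cpu sched_clock after idle and pets the
	 * softlockup watchdog, so passing 0 loses nothing.
	 */
	void sched_clock_idle_wakeup_event(u64 delta_ns)
	{
		sched_clock_tick();
		touch_softlockup_watchdog();
	}
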
 static int c3_cpu_count;

--- 8 unchanged lines hidden (view full) ---

  * If BM is detected, the deepest non-C3 idle state is entered instead.
  */
 static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
 	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
-	ktime_t kt1, kt2;
-	s64 idle_time_ns;
-	s64 idle_time;
 
-
 	pr = __this_cpu_read(processors);
-	dev->last_residency = 0;
 
 	if (unlikely(!pr))
 		return -EINVAL;
 
 	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
 		if (drv->safe_state_index >= 0) {
 			return drv->states[drv->safe_state_index].enter(dev,
 				drv, drv->safe_state_index);
 		} else {
-			local_irq_disable();
 			acpi_safe_halt();
-			local_irq_enable();
 			return -EBUSY;
 		}
 	}
 
-	local_irq_disable();
-
-
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
 		 * TS_POLLING-cleared state must be visible before we test
 		 * NEED_RESCHED:
 		 */
 		smp_mb();
 
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
-			local_irq_enable();
 			return -EINVAL;
 		}
 	}
 
 	acpi_unlazy_tlb(smp_processor_id());
 
 	/* Tell the scheduler that we are going deep-idle: */
 	sched_clock_idle_sleep_event();
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
 	lapic_timer_state_broadcast(pr, cx, 1);
 
-	kt1 = ktime_get_real();
 	/*
 	 * disable bus master
 	 * bm_check implies we need ARB_DIS
 	 * !bm_check implies we need cache flush
 	 * bm_control implies whether we can do ARB_DIS
 	 *
 	 * That leaves a case where bm_check is set and bm_control is
 	 * not set. In that case we cannot do much, we enter C3

--- 14 unchanged lines hidden (view full) ---

 
 	/* Re-enable bus master arbitration */
 	if (pr->flags.bm_check && pr->flags.bm_control) {
 		raw_spin_lock(&c3_lock);
 		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
 		raw_spin_unlock(&c3_lock);
 	}
-	kt2 = ktime_get_real();
-	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
-	idle_time = idle_time_ns;
-	do_div(idle_time, NSEC_PER_USEC);
 
-	/* Update device last_residency*/
-	dev->last_residency = (int)idle_time;
+	sched_clock_idle_wakeup_event(0);
 
-	/* Tell the scheduler how much we idled: */
-	sched_clock_idle_wakeup_event(idle_time_ns);
-
-	local_irq_enable();
 	if (cx->entry_method != ACPI_CSTATE_FFH)
 		current_thread_info()->status |= TS_POLLING;
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	return index;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
 	.name =		"acpi_idle",
 	.owner =	THIS_MODULE,
+	.en_core_tk_irqen = 1,
 };
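The single added line, .en_core_tk_irqen = 1, is what makes all of the removals above safe: it asks the cpuidle core to handle irq enabling and time keeping around every ->enter() call of this driver. A sketch of how the core consumes the flag, following the cpuidle core of this era (cpuidle_setup_enter_ops() is a hypothetical helper used here only to frame the selection):

	typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index);

	static cpuidle_enter_t cpuidle_enter_ops;

	/* raw path: call the driver's ->enter() directly */
	static int cpuidle_enter(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
	{
		return drv->states[index].enter(dev, drv, index);
	}

	/* wrapped path: core irq handling plus time keeping (see the
	 * cpuidle_wrap_enter() sketch after the first hunk above) */
	static int cpuidle_enter_tk(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int index)
	{
		return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
	}

	/* hypothetical helper: the path is chosen once per driver */
	static void cpuidle_setup_enter_ops(struct cpuidle_driver *drv)
	{
		cpuidle_enter_ops = drv->en_core_tk_irqen ? cpuidle_enter_tk
							  : cpuidle_enter;
	}
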
 
 /**
  * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
  * device i.e. per-cpu data
  *
  * @pr: the ACPI processor
  */

--- 317 unchanged lines hidden ---