/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction in energy consumption. Also, we
 * allow for _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;

#define reduction_pctg(cpu) \
	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))

/*
 * Emulate "per package data" using per cpu data (which should really be
 * provided elsewhere).
 *
 * Note that we can lose a CPU on CPU hot-unplug; in that case we forget the
 * state temporarily. Fortunately that's not a big issue here (I hope).
 */
static int phys_package_first_cpu(int cpu)
{
	int i;
	int id = topology_physical_package_id(cpu);

	for_each_online_cpu(i)
		if (topology_physical_package_id(i) == id)
			return i;
	return 0;
}

static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;

	if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}

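/*
 * Policy notifier: each passive cooling step clamps the policy's maximum
 * frequency by a further 20% of cpuinfo.max_freq, so step 1 leaves 80%
 * available and the deepest step (CPUFREQ_THERMAL_MAX_STEP == 3) leaves 40%.
 */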
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST)
		goto out;

	max_freq = (
	    policy->cpuinfo.max_freq *
	    (100 - reduction_pctg(policy->cpu) * 20)
	) / 100;

	cpufreq_verify_within_limits(policy, 0, max_freq);

out:
	return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};

static int cpufreq_get_max_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return reduction_pctg(cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	int i;

	if (!cpu_has_cpufreq(cpu))
		return 0;

	reduction_pctg(cpu) = state;

	/*
	 * Update all the CPUs in the same package because they all
	 * contribute to the temperature and often share the same
	 * frequency.
	 */
	for_each_online_cpu(i) {
		if (topology_physical_package_id(i) ==
		    topology_physical_package_id(cpu))
			cpufreq_update_policy(i);
	}
	return 0;
}

void acpi_thermal_cpufreq_init(void)
{
	int i;

	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
				      CPUFREQ_POLICY_NOTIFIER);
	if (!i)
		acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
	if (acpi_thermal_cpufreq_is_init)
		cpufreq_unregister_notifier
		    (&acpi_thermal_cpufreq_notifier_block,
		     CPUFREQ_POLICY_NOTIFIER);

	acpi_thermal_cpufreq_is_init = 0;
}

#else /* ! CONFIG_CPU_FREQ */

static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There exist four states according to
	 * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3.
	 */
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}

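/*
 * Cooling states 0..max_pstate are served by cpufreq frequency limiting;
 * any state above that is mapped onto ACPI T-state throttling as
 * (state - max_pstate).
 */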
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;
	int result = 0;
	int max_pstate;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);

	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}

const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};
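
/*
 * Note: this ops table is consumed outside this file; the ACPI processor
 * driver is expected to register it with the thermal framework roughly as
 * follows (illustrative sketch only, not the exact call site):
 *
 *	cdev = thermal_cooling_device_register("Processor", device,
 *					       &processor_cooling_ops);
 */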