// SPDX-License-Identifier: GPL-2.0
/*
 * A power allocator to manage temperature
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 */

#define pr_fmt(fmt) "Power allocator: " fmt

#include <linux/slab.h>
#include <linux/thermal.h>

#define CREATE_TRACE_POINTS
#include "thermal_trace_ipa.h"

#include "thermal_core.h"

#define FRAC_BITS 10
#define int_to_frac(x) ((x) << FRAC_BITS)
#define frac_to_int(x) ((x) >> FRAC_BITS)

/**
 * mul_frac() - multiply two fixed-point numbers
 * @x:	first multiplicand
 * @y:	second multiplicand
 *
 * Return: the result of multiplying two fixed-point numbers.  The
 * result is also a fixed-point number.
 */
static inline s64 mul_frac(s64 x, s64 y)
{
	return (x * y) >> FRAC_BITS;
}

/**
 * div_frac() - divide two fixed-point numbers
 * @x:	the dividend
 * @y:	the divisor
 *
 * Return: the result of dividing two fixed-point numbers.  The
 * result is also a fixed-point number.
 */
static inline s64 div_frac(s64 x, s64 y)
{
	return div_s64(x << FRAC_BITS, y);
}

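/*
 * For illustration (numbers are hypothetical): with FRAC_BITS == 10 the
 * values are Q10 fixed-point, so int_to_frac(3) == 3072 and
 * mul_frac(int_to_frac(3), int_to_frac(2)) == (3072 * 2048) >> 10 == 6144,
 * i.e. int_to_frac(6).  Likewise div_frac(int_to_frac(6), int_to_frac(2))
 * == int_to_frac(3).
 */
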
/**
 * struct power_actor - internal power information for power actor
 * @req_power:		requested power value (not weighted)
 * @max_power:		max allocatable power for this actor
 * @granted_power:	granted power for this actor
 * @extra_actor_power:	extra power that this actor can receive
 * @weighted_req_power:	weighted requested power as input to IPA
 */
struct power_actor {
	u32 req_power;
	u32 max_power;
	u32 granted_power;
	u32 extra_actor_power;
	u32 weighted_req_power;
};

/**
 * struct power_allocator_params - parameters for the power allocator governor
 * @allocated_tzp:	whether we have allocated tzp for this thermal zone and
 *			it needs to be freed on unbind
 * @update_cdevs:	whether or not to update cdevs on the next run
 * @err_integral:	accumulated error in the PID controller.
 * @prev_err:	error in the previous iteration of the PID controller.
 *		Used to calculate the derivative term.
 * @sustainable_power:	Sustainable power (heat) that this thermal zone can
 *			dissipate
 * @trip_switch_on:	first passive trip point of the thermal zone.  The
 *			governor switches on when this trip point is crossed.
 *			If the thermal zone only has one passive trip point,
 *			@trip_switch_on should be NULL.
 * @trip_max:		last passive trip point of the thermal zone.  The
 *			temperature we are controlling for.
 * @total_weight:	Sum of all thermal instances weights
 * @num_actors:		number of cooling devices supporting IPA callbacks
 * @buffer_size:	internal buffer size, to avoid runtime re-calculation
 * @power:		buffer for all power actors internal power information
 */
struct power_allocator_params {
	bool allocated_tzp;
	bool update_cdevs;
	s64 err_integral;
	s32 prev_err;
	u32 sustainable_power;
	const struct thermal_trip *trip_switch_on;
	const struct thermal_trip *trip_max;
	int total_weight;
	unsigned int num_actors;
	unsigned int buffer_size;
	struct power_actor *power;
};

static bool power_actor_is_valid(struct power_allocator_params *params,
				 struct thermal_instance *instance)
{
	return (instance->trip == params->trip_max &&
		 cdev_is_power_actor(instance->cdev));
}

/**
 * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
 * @tz: thermal zone we are operating in
 *
 * For thermal zones that don't provide a sustainable_power in their
 * thermal_zone_params, estimate one.  Calculate it using the minimum
 * power of all the cooling devices as that gives a valid value that
 * can give some degree of functionality.  For optimal performance of
 * this governor, provide a sustainable_power in the thermal zone's
 * thermal_zone_params.
 */
static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
{
	struct power_allocator_params *params = tz->governor_data;
	struct thermal_cooling_device *cdev;
	struct thermal_instance *instance;
	u32 sustainable_power = 0;
	u32 min_power;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if (!power_actor_is_valid(params, instance))
			continue;

		cdev = instance->cdev;
		if (cdev->ops->state2power(cdev, instance->upper, &min_power))
			continue;

		sustainable_power += min_power;
	}

	return sustainable_power;
}

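/*
 * For illustration (numbers are hypothetical): if two power actors report
 * 300 mW and 1200 mW at their deepest allowed cooling state
 * (instance->upper), the estimate above is 1500 mW of sustainable power.
 */
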
/**
 * estimate_pid_constants() - Estimate the constants for the PID controller
 * @tz:		thermal zone for which to estimate the constants
 * @sustainable_power:	sustainable power for the thermal zone
 * @trip_switch_on:	trip point for the switch on temperature
 * @control_temp:	target temperature for the power allocator governor
 *
 * This function is used to update the estimation of the PID
 * controller constants in struct thermal_zone_parameters.
 */
static void estimate_pid_constants(struct thermal_zone_device *tz,
				   u32 sustainable_power,
				   const struct thermal_trip *trip_switch_on,
				   int control_temp)
{
	u32 temperature_threshold = control_temp;
	s32 k_i;

	if (trip_switch_on)
		temperature_threshold -= trip_switch_on->temperature;

	/*
	 * estimate_pid_constants() tries to find appropriate default
	 * values for thermal zones that don't provide them.  If a
	 * system integrator has configured a thermal zone with two
	 * passive trip points at the same temperature, that person
	 * hasn't put any effort into setting up the thermal zone
	 * properly, so just give up.
	 */
	if (!temperature_threshold)
		return;

	tz->tzp->k_po = int_to_frac(sustainable_power) /
		temperature_threshold;

	tz->tzp->k_pu = int_to_frac(2 * sustainable_power) /
		temperature_threshold;

	k_i = tz->tzp->k_pu / 10;
	tz->tzp->k_i = k_i > 0 ? k_i : 1;

	/*
	 * The default for k_d and integral_cutoff is 0, so we can
	 * leave them as they are.
	 */
}

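/*
 * For illustration (numbers are hypothetical): with sustainable_power ==
 * 3500 mW, control_temp == 85000 mC and a switch-on trip at 55000 mC, the
 * threshold is 30000 mC, so k_po == int_to_frac(3500) / 30000 == 119,
 * k_pu == int_to_frac(7000) / 30000 == 238 and k_i == 238 / 10 == 23.
 */
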
/**
 * get_sustainable_power() - Get the right sustainable power
 * @tz:		thermal zone for which to estimate the constants
 * @params:	parameters for the power allocator governor
 * @control_temp:	target temperature for the power allocator governor
 *
 * This function is used for getting the proper sustainable power value based
 * on variables which might be updated by the user sysfs interface.  If that
 * happens, the new value is estimated and updated.  It is also used after
 * thermal zone binding, where the initial values were set to 0.
 */
static u32 get_sustainable_power(struct thermal_zone_device *tz,
				 struct power_allocator_params *params,
				 int control_temp)
{
	u32 sustainable_power;

	if (!tz->tzp->sustainable_power)
		sustainable_power = estimate_sustainable_power(tz);
	else
		sustainable_power = tz->tzp->sustainable_power;

	/* Check if it's the init value 0 or there was an update via sysfs */
	if (sustainable_power != params->sustainable_power) {
		estimate_pid_constants(tz, sustainable_power,
				       params->trip_switch_on, control_temp);

		/* Do the estimation only once and make it available in sysfs */
		tz->tzp->sustainable_power = sustainable_power;
		params->sustainable_power = sustainable_power;
	}

	return sustainable_power;
}

/**
 * pid_controller() - PID controller
 * @tz:	thermal zone we are operating in
 * @control_temp:	the target temperature in millicelsius
 * @max_allocatable_power:	maximum allocatable power for this thermal zone
 *
 * This PID controller increases the available power budget so that the
 * temperature of the thermal zone gets as close as possible to
 * @control_temp and limits the power if it exceeds it.  k_po is the
 * proportional term when we are overshooting, k_pu is the
 * proportional term when we are undershooting.  integral_cutoff is a
 * threshold below which we stop accumulating the error.  The
 * accumulated error is only valid if the requested power will make
 * the system warmer.  If the system is mostly idle, there's no point
 * in accumulating positive error.
 *
 * Return: The power budget for the next period.
 */
static u32 pid_controller(struct thermal_zone_device *tz,
			  int control_temp,
			  u32 max_allocatable_power)
{
	struct power_allocator_params *params = tz->governor_data;
	s64 p, i, d, power_range;
	s32 err, max_power_frac;
	u32 sustainable_power;

	max_power_frac = int_to_frac(max_allocatable_power);

	sustainable_power = get_sustainable_power(tz, params, control_temp);

	err = control_temp - tz->temperature;
	err = int_to_frac(err);

	/* Calculate the proportional term */
	p = mul_frac(err < 0 ? tz->tzp->k_po : tz->tzp->k_pu, err);

	/*
	 * Calculate the integral term
	 *
	 * If the error is below the cutoff, allow integration (but
	 * the integral is limited to max power).
	 */
	i = mul_frac(tz->tzp->k_i, params->err_integral);

	if (err < int_to_frac(tz->tzp->integral_cutoff)) {
		s64 i_next = i + mul_frac(tz->tzp->k_i, err);

		if (abs(i_next) < max_power_frac) {
			i = i_next;
			params->err_integral += err;
		}
	}

	/*
	 * Calculate the derivative term
	 *
	 * We do err - prev_err, so with a positive k_d, a decreasing
	 * error (i.e. driving closer to the line) results in less
	 * power being applied, slowing down the controller.
	 */
	d = mul_frac(tz->tzp->k_d, err - params->prev_err);
	d = div_frac(d, jiffies_to_msecs(tz->passive_delay_jiffies));
	params->prev_err = err;

	power_range = p + i + d;

	/* Feed-forward the known sustainable dissipatable power */
	power_range = sustainable_power + frac_to_int(power_range);

	power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);

	trace_thermal_power_allocator_pid(tz, frac_to_int(err),
					  frac_to_int(params->err_integral),
					  frac_to_int(p), frac_to_int(i),
					  frac_to_int(d), power_range);

	return power_range;
}

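/*
 * For illustration (numbers are hypothetical): with k_pu == 238 and the
 * zone 2000 mC below the target, err == int_to_frac(2000) == 2048000 and
 * p == mul_frac(238, 2048000) == 476000, which adds frac_to_int(476000)
 * == 464 mW on top of sustainable_power before clamping.
 */
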
/**
 * power_actor_set_power() - limit the maximum power a cooling device consumes
 * @cdev:	pointer to &thermal_cooling_device
 * @instance:	thermal instance to update
 * @power:	the power in milliwatts
 *
 * Set the cooling device to consume at most @power milliwatts. The limit is
 * expected to be a cap at the maximum power consumption.
 *
 * Return: 0 on success, -EINVAL if the cooling device does not
 * implement the power actor API or -E* for other failures.
 */
static int
power_actor_set_power(struct thermal_cooling_device *cdev,
		      struct thermal_instance *instance, u32 power)
{
	unsigned long state;
	int ret;

	ret = cdev->ops->power2state(cdev, power, &state);
	if (ret)
		return ret;

	instance->target = clamp_val(state, instance->lower, instance->upper);
	mutex_lock(&cdev->lock);
	__thermal_cdev_update(cdev);
	mutex_unlock(&cdev->lock);

	return 0;
}

/**
 * divvy_up_power() - divvy the allocated power between the actors
 * @power:		buffer for all power actors internal power information
 * @num_actors:		number of power actors in this thermal zone
 * @total_req_power:	sum of all weighted requested power for all actors
 * @power_range:	total allocated power
 *
 * This function divides the total allocated power (@power_range)
 * fairly between the actors.  It first tries to give each actor a
 * share of the @power_range according to how much power it requested
 * compared to the rest of the actors.  For example, if only one actor
 * requests power, then it receives all the @power_range.  If
 * three actors each request 1mW, each receives a third of the
 * @power_range.
 *
 * If any actor received more than its maximum power, then that
 * surplus is re-divvied among the actors based on how far they are
 * from their respective maximums.
 */
static void divvy_up_power(struct power_actor *power, int num_actors,
			   u32 total_req_power, u32 power_range)
{
	u32 capped_extra_power = 0;
	u32 extra_power = 0;
	int i;

	/*
	 * Prevent division by 0 if none of the actors request power.
	 */
	if (!total_req_power)
		total_req_power = 1;

	for (i = 0; i < num_actors; i++) {
		struct power_actor *pa = &power[i];
		u64 req_range = (u64)pa->req_power * power_range;

		pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
							  total_req_power);

		if (pa->granted_power > pa->max_power) {
			extra_power += pa->granted_power - pa->max_power;
			pa->granted_power = pa->max_power;
		}

		pa->extra_actor_power = pa->max_power - pa->granted_power;
		capped_extra_power += pa->extra_actor_power;
	}

	if (!extra_power || !capped_extra_power)
		return;

	/*
	 * Re-divvy the reclaimed extra among actors based on
	 * how far they are from the max
	 */
	extra_power = min(extra_power, capped_extra_power);

	for (i = 0; i < num_actors; i++) {
		struct power_actor *pa = &power[i];
		u64 extra_range = pa->extra_actor_power;

		extra_range *= extra_power;
		pa->granted_power += DIV_ROUND_CLOSEST_ULL(extra_range,
						capped_extra_power);
	}
}

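/*
 * For illustration (numbers are hypothetical): with power_range == 3000 mW
 * and two actors, A (req 2000 mW, max 1000 mW) and B (req 1000 mW, max
 * 4000 mW), the first pass grants A 2000 mW and B 1000 mW.  A is capped at
 * 1000 mW, so the reclaimed 1000 mW is re-divvied to B, which ends up with
 * 2000 mW.
 */
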
static void allocate_power(struct thermal_zone_device *tz, int control_temp)
{
	struct power_allocator_params *params = tz->governor_data;
	unsigned int num_actors = params->num_actors;
	struct power_actor *power = params->power;
	struct thermal_cooling_device *cdev;
	struct thermal_instance *instance;
	u32 total_weighted_req_power = 0;
	u32 max_allocatable_power = 0;
	u32 total_granted_power = 0;
	u32 total_req_power = 0;
	u32 power_range, weight;
	int i = 0, ret;

	if (!num_actors)
		return;

	/* Clean all buffers for new power estimations */
	memset(power, 0, params->buffer_size);

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		struct power_actor *pa = &power[i];

		if (!power_actor_is_valid(params, instance))
			continue;

		cdev = instance->cdev;

		ret = cdev->ops->get_requested_power(cdev, &pa->req_power);
		if (ret)
			continue;

		if (!params->total_weight)
			weight = 1 << FRAC_BITS;
		else
			weight = instance->weight;

		pa->weighted_req_power = frac_to_int(weight * pa->req_power);

		ret = cdev->ops->state2power(cdev, instance->lower,
					     &pa->max_power);
		if (ret)
			continue;

		total_req_power += pa->req_power;
		max_allocatable_power += pa->max_power;
		total_weighted_req_power += pa->weighted_req_power;

		i++;
	}

	power_range = pid_controller(tz, control_temp, max_allocatable_power);

	divvy_up_power(power, num_actors, total_weighted_req_power,
		       power_range);

	i = 0;
	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		struct power_actor *pa = &power[i];

		if (!power_actor_is_valid(params, instance))
			continue;

		power_actor_set_power(instance->cdev, instance,
				      pa->granted_power);
		total_granted_power += pa->granted_power;

		trace_thermal_power_actor(tz, i, pa->req_power,
					  pa->granted_power);
		i++;
	}

	trace_thermal_power_allocator(tz, total_req_power, total_granted_power,
				      num_actors, power_range,
				      max_allocatable_power, tz->temperature,
				      control_temp - tz->temperature);
}

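/*
 * For illustration (numbers are hypothetical): instance weights used by
 * allocate_power() are Q10 fixed-point, so a weight of 1 << FRAC_BITS (the
 * default when total_weight is 0) leaves the requested power unchanged,
 * while a weight of 512 halves an actor's weighted_req_power, e.g.
 * frac_to_int(512 * 1000) == 500 for a 1000 mW request.
 */
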
/**
 * get_governor_trips() - get the two trip points that are key for this governor
 * @tz:	thermal zone to operate on
 * @params:	pointer to private data for this governor
 *
 * The power allocator governor works optimally with two trip points:
 * a "switch on" trip point and a "maximum desired temperature".  These
 * are defined as the first and last passive trip points.
 *
 * If there is only one trip point, then that's considered to be the
 * "maximum desired temperature" trip point and the governor is always
 * on.  If there are no passive or active trip points, then the
 * governor won't do anything.  In fact, its throttle function
 * won't be called at all.
 */
static void get_governor_trips(struct thermal_zone_device *tz,
			       struct power_allocator_params *params)
{
	const struct thermal_trip *first_passive = NULL;
	const struct thermal_trip *last_passive = NULL;
	const struct thermal_trip *last_active = NULL;
	const struct thermal_trip_desc *td;

	for_each_trip_desc(tz, td) {
		const struct thermal_trip *trip = &td->trip;

		switch (trip->type) {
		case THERMAL_TRIP_PASSIVE:
			if (!first_passive) {
				first_passive = trip;
				break;
			}
			last_passive = trip;
			break;
		case THERMAL_TRIP_ACTIVE:
			last_active = trip;
			break;
		default:
			break;
		}
	}

	if (last_passive) {
		params->trip_switch_on = first_passive;
		params->trip_max = last_passive;
	} else if (first_passive) {
		params->trip_switch_on = NULL;
		params->trip_max = first_passive;
	} else {
		params->trip_switch_on = NULL;
		params->trip_max = last_active;
	}
}

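/*
 * For illustration (temperatures are hypothetical): a zone with passive
 * trips at 55000 mC and 85000 mC gets trip_switch_on == the 55000 mC trip
 * and trip_max == the 85000 mC trip; with a single passive trip,
 * trip_switch_on stays NULL and the governor is always on.
 */
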
static void reset_pid_controller(struct power_allocator_params *params)
{
	params->err_integral = 0;
	params->prev_err = 0;
}

static void allow_maximum_power(struct thermal_zone_device *tz)
{
	struct power_allocator_params *params = tz->governor_data;
	struct thermal_cooling_device *cdev;
	struct thermal_instance *instance;
	u32 req_power;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if (!power_actor_is_valid(params, instance))
			continue;

		cdev = instance->cdev;

		instance->target = 0;
		mutex_lock(&cdev->lock);
		/*
		 * Call this to update the cooling device's local stats and
		 * avoid periods of dozens of seconds when those have not
		 * been maintained.
		 */
		cdev->ops->get_requested_power(cdev, &req_power);

		if (params->update_cdevs)
			__thermal_cdev_update(cdev);

		mutex_unlock(&cdev->lock);
	}
}

/**
 * check_power_actors() - Check all cooling devices and warn when they are
 *			not power actors
 * @tz:		thermal zone to operate on
 * @params:	power allocator private data
 *
 * Check all cooling devices in the @tz and warn whenever one of them is
 * missing the power actor API. The warning should help to investigate the
 * issue, which could be e.g. a missing Energy Model for a given device.
 *
 * If all of the cooling devices currently attached to @tz implement the power
 * actor API, return the number of them (which may be 0, because some cooling
 * devices may be attached later). Otherwise, return -EINVAL.
 */
static int check_power_actors(struct thermal_zone_device *tz,
			      struct power_allocator_params *params)
{
	struct thermal_instance *instance;
	int ret = 0;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if (instance->trip != params->trip_max)
			continue;

		if (!cdev_is_power_actor(instance->cdev)) {
			dev_warn(&tz->device, "power_allocator: %s is not a power actor\n",
				 instance->cdev->type);
			return -EINVAL;
		}
		ret++;
	}

	return ret;
}

static int allocate_actors_buffer(struct power_allocator_params *params,
				  int num_actors)
{
	int ret;

	kfree(params->power);

	/* There might be no cooling devices yet. */
	if (!num_actors) {
		ret = 0;
		goto clean_state;
	}

	params->power = kcalloc(num_actors, sizeof(struct power_actor),
				GFP_KERNEL);
	if (!params->power) {
		ret = -ENOMEM;
		goto clean_state;
	}

	params->num_actors = num_actors;
	params->buffer_size = num_actors * sizeof(struct power_actor);

	return 0;

clean_state:
	params->num_actors = 0;
	params->buffer_size = 0;
	params->power = NULL;
	return ret;
}

static void power_allocator_update_tz(struct thermal_zone_device *tz,
				      enum thermal_notify_event reason)
{
	struct power_allocator_params *params = tz->governor_data;
	struct thermal_instance *instance;
	int num_actors = 0;

	switch (reason) {
	case THERMAL_TZ_BIND_CDEV:
	case THERMAL_TZ_UNBIND_CDEV:
		list_for_each_entry(instance, &tz->thermal_instances, tz_node)
			if (power_actor_is_valid(params, instance))
				num_actors++;

		if (num_actors == params->num_actors)
			return;

		allocate_actors_buffer(params, num_actors);
		break;
	case THERMAL_INSTANCE_WEIGHT_CHANGED:
		params->total_weight = 0;
		list_for_each_entry(instance, &tz->thermal_instances, tz_node)
			if (power_actor_is_valid(params, instance))
				params->total_weight += instance->weight;
		break;
	default:
		break;
	}
}

/**
 * power_allocator_bind() - bind the power_allocator governor to a thermal zone
 * @tz:	thermal zone to bind it to
 *
 * Initialize the PID controller parameters and bind it to the thermal
 * zone.
 *
 * Return: 0 on success, or -ENOMEM if we ran out of memory, or -EINVAL
 * when there are unsupported cooling devices in the @tz.
 */
static int power_allocator_bind(struct thermal_zone_device *tz)
{
	struct power_allocator_params *params;
	int ret;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	get_governor_trips(tz, params);

	ret = check_power_actors(tz, params);
	if (ret < 0) {
		dev_warn(&tz->device, "power_allocator: binding failed\n");
		kfree(params);
		return ret;
	}

	ret = allocate_actors_buffer(params, ret);
	if (ret) {
		dev_warn(&tz->device, "power_allocator: allocation failed\n");
		kfree(params);
		return ret;
	}

	if (!tz->tzp) {
		tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
		if (!tz->tzp) {
			ret = -ENOMEM;
			goto free_params;
		}

		params->allocated_tzp = true;
	}

	if (!tz->tzp->sustainable_power)
		dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
	else
		params->sustainable_power = tz->tzp->sustainable_power;

	if (params->trip_max)
		estimate_pid_constants(tz, tz->tzp->sustainable_power,
				       params->trip_switch_on,
				       params->trip_max->temperature);

	reset_pid_controller(params);

	tz->governor_data = params;

	return 0;

free_params:
	kfree(params->power);
	kfree(params);

	return ret;
}

static void power_allocator_unbind(struct thermal_zone_device *tz)
{
	struct power_allocator_params *params = tz->governor_data;

	dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);

	if (params->allocated_tzp) {
		kfree(tz->tzp);
		tz->tzp = NULL;
	}

	kfree(params->power);
	kfree(tz->governor_data);
	tz->governor_data = NULL;
}

static void power_allocator_manage(struct thermal_zone_device *tz)
{
	struct power_allocator_params *params = tz->governor_data;
	const struct thermal_trip *trip = params->trip_switch_on;

	lockdep_assert_held(&tz->lock);

	if (trip && tz->temperature < trip->temperature) {
		reset_pid_controller(params);
		allow_maximum_power(tz);
		params->update_cdevs = false;
		return;
	}

	if (!params->trip_max)
		return;

	allocate_power(tz, params->trip_max->temperature);
	params->update_cdevs = true;
}

static struct thermal_governor thermal_gov_power_allocator = {
	.name		= "power_allocator",
	.bind_to_tz	= power_allocator_bind,
	.unbind_from_tz	= power_allocator_unbind,
	.manage		= power_allocator_manage,
	.update_tz	= power_allocator_update_tz,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_power_allocator);