xref: /linux/drivers/thermal/intel/intel_hfi.c (revision a36e9f5cfe9eb3a1dce8769c7058251c42705357)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Hardware Feedback Interface Driver
4  *
5  * Copyright (c) 2021, Intel Corporation.
6  *
7  * Authors: Aubrey Li <aubrey.li@linux.intel.com>
8  *          Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
9  *
10  *
 * The Hardware Feedback Interface provides performance and energy efficiency
 * capability information for each CPU in the system. Depending on the processor
13  * model, hardware may periodically update these capabilities as a result of
14  * changes in the operating conditions (e.g., power limits or thermal
15  * constraints). On other processor models, there is a single HFI update
16  * at boot.
17  *
18  * This file provides functionality to process HFI updates and relay these
19  * updates to userspace.
20  */
21 
22 #define pr_fmt(fmt)  "intel-hfi: " fmt
23 
24 #include <linux/bitops.h>
25 #include <linux/cpufeature.h>
26 #include <linux/cpumask.h>
27 #include <linux/delay.h>
28 #include <linux/gfp.h>
29 #include <linux/io.h>
30 #include <linux/kernel.h>
31 #include <linux/math.h>
32 #include <linux/mutex.h>
33 #include <linux/percpu-defs.h>
34 #include <linux/printk.h>
35 #include <linux/processor.h>
36 #include <linux/slab.h>
37 #include <linux/spinlock.h>
38 #include <linux/suspend.h>
39 #include <linux/string.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/topology.h>
42 #include <linux/workqueue.h>
43 
44 #include <asm/msr.h>
45 
46 #include "intel_hfi.h"
47 #include "thermal_interrupt.h"
48 
49 #include "../thermal_netlink.h"
50 
/* Hardware Feedback Interface MSR configuration bits */
#define HW_FEEDBACK_PTR_VALID_BIT		BIT(0)	/* MSR_IA32_HW_FEEDBACK_PTR: table address valid */
#define HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT	BIT(0)	/* MSR_IA32_HW_FEEDBACK_CONFIG: enable HFI */

/* CPUID detection and enumeration definitions for HFI */

#define CPUID_HFI_LEAF 6
58 
/*
 * union hfi_capabilities - HFI capability bits enumerated in CPUID leaf 6 EDX
 * @split.performance:		Performance capability reporting is supported
 * @split.energy_efficiency:	Energy efficiency capability reporting is
 *				supported
 * @bits:			All capability bits as a single byte
 */
union hfi_capabilities {
	struct {
		u8	performance:1;
		u8	energy_efficiency:1;
		u8	__reserved:6;
	} split;
	u8 bits;
};
67 
/*
 * union cpuid6_edx - Layout of CPUID leaf 6 EDX for HFI enumeration
 * @split.capabilities:	Supported HFI capabilities
 * @split.table_pages:	Size of the HFI table in 4KB pages, minus one
 *			(see hfi_parse_features())
 * @split.index:	Row of the executing logical processor in the HFI table
 * @full:		Raw register value
 */
union cpuid6_edx {
	struct {
		union hfi_capabilities	capabilities;
		u32			table_pages:4;
		u32			__reserved:4;
		s32			index:16;
	} split;
	u32 full;
};
77 
/**
 * struct hfi_cpu_data - HFI capabilities per CPU
 * @perf_cap:		Performance capability
 * @ee_cap:		Energy efficiency capability
 *
 * Capabilities of a logical processor in the HFI table. These capabilities are
 * unitless.
 *
 * Packed because rows of the hardware table are laid out contiguously with a
 * stride of hfi_features.cpu_stride bytes.
 */
struct hfi_cpu_data {
	u8	perf_cap;
	u8	ee_cap;
} __packed;
90 
/**
 * struct hfi_hdr - Header of the HFI table
 * @perf_updated:	Hardware updated performance capabilities
 * @ee_updated:		Hardware updated energy efficiency capabilities
 *
 * Properties of the data in an HFI table.
 *
 * Packed to match the hardware layout of the table header.
 */
struct hfi_hdr {
	u8	perf_updated;
	u8	ee_updated;
} __packed;
102 
/**
 * struct hfi_instance - Representation of an HFI instance (i.e., a table)
 * @local_table:	Base of the local copy of the HFI table
 * @timestamp:		Timestamp of the last update of the local table.
 *			Located at the base of the local table.
 * @hdr:		Base address of the header of the local table
 * @data:		Base address of the data of the local table
 * @cpus:		CPUs represented in this HFI table instance
 * @hw_table:		Pointer to the HFI table of this instance
 * @update_work:	Delayed work to process HFI updates
 * @table_lock:		Lock to protect accesses to the table of this instance
 * @event_lock:		Lock to process HFI interrupts
 *
 * A set of parameters to parse and navigate a specific HFI table.
 *
 * @local_table and @timestamp alias the same address: the 64-bit timestamp
 * occupies the first 8 bytes of the table.
 */
struct hfi_instance {
	union {
		void			*local_table;
		u64			*timestamp;
	};
	void			*hdr;
	void			*data;
	cpumask_var_t		cpus;
	void			*hw_table;
	struct delayed_work	update_work;
	raw_spinlock_t		table_lock;
	raw_spinlock_t		event_lock;
};
131 
/**
 * struct hfi_features - Supported HFI features
 * @nr_table_pages:	Size of the HFI table in 4KB pages
 * @cpu_stride:		Stride size to locate the capability data of a logical
 *			processor within the table (i.e., row stride)
 * @hdr_size:		Size of the table header
 *
 * Parameters and supported features that are common to all HFI instances.
 * @cpu_stride and @hdr_size are multiples of 8 bytes (see
 * hfi_parse_features()).
 */
struct hfi_features {
	size_t		nr_table_pages;
	unsigned int	cpu_stride;
	unsigned int	hdr_size;
};
146 
/**
 * struct hfi_cpu_info - Per-CPU attributes to consume HFI data
 * @index:		Row of this CPU in its HFI table (-1 if not yet known)
 * @hfi_instance:	Attributes of the HFI table to which this CPU belongs
 *
 * Parameters to link a logical processor to an HFI table and a row within it.
 */
struct hfi_cpu_info {
	s16			index;
	struct hfi_instance	*hfi_instance;
};
158 
/* Per-CPU HFI attributes; index -1 means "not yet initialized". */
static DEFINE_PER_CPU(struct hfi_cpu_info, hfi_cpu_info) = { .index = -1 };

static int max_hfi_instances;	/* One HFI instance per physical package. */
static int hfi_clients_nr;	/* Thermal netlink clients; HFI runs only when > 0. */
static struct hfi_instance *hfi_instances;

static struct hfi_features hfi_features;
/* Serializes CPU hotplug, HFI update processing and client (un)binds. */
static DEFINE_MUTEX(hfi_instance_lock);

static struct workqueue_struct *hfi_updates_wq;
/* Delay before relaying an HFI update to user space. */
#define HFI_UPDATE_DELAY_MS		100
/* Maximum CPU capabilities carried in a single thermal netlink event. */
#define HFI_THERMNL_CAPS_PER_EVENT	64
171 
172 static void get_hfi_caps(struct hfi_instance *hfi_instance,
173 			 struct thermal_genl_cpu_caps *cpu_caps)
174 {
175 	int cpu, i = 0;
176 
177 	raw_spin_lock_irq(&hfi_instance->table_lock);
178 	for_each_cpu(cpu, hfi_instance->cpus) {
179 		struct hfi_cpu_data *caps;
180 		s16 index;
181 
182 		index = per_cpu(hfi_cpu_info, cpu).index;
183 		caps = hfi_instance->data + index * hfi_features.cpu_stride;
184 		cpu_caps[i].cpu = cpu;
185 
186 		/*
187 		 * Scale performance and energy efficiency to
188 		 * the [0, 1023] interval that thermal netlink uses.
189 		 */
190 		cpu_caps[i].performance = caps->perf_cap << 2;
191 		cpu_caps[i].efficiency = caps->ee_cap << 2;
192 
193 		++i;
194 	}
195 	raw_spin_unlock_irq(&hfi_instance->table_lock);
196 }
197 
198 /*
199  * Call update_capabilities() when there are changes in the HFI table.
200  */
201 static void update_capabilities(struct hfi_instance *hfi_instance)
202 {
203 	struct thermal_genl_cpu_caps *cpu_caps;
204 	int i = 0, cpu_count;
205 
206 	/* CPUs may come online/offline while processing an HFI update. */
207 	mutex_lock(&hfi_instance_lock);
208 
209 	cpu_count = cpumask_weight(hfi_instance->cpus);
210 
211 	/* No CPUs to report in this hfi_instance. */
212 	if (!cpu_count)
213 		goto out;
214 
215 	cpu_caps = kcalloc(cpu_count, sizeof(*cpu_caps), GFP_KERNEL);
216 	if (!cpu_caps)
217 		goto out;
218 
219 	get_hfi_caps(hfi_instance, cpu_caps);
220 
221 	if (cpu_count < HFI_THERMNL_CAPS_PER_EVENT)
222 		goto last_cmd;
223 
224 	/* Process complete chunks of HFI_THERMNL_CAPS_PER_EVENT capabilities. */
225 	for (i = 0;
226 	     (i + HFI_THERMNL_CAPS_PER_EVENT) <= cpu_count;
227 	     i += HFI_THERMNL_CAPS_PER_EVENT)
228 		thermal_genl_cpu_capability_event(HFI_THERMNL_CAPS_PER_EVENT,
229 						  &cpu_caps[i]);
230 
231 	cpu_count = cpu_count - i;
232 
233 last_cmd:
234 	/* Process the remaining capabilities if any. */
235 	if (cpu_count)
236 		thermal_genl_cpu_capability_event(cpu_count, &cpu_caps[i]);
237 
238 	kfree(cpu_caps);
239 out:
240 	mutex_unlock(&hfi_instance_lock);
241 }
242 
243 static void hfi_update_work_fn(struct work_struct *work)
244 {
245 	struct hfi_instance *hfi_instance;
246 
247 	hfi_instance = container_of(to_delayed_work(work), struct hfi_instance,
248 				    update_work);
249 
250 	update_capabilities(hfi_instance);
251 }
252 
253 void intel_hfi_process_event(__u64 pkg_therm_status_msr_val)
254 {
255 	struct hfi_instance *hfi_instance;
256 	int cpu = smp_processor_id();
257 	struct hfi_cpu_info *info;
258 	u64 new_timestamp, msr, hfi;
259 
260 	if (!pkg_therm_status_msr_val)
261 		return;
262 
263 	info = &per_cpu(hfi_cpu_info, cpu);
264 	if (!info)
265 		return;
266 
267 	/*
268 	 * A CPU is linked to its HFI instance before the thermal vector in the
269 	 * local APIC is unmasked. Hence, info->hfi_instance cannot be NULL
270 	 * when receiving an HFI event.
271 	 */
272 	hfi_instance = info->hfi_instance;
273 	if (unlikely(!hfi_instance)) {
274 		pr_debug("Received event on CPU %d but instance was null", cpu);
275 		return;
276 	}
277 
278 	/*
279 	 * On most systems, all CPUs in the package receive a package-level
280 	 * thermal interrupt when there is an HFI update. It is sufficient to
281 	 * let a single CPU to acknowledge the update and queue work to
282 	 * process it. The remaining CPUs can resume their work.
283 	 */
284 	if (!raw_spin_trylock(&hfi_instance->event_lock))
285 		return;
286 
287 	rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr);
288 	hfi = msr & PACKAGE_THERM_STATUS_HFI_UPDATED;
289 	if (!hfi) {
290 		raw_spin_unlock(&hfi_instance->event_lock);
291 		return;
292 	}
293 
294 	/*
295 	 * Ack duplicate update. Since there is an active HFI
296 	 * status from HW, it must be a new event, not a case
297 	 * where a lagging CPU entered the locked region.
298 	 */
299 	new_timestamp = *(u64 *)hfi_instance->hw_table;
300 	if (*hfi_instance->timestamp == new_timestamp) {
301 		thermal_clear_package_intr_status(PACKAGE_LEVEL, PACKAGE_THERM_STATUS_HFI_UPDATED);
302 		raw_spin_unlock(&hfi_instance->event_lock);
303 		return;
304 	}
305 
306 	raw_spin_lock(&hfi_instance->table_lock);
307 
308 	/*
309 	 * Copy the updated table into our local copy. This includes the new
310 	 * timestamp.
311 	 */
312 	memcpy(hfi_instance->local_table, hfi_instance->hw_table,
313 	       hfi_features.nr_table_pages << PAGE_SHIFT);
314 
315 	/*
316 	 * Let hardware know that we are done reading the HFI table and it is
317 	 * free to update it again.
318 	 */
319 	thermal_clear_package_intr_status(PACKAGE_LEVEL, PACKAGE_THERM_STATUS_HFI_UPDATED);
320 
321 	raw_spin_unlock(&hfi_instance->table_lock);
322 	raw_spin_unlock(&hfi_instance->event_lock);
323 
324 	queue_delayed_work(hfi_updates_wq, &hfi_instance->update_work,
325 			   msecs_to_jiffies(HFI_UPDATE_DELAY_MS));
326 }
327 
328 static void init_hfi_cpu_index(struct hfi_cpu_info *info)
329 {
330 	union cpuid6_edx edx;
331 
332 	/* Do not re-read @cpu's index if it has already been initialized. */
333 	if (info->index > -1)
334 		return;
335 
336 	edx.full = cpuid_edx(CPUID_HFI_LEAF);
337 	info->index = edx.split.index;
338 }
339 
340 /*
341  * The format of the HFI table depends on the number of capabilities that the
342  * hardware supports. Keep a data structure to navigate the table.
343  */
344 static void init_hfi_instance(struct hfi_instance *hfi_instance)
345 {
346 	/* The HFI header is below the time-stamp. */
347 	hfi_instance->hdr = hfi_instance->local_table +
348 			    sizeof(*hfi_instance->timestamp);
349 
350 	/* The HFI data starts below the header. */
351 	hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
352 }
353 
354 /* Caller must hold hfi_instance_lock. */
355 static void hfi_enable(void)
356 {
357 	u64 msr_val;
358 
359 	rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
360 	msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
361 	wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
362 }
363 
364 static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
365 {
366 	phys_addr_t hw_table_pa;
367 	u64 msr_val;
368 
369 	hw_table_pa = virt_to_phys(hfi_instance->hw_table);
370 	msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
371 	wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
372 }
373 
374 /* Caller must hold hfi_instance_lock. */
375 static void hfi_disable(void)
376 {
377 	u64 msr_val;
378 	int i;
379 
380 	rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
381 	msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
382 	wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
383 
384 	/*
385 	 * Wait for hardware to acknowledge the disabling of HFI. Some
386 	 * processors may not do it. Wait for ~2ms. This is a reasonable
387 	 * time for hardware to complete any pending actions on the HFI
388 	 * memory.
389 	 */
390 	for (i = 0; i < 2000; i++) {
391 		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
392 		if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
393 			break;
394 
395 		udelay(1);
396 		cpu_relax();
397 	}
398 }
399 
400 /**
401  * intel_hfi_online() - Enable HFI on @cpu
402  * @cpu:	CPU in which the HFI will be enabled
403  *
404  * Enable the HFI to be used in @cpu. The HFI is enabled at the package
405  * level. The first CPU in the package to come online does the full HFI
406  * initialization. Subsequent CPUs will just link themselves to the HFI
407  * instance of their package.
408  *
409  * This function is called before enabling the thermal vector in the local APIC
410  * in order to ensure that @cpu has an associated HFI instance when it receives
411  * an HFI event.
412  */
413 void intel_hfi_online(unsigned int cpu)
414 {
415 	struct hfi_instance *hfi_instance;
416 	struct hfi_cpu_info *info;
417 	u16 pkg_id;
418 
419 	/* Nothing to do if hfi_instances are missing. */
420 	if (!hfi_instances)
421 		return;
422 
423 	/*
424 	 * Link @cpu to the HFI instance of its package. It does not
425 	 * matter whether the instance has been initialized.
426 	 */
427 	info = &per_cpu(hfi_cpu_info, cpu);
428 	pkg_id = topology_logical_package_id(cpu);
429 	hfi_instance = info->hfi_instance;
430 	if (!hfi_instance) {
431 		if (pkg_id >= max_hfi_instances)
432 			return;
433 
434 		hfi_instance = &hfi_instances[pkg_id];
435 		info->hfi_instance = hfi_instance;
436 	}
437 
438 	init_hfi_cpu_index(info);
439 
440 	/*
441 	 * Now check if the HFI instance of the package of @cpu has been
442 	 * initialized (by checking its header). In such case, all we have to
443 	 * do is to add @cpu to this instance's cpumask and enable the instance
444 	 * if needed.
445 	 */
446 	mutex_lock(&hfi_instance_lock);
447 	if (hfi_instance->hdr)
448 		goto enable;
449 
450 	/*
451 	 * Hardware is programmed with the physical address of the first page
452 	 * frame of the table. Hence, the allocated memory must be page-aligned.
453 	 *
454 	 * Some processors do not forget the initial address of the HFI table
455 	 * even after having been reprogrammed. Keep using the same pages. Do
456 	 * not free them.
457 	 */
458 	hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
459 						   GFP_KERNEL | __GFP_ZERO);
460 	if (!hfi_instance->hw_table)
461 		goto unlock;
462 
463 	/*
464 	 * Allocate memory to keep a local copy of the table that
465 	 * hardware generates.
466 	 */
467 	hfi_instance->local_table = kzalloc(hfi_features.nr_table_pages << PAGE_SHIFT,
468 					    GFP_KERNEL);
469 	if (!hfi_instance->local_table)
470 		goto free_hw_table;
471 
472 	init_hfi_instance(hfi_instance);
473 
474 	INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
475 	raw_spin_lock_init(&hfi_instance->table_lock);
476 	raw_spin_lock_init(&hfi_instance->event_lock);
477 
478 enable:
479 	cpumask_set_cpu(cpu, hfi_instance->cpus);
480 
481 	/*
482 	 * Enable this HFI instance if this is its first online CPU and
483 	 * there are user-space clients of thermal events.
484 	 */
485 	if (cpumask_weight(hfi_instance->cpus) == 1 && hfi_clients_nr > 0) {
486 		hfi_set_hw_table(hfi_instance);
487 		hfi_enable();
488 	}
489 
490 unlock:
491 	mutex_unlock(&hfi_instance_lock);
492 	return;
493 
494 free_hw_table:
495 	free_pages_exact(hfi_instance->hw_table, hfi_features.nr_table_pages);
496 	goto unlock;
497 }
498 
499 /**
500  * intel_hfi_offline() - Disable HFI on @cpu
501  * @cpu:	CPU in which the HFI will be disabled
502  *
503  * Remove @cpu from those covered by its HFI instance.
504  *
505  * On some processors, hardware remembers previous programming settings even
506  * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the
507  * package of @cpu are offline. See note in intel_hfi_online().
508  */
509 void intel_hfi_offline(unsigned int cpu)
510 {
511 	struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu);
512 	struct hfi_instance *hfi_instance;
513 
514 	/*
515 	 * Check if @cpu as an associated, initialized (i.e., with a non-NULL
516 	 * header). Also, HFI instances are only initialized if X86_FEATURE_HFI
517 	 * is present.
518 	 */
519 	hfi_instance = info->hfi_instance;
520 	if (!hfi_instance)
521 		return;
522 
523 	if (!hfi_instance->hdr)
524 		return;
525 
526 	mutex_lock(&hfi_instance_lock);
527 	cpumask_clear_cpu(cpu, hfi_instance->cpus);
528 
529 	if (!cpumask_weight(hfi_instance->cpus))
530 		hfi_disable();
531 
532 	mutex_unlock(&hfi_instance_lock);
533 }
534 
535 static __init int hfi_parse_features(void)
536 {
537 	unsigned int nr_capabilities;
538 	union cpuid6_edx edx;
539 
540 	if (!boot_cpu_has(X86_FEATURE_HFI))
541 		return -ENODEV;
542 
543 	/*
544 	 * If we are here we know that CPUID_HFI_LEAF exists. Parse the
545 	 * supported capabilities and the size of the HFI table.
546 	 */
547 	edx.full = cpuid_edx(CPUID_HFI_LEAF);
548 
549 	if (!edx.split.capabilities.split.performance) {
550 		pr_debug("Performance reporting not supported! Not using HFI\n");
551 		return -ENODEV;
552 	}
553 
554 	/*
555 	 * The number of supported capabilities determines the number of
556 	 * columns in the HFI table. Exclude the reserved bits.
557 	 */
558 	edx.split.capabilities.split.__reserved = 0;
559 	nr_capabilities = hweight8(edx.split.capabilities.bits);
560 
561 	/* The number of 4KB pages required by the table */
562 	hfi_features.nr_table_pages = edx.split.table_pages + 1;
563 
564 	/*
565 	 * The header contains change indications for each supported feature.
566 	 * The size of the table header is rounded up to be a multiple of 8
567 	 * bytes.
568 	 */
569 	hfi_features.hdr_size = DIV_ROUND_UP(nr_capabilities, 8) * 8;
570 
571 	/*
572 	 * Data of each logical processor is also rounded up to be a multiple
573 	 * of 8 bytes.
574 	 */
575 	hfi_features.cpu_stride = DIV_ROUND_UP(nr_capabilities, 8) * 8;
576 
577 	return 0;
578 }
579 
/*
 * If concurrency is not prevented by other means, the HFI enable/disable
 * routines must be called under hfi_instance_lock.
 *
 * @ptr is the struct hfi_instance to program into hardware; smp_call_func_t
 * forces the void pointer signature.
 */
static void hfi_enable_instance(void *ptr)
{
	hfi_set_hw_table(ptr);
	hfi_enable();
}
589 
/* @ptr is unused; the smp_call_func_t signature requires it. */
static void hfi_disable_instance(void *ptr)
{
	hfi_disable();
}
594 
595 static void hfi_syscore_resume(void)
596 {
597 	/* This code runs only on the boot CPU. */
598 	struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
599 	struct hfi_instance *hfi_instance = info->hfi_instance;
600 
601 	/* No locking needed. There is no concurrency with CPU online. */
602 	if (hfi_clients_nr > 0)
603 		hfi_enable_instance(hfi_instance);
604 }
605 
/* Disable HFI before suspending; hfi_syscore_resume() re-enables it. */
static int hfi_syscore_suspend(void)
{
	/* No locking needed. There is no concurrency with CPU offline. */
	hfi_disable();

	return 0;
}
613 
/* Turn HFI off across suspend/resume transitions. */
static struct syscore_ops hfi_pm_ops = {
	.resume = hfi_syscore_resume,
	.suspend = hfi_syscore_suspend,
};
618 
619 static int hfi_thermal_notify(struct notifier_block *nb, unsigned long state,
620 			      void *_notify)
621 {
622 	struct thermal_genl_notify *notify = _notify;
623 	struct hfi_instance *hfi_instance;
624 	smp_call_func_t func = NULL;
625 	unsigned int cpu;
626 	int i;
627 
628 	if (notify->mcgrp != THERMAL_GENL_EVENT_GROUP)
629 		return NOTIFY_DONE;
630 
631 	if (state != THERMAL_NOTIFY_BIND && state != THERMAL_NOTIFY_UNBIND)
632 		return NOTIFY_DONE;
633 
634 	mutex_lock(&hfi_instance_lock);
635 
636 	switch (state) {
637 	case THERMAL_NOTIFY_BIND:
638 		if (++hfi_clients_nr == 1)
639 			func = hfi_enable_instance;
640 		break;
641 	case THERMAL_NOTIFY_UNBIND:
642 		if (--hfi_clients_nr == 0)
643 			func = hfi_disable_instance;
644 		break;
645 	}
646 
647 	if (!func)
648 		goto out;
649 
650 	for (i = 0; i < max_hfi_instances; i++) {
651 		hfi_instance = &hfi_instances[i];
652 		if (cpumask_empty(hfi_instance->cpus))
653 			continue;
654 
655 		cpu = cpumask_any(hfi_instance->cpus);
656 		smp_call_function_single(cpu, func, hfi_instance, true);
657 	}
658 
659 out:
660 	mutex_unlock(&hfi_instance_lock);
661 
662 	return NOTIFY_OK;
663 }
664 
/* Notifier for thermal netlink BIND/UNBIND events. */
static struct notifier_block hfi_thermal_nb = {
	.notifier_call = hfi_thermal_notify,
};
668 
/**
 * intel_hfi_init() - Initialize the HFI subsystem
 *
 * Parse the HFI features enumerated in CPUID, allocate one hfi_instance per
 * physical package, create the update workqueue, and register the thermal
 * netlink notifier and suspend/resume hooks. On any failure, tear down what
 * was allocated and leave hfi_instances NULL so the CPU hotplug callbacks
 * become no-ops.
 */
void __init intel_hfi_init(void)
{
	struct hfi_instance *hfi_instance;
	int i, j;

	if (hfi_parse_features())
		return;

	/*
	 * Note: HFI resources are managed at the physical package scope.
	 * There could be platforms that enumerate packages as Linux dies.
	 * Special handling would be needed if this happens on an HFI-capable
	 * platform.
	 */
	max_hfi_instances = topology_max_packages();

	/*
	 * This allocation may fail. CPU hotplug callbacks must check
	 * for a null pointer.
	 */
	hfi_instances = kcalloc(max_hfi_instances, sizeof(*hfi_instances),
				GFP_KERNEL);
	if (!hfi_instances)
		return;

	for (i = 0; i < max_hfi_instances; i++) {
		hfi_instance = &hfi_instances[i];
		if (!zalloc_cpumask_var(&hfi_instance->cpus, GFP_KERNEL))
			goto err_nomem;
	}

	hfi_updates_wq = create_singlethread_workqueue("hfi-updates");
	if (!hfi_updates_wq)
		goto err_nomem;

	/*
	 * Both the thermal core and Intel HFI cannot be built as modules.
	 * As kernel built-in drivers they are initialized before user space
	 * starts, hence we cannot miss BIND/UNBIND events when applications
	 * add/remove the thermal multicast group to/from a netlink socket.
	 */
	if (thermal_genl_register_notifier(&hfi_thermal_nb))
		goto err_nl_notif;

	register_syscore_ops(&hfi_pm_ops);

	return;

err_nl_notif:
	destroy_workqueue(hfi_updates_wq);

err_nomem:
	/* Only the first @i cpumasks were allocated; free exactly those. */
	for (j = 0; j < i; ++j) {
		hfi_instance = &hfi_instances[j];
		free_cpumask_var(hfi_instance->cpus);
	}

	kfree(hfi_instances);
	hfi_instances = NULL;
}
729