1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * isst_tpmi.c: SST TPMI interface core
4 *
5 * Copyright (c) 2023, Intel Corporation.
6 * All Rights Reserved.
7 *
8 * This information will be useful to understand flows:
9 * In the current generation of platforms, TPMI is supported via OOB
10 * PCI device. This PCI device has one instance per CPU package.
11 * There is a unique TPMI ID for SST. Each TPMI ID also has multiple
12 * entries, representing per power domain information.
13 *
14 * There is one dev file for complete SST information and control same as the
15 * prior generation of hardware. User spaces don't need to know how the
16 * information is presented by the hardware. The TPMI core module implements
17 * the hardware mapping.
18 */
19
20 #define dev_fmt(fmt) "tpmi_sst: " fmt
21
22 #include <linux/auxiliary_bus.h>
23 #include <linux/delay.h>
24 #include <linux/intel_tpmi.h>
25 #include <linux/intel_vsec.h>
26 #include <linux/fs.h>
27 #include <linux/io.h>
28 #include <linux/kernel.h>
29 #include <linux/minmax.h>
30 #include <linux/module.h>
31 #include <asm/msr.h>
32 #include <uapi/linux/isst_if.h>
33
34 #include "isst_tpmi_core.h"
35 #include "isst_if_common.h"
36
37 /* Supported SST hardware version by this driver */
38 #define ISST_MAJOR_VERSION 0
39 #define ISST_MINOR_VERSION 2
40
41 /*
42 * Used to indicate if value read from MMIO needs to get multiplied
43 * to get to a standard unit or not.
44 */
45 #define SST_MUL_FACTOR_NONE 1
46
47 /* Define 100 as a scaling factor frequency ratio to frequency conversion */
48 #define SST_MUL_FACTOR_FREQ 100
49
50 /* All SST regs are 64 bit size */
51 #define SST_REG_SIZE 8
52
/**
 * struct sst_header - SST main header
 * @interface_version: Version number for this interface
 * @cap_mask: Bitmask of the supported sub features. 1=the sub feature is enabled.
 *	      0=disabled.
 *	      Bit[8]= SST_CP enable (1), disable (0)
 *	      bit[9]= SST_PP enable (1), disable (0)
 *	      other bits are reserved for future use
 * @cp_offset: Qword (8 bytes) offset to the SST_CP register bank
 * @pp_offset: Qword (8 bytes) offset to the SST_PP register bank
 * @reserved: Reserved for future use
 *
 * This register allows SW to discover SST capability and the offsets to SST-CP
 * and SST-PP register banks.
 *
 * Layout mirrors the 64-bit hardware register, hence __packed. Since
 * @cap_mask is the second byte of that register, register bits 8/9 appear
 * here as BIT(0)/BIT(1); the IOCTL handlers below test cap_mask & BIT(0)
 * (SST-CP supported) and cap_mask & BIT(1) (SST-PP enabled) accordingly.
 */
struct sst_header {
	u8 interface_version;
	u8 cap_mask;
	u8 cp_offset;
	u8 pp_offset;
	u32 reserved;
} __packed;
75
/**
 * struct cp_header - SST-CP (core-power) header
 * @feature_id: 0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
 * @feature_rev: Interface Version number for this SST feature
 * @ratio_unit: Frequency ratio unit. 00: 100MHz. All others are reserved
 * @reserved: Reserved for future use
 *
 * This structure is used store SST-CP header. This is packed to the same
 * format as defined in the specifications.
 *
 * Bitfields map the raw 64-bit register read via readq() in sst_main().
 */
struct cp_header {
	u64 feature_id :4;
	u64 feature_rev :8;
	u64 ratio_unit :2;
	u64 reserved :50;
} __packed;
92
/**
 * struct pp_header - SST-PP (Perf profile) header
 * @feature_id: 0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
 * @feature_rev: Interface Version number for this SST feature
 * @level_en_mask: SST-PP level enable/disable fuse mask
 * @allowed_level_mask: Allowed level mask used for dynamic config level switching
 * @reserved0: Reserved for future use
 * @ratio_unit: Frequency ratio unit. 00: 100MHz. All others are reserved
 * @block_size: Size of PP block in Qword unit (8 bytes)
 * @dynamic_switch: If set (1), dynamic switching of SST PP is supported
 * @memory_ratio_unit: Memory Controller frequency ratio unit. 00: 100MHz, others reserved
 * @reserved1: Reserved for future use
 *
 * This structure is used store SST-PP header. This is packed to the same
 * format as defined in the specifications.
 *
 * sst_main() derives max_level from @level_en_mask; isst_if_set_perf_level()
 * validates requests against @allowed_level_mask.
 */
struct pp_header {
	u64 feature_id :4;
	u64 feature_rev :8;
	u64 level_en_mask :8;
	u64 allowed_level_mask :8;
	u64 reserved0 :4;
	u64 ratio_unit :2;
	u64 block_size :8;
	u64 dynamic_switch :1;
	u64 memory_ratio_unit :2;
	u64 reserved1 :19;
} __packed;
121
/**
 * struct feature_offset - Offsets to SST-PP features
 * @pp_offset: Qword offset within PP level for the SST_PP register bank
 * @bf_offset: Qword offset within PP level for the SST_BF register bank
 * @tf_offset: Qword offset within PP level for the SST_TF register bank
 * @reserved: Reserved for future use
 *
 * This structure is used store offsets for SST features in the register bank.
 * This is packed to the same format as defined in the specifications.
 *
 * Offsets are in Qwords: the BF/TF read macros multiply them by 8 to get a
 * byte offset from a level's mmio_offset.
 */
struct feature_offset {
	u64 pp_offset :8;
	u64 bf_offset :8;
	u64 tf_offset :8;
	u64 reserved :40;
} __packed;
138
/**
 * struct levels_offset - Offsets to each SST PP level
 * @sst_pp_level0_offset: Qword offset to the register block of PP level 0
 * @sst_pp_level1_offset: Qword offset to the register block of PP level 1
 * @sst_pp_level2_offset: Qword offset to the register block of PP level 2
 * @sst_pp_level3_offset: Qword offset to the register block of PP level 3
 * @sst_pp_level4_offset: Qword offset to the register block of PP level 4
 * @reserved: Reserved for future use
 *
 * This structure is used store offsets of SST PP levels in the register bank.
 * This is packed to the same format as defined in the specifications.
 *
 * NOTE: sst_add_perf_profiles() does not use this struct directly; it
 * extracts the same 8-bit-per-level fields by shifting the raw qword.
 */
struct levels_offset {
	u64 sst_pp_level0_offset :8;
	u64 sst_pp_level1_offset :8;
	u64 sst_pp_level2_offset :8;
	u64 sst_pp_level3_offset :8;
	u64 sst_pp_level4_offset :8;
	u64 reserved :24;
} __packed;
159
/**
 * struct pp_control_offset - Offsets for SST PP controls
 * @perf_level: A SST-PP level that SW intends to switch to
 * @perf_level_lock: SST-PP level select lock. 0 - unlocked. 1 - locked till next reset
 * @resvd0: Reserved for future use
 * @current_state: Bit mask to control the enable(1)/disable(0) state of each feature
 *		   of the current PP level, bit 0 = BF, bit 1 = TF, bit 2-7 = reserved
 * @reserved: Reserved for future use
 *
 * This structure is used store offsets of SST PP controls in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct pp_control_offset {
	u64 perf_level :3;
	u64 perf_level_lock :1;
	u64 resvd0 :4;
	u64 current_state :8;
	u64 reserved :48;
} __packed;
179
/**
 * struct pp_status_offset - Offsets for SST PP status fields
 * @sst_pp_level: Returns the current SST-PP level
 * @sst_pp_lock: Returns the lock bit setting of perf_level_lock in pp_control_offset
 * @error_type: Returns last error of SST-PP level change request. 0: no error,
 *		1: level change not allowed, others: reserved
 * @feature_state: Bit mask to indicate the enable(1)/disable(0) state of each feature of the
 *		   current PP level. bit 0 = BF, bit 1 = TF, bit 2-7 reserved
 * @reserved0: Reserved for future use
 * @feature_error_type: Returns last error of the specific feature. Three error_type bits per
 *			feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc.
 *			0x0: no error, 0x1: The specific feature is not supported by the hardware.
 *			0x2-0x6: Reserved. 0x7: feature state change is not allowed.
 * @reserved1: Reserved for future use
 *
 * This structure is used store offsets of SST PP status in the register bank.
 * This is packed to the same format as defined in the specifications.
 */
struct pp_status_offset {
	u64 sst_pp_level :3;
	u64 sst_pp_lock :1;
	u64 error_type :4;
	u64 feature_state :8;
	u64 reserved0 :16;
	u64 feature_error_type :24;
	u64 reserved1 :8;
} __packed;
207
/**
 * struct perf_level - Used to store perf level and mmio offset
 * @mmio_offset: mmio offset for a perf level
 * @level: perf level for this offset
 *
 * This structure is used store final mmio offset of each perf level from the
 * SST base mmio offset.
 *
 * Filled by sst_add_perf_profiles(); indexed by PP level number.
 */
struct perf_level {
	int mmio_offset;
	int level;
};
220
/**
 * struct tpmi_per_power_domain_info - Store per power_domain SST info
 * @package_id: Package id for this power_domain
 * @power_domain_id: Power domain id, Each entry from the SST-TPMI instance is a power_domain.
 * @max_level: Max possible PP level possible for this power_domain
 * @ratio_unit: Ratio unit for converting to MHz
 * @avx_levels: Number of AVX levels
 * @pp_block_size: Block size from PP header
 * @sst_header: Store SST header for this power_domain
 * @cp_header: Store SST-CP header for this power_domain
 * @pp_header: Store SST-PP header for this power_domain
 * @perf_levels: Pointer to each perf level to map level to mmio offset
 * @feature_offsets: Store feature offsets for each PP-level
 * @control_offset: Store the control offset for each PP-level
 * @status_offset: Store the status offset for each PP-level
 * @sst_base: Mapped SST base IO memory
 * @auxdev: Auxiliary device instance enumerated this instance
 * @saved_sst_cp_control: Save SST-CP control configuration to store restore for suspend/resume
 * @saved_clos_configs: Save SST-CP CLOS configuration to store restore for suspend/resume
 * @saved_clos_assocs: Save SST-CP CLOS association to store restore for suspend/resume
 * @saved_pp_control: Save SST-PP control information to store restore for suspend/resume
 * @write_blocked: Write operation is blocked, so can't change SST state
 *
 * This structure is used store complete SST information for a power_domain. This information
 * is used to read/write request for any SST IOCTL. Each physical CPU package can have multiple
 * power_domains. Each power domain describes its own SST information and has its own controls.
 *
 * A NULL @sst_base marks an unusable domain: get_instance() rejects it.
 */
struct tpmi_per_power_domain_info {
	int package_id;
	int power_domain_id;
	int max_level;
	int ratio_unit;
	int avx_levels;
	int pp_block_size;
	struct sst_header sst_header;
	struct cp_header cp_header;
	struct pp_header pp_header;
	struct perf_level *perf_levels;
	struct feature_offset feature_offsets;
	struct pp_control_offset control_offset;
	struct pp_status_offset status_offset;
	void __iomem *sst_base;
	struct auxiliary_device *auxdev;
	u64 saved_sst_cp_control;
	u64 saved_clos_configs[4];
	u64 saved_clos_assocs[4];
	u64 saved_pp_control;
	bool write_blocked;
};
270
/* Supported maximum partitions */
#define SST_MAX_PARTITIONS	2

/**
 * struct tpmi_sst_struct - Store sst info for a package
 * @package_id: Package id for this aux device instance
 * @number_of_power_domains: Number of power_domains pointed by power_domain_info pointer
 * @power_domain_info: Pointer to power domains information
 * @cdie_mask: Mask of compute dies present in a partition from hardware.
 *	       This mask is not present in the version 1 information header.
 * @io_dies: Number of IO dies in a partition. This will be 0 for TPMI
 *	     version 1 information header.
 * @partition_mask: Mask of all partitions.
 * @partition_mask_current: Current partition mask as some may have been unbound.
 *
 * This structure is used store full SST information for a package.
 * Each package has one or multiple OOB PCI devices. Each package can contain multiple
 * power domains.
 *
 * @partition_mask is assumed to be contiguous from bit 0 (see
 * isst_instance_count()); @partition_mask_current diverges from it when a
 * partition's PCI device is unbound, which makes ID mapping fail.
 */
struct tpmi_sst_struct {
	int package_id;
	struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS];
	u16 cdie_mask[SST_MAX_PARTITIONS];
	u8 number_of_power_domains[SST_MAX_PARTITIONS];
	u8 io_dies[SST_MAX_PARTITIONS];
	u8 partition_mask;
	u8 partition_mask_current;
};
299
/**
 * struct tpmi_sst_common_struct - Store all SST instances
 * @max_index: Maximum instances currently present
 * @sst_inst: Pointer to per package instance
 *
 * Stores every SST Package instance.
 *
 * @sst_inst is indexed by package id; entries may be NULL (checked in
 * get_instance()).
 */
struct tpmi_sst_common_struct {
	int max_index;
	struct tpmi_sst_struct **sst_inst;
};
311
/*
 * Each IOCTL request is processed under this lock. Also used to protect
 * registration functions and common data structures.
 */
static DEFINE_MUTEX(isst_tpmi_dev_lock);

/* Usage count to track, number of TPMI SST instances registered to this core. */
static int isst_core_usage_count;

/* Stores complete SST information for every package and power_domain */
static struct tpmi_sst_common_struct isst_common;

/* Number of AVX frequency levels reported per perf profile */
#define SST_MAX_AVX_LEVELS	3

/* Byte offsets of the feature/level offset registers from the PP bank base */
#define SST_PP_OFFSET_0		8
#define SST_PP_OFFSET_1		16
/* Width in bits of each per-level offset field in the PP offset register */
#define SST_PP_OFFSET_SIZE	8
329
sst_add_perf_profiles(struct auxiliary_device * auxdev,struct tpmi_per_power_domain_info * pd_info,int levels)330 static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
331 struct tpmi_per_power_domain_info *pd_info,
332 int levels)
333 {
334 struct device *dev = &auxdev->dev;
335 u64 perf_level_offsets;
336 int i;
337
338 pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL);
339 if (!pd_info->perf_levels)
340 return 0;
341
342 pd_info->ratio_unit = pd_info->pp_header.ratio_unit;
343 pd_info->avx_levels = SST_MAX_AVX_LEVELS;
344 pd_info->pp_block_size = pd_info->pp_header.block_size;
345
346 /* Read PP Offset 0: Get feature offset with PP level */
347 *((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base +
348 pd_info->sst_header.pp_offset +
349 SST_PP_OFFSET_0);
350
351 perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset +
352 SST_PP_OFFSET_1);
353
354 for (i = 0; i < levels; ++i) {
355 u64 offset;
356
357 offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE));
358 offset >>= (i * 8);
359 offset &= 0xff;
360 offset *= 8; /* Convert to byte from QWORD offset */
361 pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset;
362 }
363
364 return 0;
365 }
366
sst_main(struct auxiliary_device * auxdev,struct tpmi_per_power_domain_info * pd_info)367 static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
368 {
369 struct device *dev = &auxdev->dev;
370 int i, mask, levels;
371
372 *((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
373 pd_info->sst_header.cp_offset *= 8;
374 pd_info->sst_header.pp_offset *= 8;
375
376 if (pd_info->sst_header.interface_version == TPMI_VERSION_INVALID)
377 return -ENODEV;
378
379 if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) {
380 dev_err(dev, "SST: Unsupported major version:%lx\n",
381 TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version));
382 return -ENODEV;
383 }
384
385 if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) > ISST_MINOR_VERSION)
386 dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n",
387 TPMI_MINOR_VERSION(pd_info->sst_header.interface_version));
388
389 /* Read SST CP Header */
390 *((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset);
391
392 /* Read PP header */
393 *((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset);
394
395 mask = 0x01;
396 levels = 0;
397 for (i = 0; i < 8; ++i) {
398 if (pd_info->pp_header.level_en_mask & mask)
399 levels = i;
400 mask <<= 1;
401 }
402 pd_info->max_level = levels;
403 sst_add_perf_profiles(auxdev, pd_info, levels + 1);
404
405 return 0;
406 }
407
isst_instance_count(struct tpmi_sst_struct * sst_inst)408 static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst)
409 {
410 u8 i, max_part, count = 0;
411
412 /* Partition mask starts from bit 0 and contains 1s only */
413 max_part = hweight8(sst_inst->partition_mask);
414 for (i = 0; i < max_part; i++)
415 count += sst_inst->number_of_power_domains[i];
416
417 return count;
418 }
419
420 /**
421 * map_cdies() - Map user domain ID to compute domain ID
422 * @sst_inst: TPMI Instance
423 * @id: User domain ID
424 * @partition: Resolved partition
425 *
426 * Helper function to map_partition_power_domain_id() to resolve compute
427 * domain ID and partition. Use hardware provided cdie_mask for a partition
428 * as is to resolve a compute domain ID.
429 *
430 * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
431 */
map_cdies(struct tpmi_sst_struct * sst_inst,u8 id,u8 * partition)432 static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
433 {
434 u8 i, max_part;
435
436 max_part = hweight8(sst_inst->partition_mask);
437 for (i = 0; i < max_part; i++) {
438 if (!(sst_inst->cdie_mask[i] & BIT(id)))
439 continue;
440
441 *partition = i;
442 return id - ffs(sst_inst->cdie_mask[i]) + 1;
443 }
444
445 return -EINVAL;
446 }
447
448 /**
449 * map_partition_power_domain_id() - Map user domain ID to partition domain ID
450 * @sst_inst: TPMI Instance
451 * @id: User domain ID
452 * @partition: Resolved partition
453 *
454 * In a partitioned system a CPU package has two separate MMIO ranges (Under
455 * two PCI devices). But the CPU package compute die/power domain IDs are
456 * unique in a package. User space can get compute die/power domain ID from
457 * CPUID and MSR 0x54 for a CPU. So, those IDs need to be preserved even if
458 * they are present in two different partitions with its own order.
459 *
460 * For example for command ISST_IF_COUNT_TPMI_INSTANCES, the valid_mask
461 * is 111111b for a 4 compute and 2 IO dies system. This is presented as
462 * provided by the hardware in a non-partitioned system with the following
463 * order:
464 * I1-I0-C3-C2-C1-C0
465 * Here: "C": for compute and "I" for IO die.
466 * Compute dies are always present first in TPMI instances, as they have
467 * to map to the real power domain/die ID of a system. In a non-partitioned
468 * system there is no way to identify compute and IO die boundaries from
469 * this driver without reading each CPU's mapping.
470 *
471 * The same order needs to be preserved, even if those compute dies are
472 * distributed among multiple partitions. For example:
473 * Partition 1 can contain: I1-C1-C0
474 * Partition 2 can contain: I2-C3-C2
475 *
476 * This will require a conversion of user space IDs to the actual index into
477 * array of stored power domains for each partition. For the above example
478 * this function will return partition and index as follows:
479 *
480 * ============= ========= ===== ========
481 * User space ID Partition Index Die type
482 * ============= ========= ===== ========
483 * 0 0 0 Compute
484 * 1 0 1 Compute
485 * 2 1 0 Compute
486 * 3 1 1 Compute
487 * 4 0 2 IO
488 * 5 1 2 IO
489 * ============= ========= ===== ========
490 *
491 * Return: %-EINVAL on error, otherwise mapped domain ID >= 0.
492 */
map_partition_power_domain_id(struct tpmi_sst_struct * sst_inst,u8 id,u8 * partition)493 static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition)
494 {
495 u8 i, io_start_id, max_part;
496
497 *partition = 0;
498
499 /* If any PCI device for partition is unbound, treat this as failure */
500 if (sst_inst->partition_mask != sst_inst->partition_mask_current)
501 return -EINVAL;
502
503 max_part = hweight8(sst_inst->partition_mask);
504
505 /* IO Index begin here */
506 io_start_id = fls(sst_inst->cdie_mask[max_part - 1]);
507
508 if (id < io_start_id)
509 return map_cdies(sst_inst, id, partition);
510
511 for (i = 0; i < max_part; i++) {
512 u8 io_id;
513
514 io_id = id - io_start_id;
515 if (io_id < sst_inst->io_dies[i]) {
516 u8 cdie_range;
517
518 cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1;
519 *partition = i;
520 return cdie_range + io_id;
521 }
522 io_start_id += sst_inst->io_dies[i];
523 }
524
525 return -EINVAL;
526 }
527
528 /*
529 * Map a package and power_domain id to SST information structure unique for a power_domain.
530 * The caller should call under isst_tpmi_dev_lock.
531 */
get_instance(int pkg_id,int power_domain_id)532 static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id)
533 {
534 struct tpmi_per_power_domain_info *power_domain_info;
535 struct tpmi_sst_struct *sst_inst;
536 u8 part;
537
538 if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index)
539 return NULL;
540
541 sst_inst = isst_common.sst_inst[pkg_id];
542 if (!sst_inst)
543 return NULL;
544
545 power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part);
546 if (power_domain_id < 0)
547 return NULL;
548
549 power_domain_info = &sst_inst->power_domain_info[part][power_domain_id];
550
551 if (power_domain_info && !power_domain_info->sst_base)
552 return NULL;
553
554 return power_domain_info;
555 }
556
disable_dynamic_sst_features(void)557 static bool disable_dynamic_sst_features(void)
558 {
559 u64 value;
560
561 if (!static_cpu_has(X86_FEATURE_HWP))
562 return true;
563
564 rdmsrq(MSR_PM_ENABLE, value);
565 return !(value & 0x1);
566 }
567
/*
 * Read/modify a bit-field of an SST-CP register. These macros expand inside
 * functions where a valid 'power_domain_info' pointer is in scope; they
 * read/write the 64-bit register at cp_offset + offset and extract/insert
 * bits [start, start+width-1], scaling by mult_factor/div_factor (e.g.
 * ratio <-> MHz). name_str is for documentation at the call site only.
 */
#define _read_cp_info(name_str, name, offset, start, width, mult_factor)\
{\
	u64 val, mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
			(offset));\
	mask = GENMASK_ULL((start + width - 1), start);\
	val &= mask; \
	val >>= start;\
	name = (val * mult_factor);\
}

#define _write_cp_info(name_str, name, offset, start, width, div_factor)\
{\
	u64 val, mask;\
	\
	val = readq(power_domain_info->sst_base +\
			power_domain_info->sst_header.cp_offset + (offset));\
	mask = GENMASK_ULL((start + width - 1), start);\
	val &= ~mask;\
	val |= (name / div_factor) << start;\
	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
		(offset));\
}

/* Byte offsets of the CP control/status registers within the CP bank */
#define SST_CP_CONTROL_OFFSET	8
#define SST_CP_STATUS_OFFSET	16

/* Bit positions within the CP control/status registers */
#define SST_CP_ENABLE_START		0
#define SST_CP_ENABLE_WIDTH		1

#define SST_CP_PRIORITY_TYPE_START	1
#define SST_CP_PRIORITY_TYPE_WIDTH	1
601
/*
 * isst_if_core_power_state() - Handle the SST-CP state IOCTL.
 *
 * Get or set the SST-CP enable and priority type for a power domain.
 * Set requires CAP_SYS_ADMIN and an unblocked domain; writes go to the CP
 * control register, reads come from the CP status register.
 *
 * NOTE(review): returns -EFAULT (not -EPERM/-ENOTSUPP) when dynamic SST
 * features are disabled — matches the other handlers in this file.
 */
static long isst_if_core_power_state(void __user *argp)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct isst_core_power core_power;

	if (copy_from_user(&core_power, argp, sizeof(core_power)))
		return -EFAULT;

	/* Changing CP state requires HWP-backed dynamic SST support */
	if (core_power.get_set && disable_dynamic_sst_features())
		return -EFAULT;

	power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (core_power.get_set) {
		if (power_domain_info->write_blocked || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		_write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET,
			       SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
		_write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET,
			       SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
			       SST_MUL_FACTOR_NONE)
	} else {
		/* get */
		_read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET,
			      SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
		_read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET,
			      SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
			      SST_MUL_FACTOR_NONE)
		/* cap_mask BIT(0) advertises SST-CP support */
		core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0));
		if (copy_to_user(argp, &core_power, sizeof(core_power)))
			return -EFAULT;
	}

	return 0;
}
640
/* Byte offset of CLOS config register 0; one SST_REG_SIZE register per CLOS */
#define SST_CLOS_CONFIG_0_OFFSET	24

/* Bit-fields within a CLOS config register */
#define SST_CLOS_CONFIG_PRIO_START	4
#define SST_CLOS_CONFIG_PRIO_WIDTH	4

#define SST_CLOS_CONFIG_MIN_START	8
#define SST_CLOS_CONFIG_MIN_WIDTH	8

#define SST_CLOS_CONFIG_MAX_START	16
#define SST_CLOS_CONFIG_MAX_WIDTH	8

/*
 * isst_if_clos_param() - Handle the SST-CP CLOS parameter IOCTL.
 *
 * Get or set min/max frequency and proportional priority for one CLOS.
 * Frequencies are stored as ratios: converted with SST_MUL_FACTOR_FREQ
 * (100 MHz per ratio step). Set requires CAP_SYS_ADMIN and an unblocked
 * domain.
 */
static long isst_if_clos_param(void __user *argp)
{
	struct tpmi_per_power_domain_info *power_domain_info;
	struct isst_clos_param clos_param;

	if (copy_from_user(&clos_param, argp, sizeof(clos_param)))
		return -EFAULT;

	power_domain_info = get_instance(clos_param.socket_id, clos_param.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (clos_param.get_set) {
		if (power_domain_info->write_blocked || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		_write_cp_info("clos.min_freq", clos_param.min_freq_mhz,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
			       SST_MUL_FACTOR_FREQ);
		_write_cp_info("clos.max_freq", clos_param.max_freq_mhz,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
			       SST_MUL_FACTOR_FREQ);
		_write_cp_info("clos.prio", clos_param.prop_prio,
			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			       SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
			       SST_MUL_FACTOR_NONE);
	} else {
		/* get */
		_read_cp_info("clos.min_freq", clos_param.min_freq_mhz,
			      (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			      SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
			      SST_MUL_FACTOR_FREQ)
		_read_cp_info("clos.max_freq", clos_param.max_freq_mhz,
			      (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			      SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
			      SST_MUL_FACTOR_FREQ)
		_read_cp_info("clos.prio", clos_param.prop_prio,
			      (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
			      SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
			      SST_MUL_FACTOR_NONE)

		if (copy_to_user(argp, &clos_param, sizeof(clos_param)))
			return -EFAULT;
	}

	return 0;
}
701
/* CLOS association registers: 4 bits per CPU, 16 CPUs per 8-byte register */
#define SST_CLOS_ASSOC_0_OFFSET		56
#define SST_CLOS_ASSOC_CPUS_PER_REG	16
#define SST_CLOS_ASSOC_BITS_PER_CPU	4

/*
 * isst_if_clos_assoc() - Handle the CPU-to-CLOS association multi-command
 * IOCTL. Each command gets or sets the 4-bit CLOS id for one punit CPU.
 *
 * Only punit-numbered CPUs are accepted (punit_cpu_map must be set);
 * logical-CPU mapping is not supported here. Set requires CAP_SYS_ADMIN
 * and an unblocked domain.
 *
 * NOTE(review): socket_id is checked with '>' (not '>=')
 * topology_max_packages(), and isst_common.sst_inst[pkg_id] is used
 * without a NULL check — presumably guaranteed by registration; confirm
 * against the registration path.
 */
static long isst_if_clos_assoc(void __user *argp)
{
	struct isst_if_clos_assoc_cmds assoc_cmds;
	unsigned char __user *ptr;
	int i;

	/* Each multi command has u16 command count as the first field */
	if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds)))
		return -EFAULT;

	if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	/* Walk the variable-length array of per-CPU commands */
	ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info);
	for (i = 0; i < assoc_cmds.cmd_count; ++i) {
		struct tpmi_per_power_domain_info *power_domain_info;
		struct isst_if_clos_assoc clos_assoc;
		int punit_id, punit_cpu_no, pkg_id;
		struct tpmi_sst_struct *sst_inst;
		int offset, shift, cpu;
		u64 val, mask, clos;
		u8 part;

		if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
			return -EFAULT;

		if (clos_assoc.socket_id > topology_max_packages())
			return -EINVAL;

		cpu = clos_assoc.logical_cpu;
		clos = clos_assoc.clos;

		if (assoc_cmds.punit_cpu_map)
			punit_cpu_no = cpu;
		else
			return -EOPNOTSUPP;

		if (punit_cpu_no < 0)
			return -EINVAL;

		punit_id = clos_assoc.power_domain_id;
		pkg_id = clos_assoc.socket_id;

		sst_inst = isst_common.sst_inst[pkg_id];

		punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part);
		if (punit_id < 0)
			return -EINVAL;

		power_domain_info = &sst_inst->power_domain_info[part][punit_id];

		if (assoc_cmds.get_set && (power_domain_info->write_blocked ||
					   !capable(CAP_SYS_ADMIN)))
			return -EPERM;

		/* Locate the register and 4-bit lane for this punit CPU */
		offset = SST_CLOS_ASSOC_0_OFFSET +
				(punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE;
		shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG;
		shift *= SST_CLOS_ASSOC_BITS_PER_CPU;

		val = readq(power_domain_info->sst_base +
				power_domain_info->sst_header.cp_offset + offset);
		if (assoc_cmds.get_set) {
			/* Read-modify-write just this CPU's 4-bit field */
			mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift);
			val &= ~mask;
			val |= (clos << shift);
			writeq(val, power_domain_info->sst_base +
					power_domain_info->sst_header.cp_offset + offset);
		} else {
			val >>= shift;
			clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0);
			if (copy_to_user(ptr, &clos_assoc, sizeof(clos_assoc)))
				return -EFAULT;
		}

		ptr += sizeof(clos_assoc);
	}

	return 0;
}
786
/*
 * Read/modify a bit-field of an SST-PP register, and read bit-fields of the
 * per-level SST-BF/SST-TF register blocks. Like the _cp_info macros above,
 * these expand where 'power_domain_info' is in scope. The BF/TF variants
 * address level-relative registers: perf_levels[level].mmio_offset plus the
 * Qword feature offset (scaled by 8 to bytes) plus 'offset'.
 *
 * NOTE(review): _write_pp_info uses GENMASK where the others use
 * GENMASK_ULL — equivalent on 64-bit builds (this is x86-64-only code),
 * but inconsistent.
 */
#define _read_pp_info(name_str, name, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
		    (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask;\
	val >>= start;\
	name = (val * mult_factor);\
}

#define _write_pp_info(name_str, name, offset, start, width, div_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
		    (offset));\
	_mask = GENMASK((start + width - 1), start);\
	val &= ~_mask;\
	val |= (name / div_factor) << start;\
	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
	       (offset));\
}

#define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		    (power_domain_info->feature_offsets.bf_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}

#define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		    (power_domain_info->feature_offsets.tf_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}

/* Byte offset of the PP status register within the PP bank */
#define SST_PP_STATUS_OFFSET	32

/* Bit-fields within the PP status/control registers */
#define SST_PP_LEVEL_START	0
#define SST_PP_LEVEL_WIDTH	3

#define SST_PP_LOCK_START	3
#define SST_PP_LOCK_WIDTH	1

#define SST_PP_FEATURE_STATE_START	8
#define SST_PP_FEATURE_STATE_WIDTH	8

/* "Feature supported" bit within a level's BF/TF register block */
#define SST_BF_FEATURE_SUPPORTED_START	12
#define SST_BF_FEATURE_SUPPORTED_WIDTH	1

#define SST_TF_FEATURE_SUPPORTED_START	12
#define SST_TF_FEATURE_SUPPORTED_WIDTH	1
854
/*
 * isst_if_get_perf_level() - Handle the get-perf-level IOCTL.
 *
 * Fill struct isst_perf_level_info for a power domain: max level, enable
 * masks and feature revision from the cached PP header, the live
 * current-level/lock/feature-state from the PP status register, and
 * per-level SST-BF/SST-TF support bitmaps read from each enabled level's
 * register block.
 */
static int isst_if_get_perf_level(void __user *argp)
{
	struct isst_perf_level_info perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	unsigned long level_mask;
	u8 level, support;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	/* Static info cached at probe time */
	perf_level.max_level = power_domain_info->max_level;
	perf_level.level_mask = power_domain_info->pp_header.level_en_mask;
	perf_level.feature_rev = power_domain_info->pp_header.feature_rev;
	/* Live state from the PP status register */
	_read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET,
		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
	_read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET,
		      SST_PP_LOCK_START, SST_PP_LOCK_WIDTH, SST_MUL_FACTOR_NONE)
	_read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET,
		      SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
	/* cap_mask BIT(1) advertises SST-PP enabled */
	perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));

	level_mask = perf_level.level_mask;
	perf_level.sst_bf_support = 0;
	for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
		/*
		 * Read BF support for a level. Read output is updated
		 * to "support" variable by the below macro.
		 */
		_read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START,
				    SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);

		/* If supported set the bit for the level */
		if (support)
			perf_level.sst_bf_support |= BIT(level);
	}

	perf_level.sst_tf_support = 0;
	for_each_set_bit(level, &level_mask, BITS_PER_BYTE) {
		/*
		 * Read TF support for a level. Read output is updated
		 * to "support" variable by the below macro.
		 */
		_read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START,
				    SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE);

		/* If supported set the bit for the level */
		if (support)
			perf_level.sst_tf_support |= BIT(level);
	}

	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
		return -EFAULT;

	return 0;
}
914
915 #define SST_PP_CONTROL_OFFSET 24
916 #define SST_PP_LEVEL_CHANGE_TIME_MS 5
917 #define SST_PP_LEVEL_CHANGE_RETRY_COUNT 3
918
/**
 * isst_if_set_perf_level() - Handle the ISST_IF_PERF_SET_LEVEL ioctl
 * @argp: User pointer to a struct isst_perf_level_control
 *
 * Request a change of the active SST-PP performance level for a power
 * domain. The requested level must be present in the firmware's
 * allowed-level mask and must differ from the current level. After
 * writing the request, the status register is polled (with sleeps in
 * between) until firmware reports the new level or the retry budget is
 * exhausted. On success the per-level feature state is reset to 0.
 *
 * Requires CAP_SYS_ADMIN, writes not blocked by firmware, and dynamic
 * SST features not disabled by the module.
 *
 * Return: 0 on success, -EFAULT on copy failure or if firmware never
 * activates the new level, -EINVAL for invalid selection or level,
 * -EPERM when the caller is not permitted to write.
 */
static int isst_if_set_perf_level(void __user *argp)
{
	struct isst_perf_level_control perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	int level, retry = 0;

	if (disable_dynamic_sst_features())
		return -EFAULT;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (power_domain_info->write_blocked || !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Firmware publishes which levels may be selected; reject others */
	if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level)))
		return -EINVAL;

	_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

	/* If the requested new level is same as the current level, reject */
	if (perf_level.level == level)
		return -EINVAL;

	_write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET,
		       SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

	/* It is possible that firmware is busy (although unlikely), so retry */
	do {
		/* Give time to FW to process */
		msleep(SST_PP_LEVEL_CHANGE_TIME_MS);

		_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
			      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)

		/* Check if the new level is active */
		if (perf_level.level == level)
			break;

	} while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT);

	/* If the level change didn't happen, return fault */
	if (perf_level.level != level)
		return -EFAULT;

	/* Reset the feature state on level change */
	_write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET,
		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
		       SST_MUL_FACTOR_NONE)

	/* Give time to FW to process */
	msleep(SST_PP_LEVEL_CHANGE_TIME_MS);

	return 0;
}
979
/**
 * isst_if_set_perf_feature() - Handle the ISST_IF_PERF_SET_FEATURE ioctl
 * @argp: User pointer to a struct isst_perf_feature_control
 *
 * Write the requested SST-PP feature-state bits (e.g. SST-BF/SST-TF
 * enables) to the PP control register of the selected power domain.
 * The write is fire-and-forget: unlike a level change, no status
 * polling is done here.
 *
 * Requires CAP_SYS_ADMIN, writes not blocked by firmware, and dynamic
 * SST features not disabled by the module.
 *
 * Return: 0 on success, -EFAULT on copy failure or disabled dynamic
 * features, -EINVAL for invalid selection, -EPERM when not permitted.
 */
static int isst_if_set_perf_feature(void __user *argp)
{
	struct isst_perf_feature_control perf_feature;
	struct tpmi_per_power_domain_info *power_domain_info;

	if (disable_dynamic_sst_features())
		return -EFAULT;

	if (copy_from_user(&perf_feature, argp, sizeof(perf_feature)))
		return -EFAULT;

	power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (power_domain_info->write_blocked || !capable(CAP_SYS_ADMIN))
		return -EPERM;

	_write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET,
		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
		       SST_MUL_FACTOR_NONE)

	return 0;
}
1004
/*
 * Read a bit-field from the per-level SST-PP information registers and
 * assign it, scaled by @mult_factor, to the @name lvalue. @name_str is
 * only a human-readable label. The register address is formed from the
 * level's MMIO offset plus the PP feature offset (in 8-byte units) plus
 * @offset. Expands in a scope where a "power_domain_info" pointer
 * variable is visible; this is a statement block, not an expression.
 */
#define _read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\
{\
	u64 val, _mask;\
	\
	val = readq(power_domain_info->sst_base +\
		    power_domain_info->perf_levels[level].mmio_offset +\
		    (power_domain_info->feature_offsets.pp_offset * 8) + (offset));\
	_mask = GENMASK_ULL((start + width - 1), start);\
	val &= _mask; \
	val >>= start;\
	name = (val * mult_factor);\
}
1017
1018 #define SST_PP_INFO_0_OFFSET 0
1019 #define SST_PP_INFO_1_OFFSET 8
1020 #define SST_PP_INFO_2_OFFSET 16
1021 #define SST_PP_INFO_3_OFFSET 24
1022
1023 /* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */
1024 #define SST_PP_INFO_4_OFFSET 32
1025
1026 #define SST_PP_INFO_10_OFFSET 80
1027 #define SST_PP_INFO_11_OFFSET 88
1028 #define SST_PP_INFO_12_OFFSET 96
1029
1030 #define SST_PP_P1_SSE_START 0
1031 #define SST_PP_P1_SSE_WIDTH 8
1032
1033 #define SST_PP_P1_AVX2_START 8
1034 #define SST_PP_P1_AVX2_WIDTH 8
1035
1036 #define SST_PP_P1_AVX512_START 16
1037 #define SST_PP_P1_AVX512_WIDTH 8
1038
1039 #define SST_PP_P1_AMX_START 24
1040 #define SST_PP_P1_AMX_WIDTH 8
1041
1042 #define SST_PP_TDP_START 32
1043 #define SST_PP_TDP_WIDTH 15
1044
1045 #define SST_PP_T_PROCHOT_START 47
1046 #define SST_PP_T_PROCHOT_WIDTH 8
1047
1048 #define SST_PP_MAX_MEMORY_FREQ_START 55
1049 #define SST_PP_MAX_MEMORY_FREQ_WIDTH 7
1050
1051 #define SST_PP_COOLING_TYPE_START 62
1052 #define SST_PP_COOLING_TYPE_WIDTH 2
1053
1054 #define SST_PP_TRL_0_RATIO_0_START 0
1055 #define SST_PP_TRL_0_RATIO_0_WIDTH 8
1056
1057 #define SST_PP_TRL_CORES_BUCKET_0_START 0
1058 #define SST_PP_TRL_CORES_BUCKET_0_WIDTH 8
1059
1060 #define SST_PP_CORE_RATIO_P0_START 0
1061 #define SST_PP_CORE_RATIO_P0_WIDTH 8
1062
1063 #define SST_PP_CORE_RATIO_P1_START 8
1064 #define SST_PP_CORE_RATIO_P1_WIDTH 8
1065
1066 #define SST_PP_CORE_RATIO_PN_START 16
1067 #define SST_PP_CORE_RATIO_PN_WIDTH 8
1068
1069 #define SST_PP_CORE_RATIO_PM_START 24
1070 #define SST_PP_CORE_RATIO_PM_WIDTH 8
1071
1072 #define SST_PP_CORE_RATIO_P0_FABRIC_START 32
1073 #define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH 8
1074
1075 #define SST_PP_CORE_RATIO_P1_FABRIC_START 40
1076 #define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH 8
1077
1078 #define SST_PP_CORE_RATIO_PM_FABRIC_START 48
1079 #define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH 8
1080
1081 #define SST_PP_CORE_RATIO_P0_FABRIC_1_START 0
1082 #define SST_PP_CORE_RATIO_P0_FABRIC_1_WIDTH 8
1083
1084 #define SST_PP_CORE_RATIO_P1_FABRIC_1_START 8
1085 #define SST_PP_CORE_RATIO_P1_FABRIC_1_WIDTH 8
1086
1087 #define SST_PP_CORE_RATIO_PM_FABRIC_1_START 16
1088 #define SST_PP_CORE_RATIO_PM_FABRIC_1_WIDTH 8
1089
/**
 * isst_if_get_perf_level_info() - Handle the ISST_IF_GET_PERF_LEVEL_INFO ioctl
 * @argp: User pointer to a struct isst_perf_level_data_info
 *
 * Decode the static per-level information registers (SST_PP_INFO_*) for
 * one enabled performance level of a power domain: TDP ratio, base
 * frequencies per ISA class (SSE/AVX2/AVX512/AMX), thermal design
 * power, Tjmax, memory frequency, cooling type, the turbo-ratio-limit
 * (TRL) frequency matrix with its per-bucket core counts, and the
 * P0/P1/Pn/Pm core and fabric frequencies.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * invalid selection or a level that is out of range or not enabled.
 */
static int isst_if_get_perf_level_info(void __user *argp)
{
	struct isst_perf_level_data_info perf_level;
	struct tpmi_per_power_domain_info *power_domain_info;
	int i, j;

	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (perf_level.level > power_domain_info->max_level)
		return -EINVAL;

	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level)))
		return -EINVAL;

	/* tdp_ratio is read from the same register field as the SSE P1 ratio */
	_read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz,
			    perf_level.level, SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX512_START,
			    SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level,
			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH,
			    SST_MUL_FACTOR_FREQ)

	_read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w,
			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START,
			    SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE)
	perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */
	_read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level,
			    SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz,
			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START,
			    SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level,
			    SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START,
			    SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE)

	/*
	 * TRL matrix: SST_PP_INFO_4..9 hold one 64-bit register per TRL
	 * level, each packing 8-bit ratios for every bucket.
	 */
	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
			_read_pp_level_info("trl*_bucket*_freq_mhz",
					    perf_level.trl_freq_mhz[i][j], perf_level.level,
					    SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH),
					    j * SST_PP_TRL_0_RATIO_0_WIDTH,
					    SST_PP_TRL_0_RATIO_0_WIDTH,
					    SST_MUL_FACTOR_FREQ);
	}

	/* Per-bucket core counts come packed in SST_PP_INFO_10 */
	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
		_read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i],
				    perf_level.level, SST_PP_INFO_10_OFFSET,
				    SST_PP_TRL_CORES_BUCKET_0_WIDTH * i,
				    SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE)

	perf_level.max_buckets = TRL_MAX_BUCKETS;
	perf_level.max_trl_levels = TRL_MAX_LEVELS;

	_read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START,
			    SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START,
			    SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START,
			    SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level,
			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START,
			    SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_P0_FABRIC_START,
			    SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_P1_FABRIC_START,
			    SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
	_read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz,
			    perf_level.level, SST_PP_INFO_11_OFFSET,
			    SST_PP_CORE_RATIO_PM_FABRIC_START,
			    SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)

	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
		return -EFAULT;

	return 0;
}
1188
/**
 * isst_if_get_perf_level_fabric_info() - Handle ISST_IF_GET_PERF_LEVEL_FABRIC_INFO
 * @argp: User pointer to a struct isst_perf_level_fabric_info
 *
 * Report P0/P1/Pm fabric frequencies per fabric for one enabled
 * performance level. Requires PP feature revision >= 2, which defines
 * two fabrics: fabric 0's ratios live in SST_PP_INFO_11 (after the core
 * ratios) and fabric 1's in SST_PP_INFO_12.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * invalid selection, out-of-range/disabled level, or feature_rev < 2.
 */
static int isst_if_get_perf_level_fabric_info(void __user *argp)
{
	struct isst_perf_level_fabric_info perf_level_fabric;
	struct tpmi_per_power_domain_info *power_domain_info;
	int start = SST_PP_CORE_RATIO_P0_FABRIC_START;
	int width = SST_PP_CORE_RATIO_P0_FABRIC_WIDTH;
	int offset = SST_PP_INFO_11_OFFSET;
	int i;

	if (copy_from_user(&perf_level_fabric, argp, sizeof(perf_level_fabric)))
		return -EFAULT;

	power_domain_info = get_instance(perf_level_fabric.socket_id,
					 perf_level_fabric.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (perf_level_fabric.level > power_domain_info->max_level)
		return -EINVAL;

	if (power_domain_info->pp_header.feature_rev < 2)
		return -EINVAL;

	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level_fabric.level)))
		return -EINVAL;

	/* For revision 2, maximum number of fabrics is 2 */
	perf_level_fabric.max_fabrics = 2;

	/*
	 * First pass decodes fabric 0 from SST_PP_INFO_11; the register
	 * offset and start position are then rotated so the second pass
	 * decodes fabric 1 from SST_PP_INFO_12. "start" advances by one
	 * field width between the P0/P1/Pm reads within each pass.
	 */
	for (i = 0; i < perf_level_fabric.max_fabrics; i++) {
		_read_pp_level_info("p0_fabric_freq_mhz", perf_level_fabric.p0_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		start += width;

		_read_pp_level_info("p1_fabric_freq_mhz", perf_level_fabric.p1_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		start += width;

		_read_pp_level_info("pm_fabric_freq_mhz", perf_level_fabric.pm_fabric_freq_mhz[i],
				    perf_level_fabric.level, offset, start, width,
				    SST_MUL_FACTOR_FREQ)
		offset = SST_PP_INFO_12_OFFSET;
		start = SST_PP_CORE_RATIO_P0_FABRIC_1_START;
	}

	if (copy_to_user(argp, &perf_level_fabric, sizeof(perf_level_fabric)))
		return -EFAULT;

	return 0;
}
1241
1242 #define SST_PP_FUSED_CORE_COUNT_START 0
1243 #define SST_PP_FUSED_CORE_COUNT_WIDTH 8
1244
1245 #define SST_PP_RSLVD_CORE_COUNT_START 8
1246 #define SST_PP_RSLVD_CORE_COUNT_WIDTH 8
1247
1248 #define SST_PP_RSLVD_CORE_MASK_START 0
1249 #define SST_PP_RSLVD_CORE_MASK_WIDTH 64
1250
/**
 * isst_if_get_perf_level_mask() - Handle ISST_IF_GET_PERF_LEVEL_CPU_MASK
 * @argp: User pointer to a struct isst_perf_level_cpu_mask
 *
 * Return the resolved punit core mask for the requested performance
 * level, read from SST_PP_INFO_2. Only the punit CPU numbering is
 * supported: if the caller asked for a Linux CPU mapping
 * (punit_cpu_map == 0), -EOPNOTSUPP is returned.
 *
 * NOTE: "cpumask" is a static local; callers are serialized by
 * isst_tpmi_dev_lock taken in isst_if_def_ioctl().
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * invalid selection, -EOPNOTSUPP when a Linux CPU map was requested.
 */
static int isst_if_get_perf_level_mask(void __user *argp)
{
	static struct isst_perf_level_cpu_mask cpumask;
	struct tpmi_per_power_domain_info *power_domain_info;
	u64 mask;

	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
		return -EFAULT;

	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	_read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET,
			    SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH,
			    SST_MUL_FACTOR_NONE)

	cpumask.mask = mask;

	if (!cpumask.punit_cpu_map)
		return -EOPNOTSUPP;

	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
		return -EFAULT;

	return 0;
}
1278
1279 #define SST_BF_INFO_0_OFFSET 0
1280 #define SST_BF_INFO_1_OFFSET 8
1281
1282 #define SST_BF_P1_HIGH_START 13
1283 #define SST_BF_P1_HIGH_WIDTH 8
1284
1285 #define SST_BF_P1_LOW_START 21
1286 #define SST_BF_P1_LOW_WIDTH 8
1287
1288 #define SST_BF_T_PROHOT_START 38
1289 #define SST_BF_T_PROHOT_WIDTH 8
1290
1291 #define SST_BF_TDP_START 46
1292 #define SST_BF_TDP_WIDTH 15
1293
/**
 * isst_if_get_base_freq_info() - Handle ISST_IF_GET_BASE_FREQ_INFO ioctl
 * @argp: User pointer to a struct isst_base_freq_info
 *
 * Decode the SST-BF (base frequency) information register
 * (SST_BF_INFO_0) for the requested level: high/low base frequencies,
 * Tjmax and thermal design power (reported by hardware in 1/8th-watt
 * units and converted to watts here).
 *
 * NOTE: "base_freq" is a static local; callers are serialized by
 * isst_tpmi_dev_lock taken in isst_if_def_ioctl().
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * invalid selection or out-of-range level.
 */
static int isst_if_get_base_freq_info(void __user *argp)
{
	static struct isst_base_freq_info base_freq;
	struct tpmi_per_power_domain_info *power_domain_info;

	if (copy_from_user(&base_freq, argp, sizeof(base_freq)))
		return -EFAULT;

	power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (base_freq.level > power_domain_info->max_level)
		return -EINVAL;

	_read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH,
			    SST_MUL_FACTOR_FREQ)
	_read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH,
			    SST_MUL_FACTOR_NONE)
	_read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level,
			    SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH,
			    SST_MUL_FACTOR_NONE)
	base_freq.thermal_design_power_w /= 8; /*unit = 1/8th watt*/

	if (copy_to_user(argp, &base_freq, sizeof(base_freq)))
		return -EFAULT;

	return 0;
}
1328
1329 #define P1_HI_CORE_MASK_START 0
1330 #define P1_HI_CORE_MASK_WIDTH 64
1331
/**
 * isst_if_get_base_freq_mask() - Handle ISST_IF_GET_BASE_FREQ_CPU_MASK
 * @argp: User pointer to a struct isst_perf_level_cpu_mask
 *
 * Return the mask of punit cores that run at the SST-BF high base
 * frequency, read from SST_BF_INFO_1. Only the punit CPU numbering is
 * supported: if the caller asked for a Linux CPU mapping
 * (punit_cpu_map == 0), -EOPNOTSUPP is returned.
 *
 * NOTE: "cpumask" is a static local; callers are serialized by
 * isst_tpmi_dev_lock taken in isst_if_def_ioctl().
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * invalid selection, -EOPNOTSUPP when a Linux CPU map was requested.
 */
static int isst_if_get_base_freq_mask(void __user *argp)
{
	static struct isst_perf_level_cpu_mask cpumask;
	struct tpmi_per_power_domain_info *power_domain_info;
	u64 mask;

	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
		return -EFAULT;

	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	_read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET,
			    P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH,
			    SST_MUL_FACTOR_NONE)

	cpumask.mask = mask;

	if (!cpumask.punit_cpu_map)
		return -EOPNOTSUPP;

	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
		return -EFAULT;

	return 0;
}
1359
/**
 * isst_if_get_tpmi_instance_count() - Handle ISST_IF_COUNT_TPMI_INSTANCES
 * @argp: User pointer to a struct isst_tpmi_instance_count
 *
 * Report how many TPMI SST instances (power domains) exist for a
 * socket, together with a bitmask of the instances whose MMIO region
 * was successfully mapped. If no instance is valid, the count is
 * reported as zero.
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * out-of-range socket id.
 */
static int isst_if_get_tpmi_instance_count(void __user *argp)
{
	struct isst_tpmi_instance_count tpmi_inst;
	struct tpmi_sst_struct *sst_inst;
	int inst_idx;

	if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst)))
		return -EFAULT;

	if (tpmi_inst.socket_id >= topology_max_packages())
		return -EINVAL;

	sst_inst = isst_common.sst_inst[tpmi_inst.socket_id];
	tpmi_inst.count = isst_instance_count(sst_inst);
	tpmi_inst.valid_mask = 0;

	for (inst_idx = 0; inst_idx < tpmi_inst.count; inst_idx++) {
		struct tpmi_per_power_domain_info *pd_info;
		int domain_id;
		u8 part_id;

		/* Translate the flat instance index to (partition, domain) */
		domain_id = map_partition_power_domain_id(sst_inst, inst_idx, &part_id);
		if (domain_id < 0)
			continue;

		pd_info = &sst_inst->power_domain_info[part_id][domain_id];
		if (pd_info->sst_base)
			tpmi_inst.valid_mask |= BIT(inst_idx);
	}

	/* No mapped domain at all: report zero instances */
	if (!tpmi_inst.valid_mask)
		tpmi_inst.count = 0;

	return copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)) ? -EFAULT : 0;
}
1399
1400 #define SST_TF_INFO_0_OFFSET 0
1401 #define SST_TF_INFO_1_OFFSET 8
1402 #define SST_TF_INFO_2_OFFSET 16
1403 #define SST_TF_INFO_8_OFFSET 64
1404 #define SST_TF_INFO_8_BUCKETS 3
1405
1406 #define SST_TF_MAX_LP_CLIP_RATIOS TRL_MAX_LEVELS
1407
1408 #define SST_TF_FEATURE_REV_START 4
1409 #define SST_TF_FEATURE_REV_WIDTH 8
1410
1411 #define SST_TF_LP_CLIP_RATIO_0_START 16
1412 #define SST_TF_LP_CLIP_RATIO_0_WIDTH 8
1413
1414 #define SST_TF_RATIO_0_START 0
1415 #define SST_TF_RATIO_0_WIDTH 8
1416
1417 #define SST_TF_NUM_CORE_0_START 0
1418 #define SST_TF_NUM_CORE_0_WIDTH 8
1419
1420 #define SST_TF_NUM_MOD_0_START 0
1421 #define SST_TF_NUM_MOD_0_WIDTH 16
1422
/**
 * isst_if_get_turbo_freq_info() - Handle ISST_IF_GET_TURBO_FREQ_INFO ioctl
 * @argp: User pointer to a struct isst_turbo_freq_info
 *
 * Decode the SST-TF (turbo frequency) registers for a level: the
 * low-priority clip frequencies, the full TRL frequency matrix, and the
 * per-bucket core counts. On TF feature revision >= 2, module counts
 * from SST_TF_INFO_8 are preferred; if that register reports all zeros
 * (or on older revisions), the legacy per-bucket core counts from
 * SST_TF_INFO_1 are used instead.
 *
 * NOTE: "turbo_freq" is a static local; callers are serialized by
 * isst_tpmi_dev_lock taken in isst_if_def_ioctl().
 *
 * Return: 0 on success, -EFAULT on user copy failure, -EINVAL for an
 * invalid selection or out-of-range level.
 */
static int isst_if_get_turbo_freq_info(void __user *argp)
{
	static struct isst_turbo_freq_info turbo_freq;
	struct tpmi_per_power_domain_info *power_domain_info;
	u8 feature_rev;
	int i, j;

	if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq)))
		return -EFAULT;

	power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id);
	if (!power_domain_info)
		return -EINVAL;

	if (turbo_freq.level > power_domain_info->max_level)
		return -EINVAL;

	turbo_freq.max_buckets = TRL_MAX_BUCKETS;
	turbo_freq.max_trl_levels = TRL_MAX_LEVELS;
	turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS;

	/* Feature revision selects which core-count register layout applies */
	_read_tf_level_info("feature_rev", feature_rev, turbo_freq.level,
			    SST_TF_INFO_0_OFFSET, SST_TF_FEATURE_REV_START,
			    SST_TF_FEATURE_REV_WIDTH, SST_MUL_FACTOR_NONE);

	for (i = 0; i < turbo_freq.max_clip_freqs; ++i)
		_read_tf_level_info("lp_clip*", turbo_freq.lp_clip_freq_mhz[i],
				    turbo_freq.level, SST_TF_INFO_0_OFFSET,
				    SST_TF_LP_CLIP_RATIO_0_START +
				    (i * SST_TF_LP_CLIP_RATIO_0_WIDTH),
				    SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ)

	/* TRL matrix: one 64-bit register per level starting at SST_TF_INFO_2 */
	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
			_read_tf_level_info("cydn*_bucket_*_trl",
					    turbo_freq.trl_freq_mhz[i][j], turbo_freq.level,
					    SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH),
					    j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH,
					    SST_MUL_FACTOR_FREQ)
	}

	if (feature_rev >= 2) {
		bool has_tf_info_8 = false;

		/* Rev 2+: 16-bit module counts in SST_TF_INFO_8, 3 buckets */
		for (i = 0; i < SST_TF_INFO_8_BUCKETS; ++i) {
			_read_tf_level_info("bucket_*_mod_count", turbo_freq.bucket_core_counts[i],
					    turbo_freq.level, SST_TF_INFO_8_OFFSET,
					    SST_TF_NUM_MOD_0_WIDTH * i, SST_TF_NUM_MOD_0_WIDTH,
					    SST_MUL_FACTOR_NONE)

			if (turbo_freq.bucket_core_counts[i])
				has_tf_info_8 = true;
		}

		/* A non-zero INFO_8 entry means the new layout is populated */
		if (has_tf_info_8)
			goto done_core_count;
	}

	/* Legacy layout: 8-bit core counts per bucket in SST_TF_INFO_1 */
	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
		_read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i],
				    turbo_freq.level, SST_TF_INFO_1_OFFSET,
				    SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH,
				    SST_MUL_FACTOR_NONE)


done_core_count:

	if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq)))
		return -EFAULT;

	return 0;
}
1495
isst_if_def_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1496 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
1497 unsigned long arg)
1498 {
1499 void __user *argp = (void __user *)arg;
1500 long ret = -ENOTTY;
1501
1502 mutex_lock(&isst_tpmi_dev_lock);
1503 switch (cmd) {
1504 case ISST_IF_COUNT_TPMI_INSTANCES:
1505 ret = isst_if_get_tpmi_instance_count(argp);
1506 break;
1507 case ISST_IF_CORE_POWER_STATE:
1508 ret = isst_if_core_power_state(argp);
1509 break;
1510 case ISST_IF_CLOS_PARAM:
1511 ret = isst_if_clos_param(argp);
1512 break;
1513 case ISST_IF_CLOS_ASSOC:
1514 ret = isst_if_clos_assoc(argp);
1515 break;
1516 case ISST_IF_PERF_LEVELS:
1517 ret = isst_if_get_perf_level(argp);
1518 break;
1519 case ISST_IF_PERF_SET_LEVEL:
1520 ret = isst_if_set_perf_level(argp);
1521 break;
1522 case ISST_IF_PERF_SET_FEATURE:
1523 ret = isst_if_set_perf_feature(argp);
1524 break;
1525 case ISST_IF_GET_PERF_LEVEL_INFO:
1526 ret = isst_if_get_perf_level_info(argp);
1527 break;
1528 case ISST_IF_GET_PERF_LEVEL_FABRIC_INFO:
1529 ret = isst_if_get_perf_level_fabric_info(argp);
1530 break;
1531 case ISST_IF_GET_PERF_LEVEL_CPU_MASK:
1532 ret = isst_if_get_perf_level_mask(argp);
1533 break;
1534 case ISST_IF_GET_BASE_FREQ_INFO:
1535 ret = isst_if_get_base_freq_info(argp);
1536 break;
1537 case ISST_IF_GET_BASE_FREQ_CPU_MASK:
1538 ret = isst_if_get_base_freq_mask(argp);
1539 break;
1540 case ISST_IF_GET_TURBO_FREQ_INFO:
1541 ret = isst_if_get_turbo_freq_info(argp);
1542 break;
1543 default:
1544 break;
1545 }
1546 mutex_unlock(&isst_tpmi_dev_lock);
1547
1548 return ret;
1549 }
1550
1551 #define TPMI_SST_AUTO_SUSPEND_DELAY_MS 2000
1552
tpmi_sst_dev_add(struct auxiliary_device * auxdev)1553 int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
1554 {
1555 struct tpmi_per_power_domain_info *pd_info;
1556 bool read_blocked = 0, write_blocked = 0;
1557 struct oobmsm_plat_info *plat_info;
1558 struct device *dev = &auxdev->dev;
1559 struct tpmi_sst_struct *tpmi_sst;
1560 u8 i, num_resources, io_die_cnt;
1561 int ret, pkg = 0, inst = 0;
1562 bool first_enum = false;
1563 u16 cdie_mask;
1564 u8 partition;
1565
1566 ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked);
1567 if (ret)
1568 dev_info(dev, "Can't read feature status: ignoring read/write blocked status\n");
1569
1570 if (read_blocked) {
1571 dev_info(dev, "Firmware has blocked reads, exiting\n");
1572 return -ENODEV;
1573 }
1574
1575 plat_info = tpmi_get_platform_data(auxdev);
1576 if (!plat_info) {
1577 dev_err(dev, "No platform info\n");
1578 return -EINVAL;
1579 }
1580
1581 pkg = plat_info->package_id;
1582 if (pkg >= topology_max_packages()) {
1583 dev_err(dev, "Invalid package id :%x\n", pkg);
1584 return -EINVAL;
1585 }
1586
1587 partition = plat_info->partition;
1588 if (partition >= SST_MAX_PARTITIONS) {
1589 dev_err(&auxdev->dev, "Invalid partition :%x\n", partition);
1590 return -EINVAL;
1591 }
1592
1593 num_resources = tpmi_get_resource_count(auxdev);
1594
1595 if (!num_resources)
1596 return -EINVAL;
1597
1598 mutex_lock(&isst_tpmi_dev_lock);
1599
1600 if (isst_common.sst_inst[pkg]) {
1601 tpmi_sst = isst_common.sst_inst[pkg];
1602 } else {
1603 /*
1604 * tpmi_sst instance is for a package. So needs to be
1605 * allocated only once for both partitions. We can't use
1606 * devm_* allocation here as each partition is a
1607 * different device, which can be unbound.
1608 */
1609 tpmi_sst = kzalloc_obj(*tpmi_sst);
1610 if (!tpmi_sst) {
1611 ret = -ENOMEM;
1612 goto unlock_exit;
1613 }
1614 first_enum = true;
1615 }
1616
1617 ret = 0;
1618
1619 pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL);
1620 if (!pd_info) {
1621 ret = -ENOMEM;
1622 goto unlock_free;
1623 }
1624
1625 /* Get the IO die count, if cdie_mask is present */
1626 if (plat_info->cdie_mask) {
1627 u8 cdie_range;
1628
1629 cdie_mask = plat_info->cdie_mask;
1630 cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1;
1631 io_die_cnt = num_resources - cdie_range;
1632 } else {
1633 /*
1634 * This is a synthetic mask, careful when assuming that
1635 * they are compute dies only.
1636 */
1637 cdie_mask = (1 << num_resources) - 1;
1638 io_die_cnt = 0;
1639 }
1640
1641 for (i = 0; i < num_resources; ++i) {
1642 struct resource *res;
1643
1644 res = tpmi_get_resource_at_index(auxdev, i);
1645 if (!res) {
1646 pd_info[i].sst_base = NULL;
1647 continue;
1648 }
1649
1650 pd_info[i].package_id = pkg;
1651 pd_info[i].power_domain_id = i;
1652 pd_info[i].auxdev = auxdev;
1653 pd_info[i].write_blocked = write_blocked;
1654 pd_info[i].sst_base = devm_ioremap_resource(dev, res);
1655 if (IS_ERR(pd_info[i].sst_base)) {
1656 ret = PTR_ERR(pd_info[i].sst_base);
1657 goto unlock_free;
1658 }
1659
1660 if (sst_main(auxdev, &pd_info[i])) {
1661 /*
1662 * This entry is not valid, hardware can partially
1663 * populate dies. In this case MMIO will have 0xFFs.
1664 * Also possible some pre-production hardware has
1665 * invalid data. But don't fail and continue to use
1666 * other dies with valid data.
1667 */
1668 devm_iounmap(dev, pd_info[i].sst_base);
1669 pd_info[i].sst_base = NULL;
1670 continue;
1671 }
1672
1673 ++inst;
1674 }
1675
1676 if (!inst) {
1677 ret = -ENODEV;
1678 goto unlock_free;
1679 }
1680
1681 tpmi_sst->package_id = pkg;
1682
1683 tpmi_sst->power_domain_info[partition] = pd_info;
1684 tpmi_sst->number_of_power_domains[partition] = num_resources;
1685 tpmi_sst->cdie_mask[partition] = cdie_mask;
1686 tpmi_sst->io_dies[partition] = io_die_cnt;
1687 tpmi_sst->partition_mask |= BIT(partition);
1688 tpmi_sst->partition_mask_current |= BIT(partition);
1689
1690 auxiliary_set_drvdata(auxdev, tpmi_sst);
1691
1692 if (isst_common.max_index < pkg)
1693 isst_common.max_index = pkg;
1694 isst_common.sst_inst[pkg] = tpmi_sst;
1695
1696 unlock_free:
1697 if (ret && first_enum)
1698 kfree(tpmi_sst);
1699 unlock_exit:
1700 mutex_unlock(&isst_tpmi_dev_lock);
1701
1702 return ret;
1703 }
1704 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, "INTEL_TPMI_SST");
1705
tpmi_sst_dev_remove(struct auxiliary_device * auxdev)1706 void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
1707 {
1708 struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1709 struct oobmsm_plat_info *plat_info;
1710
1711 plat_info = tpmi_get_platform_data(auxdev);
1712 if (!plat_info)
1713 return;
1714
1715 mutex_lock(&isst_tpmi_dev_lock);
1716 tpmi_sst->power_domain_info[plat_info->partition] = NULL;
1717 tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition);
1718 /* Free the package instance when the all partitions are removed */
1719 if (!tpmi_sst->partition_mask_current) {
1720 isst_common.sst_inst[tpmi_sst->package_id] = NULL;
1721 kfree(tpmi_sst);
1722 }
1723 mutex_unlock(&isst_tpmi_dev_lock);
1724 }
1725 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, "INTEL_TPMI_SST");
1726
1727 #define SST_PP_CAP_CP_ENABLE BIT(0)
1728 #define SST_PP_CAP_PP_ENABLE BIT(1)
1729
/**
 * tpmi_sst_dev_suspend() - Save SST MMIO state before suspend
 * @auxdev: Auxiliary device instance for this partition
 *
 * For every valid power domain of this partition, save the registers
 * that firmware may lose across suspend: when SST-CP is enabled, the CP
 * control register plus the CLOS configuration and CLOS association
 * blocks; when SST-PP is enabled, the PP control register. The saved
 * values are restored by tpmi_sst_dev_resume().
 */
void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
{
	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
	struct tpmi_per_power_domain_info *power_domain_info, *pd_info;
	struct oobmsm_plat_info *plat_info;
	void __iomem *cp_base;
	int num_resources, i;

	plat_info = tpmi_get_platform_data(auxdev);
	if (!plat_info)
		return;

	power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
	num_resources = tpmi_sst->number_of_power_domains[plat_info->partition];

	for (i = 0; i < num_resources; i++) {
		pd_info = &power_domain_info[i];
		/* Skip domains whose MMIO was never mapped (invalid dies) */
		if (!pd_info || !pd_info->sst_base)
			continue;

		/* No SST-CP on this domain: only the PP state needs saving */
		if (!(pd_info->sst_header.cap_mask & SST_PP_CAP_CP_ENABLE))
			goto process_pp_suspend;

		cp_base = pd_info->sst_base + pd_info->sst_header.cp_offset;
		pd_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);
		memcpy_fromio(pd_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET,
			      sizeof(pd_info->saved_clos_configs));
		memcpy_fromio(pd_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET,
			      sizeof(pd_info->saved_clos_assocs));

process_pp_suspend:
		if (!(pd_info->sst_header.cap_mask & SST_PP_CAP_PP_ENABLE))
			continue;

		pd_info->saved_pp_control = readq(pd_info->sst_base +
						  pd_info->sst_header.pp_offset +
						  SST_PP_CONTROL_OFFSET);
	}
}
1769 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, "INTEL_TPMI_SST");
1770
tpmi_sst_dev_resume(struct auxiliary_device * auxdev)1771 void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
1772 {
1773 struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1774 struct tpmi_per_power_domain_info *power_domain_info, *pd_info;
1775 struct oobmsm_plat_info *plat_info;
1776 void __iomem *cp_base;
1777 int num_resources, i;
1778
1779 plat_info = tpmi_get_platform_data(auxdev);
1780 if (!plat_info)
1781 return;
1782
1783 power_domain_info = tpmi_sst->power_domain_info[plat_info->partition];
1784 num_resources = tpmi_sst->number_of_power_domains[plat_info->partition];
1785
1786 for (i = 0; i < num_resources; i++) {
1787 pd_info = &power_domain_info[i];
1788 if (!pd_info || !pd_info->sst_base)
1789 continue;
1790
1791 if (!(pd_info->sst_header.cap_mask & SST_PP_CAP_CP_ENABLE))
1792 goto process_pp_resume;
1793
1794 cp_base = pd_info->sst_base + pd_info->sst_header.cp_offset;
1795 writeq(pd_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);
1796 memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, pd_info->saved_clos_configs,
1797 sizeof(pd_info->saved_clos_configs));
1798 memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, pd_info->saved_clos_assocs,
1799 sizeof(pd_info->saved_clos_assocs));
1800
1801 process_pp_resume:
1802 if (!(pd_info->sst_header.cap_mask & SST_PP_CAP_PP_ENABLE))
1803 continue;
1804
1805 writeq(pd_info->saved_pp_control, power_domain_info->sst_base +
1806 pd_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET);
1807 }
1808 }
1809 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, "INTEL_TPMI_SST");
1810
1811 #define ISST_TPMI_API_VERSION 0x03
1812
tpmi_sst_init(void)1813 int tpmi_sst_init(void)
1814 {
1815 struct isst_if_cmd_cb cb;
1816 int ret = 0;
1817
1818 mutex_lock(&isst_tpmi_dev_lock);
1819
1820 if (isst_core_usage_count) {
1821 ++isst_core_usage_count;
1822 goto init_done;
1823 }
1824
1825 isst_common.sst_inst = kzalloc_objs(*isst_common.sst_inst,
1826 topology_max_packages());
1827 if (!isst_common.sst_inst) {
1828 ret = -ENOMEM;
1829 goto init_done;
1830 }
1831
1832 memset(&cb, 0, sizeof(cb));
1833 cb.cmd_size = sizeof(struct isst_if_io_reg);
1834 cb.offset = offsetof(struct isst_if_io_regs, io_reg);
1835 cb.cmd_callback = NULL;
1836 cb.api_version = ISST_TPMI_API_VERSION;
1837 cb.def_ioctl = isst_if_def_ioctl;
1838 cb.owner = THIS_MODULE;
1839 ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb);
1840 if (ret)
1841 kfree(isst_common.sst_inst);
1842 else
1843 ++isst_core_usage_count;
1844 init_done:
1845 mutex_unlock(&isst_tpmi_dev_lock);
1846 return ret;
1847 }
1848 EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, "INTEL_TPMI_SST");
1849
tpmi_sst_exit(void)1850 void tpmi_sst_exit(void)
1851 {
1852 mutex_lock(&isst_tpmi_dev_lock);
1853 if (isst_core_usage_count)
1854 --isst_core_usage_count;
1855
1856 if (!isst_core_usage_count) {
1857 isst_if_cdev_unregister(ISST_IF_DEV_TPMI);
1858 kfree(isst_common.sst_inst);
1859 }
1860 mutex_unlock(&isst_tpmi_dev_lock);
1861 }
1862 EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, "INTEL_TPMI_SST");
1863
1864 MODULE_IMPORT_NS("INTEL_TPMI");
1865 MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");
1866
1867 MODULE_DESCRIPTION("ISST TPMI interface module");
1868 MODULE_LICENSE("GPL");
1869