// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select Interface: Common functions
 * Copyright (c) 2019, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */
9
10 #include <linux/cpufeature.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/fs.h>
13 #include <linux/hashtable.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/sched/signal.h>
18 #include <linux/slab.h>
19 #include <linux/uaccess.h>
20 #include <uapi/linux/isst_if.h>
21
22 #include <asm/cpu_device_id.h>
23 #include <asm/intel-family.h>
24 #include <asm/msr.h>
25
26 #include "isst_if_common.h"
27
28 #define MSR_THREAD_ID_INFO 0x53
29 #define MSR_PM_LOGICAL_ID 0x54
30 #define MSR_CPU_BUS_NUMBER 0x128
31
32 static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];
33
/*
 * Whitelist of MSRs which user space may access through the
 * ISST_IF_MSR_COMMAND ioctl; checked in isst_if_msr_cmd_req().
 */
static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
	MSR_PM_LOGICAL_ID,
};
41
/* An inclusive range of valid sub-commands for one mailbox command */
struct isst_valid_cmd_ranges {
	u16 cmd;		/* Mailbox command id */
	u16 sub_cmd_beg;	/* First valid sub-command */
	u16 sub_cmd_end;	/* Last valid sub-command (inclusive) */
};
47
/* One command/sub-command/parameter tuple identifying a "set" (write) request */
struct isst_cmd_set_req_type {
	u16 cmd;	/* Mailbox command id */
	u16 sub_cmd;	/* Mailbox sub-command id */
	u16 param;	/* Mailbox parameter value */
};
53
/*
 * Whitelist of mailbox commands and their allowed sub-command ranges.
 * Consulted by isst_if_mbox_cmd_invalid(); commands not listed here
 * are rejected.
 */
static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0xD0, 0x00, 0x03},
	{0x7F, 0x00, 0x0C},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
	{0x94, 0x03, 0x03},
	{0x95, 0x03, 0x03},
};
62
/*
 * Mailbox command tuples which are "set" (write) requests; matched by
 * isst_if_mbox_cmd_set_req() so callers can distinguish writes from
 * reads (presumably to gate privileged operations — defined by callers
 * outside this file).
 */
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0xD0, 0x01, 0x08},
	{0xD0, 0x02, 0x08},
	{0xD0, 0x03, 0x08},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
	{0x95, 0x03, 0x03},
};
72
/* One stored command, kept in isst_hash for replay on resume */
struct isst_cmd {
	struct hlist_node hnode;	/* Hash table linkage */
	u64 data;			/* Mailbox request data or MSR value */
	u32 cmd;			/* (command << 16) | sub-command, or MSR id for MSR type */
	int cpu;			/* Target logical CPU */
	int mbox_cmd_type;		/* Non-zero: mailbox command; zero: MSR write */
	u32 param;			/* Mailbox parameter */
};
81
/* Set at init when the CPU model advertises HPM support (see isst_cpu_ids) */
static bool isst_hpm_support;

/* Commands stored for replay on resume, keyed by the packed cmd value */
static DECLARE_HASHTABLE(isst_hash, 8);
/* Serializes additions/updates to isst_hash */
static DEFINE_MUTEX(isst_hash_lock);
86
/*
 * Allocate a new isst_cmd entry, fill it from the arguments and insert
 * it into the replay hash table, keyed by the packed command value.
 * Returns 0 on success, -ENOMEM when allocation fails.
 */
static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
			      u64 data)
{
	struct isst_cmd *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cmd = cmd;
	entry->cpu = cpu;
	entry->mbox_cmd_type = mbox_cmd_type;
	entry->param = param;
	entry->data = data;
	hash_add(isst_hash, &entry->hnode, entry->cmd);

	return 0;
}
106
/* Free every stored command and empty the replay hash table. */
static void isst_delete_hash(void)
{
	struct isst_cmd *sst_cmd;
	struct hlist_node *tmp;
	int i;

	/* _safe variant is required: entries are removed while iterating */
	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
		hash_del(&sst_cmd->hnode);
		kfree(sst_cmd);
	}
}
118
119 /**
120 * isst_store_cmd() - Store command to a hash table
121 * @cmd: Mailbox command.
122 * @sub_cmd: Mailbox sub-command or MSR id.
123 * @cpu: Target CPU for the command
124 * @mbox_cmd_type: Mailbox or MSR command.
125 * @param: Mailbox parameter.
126 * @data: Mailbox request data or MSR data.
127 *
128 * Stores the command to a hash table if there is no such command already
129 * stored. If already stored update the latest parameter and data for the
130 * command.
131 *
132 * Return: Return result of store to hash table, 0 for success, others for
133 * failure.
134 */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
		   u32 param, u64 data)
{
	struct isst_cmd *sst_cmd;
	int full_cmd, ret = 0;

	/* Pack command into the upper and sub-command into the lower 16 bits */
	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));

	mutex_lock(&isst_hash_lock);

	/* An already-stored match just gets its parameter/data refreshed */
	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
		if (sst_cmd->cmd != full_cmd || sst_cmd->cpu != cpu ||
		    sst_cmd->mbox_cmd_type != mbox_cmd_type)
			continue;

		sst_cmd->param = param;
		sst_cmd->data = data;
		goto unlock;
	}

	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);

unlock:
	mutex_unlock(&isst_hash_lock);

	return ret;
}
159 EXPORT_SYMBOL_GPL(isst_store_cmd);
160
/* Rebuild a mailbox command from a stored entry and re-issue it via @cb. */
static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
				     struct isst_cmd *sst_cmd)
{
	struct isst_if_mbox_cmd mbox_cmd;
	int wr_only;

	/* Unpack command (upper 16 bits) and sub-command (lower 16 bits) */
	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
	mbox_cmd.parameter = sst_cmd->param;
	mbox_cmd.req_data = sst_cmd->data;
	mbox_cmd.logical_cpu = sst_cmd->cpu;
	/*
	 * Third argument is the "resume" flag (same convention as
	 * isst_if_msr_cmd_req()), presumably so the callback does not
	 * re-store the command while replaying it.
	 */
	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
}
174
175 /**
176 * isst_resume_common() - Process Resume request
177 *
178 * On resume replay all mailbox commands and MSRs.
179 *
180 * Return: None.
181 */
void isst_resume_common(void)
{
	struct isst_cmd *sst_cmd;
	int i;

	/* Walk every stored command and replay it */
	hash_for_each(isst_hash, i, sst_cmd, hnode) {
		struct isst_if_cmd_cb *cb;

		if (sst_cmd->mbox_cmd_type) {
			/* Mailbox command: replay via the registered callback */
			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
			if (cb->registered)
				isst_mbox_resume_command(cb, sst_cmd);
		} else {
			/* MSR entry: sst_cmd->cmd holds the MSR id (see isst_if_msr_cmd_req) */
			wrmsrq_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
					   sst_cmd->data);
		}
	}
}
200 EXPORT_SYMBOL_GPL(isst_resume_common);
201
202 /**
203 * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
204 * @cmd: Pointer to the command structure to verify.
205 *
206 * Invalid command to PUNIT to may result in instability of the platform.
207 * This function has a whitelist of commands, which are allowed.
208 *
209 * Return: Return true if the command is invalid, else false.
210 */
isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd * cmd)211 bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
212 {
213 int i;
214
215 if (cmd->logical_cpu >= nr_cpu_ids)
216 return true;
217
218 for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
219 if (cmd->command == isst_valid_cmds[i].cmd &&
220 (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
221 cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
222 return false;
223 }
224 }
225
226 return true;
227 }
228 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
229
230 /**
231 * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
232 * @cmd: Pointer to the command structure to verify.
233 *
234 * Check if the given mail box level is set request and not a get request.
235 *
236 * Return: Return true if the command is set_req, else false.
237 */
isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd * cmd)238 bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
239 {
240 int i;
241
242 for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
243 if (cmd->command == isst_cmd_set_reqs[i].cmd &&
244 cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
245 cmd->parameter == isst_cmd_set_reqs[i].param) {
246 return true;
247 }
248 }
249
250 return false;
251 }
252 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
253
254 static int isst_if_api_version;
255
/*
 * Handle ISST_IF_GET_PLATFORM_INFO: report interface versions, the
 * per-ioctl command limit and which device callbacks are registered.
 * Returns 0 on success, -EFAULT when the copy to user space fails.
 */
static int isst_if_get_platform_info(void __user *argp)
{
	struct isst_if_platform_info info = {
		.api_version = isst_if_api_version,
		.driver_version = ISST_IF_DRIVER_VERSION,
		.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT,
		.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered,
		.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered,
	};

	return copy_to_user(argp, &info, sizeof(info)) ? -EFAULT : 0;
}
271
272 #define ISST_MAX_BUS_NUMBER 2
273
/* Per-CPU cached bus/PCI/punit information, indexed by logical CPU */
struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[ISST_MAX_BUS_NUMBER];		/* From MSR_CPU_BUS_NUMBER, or -1 when unreadable */
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];	/* Cached matches found at CPU-online time */
	int punit_cpu_id;				/* Punit CPU id from MSR, or -1 when unreadable */
	int numa_node;					/* cpu_to_node() of this CPU */
};

/* Per-package fallback PCI device cache, indexed by logical package id */
struct isst_if_pkg_info {
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
};
285
286 static struct isst_if_cpu_info *isst_cpu_info;
287 static struct isst_if_pkg_info *isst_pkg_info;
288
/*
 * Scan all PCI devices for one on the bus number cached for @cpu/@bus_no
 * at the given @dev/@fn, preferring a device on the same NUMA node as the
 * CPU.  A NUMA-matched device is also cached per package for later
 * fallback.  Returns NULL when inputs are out of range or no suitable
 * device is found.
 */
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *matched_pci_dev = NULL;
	struct pci_dev *pci_dev = NULL;
	struct pci_dev *_pci_dev = NULL;
	int no_matches = 0, pkg_id;
	int bus_number;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pkg_id = topology_logical_package_id(cpu);
	if (pkg_id >= topology_max_packages())
		return NULL;

	/* Bus number was cached from MSR_CPU_BUS_NUMBER; -1 means unavailable */
	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
	if (bus_number < 0)
		return NULL;

	for_each_pci_dev(_pci_dev) {
		int node;

		if (_pci_dev->bus->number != bus_number ||
		    _pci_dev->devfn != PCI_DEVFN(dev, fn))
			continue;

		/* Remember the first bus/devfn match as a potential fallback */
		++no_matches;
		if (!matched_pci_dev)
			matched_pci_dev = _pci_dev;

		node = dev_to_node(&_pci_dev->dev);
		if (node == NUMA_NO_NODE) {
			pr_info_once("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
				     cpu, bus_no, dev, fn);
			continue;
		}

		/* Prefer the device on the same NUMA node; cache it per package */
		if (node == isst_cpu_info[cpu].numa_node) {
			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;

			pci_dev = _pci_dev;
			break;
		}
	}

	/*
	 * If there is no numa matched pci_dev, then there can be following cases:
	 * 1. CONFIG_NUMA is not defined: In this case if there is only single device
	 *    match, then we don't need numa information. Simply return last match.
	 *    Othewise return NULL.
	 * 2. NUMA information is not exposed via _SEG method. In this case it is similar
	 *    to case 1.
	 * 3. Numa information doesn't match with CPU numa node and more than one match
	 *    return NULL.
	 */
	if (!pci_dev && no_matches == 1)
		pci_dev = matched_pci_dev;

	/* Return pci_dev pointer for any matched CPU in the package */
	if (!pci_dev)
		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

	return pci_dev;
}
354
355 /**
356 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
357 * @cpu: Logical CPU number.
358 * @bus_no: The bus number assigned by the hardware.
359 * @dev: The device number assigned by the hardware.
360 * @fn: The function number assigned by the hardware.
361 *
362 * Using cached bus information, find out the PCI device for a bus number,
363 * device and function.
364 *
365 * Return: Return pci_dev pointer or NULL.
366 */
isst_if_get_pci_dev(int cpu,int bus_no,int dev,int fn)367 struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
368 {
369 struct pci_dev *pci_dev;
370
371 if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
372 cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
373 return NULL;
374
375 pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
376
377 if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
378 return pci_dev;
379
380 return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
381 }
382 EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
383
/*
 * CPU hotplug "online" callback: cache the NUMA node, PUNIT bus numbers,
 * matching PCI devices and the punit CPU id for @cpu.
 */
static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

	ret = rdmsrq_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on MSR mailbox only I/F */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
		/* Bus 0 in bits 7:0, bus 1 in bits 15:8 of MSR_CPU_BUS_NUMBER */
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
	}

	/* On HPM parts prefer MSR_PM_LOGICAL_ID; fall back to MSR_THREAD_ID_INFO */
	if (isst_hpm_support) {

		ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
		if (!ret)
			goto set_punit_id;
	}

	ret = rdmsrq_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		/* -1 marks the punit CPU id as unknown */
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}

set_punit_id:
	isst_cpu_info[cpu].punit_cpu_id = data;

	return 0;
}
421
422 static int isst_if_online_id;
423
isst_if_cpu_info_init(void)424 static int isst_if_cpu_info_init(void)
425 {
426 int ret;
427
428 isst_cpu_info = kcalloc(num_possible_cpus(),
429 sizeof(*isst_cpu_info),
430 GFP_KERNEL);
431 if (!isst_cpu_info)
432 return -ENOMEM;
433
434 isst_pkg_info = kcalloc(topology_max_packages(),
435 sizeof(*isst_pkg_info),
436 GFP_KERNEL);
437 if (!isst_pkg_info) {
438 kfree(isst_cpu_info);
439 return -ENOMEM;
440 }
441
442 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
443 "platform/x86/isst-if:online",
444 isst_if_cpu_online, NULL);
445 if (ret < 0) {
446 kfree(isst_pkg_info);
447 kfree(isst_cpu_info);
448 return ret;
449 }
450
451 isst_if_online_id = ret;
452
453 return 0;
454 }
455
isst_if_cpu_info_exit(void)456 static void isst_if_cpu_info_exit(void)
457 {
458 cpuhp_remove_state(isst_if_online_id);
459 kfree(isst_pkg_info);
460 kfree(isst_cpu_info);
461 };
462
/*
 * Handle one ISST_IF_GET_PHY_ID record: translate a logical CPU number
 * into the cached punit (physical) CPU id.  The result is written back
 * into the record, so the command is not write-only.
 */
static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *map = (struct isst_if_cpu_map *)cmd_ptr;

	if (map->logical_cpu >= nr_cpu_ids ||
	    map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	map->physical_cpu = isst_cpu_info[map->logical_cpu].punit_cpu_id;
	*write_only = 0;

	return 0;
}
477
match_punit_msr_white_list(int msr)478 static bool match_punit_msr_white_list(int msr)
479 {
480 int i;
481
482 for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
483 if (punit_msr_white_list[i] == msr)
484 return true;
485 }
486
487 return false;
488 }
489
/*
 * Handle one ISST_IF_MSR_COMMAND record: read or write a whitelisted MSR
 * on the requested logical CPU.  Writes require CAP_SYS_ADMIN and are
 * stored for replay on resume (unless this call is itself the resume
 * replay, indicated by @resume).
 */
static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		/* Write path: privileged */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrq_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		/* Store for replay; cmd 0 + mbox_cmd_type 0 marks an MSR entry */
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		/* Read path: result is copied back to the record */
		ret = rdmsrq_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			msr_cmd->data = data;
			*write_only = 0;
		}
	}


	return ret;
}
529
/*
 * Execute a batch of commands from user space.  The buffer at @argp
 * begins with a u32 command count; cb->offset locates the first of
 * cmd_count fixed-size records, each passed to cb->cmd_callback.
 * Results are copied back to user space unless the callback marked the
 * record write-only.
 *
 * Returns the number of records processed when at least one completed
 * (even if a later record failed), otherwise the error code from the
 * first failure.
 */
static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	/* Single bounce buffer, reused for every record */
	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to start of the command after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		/* Allow the (potentially long) batch to be interrupted */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		/* Copy the (possibly updated) record back unless write-only */
		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

	/* Partial progress is reported as a count, not an error */
	return i ? i : ret;
}
581
isst_if_def_ioctl(struct file * file,unsigned int cmd,unsigned long arg)582 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
583 unsigned long arg)
584 {
585 void __user *argp = (void __user *)arg;
586 struct isst_if_cmd_cb cmd_cb;
587 struct isst_if_cmd_cb *cb;
588 long ret = -ENOTTY;
589 int i;
590
591 switch (cmd) {
592 case ISST_IF_GET_PLATFORM_INFO:
593 ret = isst_if_get_platform_info(argp);
594 break;
595 case ISST_IF_GET_PHY_ID:
596 cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
597 cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
598 cmd_cb.cmd_callback = isst_if_proc_phyid_req;
599 ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
600 break;
601 case ISST_IF_IO_CMD:
602 cb = &punit_callbacks[ISST_IF_DEV_MMIO];
603 if (cb->registered)
604 ret = isst_if_exec_multi_cmd(argp, cb);
605 break;
606 case ISST_IF_MBOX_COMMAND:
607 cb = &punit_callbacks[ISST_IF_DEV_MBOX];
608 if (cb->registered)
609 ret = isst_if_exec_multi_cmd(argp, cb);
610 break;
611 case ISST_IF_MSR_COMMAND:
612 cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
613 cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
614 cmd_cb.cmd_callback = isst_if_msr_cmd_req;
615 ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
616 break;
617 default:
618 for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
619 struct isst_if_cmd_cb *cb = &punit_callbacks[i];
620 int ret;
621
622 if (cb->def_ioctl) {
623 ret = cb->def_ioctl(file, cmd, arg);
624 if (!ret)
625 return ret;
626 }
627 }
628 break;
629 }
630
631 return ret;
632 }
633
634 /* Lock to prevent module registration when already opened by user space */
635 static DEFINE_MUTEX(punit_misc_dev_open_lock);
636 static int misc_device_open;
637
/*
 * Open handler: take a module reference on every registered callback
 * owner so their modules cannot unload while the device is open, and
 * count the open so isst_if_cdev_register() can refuse new callbacks.
 */
static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail open, if a module is going away */
	mutex_lock(&punit_misc_dev_open_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		/* Unwind: drop the references taken before the failure at i */
		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return ret;
}
669
/*
 * Release handler: drop the module references taken in isst_if_open()
 * and decrement the open count.
 * NOTE(review): "relase" is a historical misspelling of "release"; the
 * name is kept because isst_if_char_driver_ops references it.
 */
static int isst_if_relase(struct inode *inode, struct file *f)
{
	int i;

	mutex_lock(&punit_misc_dev_open_lock);
	misc_device_open--;
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered)
			module_put(cb->owner);
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}
686
/* File operations for the "isst_interface" misc device */
static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_relase,
};
692
/* The /dev/isst_interface misc device, registered in isst_misc_reg() */
static struct miscdevice isst_if_char_driver = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "isst_interface",
	.fops = &isst_if_char_driver_ops,
};
698
isst_misc_reg(void)699 static int isst_misc_reg(void)
700 {
701 int ret;
702
703 ret = isst_if_cpu_info_init();
704 if (ret)
705 return ret;
706
707 ret = misc_register(&isst_if_char_driver);
708 if (ret)
709 isst_if_cpu_info_exit();
710
711 return ret;
712 }
713
/* Unregister the misc device and free the cached CPU/package info. */
static void isst_misc_unreg(void)
{
	misc_deregister(&isst_if_char_driver);
	isst_if_cpu_info_exit();
}
719
720 /**
721 * isst_if_cdev_register() - Register callback for IOCTL
722 * @device_type: The device type this callback handling.
723 * @cb: Callback structure.
724 *
725 * This function registers a callback to device type. On very first call
726 * it will register a misc device, which is used for user kernel interface.
727 * Other calls simply increment ref count. Registry will fail, if the user
728 * already opened misc device for operation. Also if the misc device
729 * creation failed, then it will not try again and all callers will get
730 * failure code.
731 *
732 * Return: Return the return value from the misc creation device or -EINVAL
733 * for unsupported device type.
734 */
isst_if_cdev_register(int device_type,struct isst_if_cmd_cb * cb)735 int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
736 {
737 if (device_type >= ISST_IF_DEV_MAX)
738 return -EINVAL;
739
740 if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support)
741 return -ENODEV;
742
743 mutex_lock(&punit_misc_dev_open_lock);
744 /* Device is already open, we don't want to add new callbacks */
745 if (misc_device_open) {
746 mutex_unlock(&punit_misc_dev_open_lock);
747 return -EAGAIN;
748 }
749 if (!cb->api_version)
750 cb->api_version = ISST_IF_API_VERSION;
751 if (cb->api_version > isst_if_api_version)
752 isst_if_api_version = cb->api_version;
753 memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
754 punit_callbacks[device_type].registered = 1;
755 mutex_unlock(&punit_misc_dev_open_lock);
756
757 return 0;
758 }
759 EXPORT_SYMBOL_GPL(isst_if_cdev_register);
760
761 /**
762 * isst_if_cdev_unregister() - Unregister callback for IOCTL
763 * @device_type: The device type to unregister.
764 *
765 * This function unregisters the previously registered callback. If this
766 * is the last callback unregistering, then misc device is removed.
767 *
768 * Return: None.
769 */
void isst_if_cdev_unregister(int device_type)
{
	mutex_lock(&punit_misc_dev_open_lock);
	punit_callbacks[device_type].def_ioctl = NULL;
	punit_callbacks[device_type].registered = 0;
	/* Stored replay commands belong to the mailbox provider */
	if (device_type == ISST_IF_DEV_MBOX)
		isst_delete_hash();
	mutex_unlock(&punit_misc_dev_open_lock);
}
779 EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
780
781 #define SST_HPM_SUPPORTED 0x01
782 #define SST_MBOX_SUPPORTED 0x02
783
/*
 * Supported CPU models.  driver_data is exactly one of SST_HPM_SUPPORTED,
 * SST_MBOX_SUPPORTED or 0 (isst_if_common_init() compares with ==, so the
 * values are not combined as flags).
 */
static const struct x86_cpu_id isst_cpu_ids[] = {
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	0),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		0),
	X86_MATCH_VFM(INTEL_PANTHERCOVE_X,	SST_HPM_SUPPORTED),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	0),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		SST_MBOX_SUPPORTED),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids);
798 MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids);
799
/*
 * Module init: match the CPU model, record HPM capability, verify the
 * OS mailbox MSRs exist on mailbox-only parts, then register the misc
 * device and CPU hotplug handling.
 */
static int __init isst_if_common_init(void)
{
	const struct x86_cpu_id *id;

	id = x86_match_cpu(isst_cpu_ids);
	if (!id)
		return -ENODEV;

	if (id->driver_data == SST_HPM_SUPPORTED) {
		isst_hpm_support = true;
	} else if (id->driver_data == SST_MBOX_SUPPORTED) {
		u64 data;

		/* Can fail only on some Skylake-X generations */
		if (rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
		    rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data))
			return -ENODEV;
	}

	return isst_misc_reg();
}
module_init(isst_if_common_init)
822
/* Module exit: unregister the misc device and free all caches. */
static void __exit isst_if_common_exit(void)
{
	isst_misc_unreg();
}
module_exit(isst_if_common_exit)
828
829 MODULE_DESCRIPTION("ISST common interface module");
830 MODULE_LICENSE("GPL v2");
831