// SPDX-License-Identifier: GPL-2.0
/*
 * Intel On Demand (Software Defined Silicon) driver
 *
 * Copyright (c) 2022, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: "David E. Box" <david.e.box@linux.intel.com>
 */

#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/intel_vsec.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define ACCESS_TYPE_BARID               2
#define ACCESS_TYPE_LOCAL               3

#define SDSI_MIN_SIZE_DWORDS            276
#define SDSI_SIZE_MAILBOX               1024
#define SDSI_SIZE_REGS                  80
#define SDSI_SIZE_CMD                   sizeof(u64)

/*
 * Write messages are currently limited to the size of the mailbox, while
 * read messages may be up to 4 times the size of the mailbox and are
 * returned in packets.
 */
#define SDSI_SIZE_WRITE_MSG             SDSI_SIZE_MAILBOX
#define SDSI_SIZE_READ_MSG              (SDSI_SIZE_MAILBOX * 4)

#define SDSI_ENABLED_FEATURES_OFFSET    16
#define SDSI_FEATURE_SDSI               BIT(3)
#define SDSI_FEATURE_METERING           BIT(26)

#define SDSI_SOCKET_ID_OFFSET           64
#define SDSI_SOCKET_ID                  GENMASK(3, 0)

#define SDSI_MBOX_CMD_SUCCESS           0x40
#define SDSI_MBOX_CMD_TIMEOUT           0x80

#define MBOX_TIMEOUT_US                 500000
#define MBOX_TIMEOUT_ACQUIRE_US         1000
#define MBOX_POLLING_PERIOD_US          100
#define MBOX_ACQUIRE_NUM_RETRIES        5
#define MBOX_ACQUIRE_RETRY_DELAY_MS     500
#define MBOX_MAX_PACKETS                4

#define MBOX_OWNER_NONE                 0x00
#define MBOX_OWNER_INBAND               0x01

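/*
 * Fields of the 64-bit mailbox control register. The driver sets RUN_BUSY to
 * start a transaction, polls READY for the response, and writes COMPLETE to
 * finish each packet (see sdsi_mbox_poll() and sdsi_complete_transaction()).
 */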
#define CTRL_RUN_BUSY                   BIT(0)
#define CTRL_READ_WRITE                 BIT(1)
#define CTRL_SOM                        BIT(2)
#define CTRL_EOM                        BIT(3)
#define CTRL_OWNER                      GENMASK(5, 4)
#define CTRL_COMPLETE                   BIT(6)
#define CTRL_READY                      BIT(7)
#define CTRL_INBAND_LOCK                BIT(32)
#define CTRL_METER_ENABLE_DRAM          BIT(33)
#define CTRL_STATUS                     GENMASK(15, 8)
#define CTRL_PACKET_SIZE                GENMASK(31, 16)
#define CTRL_MSG_SIZE                   GENMASK(63, 48)

#define DISC_TABLE_SIZE                 12
#define DT_ACCESS_TYPE                  GENMASK(3, 0)
#define DT_SIZE                         GENMASK(27, 12)
#define DT_TBIR                         GENMASK(2, 0)
#define DT_OFFSET(v)                    ((v) & GENMASK(31, 3))

#define SDSI_GUID_V1                    0x006DD191
#define GUID_V1_CNTRL_SIZE              8
#define GUID_V1_REGS_SIZE               72
#define SDSI_GUID_V2                    0xF210D9EF
#define GUID_V2_CNTRL_SIZE              16
#define GUID_V2_REGS_SIZE               80

enum sdsi_command {
        SDSI_CMD_PROVISION_AKC          = 0x0004,
        SDSI_CMD_PROVISION_CAP          = 0x0008,
        SDSI_CMD_READ_STATE             = 0x0010,
        SDSI_CMD_READ_METER             = 0x0014,
};

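/*
 * struct sdsi_mbox_info - context for a single mailbox transaction
 * @payload:        command qword and data to be written to the mailbox
 * @buffer:         destination for data returned by a read, NULL for writes
 * @control_flags:  additional control register bits OR'd into the command
 * @size:           payload size in bytes
 */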
struct sdsi_mbox_info {
        u64             *payload;
        void            *buffer;
        u64             control_flags;
        int             size;
};

struct disc_table {
        u32             access_info;
        u32             guid;
        u32             offset;
};

struct sdsi_priv {
        struct mutex            mb_lock;        /* Mailbox access lock */
        struct device           *dev;
        void __iomem            *control_addr;
        void __iomem            *mbox_addr;
        void __iomem            *regs_addr;
        int                     control_size;
        int                     mailbox_size;
        int                     registers_size;
        u32                     guid;
        u32                     features;
};

/* SDSi mailbox operations must be performed using 64bit mov instructions */
static __always_inline void
sdsi_memcpy64_toio(u64 __iomem *to, const u64 *from, size_t count_bytes)
{
        size_t count = count_bytes / sizeof(*to);
        int i;

        for (i = 0; i < count; i++)
                writeq(from[i], &to[i]);
}

static __always_inline void
sdsi_memcpy64_fromio(u64 *to, const u64 __iomem *from, size_t count_bytes)
{
        size_t count = count_bytes / sizeof(*to);
        int i;

        for (i = 0; i < count; i++)
                to[i] = readq(&from[i]);
}

static inline void sdsi_complete_transaction(struct sdsi_priv *priv)
{
        u64 control = FIELD_PREP(CTRL_COMPLETE, 1);

        lockdep_assert_held(&priv->mb_lock);
        writeq(control, priv->control_addr);
}

static int sdsi_status_to_errno(u32 status)
{
        switch (status) {
        case SDSI_MBOX_CMD_SUCCESS:
                return 0;
        case SDSI_MBOX_CMD_TIMEOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

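/*
 * Poll for completion of a mailbox command. For read commands the response
 * is drained from the mailbox one packet at a time until the EOM flag is
 * seen, copying each packet into info->buffer.
 */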
static int sdsi_mbox_poll(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
                          size_t *data_size)
{
        struct device *dev = priv->dev;
        u32 total, loop, eom, status, message_size;
        u64 control;
        int ret;

        lockdep_assert_held(&priv->mb_lock);

        /* For reads, data sizes that are larger than the mailbox size are read in packets. */
        total = 0;
        loop = 0;
        do {
                u32 packet_size;

                /* Poll on ready bit */
                ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
                                         MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
                if (ret)
                        break;

                eom = FIELD_GET(CTRL_EOM, control);
                status = FIELD_GET(CTRL_STATUS, control);
                packet_size = FIELD_GET(CTRL_PACKET_SIZE, control);
                message_size = FIELD_GET(CTRL_MSG_SIZE, control);

                ret = sdsi_status_to_errno(status);
                if (ret)
                        break;

                if (!packet_size) {
                        sdsi_complete_transaction(priv);
                        break;
                }

                /* Only the last packet can be less than the mailbox size. */
                if (!eom && packet_size != SDSI_SIZE_MAILBOX) {
                        dev_err(dev, "Invalid packet size\n");
                        ret = -EPROTO;
                        break;
                }

                if (packet_size > SDSI_SIZE_MAILBOX) {
                        dev_err(dev, "Packet size too large\n");
                        ret = -EPROTO;
                        break;
                }

                if (info->buffer) {
                        void *buf = info->buffer + array_size(SDSI_SIZE_MAILBOX, loop);

                        sdsi_memcpy64_fromio(buf, priv->mbox_addr,
                                             round_up(packet_size, SDSI_SIZE_CMD));
                        total += packet_size;
                }

                sdsi_complete_transaction(priv);
        } while (!eom && ++loop < MBOX_MAX_PACKETS);

        if (ret) {
                sdsi_complete_transaction(priv);
                return ret;
        }

        if (!eom) {
                dev_err(dev, "Exceeded read attempts\n");
                return -EPROTO;
        }

        /* Message size check is only valid for multi-packet transfers */
        if (loop && total != message_size)
                dev_warn(dev, "Read count %u differs from expected count %u\n",
                         total, message_size);

        if (data_size)
                *data_size = total;

        return 0;
}

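/*
 * Issue a read command. The command qword was already written to the mailbox
 * by sdsi_mbox_acquire(); the response packets are collected by
 * sdsi_mbox_poll().
 */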
static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
                              size_t *data_size)
{
        u64 control;

        lockdep_assert_held(&priv->mb_lock);

        /* Format and send the read command */
        control = FIELD_PREP(CTRL_EOM, 1) |
                  FIELD_PREP(CTRL_SOM, 1) |
                  FIELD_PREP(CTRL_RUN_BUSY, 1) |
                  FIELD_PREP(CTRL_PACKET_SIZE, info->size) |
                  info->control_flags;
        writeq(control, priv->control_addr);

        return sdsi_mbox_poll(priv, info, data_size);
}

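/*
 * Issue a write command. The first payload qword was written to the mailbox
 * by sdsi_mbox_acquire(), so only the remainder of the payload is copied
 * here before the command is started.
 */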
static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
                               size_t *data_size)
{
        u64 control;

        lockdep_assert_held(&priv->mb_lock);

        /* Write rest of the payload */
        sdsi_memcpy64_toio(priv->mbox_addr + SDSI_SIZE_CMD, info->payload + 1,
                           info->size - SDSI_SIZE_CMD);

        /* Format and send the write command */
        control = FIELD_PREP(CTRL_EOM, 1) |
                  FIELD_PREP(CTRL_SOM, 1) |
                  FIELD_PREP(CTRL_RUN_BUSY, 1) |
                  FIELD_PREP(CTRL_READ_WRITE, 1) |
                  FIELD_PREP(CTRL_MSG_SIZE, info->size) |
                  FIELD_PREP(CTRL_PACKET_SIZE, info->size);
        writeq(control, priv->control_addr);

        return sdsi_mbox_poll(priv, info, data_size);
}

static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
{
        u64 control;
        u32 owner;
        int ret, retries = 0;

        lockdep_assert_held(&priv->mb_lock);

        /* Check mailbox is available */
        control = readq(priv->control_addr);
        owner = FIELD_GET(CTRL_OWNER, control);
        if (owner != MBOX_OWNER_NONE)
                return -EBUSY;

        /*
         * If there has been no recent transaction and no one owns the mailbox,
         * we should acquire it in under 1ms. However, if we've accessed it
         * recently it may take up to 2.1 seconds to acquire it again.
         */
        do {
                /* Write first qword of payload */
                writeq(info->payload[0], priv->mbox_addr);

                /* Check for ownership */
                ret = readq_poll_timeout(priv->control_addr, control,
                                         FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
                                         MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);

                if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
                    retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
                        msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
                        continue;
                }

                /* Either we got it or someone else did. */
                break;
        } while (true);

        return ret;
}

static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
                           size_t *data_size)
{
        int ret;

        lockdep_assert_held(&priv->mb_lock);

        ret = sdsi_mbox_acquire(priv, info);
        if (ret)
                return ret;

        return sdsi_mbox_cmd_write(priv, info, data_size);
}

static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size)
{
        int ret;

        lockdep_assert_held(&priv->mb_lock);

        ret = sdsi_mbox_acquire(priv, info);
        if (ret)
                return ret;

        return sdsi_mbox_cmd_read(priv, info, data_size);
}

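/* Provisioning is not allowed while the BIOS in-band lock bit is set. */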
static bool sdsi_ib_locked(struct sdsi_priv *priv)
{
        return !!FIELD_GET(CTRL_INBAND_LOCK, readq(priv->control_addr));
}

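/*
 * Build a provisioning payload from the user-supplied blob: the message is
 * copied in, padded to a qword boundary, and the command is placed in the
 * final qword before the whole payload is sent through the mailbox.
 */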
static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
                              enum sdsi_command command)
{
        struct sdsi_mbox_info info = {};
        int ret;

        if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD))
                return -EOVERFLOW;

        /* Make sure In-band lock is not set */
        if (sdsi_ib_locked(priv))
                return -EPERM;

        /* Qword aligned message + command qword */
        info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD;

        info.payload = kzalloc(info.size, GFP_KERNEL);
        if (!info.payload)
                return -ENOMEM;

        /* Copy message to payload buffer */
        memcpy(info.payload, buf, count);

        /* Command is last qword of payload buffer */
        info.payload[(info.size - SDSI_SIZE_CMD) / SDSI_SIZE_CMD] = command;

        ret = mutex_lock_interruptible(&priv->mb_lock);
        if (ret)
                goto free_payload;

        ret = sdsi_mbox_write(priv, &info, NULL);

        mutex_unlock(&priv->mb_lock);

free_payload:
        kfree(info.payload);

        if (ret)
                return ret;

        return count;
}

static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *attr, char *buf, loff_t off,
                                   size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct sdsi_priv *priv = dev_get_drvdata(dev);

        if (off)
                return -ESPIPE;

        return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
}
static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);

static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *attr, char *buf, loff_t off,
                                   size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct sdsi_priv *priv = dev_get_drvdata(dev);

        if (off)
                return -ESPIPE;

        return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
}
static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);

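/*
 * Common helper for the certificate and meter attributes: send a single-qword
 * read command and copy at most @count bytes of the returned data into the
 * sysfs buffer.
 */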
static ssize_t
certificate_read(u64 command, u64 control_flags, struct sdsi_priv *priv,
                 char *buf, loff_t off, size_t count)
{
        struct sdsi_mbox_info info = {};
        size_t size;
        int ret;

        if (off)
                return 0;

        /* Buffer for return data */
        info.buffer = kmalloc(SDSI_SIZE_READ_MSG, GFP_KERNEL);
        if (!info.buffer)
                return -ENOMEM;

        info.payload = &command;
        info.size = sizeof(command);
        info.control_flags = control_flags;

        ret = mutex_lock_interruptible(&priv->mb_lock);
        if (ret)
                goto free_buffer;
        ret = sdsi_mbox_read(priv, &info, &size);
        mutex_unlock(&priv->mb_lock);
        if (ret < 0)
                goto free_buffer;

        if (size > count)
                size = count;

        memcpy(buf, info.buffer, size);

free_buffer:
        kfree(info.buffer);

        if (ret)
                return ret;

        return size;
}

static ssize_t
state_certificate_read(struct file *filp, struct kobject *kobj,
                       struct bin_attribute *attr, char *buf, loff_t off,
                       size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct sdsi_priv *priv = dev_get_drvdata(dev);

        return certificate_read(SDSI_CMD_READ_STATE, 0, priv, buf, off, count);
}
static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);

static ssize_t
meter_certificate_read(struct file *filp, struct kobject *kobj,
                       struct bin_attribute *attr, char *buf, loff_t off,
                       size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct sdsi_priv *priv = dev_get_drvdata(dev);

        return certificate_read(SDSI_CMD_READ_METER, 0, priv, buf, off, count);
}
static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);

static ssize_t
meter_current_read(struct file *filp, struct kobject *kobj,
                   struct bin_attribute *attr, char *buf, loff_t off,
                   size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct sdsi_priv *priv = dev_get_drvdata(dev);

        return certificate_read(SDSI_CMD_READ_METER, CTRL_METER_ENABLE_DRAM,
                                priv, buf, off, count);
}
static BIN_ATTR_ADMIN_RO(meter_current, SDSI_SIZE_READ_MSG);

static ssize_t registers_read(struct file *filp, struct kobject *kobj,
                              struct bin_attribute *attr, char *buf, loff_t off,
                              size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct sdsi_priv *priv = dev_get_drvdata(dev);
        void __iomem *addr = priv->regs_addr;
        int size = priv->registers_size;

        /*
         * The check below is performed by the sysfs caller based on the static
         * file size. But this may be greater than the actual size which is based
         * on the GUID. So check here again based on actual size before reading.
         */
        if (off >= size)
                return 0;

        if (off + count > size)
                count = size - off;

        memcpy_fromio(buf, addr + off, count);

        return count;
}
static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);

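/*
 * Binary sysfs attributes exposed by the driver. They typically appear under
 * the auxiliary device, e.g. /sys/bus/auxiliary/devices/intel_vsec.sdsi.<N>/,
 * one instance per socket.
 */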
static struct bin_attribute *sdsi_bin_attrs[] = {
        &bin_attr_registers,
        &bin_attr_state_certificate,
        &bin_attr_meter_certificate,
        &bin_attr_meter_current,
        &bin_attr_provision_akc,
        &bin_attr_provision_cap,
        NULL
};

static umode_t
sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct sdsi_priv *priv = dev_get_drvdata(dev);

        /* Registers file is always readable if the device is present */
        if (attr == &bin_attr_registers)
                return attr->attr.mode;

        /* All other attributes not visible if BIOS has not enabled On Demand */
        if (!(priv->features & SDSI_FEATURE_SDSI))
                return 0;

        if (attr == &bin_attr_meter_certificate || attr == &bin_attr_meter_current)
                return (priv->features & SDSI_FEATURE_METERING) ?
                        attr->attr.mode : 0;

        return attr->attr.mode;
}

static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct sdsi_priv *priv = dev_get_drvdata(dev);

        return sysfs_emit(buf, "0x%x\n", priv->guid);
}
static DEVICE_ATTR_RO(guid);

static struct attribute *sdsi_attrs[] = {
        &dev_attr_guid.attr,
        NULL
};

static const struct attribute_group sdsi_group = {
        .attrs = sdsi_attrs,
        .bin_attrs = sdsi_bin_attrs,
        .is_bin_visible = sdsi_battr_is_visible,
};
__ATTRIBUTE_GROUPS(sdsi);

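/* Sizes of the control and register regions depend on the discovery GUID. */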
static int sdsi_get_layout(struct sdsi_priv *priv, struct disc_table *table)
{
        switch (table->guid) {
        case SDSI_GUID_V1:
                priv->control_size = GUID_V1_CNTRL_SIZE;
                priv->registers_size = GUID_V1_REGS_SIZE;
                break;
        case SDSI_GUID_V2:
                priv->control_size = GUID_V2_CNTRL_SIZE;
                priv->registers_size = GUID_V2_REGS_SIZE;
                break;
        default:
                dev_err(priv->dev, "Unrecognized GUID 0x%x\n", table->guid);
                return -EINVAL;
        }
        return 0;
}

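/*
 * Map the SDSi MMIO region described by the discovery table. The region is
 * laid out as the control registers, followed by the 1 KB mailbox, followed
 * by the status registers.
 */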
static int sdsi_map_mbox_registers(struct sdsi_priv *priv, struct pci_dev *parent,
                                   struct disc_table *disc_table, struct resource *disc_res)
{
        u32 access_type = FIELD_GET(DT_ACCESS_TYPE, disc_table->access_info);
        u32 size = FIELD_GET(DT_SIZE, disc_table->access_info);
        u32 tbir = FIELD_GET(DT_TBIR, disc_table->offset);
        u32 offset = DT_OFFSET(disc_table->offset);
        struct resource res = {};

        /* Starting location of SDSi MMIO region based on access type */
        switch (access_type) {
        case ACCESS_TYPE_LOCAL:
                if (tbir) {
                        dev_err(priv->dev, "Unsupported BAR index %u for access type %u\n",
                                tbir, access_type);
                        return -EINVAL;
                }

                /*
                 * For access_type LOCAL, the base address is as follows:
                 * base address = end of discovery region + base offset + 1
                 */
                res.start = disc_res->end + offset + 1;
                break;

        case ACCESS_TYPE_BARID:
                res.start = pci_resource_start(parent, tbir) + offset;
                break;

        default:
                dev_err(priv->dev, "Unrecognized access_type %u\n", access_type);
                return -EINVAL;
        }

        res.end = res.start + size * sizeof(u32) - 1;
        res.flags = IORESOURCE_MEM;

        priv->control_addr = devm_ioremap_resource(priv->dev, &res);
        if (IS_ERR(priv->control_addr))
                return PTR_ERR(priv->control_addr);

        priv->mbox_addr = priv->control_addr + priv->control_size;
        priv->regs_addr = priv->mbox_addr + SDSI_SIZE_MAILBOX;

        priv->features = readq(priv->regs_addr + SDSI_ENABLED_FEATURES_OFFSET);

        return 0;
}

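/*
 * Probe an "intel_vsec.sdsi" auxiliary device created by the intel_vsec
 * parent driver. The first resource holds the discovery table, which
 * provides the GUID and the location of the mailbox registers.
 */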
static int sdsi_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
        struct intel_vsec_device *intel_cap_dev = auxdev_to_ivdev(auxdev);
        struct disc_table disc_table;
        struct resource *disc_res;
        void __iomem *disc_addr;
        struct sdsi_priv *priv;
        int ret;

        priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = &auxdev->dev;
        mutex_init(&priv->mb_lock);
        auxiliary_set_drvdata(auxdev, priv);

        /* Get the SDSi discovery table */
        disc_res = &intel_cap_dev->resource[0];
        disc_addr = devm_ioremap_resource(&auxdev->dev, disc_res);
        if (IS_ERR(disc_addr))
                return PTR_ERR(disc_addr);

        memcpy_fromio(&disc_table, disc_addr, DISC_TABLE_SIZE);

        priv->guid = disc_table.guid;

        /* Get guid based layout info */
        ret = sdsi_get_layout(priv, &disc_table);
        if (ret)
                return ret;

        /* Map the SDSi mailbox registers */
        ret = sdsi_map_mbox_registers(priv, intel_cap_dev->pcidev, &disc_table, disc_res);
        if (ret)
                return ret;

        return 0;
}

static const struct auxiliary_device_id sdsi_aux_id_table[] = {
        { .name = "intel_vsec.sdsi" },
        {}
};
MODULE_DEVICE_TABLE(auxiliary, sdsi_aux_id_table);

static struct auxiliary_driver sdsi_aux_driver = {
        .driver = {
                .dev_groups = sdsi_groups,
        },
        .id_table = sdsi_aux_id_table,
        .probe = sdsi_probe,
        /* No remove. All resources are handled under devm */
};
module_auxiliary_driver(sdsi_aux_driver);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel On Demand (SDSi) driver");
MODULE_LICENSE("GPL");