/*
 * System Control and Power Interface (SCPI) Message Protocol driver
 *
 * SCPI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex-M3 and the AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * DVFS for various power domains (including the cores/clusters),
 * configuration of certain system clocks, thermal sensors, and more.
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/spinlock.h>

#define CMD_ID_SHIFT		0
#define CMD_ID_MASK		0x7f
#define CMD_TOKEN_ID_SHIFT	8
#define CMD_TOKEN_ID_MASK	0xff
#define CMD_DATA_SIZE_SHIFT	16
#define CMD_DATA_SIZE_MASK	0x1ff
#define PACK_SCPI_CMD(cmd_id, tx_sz)			\
	((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) |	\
	(((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
#define ADD_SCPI_TOKEN(cmd, token)			\
	((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT))

#define CMD_SIZE(cmd)	(((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK)
#define CMD_UNIQ_MASK	(CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK)
#define CMD_XTRACT_UNIQ(cmd)	((cmd) & CMD_UNIQ_MASK)
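
/*
 * Example (illustrative, not part of the protocol definition): packing
 * command 0x10 (SCPI_CMD_GET_CLOCK_VALUE) with a 2-byte payload and then
 * stamping it with token 0x5a builds the command word as:
 *
 *	u32 cmd = PACK_SCPI_CMD(0x10, 2);	// cmd == 0x00020010
 *	ADD_SCPI_TOKEN(cmd, 0x5a);		// cmd == 0x00025a10
 *
 * CMD_XTRACT_UNIQ() strips the size field, so a response can be matched
 * to its pending request by the (token, command id) pair alone.
 */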

#define SCPI_SLOT		0

#define MAX_DVFS_DOMAINS	8
#define MAX_DVFS_OPPS		8
#define DVFS_LATENCY(hdr)	(le32_to_cpu(hdr) >> 16)
#define DVFS_OPP_COUNT(hdr)	((le32_to_cpu(hdr) >> 8) & 0xff)

#define PROTOCOL_REV_MINOR_BITS	16
#define PROTOCOL_REV_MINOR_MASK	((1U << PROTOCOL_REV_MINOR_BITS) - 1)
#define PROTOCOL_REV_MAJOR(x)	((x) >> PROTOCOL_REV_MINOR_BITS)
#define PROTOCOL_REV_MINOR(x)	((x) & PROTOCOL_REV_MINOR_MASK)

#define FW_REV_MAJOR_BITS	24
#define FW_REV_MINOR_BITS	16
#define FW_REV_PATCH_MASK	((1U << FW_REV_MINOR_BITS) - 1)
#define FW_REV_MINOR_MASK	((1U << FW_REV_MAJOR_BITS) - 1)
#define FW_REV_MAJOR(x)		((x) >> FW_REV_MAJOR_BITS)
#define FW_REV_MINOR(x)		(((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
#define FW_REV_PATCH(x)		((x) & FW_REV_PATCH_MASK)

#define MAX_RX_TIMEOUT		(msecs_to_jiffies(30))

enum scpi_error_codes {
	SCPI_SUCCESS = 0,	/* Success */
	SCPI_ERR_PARAM = 1,	/* Invalid parameter(s) */
	SCPI_ERR_ALIGN = 2,	/* Invalid alignment */
	SCPI_ERR_SIZE = 3,	/* Invalid size */
	SCPI_ERR_HANDLER = 4,	/* Invalid handler/callback */
	SCPI_ERR_ACCESS = 5,	/* Invalid access/permission denied */
	SCPI_ERR_RANGE = 6,	/* Value out of range */
	SCPI_ERR_TIMEOUT = 7,	/* Timeout has occurred */
	SCPI_ERR_NOMEM = 8,	/* Invalid memory area or pointer */
	SCPI_ERR_PWRSTATE = 9,	/* Invalid power state */
	SCPI_ERR_SUPPORT = 10,	/* Not supported or disabled */
	SCPI_ERR_DEVICE = 11,	/* Device error */
	SCPI_ERR_BUSY = 12,	/* Device busy */
	SCPI_ERR_MAX
};

enum scpi_std_cmd {
	SCPI_CMD_INVALID		= 0x00,
	SCPI_CMD_SCPI_READY		= 0x01,
	SCPI_CMD_SCPI_CAPABILITIES	= 0x02,
	SCPI_CMD_SET_CSS_PWR_STATE	= 0x03,
	SCPI_CMD_GET_CSS_PWR_STATE	= 0x04,
	SCPI_CMD_SET_SYS_PWR_STATE	= 0x05,
	SCPI_CMD_SET_CPU_TIMER		= 0x06,
	SCPI_CMD_CANCEL_CPU_TIMER	= 0x07,
	SCPI_CMD_DVFS_CAPABILITIES	= 0x08,
	SCPI_CMD_GET_DVFS_INFO		= 0x09,
	SCPI_CMD_SET_DVFS		= 0x0a,
	SCPI_CMD_GET_DVFS		= 0x0b,
	SCPI_CMD_GET_DVFS_STAT		= 0x0c,
	SCPI_CMD_CLOCK_CAPABILITIES	= 0x0d,
	SCPI_CMD_GET_CLOCK_INFO		= 0x0e,
	SCPI_CMD_SET_CLOCK_VALUE	= 0x0f,
	SCPI_CMD_GET_CLOCK_VALUE	= 0x10,
	SCPI_CMD_PSU_CAPABILITIES	= 0x11,
	SCPI_CMD_GET_PSU_INFO		= 0x12,
	SCPI_CMD_SET_PSU		= 0x13,
	SCPI_CMD_GET_PSU		= 0x14,
	SCPI_CMD_SENSOR_CAPABILITIES	= 0x15,
	SCPI_CMD_SENSOR_INFO		= 0x16,
	SCPI_CMD_SENSOR_VALUE		= 0x17,
	SCPI_CMD_SENSOR_CFG_PERIODIC	= 0x18,
	SCPI_CMD_SENSOR_CFG_BOUNDS	= 0x19,
	SCPI_CMD_SENSOR_ASYNC_VALUE	= 0x1a,
	SCPI_CMD_SET_DEVICE_PWR_STATE	= 0x1b,
	SCPI_CMD_GET_DEVICE_PWR_STATE	= 0x1c,
	SCPI_CMD_COUNT
};

struct scpi_xfer {
	u32 slot; /* has to be first element */
	u32 cmd;
	u32 status;
	const void *tx_buf;
	void *rx_buf;
	unsigned int tx_len;
	unsigned int rx_len;
	struct list_head node;
	struct completion done;
};

struct scpi_chan {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *tx_payload;
	void __iomem *rx_payload;
	struct list_head rx_pending;
	struct list_head xfers_list;
	struct scpi_xfer *xfers;
	spinlock_t rx_lock; /* locking for the rx pending list */
	struct mutex xfers_lock;
	u8 token;
};

struct scpi_drvinfo {
	u32 protocol_version;
	u32 firmware_version;
	int num_chans;
	atomic_t next_chan;
	struct scpi_ops *scpi_ops;
	struct scpi_chan *channels;
	struct scpi_dvfs_info *dvfs[MAX_DVFS_DOMAINS];
};

/*
 * The SCP firmware only executes in little-endian mode, so any buffers
 * shared through SCPI should have their contents converted to little-endian
 */
struct scpi_shared_mem {
	__le32 command;
	__le32 status;
	u8 payload[0];
} __packed;

struct scp_capabilities {
	__le32 protocol_version;
	__le32 event_version;
	__le32 platform_version;
	__le32 commands[4];
} __packed;

struct clk_get_info {
	__le16 id;
	__le16 flags;
	__le32 min_rate;
	__le32 max_rate;
	u8 name[20];
} __packed;

struct clk_get_value {
	__le32 rate;
} __packed;

struct clk_set_value {
	__le16 id;
	__le16 reserved;
	__le32 rate;
} __packed;

struct dvfs_info {
	__le32 header;
	struct {
		__le32 freq;
		__le32 m_volt;
	} opps[MAX_DVFS_OPPS];
} __packed;

struct dvfs_set {
	u8 domain;
	u8 index;
} __packed;

struct sensor_capabilities {
	__le16 sensors;
} __packed;

struct _scpi_sensor_info {
	__le16 sensor_id;
	u8 class;
	u8 trigger_type;
	char name[20];
};
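
/*
 * Sensor readings are returned as two little-endian 32-bit halves;
 * scpi_sensor_get_value() below recombines them into one 64-bit value.
 */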

struct sensor_value {
	__le32 lo_val;
	__le32 hi_val;
} __packed;

struct dev_pstate_set {
	__le16 dev_id;
	u8 pstate;
} __packed;

static struct scpi_drvinfo *scpi_info;

static int scpi_linux_errmap[SCPI_ERR_MAX] = {
	/* better than a switch statement, as long as the error codes are contiguous */
	0,		/* SCPI_SUCCESS */
	-EINVAL,	/* SCPI_ERR_PARAM */
	-ENOEXEC,	/* SCPI_ERR_ALIGN */
	-EMSGSIZE,	/* SCPI_ERR_SIZE */
	-EINVAL,	/* SCPI_ERR_HANDLER */
	-EACCES,	/* SCPI_ERR_ACCESS */
	-ERANGE,	/* SCPI_ERR_RANGE */
	-ETIMEDOUT,	/* SCPI_ERR_TIMEOUT */
	-ENOMEM,	/* SCPI_ERR_NOMEM */
	-EINVAL,	/* SCPI_ERR_PWRSTATE */
	-EOPNOTSUPP,	/* SCPI_ERR_SUPPORT */
	-EIO,		/* SCPI_ERR_DEVICE */
	-EBUSY,		/* SCPI_ERR_BUSY */
};

static inline int scpi_to_linux_errno(int errno)
{
	if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
		return scpi_linux_errmap[errno];
	return -EIO;
}

static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
{
	unsigned long flags;
	struct scpi_xfer *t, *match = NULL;

	spin_lock_irqsave(&ch->rx_lock, flags);
	if (list_empty(&ch->rx_pending)) {
		spin_unlock_irqrestore(&ch->rx_lock, flags);
		return;
	}

	list_for_each_entry(t, &ch->rx_pending, node)
		if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
			list_del(&t->node);
			match = t;
			break;
		}
	/* check if wait_for_completion is in progress or timed-out */
	if (match && !completion_done(&match->done)) {
		struct scpi_shared_mem *mem = ch->rx_payload;
		unsigned int len = min(match->rx_len, CMD_SIZE(cmd));

		match->status = le32_to_cpu(mem->status);
		memcpy_fromio(match->rx_buf, mem->payload, len);
		if (match->rx_len > len)
			memset(match->rx_buf + len, 0, match->rx_len - len);
		complete(&match->done);
	}
	spin_unlock_irqrestore(&ch->rx_lock, flags);
}

static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = ch->rx_payload;
	u32 cmd = le32_to_cpu(mem->command);

	scpi_process_cmd(ch, cmd);
}

static void scpi_tx_prepare(struct mbox_client *c, void *msg)
{
	unsigned long flags;
	struct scpi_xfer *t = msg;
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;

	if (t->tx_buf)
		memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
	if (t->rx_buf) {
		if (!(++ch->token))
			++ch->token;
		ADD_SCPI_TOKEN(t->cmd, ch->token);
		spin_lock_irqsave(&ch->rx_lock, flags);
		list_add_tail(&t->node, &ch->rx_pending);
		spin_unlock_irqrestore(&ch->rx_lock, flags);
	}
	mem->command = cpu_to_le32(t->cmd);
}

static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
{
	struct scpi_xfer *t;

	mutex_lock(&ch->xfers_lock);
	if (list_empty(&ch->xfers_list)) {
		mutex_unlock(&ch->xfers_lock);
		return NULL;
	}
	t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
	list_del(&t->node);
	mutex_unlock(&ch->xfers_lock);
	return t;
}

static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
{
	mutex_lock(&ch->xfers_lock);
	list_add_tail(&t->node, &ch->xfers_list);
	mutex_unlock(&ch->xfers_lock);
}
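
/*
 * scpi_send_message() is the single entry point for all commands: it
 * picks a channel round-robin, borrows a pre-allocated scpi_xfer, and
 * hands it to the mailbox layer. scpi_tx_prepare() stamps the token
 * and copies the payload into shared memory; the SCP's reply arrives
 * via scpi_handle_remote_msg()/scpi_process_cmd(), which completes the
 * waiter. A positive SCPI status is translated to a Linux errno before
 * returning.
 */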

static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
			     void *rx_buf, unsigned int rx_len)
{
	int ret;
	u8 chan;
	struct scpi_xfer *msg;
	struct scpi_chan *scpi_chan;

	chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans;
	scpi_chan = scpi_info->channels + chan;

	msg = get_scpi_xfer(scpi_chan);
	if (!msg)
		return -ENOMEM;

	msg->slot = BIT(SCPI_SLOT);
	msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
	msg->tx_buf = tx_buf;
	msg->tx_len = tx_len;
	msg->rx_buf = rx_buf;
	msg->rx_len = rx_len;
	init_completion(&msg->done);

	ret = mbox_send_message(scpi_chan->chan, msg);
	if (ret < 0 || !rx_buf)
		goto out;

	if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
		ret = -ETIMEDOUT;
	else
		/* first status word */
		ret = msg->status;
out:
	if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
		scpi_process_cmd(scpi_chan, msg->cmd);

	put_scpi_xfer(msg, scpi_chan);
	/* SCPI error codes are > 0; translate them to Linux errnos */
	return ret > 0 ? scpi_to_linux_errno(ret) : ret;
}

static u32 scpi_get_version(void)
{
	return scpi_info->protocol_version;
}

static int
scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
{
	int ret;
	struct clk_get_info clk;
	__le16 le_clk_id = cpu_to_le16(clk_id);

	ret = scpi_send_message(SCPI_CMD_GET_CLOCK_INFO, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));
	if (!ret) {
		*min = le32_to_cpu(clk.min_rate);
		*max = le32_to_cpu(clk.max_rate);
	}
	return ret;
}

static unsigned long scpi_clk_get_val(u16 clk_id)
{
	int ret;
	struct clk_get_value clk;
	__le16 le_clk_id = cpu_to_le16(clk_id);

	ret = scpi_send_message(SCPI_CMD_GET_CLOCK_VALUE, &le_clk_id,
				sizeof(le_clk_id), &clk, sizeof(clk));
	return ret ? ret : le32_to_cpu(clk.rate);
}

static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
{
	int stat;
	struct clk_set_value clk = {
		.id = cpu_to_le16(clk_id),
		.rate = cpu_to_le32(rate)
	};

	return scpi_send_message(SCPI_CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
				 &stat, sizeof(stat));
}
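
/*
 * DVFS operating points are addressed by index into the per-domain
 * table returned by scpi_dvfs_get_info(); the get/set commands below
 * carry only the domain and that index.
 */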

static int scpi_dvfs_get_idx(u8 domain)
{
	int ret;
	u8 dvfs_idx;

	ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
				&dvfs_idx, sizeof(dvfs_idx));
	return ret ? ret : dvfs_idx;
}

static int scpi_dvfs_set_idx(u8 domain, u8 index)
{
	int stat;
	struct dvfs_set dvfs = {domain, index};

	return scpi_send_message(SCPI_CMD_SET_DVFS, &dvfs, sizeof(dvfs),
				 &stat, sizeof(stat));
}

static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scpi_opp *t1 = opp1, *t2 = opp2;

	return t1->freq - t2->freq;
}

static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
{
	struct scpi_dvfs_info *info;
	struct scpi_opp *opp;
	struct dvfs_info buf;
	int ret, i;

	if (domain >= MAX_DVFS_DOMAINS)
		return ERR_PTR(-EINVAL);

	if (scpi_info->dvfs[domain])	/* data already populated */
		return scpi_info->dvfs[domain];

	ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain),
				&buf, sizeof(buf));
	if (ret)
		return ERR_PTR(ret);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->count = DVFS_OPP_COUNT(buf.header);
	info->latency = DVFS_LATENCY(buf.header) * 1000; /* us to ns */

	info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
	if (!info->opps) {
		kfree(info);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
		opp->freq = le32_to_cpu(buf.opps[i].freq);
		opp->m_volt = le32_to_cpu(buf.opps[i].m_volt);
	}

	sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);

	scpi_info->dvfs[domain] = info;
	return info;
}

static int scpi_sensor_get_capability(u16 *sensors)
{
	struct sensor_capabilities cap_buf;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
				sizeof(cap_buf));
	if (!ret)
		*sensors = le16_to_cpu(cap_buf.sensors);

	return ret;
}

static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
{
	__le16 id = cpu_to_le16(sensor_id);
	struct _scpi_sensor_info _info;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_INFO, &id, sizeof(id),
				&_info, sizeof(_info));
	if (!ret) {
		memcpy(info, &_info, sizeof(*info));
		info->sensor_id = le16_to_cpu(_info.sensor_id);
	}

	return ret;
}

static int scpi_sensor_get_value(u16 sensor, u64 *val)
{
	__le16 id = cpu_to_le16(sensor);
	struct sensor_value buf;
	int ret;

	ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
				&buf, sizeof(buf));
	if (!ret)
		*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
			le32_to_cpu(buf.lo_val);

	return ret;
}
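
/*
 * Device power states are platform-defined u8 values; this driver only
 * transports them between the caller and the SCP firmware.
 */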

static int scpi_device_get_power_state(u16 dev_id)
{
	int ret;
	u8 pstate;
	__le16 id = cpu_to_le16(dev_id);

	ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id,
				sizeof(id), &pstate, sizeof(pstate));
	return ret ? ret : pstate;
}

static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
{
	int stat;
	struct dev_pstate_set dev_set = {
		.dev_id = cpu_to_le16(dev_id),
		.pstate = pstate,
	};

	return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set,
				 sizeof(dev_set), &stat, sizeof(stat));
}

static struct scpi_ops scpi_ops = {
	.get_version = scpi_get_version,
	.clk_get_range = scpi_clk_get_range,
	.clk_get_val = scpi_clk_get_val,
	.clk_set_val = scpi_clk_set_val,
	.dvfs_get_idx = scpi_dvfs_get_idx,
	.dvfs_set_idx = scpi_dvfs_set_idx,
	.dvfs_get_info = scpi_dvfs_get_info,
	.sensor_get_capability = scpi_sensor_get_capability,
	.sensor_get_info = scpi_sensor_get_info,
	.sensor_get_value = scpi_sensor_get_value,
	.device_get_power_state = scpi_device_get_power_state,
	.device_set_power_state = scpi_device_set_power_state,
};

struct scpi_ops *get_scpi_ops(void)
{
	return scpi_info ? scpi_info->scpi_ops : NULL;
}
EXPORT_SYMBOL_GPL(get_scpi_ops);
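
/*
 * Illustrative consumer usage (a sketch, not code from this driver):
 * client drivers such as scpi-cpufreq or scpi-hwmon fetch the ops at
 * probe time and treat a NULL return as "SCP not ready yet":
 *
 *	struct scpi_ops *ops = get_scpi_ops();
 *
 *	if (!ops)
 *		return -EPROBE_DEFER;
 *	rate = ops->clk_get_val(clk_id);	// clk_id: caller's clock id
 */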

static int scpi_init_versions(struct scpi_drvinfo *info)
{
	int ret;
	struct scp_capabilities caps;

	ret = scpi_send_message(SCPI_CMD_SCPI_CAPABILITIES, NULL, 0,
				&caps, sizeof(caps));
	if (!ret) {
		info->protocol_version = le32_to_cpu(caps.protocol_version);
		info->firmware_version = le32_to_cpu(caps.platform_version);
	}
	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);

	return sprintf(buf, "%d.%d\n",
		       PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
		       PROTOCOL_REV_MINOR(scpi_info->protocol_version));
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);

	return sprintf(buf, "%d.%d.%d\n",
		       FW_REV_MAJOR(scpi_info->firmware_version),
		       FW_REV_MINOR(scpi_info->firmware_version),
		       FW_REV_PATCH(scpi_info->firmware_version));
}
static DEVICE_ATTR_RO(firmware_version);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

static void
scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
{
	int i;

	for (i = 0; i < count && pchan->chan; i++, pchan++) {
		mbox_free_channel(pchan->chan);
		devm_kfree(dev, pchan->xfers);
		devm_iounmap(dev, pchan->rx_payload);
	}
}

static int scpi_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct scpi_drvinfo *info = platform_get_drvdata(pdev);

	scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */

	of_platform_depopulate(dev);
	sysfs_remove_groups(&dev->kobj, versions_groups);
	scpi_free_channels(dev, info->channels, info->num_chans);
	platform_set_drvdata(pdev, NULL);

	for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
		kfree(info->dvfs[i]->opps);
		kfree(info->dvfs[i]);
	}
	devm_kfree(dev, info->channels);
	devm_kfree(dev, info);

	return 0;
}

#define MAX_SCPI_XFERS		10
static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
{
	int i;
	struct scpi_xfer *xfers;

	xfers = devm_kzalloc(dev, MAX_SCPI_XFERS * sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	ch->xfers = xfers;
	for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++)
		list_add_tail(&xfers->node, &ch->xfers_list);
	return 0;
}
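
/*
 * Probe walks the "mboxes" phandles: each entry gets the lower half of
 * its "shmem" region as RX payload and the upper half as TX payload, a
 * mailbox channel, and a pool of MAX_SCPI_XFERS transfer descriptors.
 * Only after the SCP reports its capabilities are the ops exported and
 * the child devices populated.
 */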

static int scpi_probe(struct platform_device *pdev)
{
	int count, idx, ret;
	struct resource res;
	struct scpi_chan *scpi_chan;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL);
	if (!scpi_info)
		return -ENOMEM;

	count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
	if (count < 0) {
		dev_err(dev, "no mboxes property in '%s'\n", np->full_name);
		return -ENODEV;
	}

	scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
	if (!scpi_chan)
		return -ENOMEM;

	for (idx = 0; idx < count; idx++) {
		resource_size_t size;
		struct scpi_chan *pchan = scpi_chan + idx;
		struct mbox_client *cl = &pchan->cl;
		struct device_node *shmem = of_parse_phandle(np, "shmem", idx);

		if (of_address_to_resource(shmem, 0, &res)) {
			dev_err(dev, "failed to get SCPI payload mem resource\n");
			ret = -EINVAL;
			goto err;
		}

		size = resource_size(&res);
		pchan->rx_payload = devm_ioremap(dev, res.start, size);
		if (!pchan->rx_payload) {
			dev_err(dev, "failed to ioremap SCPI payload\n");
			ret = -EADDRNOTAVAIL;
			goto err;
		}
		pchan->tx_payload = pchan->rx_payload + (size >> 1);

		cl->dev = dev;
		cl->rx_callback = scpi_handle_remote_msg;
		cl->tx_prepare = scpi_tx_prepare;
		cl->tx_block = true;
		cl->tx_tout = 20;
		cl->knows_txdone = false; /* controller can't ack */

		INIT_LIST_HEAD(&pchan->rx_pending);
		INIT_LIST_HEAD(&pchan->xfers_list);
		spin_lock_init(&pchan->rx_lock);
		mutex_init(&pchan->xfers_lock);

		ret = scpi_alloc_xfer_list(dev, pchan);
		if (!ret) {
			pchan->chan = mbox_request_channel(cl, idx);
			if (!IS_ERR(pchan->chan))
				continue;
			ret = PTR_ERR(pchan->chan);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "failed to get channel%d err %d\n",
					idx, ret);
		}
err:
		scpi_free_channels(dev, scpi_chan, idx);
		scpi_info = NULL;
		return ret;
	}

	scpi_info->channels = scpi_chan;
	scpi_info->num_chans = count;
	platform_set_drvdata(pdev, scpi_info);

	ret = scpi_init_versions(scpi_info);
	if (ret) {
		dev_err(dev, "incorrect or no SCP firmware found\n");
		scpi_remove(pdev);
		return ret;
	}

	_dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
		  PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
		  PROTOCOL_REV_MINOR(scpi_info->protocol_version),
		  FW_REV_MAJOR(scpi_info->firmware_version),
		  FW_REV_MINOR(scpi_info->firmware_version),
		  FW_REV_PATCH(scpi_info->firmware_version));
	scpi_info->scpi_ops = &scpi_ops;

	ret = sysfs_create_groups(&dev->kobj, versions_groups);
	if (ret)
		dev_err(dev, "unable to create sysfs version group\n");

	return of_platform_populate(dev->of_node, NULL, NULL, dev);
}

static const struct of_device_id scpi_of_match[] = {
	{.compatible = "arm,scpi"},
	{},
};

MODULE_DEVICE_TABLE(of, scpi_of_match);

static struct platform_driver scpi_driver = {
	.driver = {
		.name = "scpi_protocol",
		.of_match_table = scpi_of_match,
	},
	.probe = scpi_probe,
	.remove = scpi_remove,
};
module_platform_driver(scpi_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCPI mailbox protocol driver");
MODULE_LICENSE("GPL v2");