1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Texas Instruments System Control Interface Protocol Driver 4 * 5 * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/ 6 * Nishanth Menon 7 */ 8 9 #define pr_fmt(fmt) "%s: " fmt, __func__ 10 11 #include <linux/bitmap.h> 12 #include <linux/debugfs.h> 13 #include <linux/export.h> 14 #include <linux/io.h> 15 #include <linux/iopoll.h> 16 #include <linux/kernel.h> 17 #include <linux/mailbox_client.h> 18 #include <linux/module.h> 19 #include <linux/of_device.h> 20 #include <linux/semaphore.h> 21 #include <linux/slab.h> 22 #include <linux/soc/ti/ti-msgmgr.h> 23 #include <linux/soc/ti/ti_sci_protocol.h> 24 #include <linux/reboot.h> 25 26 #include "ti_sci.h" 27 28 /* List of all TI SCI devices active in system */ 29 static LIST_HEAD(ti_sci_list); 30 /* Protection for the entire list */ 31 static DEFINE_MUTEX(ti_sci_list_mutex); 32 33 /** 34 * struct ti_sci_xfer - Structure representing a message flow 35 * @tx_message: Transmit message 36 * @rx_len: Receive message length 37 * @xfer_buf: Preallocated buffer to store receive message 38 * Since we work with request-ACK protocol, we can 39 * reuse the same buffer for the rx path as we 40 * use for the tx path. 41 * @done: completion event 42 */ 43 struct ti_sci_xfer { 44 struct ti_msgmgr_message tx_message; 45 u8 rx_len; 46 u8 *xfer_buf; 47 struct completion done; 48 }; 49 50 /** 51 * struct ti_sci_xfers_info - Structure to manage transfer information 52 * @sem_xfer_count: Counting Semaphore for managing max simultaneous 53 * Messages. 54 * @xfer_block: Preallocated Message array 55 * @xfer_alloc_table: Bitmap table for allocated messages. 56 * Index of this bitmap table is also used for message 57 * sequence identifier. 58 * @xfer_lock: Protection for message allocation 59 */ 60 struct ti_sci_xfers_info { 61 struct semaphore sem_xfer_count; 62 struct ti_sci_xfer *xfer_block; 63 unsigned long *xfer_alloc_table; 64 /* protect transfer allocation */ 65 spinlock_t xfer_lock; 66 }; 67 68 /** 69 * struct ti_sci_desc - Description of SoC integration 70 * @default_host_id: Host identifier representing the compute entity 71 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) 72 * @max_msgs: Maximum number of messages that can be pending 73 * simultaneously in the system 74 * @max_msg_size: Maximum size of data per message that can be handled. 75 */ 76 struct ti_sci_desc { 77 u8 default_host_id; 78 int max_rx_timeout_ms; 79 int max_msgs; 80 int max_msg_size; 81 }; 82 83 /** 84 * struct ti_sci_info - Structure representing a TI SCI instance 85 * @dev: Device pointer 86 * @desc: SoC description for this instance 87 * @nb: Reboot Notifier block 88 * @d: Debugfs file entry 89 * @debug_region: Memory region where the debug message are available 90 * @debug_region_size: Debug region size 91 * @debug_buffer: Buffer allocated to copy debug messages. 92 * @handle: Instance of TI SCI handle to send to clients. 93 * @cl: Mailbox Client 94 * @chan_tx: Transmit mailbox channel 95 * @chan_rx: Receive mailbox channel 96 * @minfo: Message info 97 * @node: list head 98 * @host_id: Host ID 99 * @users: Number of users of this instance 100 */ 101 struct ti_sci_info { 102 struct device *dev; 103 struct notifier_block nb; 104 const struct ti_sci_desc *desc; 105 struct dentry *d; 106 void __iomem *debug_region; 107 char *debug_buffer; 108 size_t debug_region_size; 109 struct ti_sci_handle handle; 110 struct mbox_client cl; 111 struct mbox_chan *chan_tx; 112 struct mbox_chan *chan_rx; 113 struct ti_sci_xfers_info minfo; 114 struct list_head node; 115 u8 host_id; 116 /* protected by ti_sci_list_mutex */ 117 int users; 118 }; 119 120 #define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl) 121 #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle) 122 #define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb) 123 124 #ifdef CONFIG_DEBUG_FS 125 126 /** 127 * ti_sci_debug_show() - Helper to dump the debug log 128 * @s: sequence file pointer 129 * @unused: unused. 130 * 131 * Return: 0 132 */ 133 static int ti_sci_debug_show(struct seq_file *s, void *unused) 134 { 135 struct ti_sci_info *info = s->private; 136 137 memcpy_fromio(info->debug_buffer, info->debug_region, 138 info->debug_region_size); 139 /* 140 * We don't trust firmware to leave NULL terminated last byte (hence 141 * we have allocated 1 extra 0 byte). Since we cannot guarantee any 142 * specific data format for debug messages, We just present the data 143 * in the buffer as is - we expect the messages to be self explanatory. 144 */ 145 seq_puts(s, info->debug_buffer); 146 return 0; 147 } 148 149 /* Provide the log file operations interface*/ 150 DEFINE_SHOW_ATTRIBUTE(ti_sci_debug); 151 152 /** 153 * ti_sci_debugfs_create() - Create log debug file 154 * @pdev: platform device pointer 155 * @info: Pointer to SCI entity information 156 * 157 * Return: 0 if all went fine, else corresponding error. 158 */ 159 static int ti_sci_debugfs_create(struct platform_device *pdev, 160 struct ti_sci_info *info) 161 { 162 struct device *dev = &pdev->dev; 163 struct resource *res; 164 char debug_name[50] = "ti_sci_debug@"; 165 166 /* Debug region is optional */ 167 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 168 "debug_messages"); 169 info->debug_region = devm_ioremap_resource(dev, res); 170 if (IS_ERR(info->debug_region)) 171 return 0; 172 info->debug_region_size = resource_size(res); 173 174 info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1, 175 sizeof(char), GFP_KERNEL); 176 if (!info->debug_buffer) 177 return -ENOMEM; 178 /* Setup NULL termination */ 179 info->debug_buffer[info->debug_region_size] = 0; 180 181 info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), 182 sizeof(debug_name) - 183 sizeof("ti_sci_debug@")), 184 0444, NULL, info, &ti_sci_debug_fops); 185 if (IS_ERR(info->d)) 186 return PTR_ERR(info->d); 187 188 dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n", 189 info->debug_region, info->debug_region_size, res); 190 return 0; 191 } 192 193 /** 194 * ti_sci_debugfs_destroy() - clean up log debug file 195 * @pdev: platform device pointer 196 * @info: Pointer to SCI entity information 197 */ 198 static void ti_sci_debugfs_destroy(struct platform_device *pdev, 199 struct ti_sci_info *info) 200 { 201 if (IS_ERR(info->debug_region)) 202 return; 203 204 debugfs_remove(info->d); 205 } 206 #else /* CONFIG_DEBUG_FS */ 207 static inline int ti_sci_debugfs_create(struct platform_device *dev, 208 struct ti_sci_info *info) 209 { 210 return 0; 211 } 212 213 static inline void ti_sci_debugfs_destroy(struct platform_device *dev, 214 struct ti_sci_info *info) 215 { 216 } 217 #endif /* CONFIG_DEBUG_FS */ 218 219 /** 220 * ti_sci_dump_header_dbg() - Helper to dump a message header. 221 * @dev: Device pointer corresponding to the SCI entity 222 * @hdr: pointer to header. 223 */ 224 static inline void ti_sci_dump_header_dbg(struct device *dev, 225 struct ti_sci_msg_hdr *hdr) 226 { 227 dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n", 228 hdr->type, hdr->host, hdr->seq, hdr->flags); 229 } 230 231 /** 232 * ti_sci_rx_callback() - mailbox client callback for receive messages 233 * @cl: client pointer 234 * @m: mailbox message 235 * 236 * Processes one received message to appropriate transfer information and 237 * signals completion of the transfer. 238 * 239 * NOTE: This function will be invoked in IRQ context, hence should be 240 * as optimal as possible. 241 */ 242 static void ti_sci_rx_callback(struct mbox_client *cl, void *m) 243 { 244 struct ti_sci_info *info = cl_to_ti_sci_info(cl); 245 struct device *dev = info->dev; 246 struct ti_sci_xfers_info *minfo = &info->minfo; 247 struct ti_msgmgr_message *mbox_msg = m; 248 struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf; 249 struct ti_sci_xfer *xfer; 250 u8 xfer_id; 251 252 xfer_id = hdr->seq; 253 254 /* 255 * Are we even expecting this? 256 * NOTE: barriers were implicit in locks used for modifying the bitmap 257 */ 258 if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { 259 dev_err(dev, "Message for %d is not expected!\n", xfer_id); 260 return; 261 } 262 263 xfer = &minfo->xfer_block[xfer_id]; 264 265 /* Is the message of valid length? */ 266 if (mbox_msg->len > info->desc->max_msg_size) { 267 dev_err(dev, "Unable to handle %zu xfer(max %d)\n", 268 mbox_msg->len, info->desc->max_msg_size); 269 ti_sci_dump_header_dbg(dev, hdr); 270 return; 271 } 272 if (mbox_msg->len < xfer->rx_len) { 273 dev_err(dev, "Recv xfer %zu < expected %d length\n", 274 mbox_msg->len, xfer->rx_len); 275 ti_sci_dump_header_dbg(dev, hdr); 276 return; 277 } 278 279 ti_sci_dump_header_dbg(dev, hdr); 280 /* Take a copy to the rx buffer.. */ 281 memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len); 282 complete(&xfer->done); 283 } 284 285 /** 286 * ti_sci_get_one_xfer() - Allocate one message 287 * @info: Pointer to SCI entity information 288 * @msg_type: Message type 289 * @msg_flags: Flag to set for the message 290 * @tx_message_size: transmit message size 291 * @rx_message_size: receive message size 292 * 293 * Helper function which is used by various command functions that are 294 * exposed to clients of this driver for allocating a message traffic event. 295 * 296 * This function can sleep depending on pending requests already in the system 297 * for the SCI entity. Further, this also holds a spinlock to maintain integrity 298 * of internal data structures. 299 * 300 * Return: 0 if all went fine, else corresponding error. 301 */ 302 static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info, 303 u16 msg_type, u32 msg_flags, 304 size_t tx_message_size, 305 size_t rx_message_size) 306 { 307 struct ti_sci_xfers_info *minfo = &info->minfo; 308 struct ti_sci_xfer *xfer; 309 struct ti_sci_msg_hdr *hdr; 310 unsigned long flags; 311 unsigned long bit_pos; 312 u8 xfer_id; 313 int ret; 314 int timeout; 315 316 /* Ensure we have sane transfer sizes */ 317 if (rx_message_size > info->desc->max_msg_size || 318 tx_message_size > info->desc->max_msg_size || 319 rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr)) 320 return ERR_PTR(-ERANGE); 321 322 /* 323 * Ensure we have only controlled number of pending messages. 324 * Ideally, we might just have to wait a single message, be 325 * conservative and wait 5 times that.. 326 */ 327 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5; 328 ret = down_timeout(&minfo->sem_xfer_count, timeout); 329 if (ret < 0) 330 return ERR_PTR(ret); 331 332 /* Keep the locked section as small as possible */ 333 spin_lock_irqsave(&minfo->xfer_lock, flags); 334 bit_pos = find_first_zero_bit(minfo->xfer_alloc_table, 335 info->desc->max_msgs); 336 set_bit(bit_pos, minfo->xfer_alloc_table); 337 spin_unlock_irqrestore(&minfo->xfer_lock, flags); 338 339 /* 340 * We already ensured in probe that we can have max messages that can 341 * fit in hdr.seq - NOTE: this improves access latencies 342 * to predictable O(1) access, BUT, it opens us to risk if 343 * remote misbehaves with corrupted message sequence responses. 344 * If that happens, we are going to be messed up anyways.. 345 */ 346 xfer_id = (u8)bit_pos; 347 348 xfer = &minfo->xfer_block[xfer_id]; 349 350 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 351 xfer->tx_message.len = tx_message_size; 352 xfer->tx_message.chan_rx = info->chan_rx; 353 xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms; 354 xfer->rx_len = (u8)rx_message_size; 355 356 reinit_completion(&xfer->done); 357 358 hdr->seq = xfer_id; 359 hdr->type = msg_type; 360 hdr->host = info->host_id; 361 hdr->flags = msg_flags; 362 363 return xfer; 364 } 365 366 /** 367 * ti_sci_put_one_xfer() - Release a message 368 * @minfo: transfer info pointer 369 * @xfer: message that was reserved by ti_sci_get_one_xfer 370 * 371 * This holds a spinlock to maintain integrity of internal data structures. 372 */ 373 static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo, 374 struct ti_sci_xfer *xfer) 375 { 376 unsigned long flags; 377 struct ti_sci_msg_hdr *hdr; 378 u8 xfer_id; 379 380 hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 381 xfer_id = hdr->seq; 382 383 /* 384 * Keep the locked section as small as possible 385 * NOTE: we might escape with smp_mb and no lock here.. 386 * but just be conservative and symmetric. 387 */ 388 spin_lock_irqsave(&minfo->xfer_lock, flags); 389 clear_bit(xfer_id, minfo->xfer_alloc_table); 390 spin_unlock_irqrestore(&minfo->xfer_lock, flags); 391 392 /* Increment the count for the next user to get through */ 393 up(&minfo->sem_xfer_count); 394 } 395 396 /** 397 * ti_sci_do_xfer() - Do one transfer 398 * @info: Pointer to SCI entity information 399 * @xfer: Transfer to initiate and wait for response 400 * 401 * Return: -ETIMEDOUT in case of no response, if transmit error, 402 * return corresponding error, else if all goes well, 403 * return 0. 404 */ 405 static inline int ti_sci_do_xfer(struct ti_sci_info *info, 406 struct ti_sci_xfer *xfer) 407 { 408 int ret; 409 int timeout; 410 struct device *dev = info->dev; 411 bool done_state = true; 412 413 ret = mbox_send_message(info->chan_tx, &xfer->tx_message); 414 if (ret < 0) 415 return ret; 416 417 ret = 0; 418 419 if (system_state <= SYSTEM_RUNNING) { 420 /* And we wait for the response. */ 421 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); 422 if (!wait_for_completion_timeout(&xfer->done, timeout)) 423 ret = -ETIMEDOUT; 424 } else { 425 /* 426 * If we are !running, we cannot use wait_for_completion_timeout 427 * during noirq phase, so we must manually poll the completion. 428 */ 429 ret = read_poll_timeout_atomic(try_wait_for_completion, done_state, 430 done_state, 1, 431 info->desc->max_rx_timeout_ms * 1000, 432 false, &xfer->done); 433 } 434 435 if (ret == -ETIMEDOUT) 436 dev_err(dev, "Mbox timedout in resp(caller: %pS)\n", 437 (void *)_RET_IP_); 438 439 /* 440 * NOTE: we might prefer not to need the mailbox ticker to manage the 441 * transfer queueing since the protocol layer queues things by itself. 442 * Unfortunately, we have to kick the mailbox framework after we have 443 * received our message. 444 */ 445 mbox_client_txdone(info->chan_tx, ret); 446 447 return ret; 448 } 449 450 /** 451 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity 452 * @info: Pointer to SCI entity information 453 * 454 * Updates the SCI information in the internal data structure. 455 * 456 * Return: 0 if all went fine, else return appropriate error. 457 */ 458 static int ti_sci_cmd_get_revision(struct ti_sci_info *info) 459 { 460 struct device *dev = info->dev; 461 struct ti_sci_handle *handle = &info->handle; 462 struct ti_sci_version_info *ver = &handle->version; 463 struct ti_sci_msg_resp_version *rev_info; 464 struct ti_sci_xfer *xfer; 465 int ret; 466 467 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION, 468 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 469 sizeof(struct ti_sci_msg_hdr), 470 sizeof(*rev_info)); 471 if (IS_ERR(xfer)) { 472 ret = PTR_ERR(xfer); 473 dev_err(dev, "Message alloc failed(%d)\n", ret); 474 return ret; 475 } 476 477 rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf; 478 479 ret = ti_sci_do_xfer(info, xfer); 480 if (ret) { 481 dev_err(dev, "Mbox send fail %d\n", ret); 482 goto fail; 483 } 484 485 ver->abi_major = rev_info->abi_major; 486 ver->abi_minor = rev_info->abi_minor; 487 ver->firmware_revision = rev_info->firmware_revision; 488 strncpy(ver->firmware_description, rev_info->firmware_description, 489 sizeof(ver->firmware_description)); 490 491 fail: 492 ti_sci_put_one_xfer(&info->minfo, xfer); 493 return ret; 494 } 495 496 /** 497 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup 498 * @r: pointer to response buffer 499 * 500 * Return: true if the response was an ACK, else returns false. 501 */ 502 static inline bool ti_sci_is_response_ack(void *r) 503 { 504 struct ti_sci_msg_hdr *hdr = r; 505 506 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false; 507 } 508 509 /** 510 * ti_sci_set_device_state() - Set device state helper 511 * @handle: pointer to TI SCI handle 512 * @id: Device identifier 513 * @flags: flags to setup for the device 514 * @state: State to move the device to 515 * 516 * Return: 0 if all went well, else returns appropriate error value. 517 */ 518 static int ti_sci_set_device_state(const struct ti_sci_handle *handle, 519 u32 id, u32 flags, u8 state) 520 { 521 struct ti_sci_info *info; 522 struct ti_sci_msg_req_set_device_state *req; 523 struct ti_sci_msg_hdr *resp; 524 struct ti_sci_xfer *xfer; 525 struct device *dev; 526 int ret = 0; 527 528 if (IS_ERR(handle)) 529 return PTR_ERR(handle); 530 if (!handle) 531 return -EINVAL; 532 533 info = handle_to_ti_sci_info(handle); 534 dev = info->dev; 535 536 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE, 537 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 538 sizeof(*req), sizeof(*resp)); 539 if (IS_ERR(xfer)) { 540 ret = PTR_ERR(xfer); 541 dev_err(dev, "Message alloc failed(%d)\n", ret); 542 return ret; 543 } 544 req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf; 545 req->id = id; 546 req->state = state; 547 548 ret = ti_sci_do_xfer(info, xfer); 549 if (ret) { 550 dev_err(dev, "Mbox send fail %d\n", ret); 551 goto fail; 552 } 553 554 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 555 556 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 557 558 fail: 559 ti_sci_put_one_xfer(&info->minfo, xfer); 560 561 return ret; 562 } 563 564 /** 565 * ti_sci_get_device_state() - Get device state helper 566 * @handle: Handle to the device 567 * @id: Device Identifier 568 * @clcnt: Pointer to Context Loss Count 569 * @resets: pointer to resets 570 * @p_state: pointer to p_state 571 * @c_state: pointer to c_state 572 * 573 * Return: 0 if all went fine, else return appropriate error. 574 */ 575 static int ti_sci_get_device_state(const struct ti_sci_handle *handle, 576 u32 id, u32 *clcnt, u32 *resets, 577 u8 *p_state, u8 *c_state) 578 { 579 struct ti_sci_info *info; 580 struct ti_sci_msg_req_get_device_state *req; 581 struct ti_sci_msg_resp_get_device_state *resp; 582 struct ti_sci_xfer *xfer; 583 struct device *dev; 584 int ret = 0; 585 586 if (IS_ERR(handle)) 587 return PTR_ERR(handle); 588 if (!handle) 589 return -EINVAL; 590 591 if (!clcnt && !resets && !p_state && !c_state) 592 return -EINVAL; 593 594 info = handle_to_ti_sci_info(handle); 595 dev = info->dev; 596 597 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, 598 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 599 sizeof(*req), sizeof(*resp)); 600 if (IS_ERR(xfer)) { 601 ret = PTR_ERR(xfer); 602 dev_err(dev, "Message alloc failed(%d)\n", ret); 603 return ret; 604 } 605 req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf; 606 req->id = id; 607 608 ret = ti_sci_do_xfer(info, xfer); 609 if (ret) { 610 dev_err(dev, "Mbox send fail %d\n", ret); 611 goto fail; 612 } 613 614 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf; 615 if (!ti_sci_is_response_ack(resp)) { 616 ret = -ENODEV; 617 goto fail; 618 } 619 620 if (clcnt) 621 *clcnt = resp->context_loss_count; 622 if (resets) 623 *resets = resp->resets; 624 if (p_state) 625 *p_state = resp->programmed_state; 626 if (c_state) 627 *c_state = resp->current_state; 628 fail: 629 ti_sci_put_one_xfer(&info->minfo, xfer); 630 631 return ret; 632 } 633 634 /** 635 * ti_sci_cmd_get_device() - command to request for device managed by TISCI 636 * that can be shared with other hosts. 637 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 638 * @id: Device Identifier 639 * 640 * Request for the device - NOTE: the client MUST maintain integrity of 641 * usage count by balancing get_device with put_device. No refcounting is 642 * managed by driver for that purpose. 643 * 644 * Return: 0 if all went fine, else return appropriate error. 645 */ 646 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) 647 { 648 return ti_sci_set_device_state(handle, id, 0, 649 MSG_DEVICE_SW_STATE_ON); 650 } 651 652 /** 653 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by 654 * TISCI that is exclusively owned by the 655 * requesting host. 656 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 657 * @id: Device Identifier 658 * 659 * Request for the device - NOTE: the client MUST maintain integrity of 660 * usage count by balancing get_device with put_device. No refcounting is 661 * managed by driver for that purpose. 662 * 663 * Return: 0 if all went fine, else return appropriate error. 664 */ 665 static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle, 666 u32 id) 667 { 668 return ti_sci_set_device_state(handle, id, 669 MSG_FLAG_DEVICE_EXCLUSIVE, 670 MSG_DEVICE_SW_STATE_ON); 671 } 672 673 /** 674 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI 675 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 676 * @id: Device Identifier 677 * 678 * Request for the device - NOTE: the client MUST maintain integrity of 679 * usage count by balancing get_device with put_device. No refcounting is 680 * managed by driver for that purpose. 681 * 682 * Return: 0 if all went fine, else return appropriate error. 683 */ 684 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id) 685 { 686 return ti_sci_set_device_state(handle, id, 0, 687 MSG_DEVICE_SW_STATE_RETENTION); 688 } 689 690 /** 691 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by 692 * TISCI that is exclusively owned by 693 * requesting host. 694 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 695 * @id: Device Identifier 696 * 697 * Request for the device - NOTE: the client MUST maintain integrity of 698 * usage count by balancing get_device with put_device. No refcounting is 699 * managed by driver for that purpose. 700 * 701 * Return: 0 if all went fine, else return appropriate error. 702 */ 703 static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle, 704 u32 id) 705 { 706 return ti_sci_set_device_state(handle, id, 707 MSG_FLAG_DEVICE_EXCLUSIVE, 708 MSG_DEVICE_SW_STATE_RETENTION); 709 } 710 711 /** 712 * ti_sci_cmd_put_device() - command to release a device managed by TISCI 713 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 714 * @id: Device Identifier 715 * 716 * Request for the device - NOTE: the client MUST maintain integrity of 717 * usage count by balancing get_device with put_device. No refcounting is 718 * managed by driver for that purpose. 719 * 720 * Return: 0 if all went fine, else return appropriate error. 721 */ 722 static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id) 723 { 724 return ti_sci_set_device_state(handle, id, 725 0, MSG_DEVICE_SW_STATE_AUTO_OFF); 726 } 727 728 /** 729 * ti_sci_cmd_dev_is_valid() - Is the device valid 730 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 731 * @id: Device Identifier 732 * 733 * Return: 0 if all went fine and the device ID is valid, else return 734 * appropriate error. 735 */ 736 static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id) 737 { 738 u8 unused; 739 740 /* check the device state which will also tell us if the ID is valid */ 741 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused); 742 } 743 744 /** 745 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter 746 * @handle: Pointer to TISCI handle 747 * @id: Device Identifier 748 * @count: Pointer to Context Loss counter to populate 749 * 750 * Return: 0 if all went fine, else return appropriate error. 751 */ 752 static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id, 753 u32 *count) 754 { 755 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL); 756 } 757 758 /** 759 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle 760 * @handle: Pointer to TISCI handle 761 * @id: Device Identifier 762 * @r_state: true if requested to be idle 763 * 764 * Return: 0 if all went fine, else return appropriate error. 765 */ 766 static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id, 767 bool *r_state) 768 { 769 int ret; 770 u8 state; 771 772 if (!r_state) 773 return -EINVAL; 774 775 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL); 776 if (ret) 777 return ret; 778 779 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION); 780 781 return 0; 782 } 783 784 /** 785 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped 786 * @handle: Pointer to TISCI handle 787 * @id: Device Identifier 788 * @r_state: true if requested to be stopped 789 * @curr_state: true if currently stopped. 790 * 791 * Return: 0 if all went fine, else return appropriate error. 792 */ 793 static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id, 794 bool *r_state, bool *curr_state) 795 { 796 int ret; 797 u8 p_state, c_state; 798 799 if (!r_state && !curr_state) 800 return -EINVAL; 801 802 ret = 803 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state); 804 if (ret) 805 return ret; 806 807 if (r_state) 808 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF); 809 if (curr_state) 810 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF); 811 812 return 0; 813 } 814 815 /** 816 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON 817 * @handle: Pointer to TISCI handle 818 * @id: Device Identifier 819 * @r_state: true if requested to be ON 820 * @curr_state: true if currently ON and active 821 * 822 * Return: 0 if all went fine, else return appropriate error. 823 */ 824 static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id, 825 bool *r_state, bool *curr_state) 826 { 827 int ret; 828 u8 p_state, c_state; 829 830 if (!r_state && !curr_state) 831 return -EINVAL; 832 833 ret = 834 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state); 835 if (ret) 836 return ret; 837 838 if (r_state) 839 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON); 840 if (curr_state) 841 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON); 842 843 return 0; 844 } 845 846 /** 847 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning 848 * @handle: Pointer to TISCI handle 849 * @id: Device Identifier 850 * @curr_state: true if currently transitioning. 851 * 852 * Return: 0 if all went fine, else return appropriate error. 853 */ 854 static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id, 855 bool *curr_state) 856 { 857 int ret; 858 u8 state; 859 860 if (!curr_state) 861 return -EINVAL; 862 863 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state); 864 if (ret) 865 return ret; 866 867 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS); 868 869 return 0; 870 } 871 872 /** 873 * ti_sci_cmd_set_device_resets() - command to set resets for device managed 874 * by TISCI 875 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle 876 * @id: Device Identifier 877 * @reset_state: Device specific reset bit field 878 * 879 * Return: 0 if all went fine, else return appropriate error. 880 */ 881 static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle, 882 u32 id, u32 reset_state) 883 { 884 struct ti_sci_info *info; 885 struct ti_sci_msg_req_set_device_resets *req; 886 struct ti_sci_msg_hdr *resp; 887 struct ti_sci_xfer *xfer; 888 struct device *dev; 889 int ret = 0; 890 891 if (IS_ERR(handle)) 892 return PTR_ERR(handle); 893 if (!handle) 894 return -EINVAL; 895 896 info = handle_to_ti_sci_info(handle); 897 dev = info->dev; 898 899 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS, 900 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 901 sizeof(*req), sizeof(*resp)); 902 if (IS_ERR(xfer)) { 903 ret = PTR_ERR(xfer); 904 dev_err(dev, "Message alloc failed(%d)\n", ret); 905 return ret; 906 } 907 req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf; 908 req->id = id; 909 req->resets = reset_state; 910 911 ret = ti_sci_do_xfer(info, xfer); 912 if (ret) { 913 dev_err(dev, "Mbox send fail %d\n", ret); 914 goto fail; 915 } 916 917 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 918 919 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 920 921 fail: 922 ti_sci_put_one_xfer(&info->minfo, xfer); 923 924 return ret; 925 } 926 927 /** 928 * ti_sci_cmd_get_device_resets() - Get reset state for device managed 929 * by TISCI 930 * @handle: Pointer to TISCI handle 931 * @id: Device Identifier 932 * @reset_state: Pointer to reset state to populate 933 * 934 * Return: 0 if all went fine, else return appropriate error. 935 */ 936 static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle, 937 u32 id, u32 *reset_state) 938 { 939 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL, 940 NULL); 941 } 942 943 /** 944 * ti_sci_set_clock_state() - Set clock state helper 945 * @handle: pointer to TI SCI handle 946 * @dev_id: Device identifier this request is for 947 * @clk_id: Clock identifier for the device for this request. 948 * Each device has it's own set of clock inputs. This indexes 949 * which clock input to modify. 950 * @flags: Header flags as needed 951 * @state: State to request for the clock. 952 * 953 * Return: 0 if all went well, else returns appropriate error value. 954 */ 955 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle, 956 u32 dev_id, u32 clk_id, 957 u32 flags, u8 state) 958 { 959 struct ti_sci_info *info; 960 struct ti_sci_msg_req_set_clock_state *req; 961 struct ti_sci_msg_hdr *resp; 962 struct ti_sci_xfer *xfer; 963 struct device *dev; 964 int ret = 0; 965 966 if (IS_ERR(handle)) 967 return PTR_ERR(handle); 968 if (!handle) 969 return -EINVAL; 970 971 info = handle_to_ti_sci_info(handle); 972 dev = info->dev; 973 974 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE, 975 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 976 sizeof(*req), sizeof(*resp)); 977 if (IS_ERR(xfer)) { 978 ret = PTR_ERR(xfer); 979 dev_err(dev, "Message alloc failed(%d)\n", ret); 980 return ret; 981 } 982 req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf; 983 req->dev_id = dev_id; 984 if (clk_id < 255) { 985 req->clk_id = clk_id; 986 } else { 987 req->clk_id = 255; 988 req->clk_id_32 = clk_id; 989 } 990 req->request_state = state; 991 992 ret = ti_sci_do_xfer(info, xfer); 993 if (ret) { 994 dev_err(dev, "Mbox send fail %d\n", ret); 995 goto fail; 996 } 997 998 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 999 1000 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 1001 1002 fail: 1003 ti_sci_put_one_xfer(&info->minfo, xfer); 1004 1005 return ret; 1006 } 1007 1008 /** 1009 * ti_sci_cmd_get_clock_state() - Get clock state helper 1010 * @handle: pointer to TI SCI handle 1011 * @dev_id: Device identifier this request is for 1012 * @clk_id: Clock identifier for the device for this request. 1013 * Each device has it's own set of clock inputs. This indexes 1014 * which clock input to modify. 1015 * @programmed_state: State requested for clock to move to 1016 * @current_state: State that the clock is currently in 1017 * 1018 * Return: 0 if all went well, else returns appropriate error value. 1019 */ 1020 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle, 1021 u32 dev_id, u32 clk_id, 1022 u8 *programmed_state, u8 *current_state) 1023 { 1024 struct ti_sci_info *info; 1025 struct ti_sci_msg_req_get_clock_state *req; 1026 struct ti_sci_msg_resp_get_clock_state *resp; 1027 struct ti_sci_xfer *xfer; 1028 struct device *dev; 1029 int ret = 0; 1030 1031 if (IS_ERR(handle)) 1032 return PTR_ERR(handle); 1033 if (!handle) 1034 return -EINVAL; 1035 1036 if (!programmed_state && !current_state) 1037 return -EINVAL; 1038 1039 info = handle_to_ti_sci_info(handle); 1040 dev = info->dev; 1041 1042 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE, 1043 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1044 sizeof(*req), sizeof(*resp)); 1045 if (IS_ERR(xfer)) { 1046 ret = PTR_ERR(xfer); 1047 dev_err(dev, "Message alloc failed(%d)\n", ret); 1048 return ret; 1049 } 1050 req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf; 1051 req->dev_id = dev_id; 1052 if (clk_id < 255) { 1053 req->clk_id = clk_id; 1054 } else { 1055 req->clk_id = 255; 1056 req->clk_id_32 = clk_id; 1057 } 1058 1059 ret = ti_sci_do_xfer(info, xfer); 1060 if (ret) { 1061 dev_err(dev, "Mbox send fail %d\n", ret); 1062 goto fail; 1063 } 1064 1065 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf; 1066 1067 if (!ti_sci_is_response_ack(resp)) { 1068 ret = -ENODEV; 1069 goto fail; 1070 } 1071 1072 if (programmed_state) 1073 *programmed_state = resp->programmed_state; 1074 if (current_state) 1075 *current_state = resp->current_state; 1076 1077 fail: 1078 ti_sci_put_one_xfer(&info->minfo, xfer); 1079 1080 return ret; 1081 } 1082 1083 /** 1084 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI 1085 * @handle: pointer to TI SCI handle 1086 * @dev_id: Device identifier this request is for 1087 * @clk_id: Clock identifier for the device for this request. 1088 * Each device has it's own set of clock inputs. This indexes 1089 * which clock input to modify. 1090 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false' 1091 * @can_change_freq: 'true' if frequency change is desired, else 'false' 1092 * @enable_input_term: 'true' if input termination is desired, else 'false' 1093 * 1094 * Return: 0 if all went well, else returns appropriate error value. 1095 */ 1096 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id, 1097 u32 clk_id, bool needs_ssc, 1098 bool can_change_freq, bool enable_input_term) 1099 { 1100 u32 flags = 0; 1101 1102 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0; 1103 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0; 1104 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0; 1105 1106 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags, 1107 MSG_CLOCK_SW_STATE_REQ); 1108 } 1109 1110 /** 1111 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control 1112 * @handle: pointer to TI SCI handle 1113 * @dev_id: Device identifier this request is for 1114 * @clk_id: Clock identifier for the device for this request. 1115 * Each device has it's own set of clock inputs. This indexes 1116 * which clock input to modify. 1117 * 1118 * NOTE: This clock must have been requested by get_clock previously. 1119 * 1120 * Return: 0 if all went well, else returns appropriate error value. 1121 */ 1122 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle, 1123 u32 dev_id, u32 clk_id) 1124 { 1125 return ti_sci_set_clock_state(handle, dev_id, clk_id, 1126 MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE, 1127 MSG_CLOCK_SW_STATE_UNREQ); 1128 } 1129 1130 /** 1131 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI 1132 * @handle: pointer to TI SCI handle 1133 * @dev_id: Device identifier this request is for 1134 * @clk_id: Clock identifier for the device for this request. 1135 * Each device has it's own set of clock inputs. This indexes 1136 * which clock input to modify. 1137 * 1138 * NOTE: This clock must have been requested by get_clock previously. 1139 * 1140 * Return: 0 if all went well, else returns appropriate error value. 1141 */ 1142 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle, 1143 u32 dev_id, u32 clk_id) 1144 { 1145 return ti_sci_set_clock_state(handle, dev_id, clk_id, 1146 MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE, 1147 MSG_CLOCK_SW_STATE_AUTO); 1148 } 1149 1150 /** 1151 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed 1152 * @handle: pointer to TI SCI handle 1153 * @dev_id: Device identifier this request is for 1154 * @clk_id: Clock identifier for the device for this request. 1155 * Each device has it's own set of clock inputs. This indexes 1156 * which clock input to modify. 1157 * @req_state: state indicating if the clock is auto managed 1158 * 1159 * Return: 0 if all went well, else returns appropriate error value. 1160 */ 1161 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle, 1162 u32 dev_id, u32 clk_id, bool *req_state) 1163 { 1164 u8 state = 0; 1165 int ret; 1166 1167 if (!req_state) 1168 return -EINVAL; 1169 1170 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL); 1171 if (ret) 1172 return ret; 1173 1174 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO); 1175 return 0; 1176 } 1177 1178 /** 1179 * ti_sci_cmd_clk_is_on() - Is the clock ON 1180 * @handle: pointer to TI SCI handle 1181 * @dev_id: Device identifier this request is for 1182 * @clk_id: Clock identifier for the device for this request. 1183 * Each device has it's own set of clock inputs. This indexes 1184 * which clock input to modify. 1185 * @req_state: state indicating if the clock is managed by us and enabled 1186 * @curr_state: state indicating if the clock is ready for operation 1187 * 1188 * Return: 0 if all went well, else returns appropriate error value. 1189 */ 1190 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id, 1191 u32 clk_id, bool *req_state, bool *curr_state) 1192 { 1193 u8 c_state = 0, r_state = 0; 1194 int ret; 1195 1196 if (!req_state && !curr_state) 1197 return -EINVAL; 1198 1199 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, 1200 &r_state, &c_state); 1201 if (ret) 1202 return ret; 1203 1204 if (req_state) 1205 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ); 1206 if (curr_state) 1207 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY); 1208 return 0; 1209 } 1210 1211 /** 1212 * ti_sci_cmd_clk_is_off() - Is the clock OFF 1213 * @handle: pointer to TI SCI handle 1214 * @dev_id: Device identifier this request is for 1215 * @clk_id: Clock identifier for the device for this request. 1216 * Each device has it's own set of clock inputs. This indexes 1217 * which clock input to modify. 1218 * @req_state: state indicating if the clock is managed by us and disabled 1219 * @curr_state: state indicating if the clock is NOT ready for operation 1220 * 1221 * Return: 0 if all went well, else returns appropriate error value. 1222 */ 1223 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id, 1224 u32 clk_id, bool *req_state, bool *curr_state) 1225 { 1226 u8 c_state = 0, r_state = 0; 1227 int ret; 1228 1229 if (!req_state && !curr_state) 1230 return -EINVAL; 1231 1232 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, 1233 &r_state, &c_state); 1234 if (ret) 1235 return ret; 1236 1237 if (req_state) 1238 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ); 1239 if (curr_state) 1240 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY); 1241 return 0; 1242 } 1243 1244 /** 1245 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock 1246 * @handle: pointer to TI SCI handle 1247 * @dev_id: Device identifier this request is for 1248 * @clk_id: Clock identifier for the device for this request. 1249 * Each device has it's own set of clock inputs. This indexes 1250 * which clock input to modify. 1251 * @parent_id: Parent clock identifier to set 1252 * 1253 * Return: 0 if all went well, else returns appropriate error value. 1254 */ 1255 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle, 1256 u32 dev_id, u32 clk_id, u32 parent_id) 1257 { 1258 struct ti_sci_info *info; 1259 struct ti_sci_msg_req_set_clock_parent *req; 1260 struct ti_sci_msg_hdr *resp; 1261 struct ti_sci_xfer *xfer; 1262 struct device *dev; 1263 int ret = 0; 1264 1265 if (IS_ERR(handle)) 1266 return PTR_ERR(handle); 1267 if (!handle) 1268 return -EINVAL; 1269 1270 info = handle_to_ti_sci_info(handle); 1271 dev = info->dev; 1272 1273 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT, 1274 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1275 sizeof(*req), sizeof(*resp)); 1276 if (IS_ERR(xfer)) { 1277 ret = PTR_ERR(xfer); 1278 dev_err(dev, "Message alloc failed(%d)\n", ret); 1279 return ret; 1280 } 1281 req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf; 1282 req->dev_id = dev_id; 1283 if (clk_id < 255) { 1284 req->clk_id = clk_id; 1285 } else { 1286 req->clk_id = 255; 1287 req->clk_id_32 = clk_id; 1288 } 1289 if (parent_id < 255) { 1290 req->parent_id = parent_id; 1291 } else { 1292 req->parent_id = 255; 1293 req->parent_id_32 = parent_id; 1294 } 1295 1296 ret = ti_sci_do_xfer(info, xfer); 1297 if (ret) { 1298 dev_err(dev, "Mbox send fail %d\n", ret); 1299 goto fail; 1300 } 1301 1302 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1303 1304 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 1305 1306 fail: 1307 ti_sci_put_one_xfer(&info->minfo, xfer); 1308 1309 return ret; 1310 } 1311 1312 /** 1313 * ti_sci_cmd_clk_get_parent() - Get current parent clock source 1314 * @handle: pointer to TI SCI handle 1315 * @dev_id: Device identifier this request is for 1316 * @clk_id: Clock identifier for the device for this request. 1317 * Each device has it's own set of clock inputs. This indexes 1318 * which clock input to modify. 1319 * @parent_id: Current clock parent 1320 * 1321 * Return: 0 if all went well, else returns appropriate error value. 1322 */ 1323 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle, 1324 u32 dev_id, u32 clk_id, u32 *parent_id) 1325 { 1326 struct ti_sci_info *info; 1327 struct ti_sci_msg_req_get_clock_parent *req; 1328 struct ti_sci_msg_resp_get_clock_parent *resp; 1329 struct ti_sci_xfer *xfer; 1330 struct device *dev; 1331 int ret = 0; 1332 1333 if (IS_ERR(handle)) 1334 return PTR_ERR(handle); 1335 if (!handle || !parent_id) 1336 return -EINVAL; 1337 1338 info = handle_to_ti_sci_info(handle); 1339 dev = info->dev; 1340 1341 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT, 1342 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1343 sizeof(*req), sizeof(*resp)); 1344 if (IS_ERR(xfer)) { 1345 ret = PTR_ERR(xfer); 1346 dev_err(dev, "Message alloc failed(%d)\n", ret); 1347 return ret; 1348 } 1349 req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf; 1350 req->dev_id = dev_id; 1351 if (clk_id < 255) { 1352 req->clk_id = clk_id; 1353 } else { 1354 req->clk_id = 255; 1355 req->clk_id_32 = clk_id; 1356 } 1357 1358 ret = ti_sci_do_xfer(info, xfer); 1359 if (ret) { 1360 dev_err(dev, "Mbox send fail %d\n", ret); 1361 goto fail; 1362 } 1363 1364 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf; 1365 1366 if (!ti_sci_is_response_ack(resp)) { 1367 ret = -ENODEV; 1368 } else { 1369 if (resp->parent_id < 255) 1370 *parent_id = resp->parent_id; 1371 else 1372 *parent_id = resp->parent_id_32; 1373 } 1374 1375 fail: 1376 ti_sci_put_one_xfer(&info->minfo, xfer); 1377 1378 return ret; 1379 } 1380 1381 /** 1382 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source 1383 * @handle: pointer to TI SCI handle 1384 * @dev_id: Device identifier this request is for 1385 * @clk_id: Clock identifier for the device for this request. 1386 * Each device has it's own set of clock inputs. This indexes 1387 * which clock input to modify. 1388 * @num_parents: Returns he number of parents to the current clock. 1389 * 1390 * Return: 0 if all went well, else returns appropriate error value. 1391 */ 1392 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle, 1393 u32 dev_id, u32 clk_id, 1394 u32 *num_parents) 1395 { 1396 struct ti_sci_info *info; 1397 struct ti_sci_msg_req_get_clock_num_parents *req; 1398 struct ti_sci_msg_resp_get_clock_num_parents *resp; 1399 struct ti_sci_xfer *xfer; 1400 struct device *dev; 1401 int ret = 0; 1402 1403 if (IS_ERR(handle)) 1404 return PTR_ERR(handle); 1405 if (!handle || !num_parents) 1406 return -EINVAL; 1407 1408 info = handle_to_ti_sci_info(handle); 1409 dev = info->dev; 1410 1411 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 1412 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1413 sizeof(*req), sizeof(*resp)); 1414 if (IS_ERR(xfer)) { 1415 ret = PTR_ERR(xfer); 1416 dev_err(dev, "Message alloc failed(%d)\n", ret); 1417 return ret; 1418 } 1419 req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf; 1420 req->dev_id = dev_id; 1421 if (clk_id < 255) { 1422 req->clk_id = clk_id; 1423 } else { 1424 req->clk_id = 255; 1425 req->clk_id_32 = clk_id; 1426 } 1427 1428 ret = ti_sci_do_xfer(info, xfer); 1429 if (ret) { 1430 dev_err(dev, "Mbox send fail %d\n", ret); 1431 goto fail; 1432 } 1433 1434 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf; 1435 1436 if (!ti_sci_is_response_ack(resp)) { 1437 ret = -ENODEV; 1438 } else { 1439 if (resp->num_parents < 255) 1440 *num_parents = resp->num_parents; 1441 else 1442 *num_parents = resp->num_parents_32; 1443 } 1444 1445 fail: 1446 ti_sci_put_one_xfer(&info->minfo, xfer); 1447 1448 return ret; 1449 } 1450 1451 /** 1452 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency 1453 * @handle: pointer to TI SCI handle 1454 * @dev_id: Device identifier this request is for 1455 * @clk_id: Clock identifier for the device for this request. 1456 * Each device has it's own set of clock inputs. This indexes 1457 * which clock input to modify. 1458 * @min_freq: The minimum allowable frequency in Hz. This is the minimum 1459 * allowable programmed frequency and does not account for clock 1460 * tolerances and jitter. 1461 * @target_freq: The target clock frequency in Hz. A frequency will be 1462 * processed as close to this target frequency as possible. 1463 * @max_freq: The maximum allowable frequency in Hz. This is the maximum 1464 * allowable programmed frequency and does not account for clock 1465 * tolerances and jitter. 1466 * @match_freq: Frequency match in Hz response. 1467 * 1468 * Return: 0 if all went well, else returns appropriate error value. 1469 */ 1470 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle, 1471 u32 dev_id, u32 clk_id, u64 min_freq, 1472 u64 target_freq, u64 max_freq, 1473 u64 *match_freq) 1474 { 1475 struct ti_sci_info *info; 1476 struct ti_sci_msg_req_query_clock_freq *req; 1477 struct ti_sci_msg_resp_query_clock_freq *resp; 1478 struct ti_sci_xfer *xfer; 1479 struct device *dev; 1480 int ret = 0; 1481 1482 if (IS_ERR(handle)) 1483 return PTR_ERR(handle); 1484 if (!handle || !match_freq) 1485 return -EINVAL; 1486 1487 info = handle_to_ti_sci_info(handle); 1488 dev = info->dev; 1489 1490 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ, 1491 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1492 sizeof(*req), sizeof(*resp)); 1493 if (IS_ERR(xfer)) { 1494 ret = PTR_ERR(xfer); 1495 dev_err(dev, "Message alloc failed(%d)\n", ret); 1496 return ret; 1497 } 1498 req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf; 1499 req->dev_id = dev_id; 1500 if (clk_id < 255) { 1501 req->clk_id = clk_id; 1502 } else { 1503 req->clk_id = 255; 1504 req->clk_id_32 = clk_id; 1505 } 1506 req->min_freq_hz = min_freq; 1507 req->target_freq_hz = target_freq; 1508 req->max_freq_hz = max_freq; 1509 1510 ret = ti_sci_do_xfer(info, xfer); 1511 if (ret) { 1512 dev_err(dev, "Mbox send fail %d\n", ret); 1513 goto fail; 1514 } 1515 1516 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf; 1517 1518 if (!ti_sci_is_response_ack(resp)) 1519 ret = -ENODEV; 1520 else 1521 *match_freq = resp->freq_hz; 1522 1523 fail: 1524 ti_sci_put_one_xfer(&info->minfo, xfer); 1525 1526 return ret; 1527 } 1528 1529 /** 1530 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock 1531 * @handle: pointer to TI SCI handle 1532 * @dev_id: Device identifier this request is for 1533 * @clk_id: Clock identifier for the device for this request. 1534 * Each device has it's own set of clock inputs. This indexes 1535 * which clock input to modify. 1536 * @min_freq: The minimum allowable frequency in Hz. This is the minimum 1537 * allowable programmed frequency and does not account for clock 1538 * tolerances and jitter. 1539 * @target_freq: The target clock frequency in Hz. A frequency will be 1540 * processed as close to this target frequency as possible. 1541 * @max_freq: The maximum allowable frequency in Hz. This is the maximum 1542 * allowable programmed frequency and does not account for clock 1543 * tolerances and jitter. 1544 * 1545 * Return: 0 if all went well, else returns appropriate error value. 1546 */ 1547 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle, 1548 u32 dev_id, u32 clk_id, u64 min_freq, 1549 u64 target_freq, u64 max_freq) 1550 { 1551 struct ti_sci_info *info; 1552 struct ti_sci_msg_req_set_clock_freq *req; 1553 struct ti_sci_msg_hdr *resp; 1554 struct ti_sci_xfer *xfer; 1555 struct device *dev; 1556 int ret = 0; 1557 1558 if (IS_ERR(handle)) 1559 return PTR_ERR(handle); 1560 if (!handle) 1561 return -EINVAL; 1562 1563 info = handle_to_ti_sci_info(handle); 1564 dev = info->dev; 1565 1566 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ, 1567 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1568 sizeof(*req), sizeof(*resp)); 1569 if (IS_ERR(xfer)) { 1570 ret = PTR_ERR(xfer); 1571 dev_err(dev, "Message alloc failed(%d)\n", ret); 1572 return ret; 1573 } 1574 req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf; 1575 req->dev_id = dev_id; 1576 if (clk_id < 255) { 1577 req->clk_id = clk_id; 1578 } else { 1579 req->clk_id = 255; 1580 req->clk_id_32 = clk_id; 1581 } 1582 req->min_freq_hz = min_freq; 1583 req->target_freq_hz = target_freq; 1584 req->max_freq_hz = max_freq; 1585 1586 ret = ti_sci_do_xfer(info, xfer); 1587 if (ret) { 1588 dev_err(dev, "Mbox send fail %d\n", ret); 1589 goto fail; 1590 } 1591 1592 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1593 1594 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 1595 1596 fail: 1597 ti_sci_put_one_xfer(&info->minfo, xfer); 1598 1599 return ret; 1600 } 1601 1602 /** 1603 * ti_sci_cmd_clk_get_freq() - Get current frequency 1604 * @handle: pointer to TI SCI handle 1605 * @dev_id: Device identifier this request is for 1606 * @clk_id: Clock identifier for the device for this request. 1607 * Each device has it's own set of clock inputs. This indexes 1608 * which clock input to modify. 1609 * @freq: Currently frequency in Hz 1610 * 1611 * Return: 0 if all went well, else returns appropriate error value. 1612 */ 1613 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle, 1614 u32 dev_id, u32 clk_id, u64 *freq) 1615 { 1616 struct ti_sci_info *info; 1617 struct ti_sci_msg_req_get_clock_freq *req; 1618 struct ti_sci_msg_resp_get_clock_freq *resp; 1619 struct ti_sci_xfer *xfer; 1620 struct device *dev; 1621 int ret = 0; 1622 1623 if (IS_ERR(handle)) 1624 return PTR_ERR(handle); 1625 if (!handle || !freq) 1626 return -EINVAL; 1627 1628 info = handle_to_ti_sci_info(handle); 1629 dev = info->dev; 1630 1631 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ, 1632 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1633 sizeof(*req), sizeof(*resp)); 1634 if (IS_ERR(xfer)) { 1635 ret = PTR_ERR(xfer); 1636 dev_err(dev, "Message alloc failed(%d)\n", ret); 1637 return ret; 1638 } 1639 req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf; 1640 req->dev_id = dev_id; 1641 if (clk_id < 255) { 1642 req->clk_id = clk_id; 1643 } else { 1644 req->clk_id = 255; 1645 req->clk_id_32 = clk_id; 1646 } 1647 1648 ret = ti_sci_do_xfer(info, xfer); 1649 if (ret) { 1650 dev_err(dev, "Mbox send fail %d\n", ret); 1651 goto fail; 1652 } 1653 1654 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf; 1655 1656 if (!ti_sci_is_response_ack(resp)) 1657 ret = -ENODEV; 1658 else 1659 *freq = resp->freq_hz; 1660 1661 fail: 1662 ti_sci_put_one_xfer(&info->minfo, xfer); 1663 1664 return ret; 1665 } 1666 1667 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle) 1668 { 1669 struct ti_sci_info *info; 1670 struct ti_sci_msg_req_reboot *req; 1671 struct ti_sci_msg_hdr *resp; 1672 struct ti_sci_xfer *xfer; 1673 struct device *dev; 1674 int ret = 0; 1675 1676 if (IS_ERR(handle)) 1677 return PTR_ERR(handle); 1678 if (!handle) 1679 return -EINVAL; 1680 1681 info = handle_to_ti_sci_info(handle); 1682 dev = info->dev; 1683 1684 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET, 1685 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1686 sizeof(*req), sizeof(*resp)); 1687 if (IS_ERR(xfer)) { 1688 ret = PTR_ERR(xfer); 1689 dev_err(dev, "Message alloc failed(%d)\n", ret); 1690 return ret; 1691 } 1692 req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf; 1693 1694 ret = ti_sci_do_xfer(info, xfer); 1695 if (ret) { 1696 dev_err(dev, "Mbox send fail %d\n", ret); 1697 goto fail; 1698 } 1699 1700 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1701 1702 if (!ti_sci_is_response_ack(resp)) 1703 ret = -ENODEV; 1704 else 1705 ret = 0; 1706 1707 fail: 1708 ti_sci_put_one_xfer(&info->minfo, xfer); 1709 1710 return ret; 1711 } 1712 1713 /** 1714 * ti_sci_get_resource_range - Helper to get a range of resources assigned 1715 * to a host. Resource is uniquely identified by 1716 * type and subtype. 1717 * @handle: Pointer to TISCI handle. 1718 * @dev_id: TISCI device ID. 1719 * @subtype: Resource assignment subtype that is being requested 1720 * from the given device. 1721 * @s_host: Host processor ID to which the resources are allocated 1722 * @desc: Pointer to ti_sci_resource_desc to be updated with the 1723 * resource range start index and number of resources 1724 * 1725 * Return: 0 if all went fine, else return appropriate error. 1726 */ 1727 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle, 1728 u32 dev_id, u8 subtype, u8 s_host, 1729 struct ti_sci_resource_desc *desc) 1730 { 1731 struct ti_sci_msg_resp_get_resource_range *resp; 1732 struct ti_sci_msg_req_get_resource_range *req; 1733 struct ti_sci_xfer *xfer; 1734 struct ti_sci_info *info; 1735 struct device *dev; 1736 int ret = 0; 1737 1738 if (IS_ERR(handle)) 1739 return PTR_ERR(handle); 1740 if (!handle || !desc) 1741 return -EINVAL; 1742 1743 info = handle_to_ti_sci_info(handle); 1744 dev = info->dev; 1745 1746 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE, 1747 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1748 sizeof(*req), sizeof(*resp)); 1749 if (IS_ERR(xfer)) { 1750 ret = PTR_ERR(xfer); 1751 dev_err(dev, "Message alloc failed(%d)\n", ret); 1752 return ret; 1753 } 1754 1755 req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf; 1756 req->secondary_host = s_host; 1757 req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK; 1758 req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK; 1759 1760 ret = ti_sci_do_xfer(info, xfer); 1761 if (ret) { 1762 dev_err(dev, "Mbox send fail %d\n", ret); 1763 goto fail; 1764 } 1765 1766 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf; 1767 1768 if (!ti_sci_is_response_ack(resp)) { 1769 ret = -ENODEV; 1770 } else if (!resp->range_num && !resp->range_num_sec) { 1771 /* Neither of the two resource range is valid */ 1772 ret = -ENODEV; 1773 } else { 1774 desc->start = resp->range_start; 1775 desc->num = resp->range_num; 1776 desc->start_sec = resp->range_start_sec; 1777 desc->num_sec = resp->range_num_sec; 1778 } 1779 1780 fail: 1781 ti_sci_put_one_xfer(&info->minfo, xfer); 1782 1783 return ret; 1784 } 1785 1786 /** 1787 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host 1788 * that is same as ti sci interface host. 1789 * @handle: Pointer to TISCI handle. 1790 * @dev_id: TISCI device ID. 1791 * @subtype: Resource assignment subtype that is being requested 1792 * from the given device. 1793 * @desc: Pointer to ti_sci_resource_desc to be updated with the 1794 * resource range start index and number of resources 1795 * 1796 * Return: 0 if all went fine, else return appropriate error. 1797 */ 1798 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle, 1799 u32 dev_id, u8 subtype, 1800 struct ti_sci_resource_desc *desc) 1801 { 1802 return ti_sci_get_resource_range(handle, dev_id, subtype, 1803 TI_SCI_IRQ_SECONDARY_HOST_INVALID, 1804 desc); 1805 } 1806 1807 /** 1808 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources 1809 * assigned to a specified host. 1810 * @handle: Pointer to TISCI handle. 1811 * @dev_id: TISCI device ID. 1812 * @subtype: Resource assignment subtype that is being requested 1813 * from the given device. 1814 * @s_host: Host processor ID to which the resources are allocated 1815 * @desc: Pointer to ti_sci_resource_desc to be updated with the 1816 * resource range start index and number of resources 1817 * 1818 * Return: 0 if all went fine, else return appropriate error. 1819 */ 1820 static 1821 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle, 1822 u32 dev_id, u8 subtype, u8 s_host, 1823 struct ti_sci_resource_desc *desc) 1824 { 1825 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc); 1826 } 1827 1828 /** 1829 * ti_sci_manage_irq() - Helper api to configure/release the irq route between 1830 * the requested source and destination 1831 * @handle: Pointer to TISCI handle. 1832 * @valid_params: Bit fields defining the validity of certain params 1833 * @src_id: Device ID of the IRQ source 1834 * @src_index: IRQ source index within the source device 1835 * @dst_id: Device ID of the IRQ destination 1836 * @dst_host_irq: IRQ number of the destination device 1837 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1838 * @vint: Virtual interrupt to be used within the IA 1839 * @global_event: Global event number to be used for the requesting event 1840 * @vint_status_bit: Virtual interrupt status bit to be used for the event 1841 * @s_host: Secondary host ID to which the irq/event is being 1842 * requested for. 1843 * @type: Request type irq set or release. 1844 * 1845 * Return: 0 if all went fine, else return appropriate error. 1846 */ 1847 static int ti_sci_manage_irq(const struct ti_sci_handle *handle, 1848 u32 valid_params, u16 src_id, u16 src_index, 1849 u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint, 1850 u16 global_event, u8 vint_status_bit, u8 s_host, 1851 u16 type) 1852 { 1853 struct ti_sci_msg_req_manage_irq *req; 1854 struct ti_sci_msg_hdr *resp; 1855 struct ti_sci_xfer *xfer; 1856 struct ti_sci_info *info; 1857 struct device *dev; 1858 int ret = 0; 1859 1860 if (IS_ERR(handle)) 1861 return PTR_ERR(handle); 1862 if (!handle) 1863 return -EINVAL; 1864 1865 info = handle_to_ti_sci_info(handle); 1866 dev = info->dev; 1867 1868 xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 1869 sizeof(*req), sizeof(*resp)); 1870 if (IS_ERR(xfer)) { 1871 ret = PTR_ERR(xfer); 1872 dev_err(dev, "Message alloc failed(%d)\n", ret); 1873 return ret; 1874 } 1875 req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf; 1876 req->valid_params = valid_params; 1877 req->src_id = src_id; 1878 req->src_index = src_index; 1879 req->dst_id = dst_id; 1880 req->dst_host_irq = dst_host_irq; 1881 req->ia_id = ia_id; 1882 req->vint = vint; 1883 req->global_event = global_event; 1884 req->vint_status_bit = vint_status_bit; 1885 req->secondary_host = s_host; 1886 1887 ret = ti_sci_do_xfer(info, xfer); 1888 if (ret) { 1889 dev_err(dev, "Mbox send fail %d\n", ret); 1890 goto fail; 1891 } 1892 1893 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 1894 1895 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 1896 1897 fail: 1898 ti_sci_put_one_xfer(&info->minfo, xfer); 1899 1900 return ret; 1901 } 1902 1903 /** 1904 * ti_sci_set_irq() - Helper api to configure the irq route between the 1905 * requested source and destination 1906 * @handle: Pointer to TISCI handle. 1907 * @valid_params: Bit fields defining the validity of certain params 1908 * @src_id: Device ID of the IRQ source 1909 * @src_index: IRQ source index within the source device 1910 * @dst_id: Device ID of the IRQ destination 1911 * @dst_host_irq: IRQ number of the destination device 1912 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1913 * @vint: Virtual interrupt to be used within the IA 1914 * @global_event: Global event number to be used for the requesting event 1915 * @vint_status_bit: Virtual interrupt status bit to be used for the event 1916 * @s_host: Secondary host ID to which the irq/event is being 1917 * requested for. 1918 * 1919 * Return: 0 if all went fine, else return appropriate error. 1920 */ 1921 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params, 1922 u16 src_id, u16 src_index, u16 dst_id, 1923 u16 dst_host_irq, u16 ia_id, u16 vint, 1924 u16 global_event, u8 vint_status_bit, u8 s_host) 1925 { 1926 pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n", 1927 __func__, valid_params, src_id, src_index, 1928 dst_id, dst_host_irq, ia_id, vint, global_event, 1929 vint_status_bit); 1930 1931 return ti_sci_manage_irq(handle, valid_params, src_id, src_index, 1932 dst_id, dst_host_irq, ia_id, vint, 1933 global_event, vint_status_bit, s_host, 1934 TI_SCI_MSG_SET_IRQ); 1935 } 1936 1937 /** 1938 * ti_sci_free_irq() - Helper api to free the irq route between the 1939 * requested source and destination 1940 * @handle: Pointer to TISCI handle. 1941 * @valid_params: Bit fields defining the validity of certain params 1942 * @src_id: Device ID of the IRQ source 1943 * @src_index: IRQ source index within the source device 1944 * @dst_id: Device ID of the IRQ destination 1945 * @dst_host_irq: IRQ number of the destination device 1946 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1947 * @vint: Virtual interrupt to be used within the IA 1948 * @global_event: Global event number to be used for the requesting event 1949 * @vint_status_bit: Virtual interrupt status bit to be used for the event 1950 * @s_host: Secondary host ID to which the irq/event is being 1951 * requested for. 1952 * 1953 * Return: 0 if all went fine, else return appropriate error. 1954 */ 1955 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params, 1956 u16 src_id, u16 src_index, u16 dst_id, 1957 u16 dst_host_irq, u16 ia_id, u16 vint, 1958 u16 global_event, u8 vint_status_bit, u8 s_host) 1959 { 1960 pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n", 1961 __func__, valid_params, src_id, src_index, 1962 dst_id, dst_host_irq, ia_id, vint, global_event, 1963 vint_status_bit); 1964 1965 return ti_sci_manage_irq(handle, valid_params, src_id, src_index, 1966 dst_id, dst_host_irq, ia_id, vint, 1967 global_event, vint_status_bit, s_host, 1968 TI_SCI_MSG_FREE_IRQ); 1969 } 1970 1971 /** 1972 * ti_sci_cmd_set_irq() - Configure a host irq route between the requested 1973 * source and destination. 1974 * @handle: Pointer to TISCI handle. 1975 * @src_id: Device ID of the IRQ source 1976 * @src_index: IRQ source index within the source device 1977 * @dst_id: Device ID of the IRQ destination 1978 * @dst_host_irq: IRQ number of the destination device 1979 * 1980 * Return: 0 if all went fine, else return appropriate error. 1981 */ 1982 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id, 1983 u16 src_index, u16 dst_id, u16 dst_host_irq) 1984 { 1985 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; 1986 1987 return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id, 1988 dst_host_irq, 0, 0, 0, 0, 0); 1989 } 1990 1991 /** 1992 * ti_sci_cmd_set_event_map() - Configure an event based irq route between the 1993 * requested source and Interrupt Aggregator. 1994 * @handle: Pointer to TISCI handle. 1995 * @src_id: Device ID of the IRQ source 1996 * @src_index: IRQ source index within the source device 1997 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 1998 * @vint: Virtual interrupt to be used within the IA 1999 * @global_event: Global event number to be used for the requesting event 2000 * @vint_status_bit: Virtual interrupt status bit to be used for the event 2001 * 2002 * Return: 0 if all went fine, else return appropriate error. 2003 */ 2004 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle, 2005 u16 src_id, u16 src_index, u16 ia_id, 2006 u16 vint, u16 global_event, 2007 u8 vint_status_bit) 2008 { 2009 u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID | 2010 MSG_FLAG_GLB_EVNT_VALID | 2011 MSG_FLAG_VINT_STS_BIT_VALID; 2012 2013 return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0, 2014 ia_id, vint, global_event, vint_status_bit, 0); 2015 } 2016 2017 /** 2018 * ti_sci_cmd_free_irq() - Free a host irq route between the between the 2019 * requested source and destination. 2020 * @handle: Pointer to TISCI handle. 2021 * @src_id: Device ID of the IRQ source 2022 * @src_index: IRQ source index within the source device 2023 * @dst_id: Device ID of the IRQ destination 2024 * @dst_host_irq: IRQ number of the destination device 2025 * 2026 * Return: 0 if all went fine, else return appropriate error. 2027 */ 2028 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id, 2029 u16 src_index, u16 dst_id, u16 dst_host_irq) 2030 { 2031 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; 2032 2033 return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id, 2034 dst_host_irq, 0, 0, 0, 0, 0); 2035 } 2036 2037 /** 2038 * ti_sci_cmd_free_event_map() - Free an event map between the requested source 2039 * and Interrupt Aggregator. 2040 * @handle: Pointer to TISCI handle. 2041 * @src_id: Device ID of the IRQ source 2042 * @src_index: IRQ source index within the source device 2043 * @ia_id: Device ID of the IA, if the IRQ flows through this IA 2044 * @vint: Virtual interrupt to be used within the IA 2045 * @global_event: Global event number to be used for the requesting event 2046 * @vint_status_bit: Virtual interrupt status bit to be used for the event 2047 * 2048 * Return: 0 if all went fine, else return appropriate error. 2049 */ 2050 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle, 2051 u16 src_id, u16 src_index, u16 ia_id, 2052 u16 vint, u16 global_event, 2053 u8 vint_status_bit) 2054 { 2055 u32 valid_params = MSG_FLAG_IA_ID_VALID | 2056 MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID | 2057 MSG_FLAG_VINT_STS_BIT_VALID; 2058 2059 return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0, 2060 ia_id, vint, global_event, vint_status_bit, 0); 2061 } 2062 2063 /** 2064 * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring 2065 * @handle: Pointer to TI SCI handle. 2066 * @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure 2067 * 2068 * Return: 0 if all went well, else returns appropriate error value. 2069 * 2070 * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for 2071 * more info. 2072 */ 2073 static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle, 2074 const struct ti_sci_msg_rm_ring_cfg *params) 2075 { 2076 struct ti_sci_msg_rm_ring_cfg_req *req; 2077 struct ti_sci_msg_hdr *resp; 2078 struct ti_sci_xfer *xfer; 2079 struct ti_sci_info *info; 2080 struct device *dev; 2081 int ret = 0; 2082 2083 if (IS_ERR_OR_NULL(handle)) 2084 return -EINVAL; 2085 2086 info = handle_to_ti_sci_info(handle); 2087 dev = info->dev; 2088 2089 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG, 2090 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2091 sizeof(*req), sizeof(*resp)); 2092 if (IS_ERR(xfer)) { 2093 ret = PTR_ERR(xfer); 2094 dev_err(dev, "RM_RA:Message config failed(%d)\n", ret); 2095 return ret; 2096 } 2097 req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf; 2098 req->valid_params = params->valid_params; 2099 req->nav_id = params->nav_id; 2100 req->index = params->index; 2101 req->addr_lo = params->addr_lo; 2102 req->addr_hi = params->addr_hi; 2103 req->count = params->count; 2104 req->mode = params->mode; 2105 req->size = params->size; 2106 req->order_id = params->order_id; 2107 req->virtid = params->virtid; 2108 req->asel = params->asel; 2109 2110 ret = ti_sci_do_xfer(info, xfer); 2111 if (ret) { 2112 dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret); 2113 goto fail; 2114 } 2115 2116 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2117 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2118 2119 fail: 2120 ti_sci_put_one_xfer(&info->minfo, xfer); 2121 dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret); 2122 return ret; 2123 } 2124 2125 /** 2126 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread 2127 * @handle: Pointer to TI SCI handle. 2128 * @nav_id: Device ID of Navigator Subsystem which should be used for 2129 * pairing 2130 * @src_thread: Source PSI-L thread ID 2131 * @dst_thread: Destination PSI-L thread ID 2132 * 2133 * Return: 0 if all went well, else returns appropriate error value. 2134 */ 2135 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle, 2136 u32 nav_id, u32 src_thread, u32 dst_thread) 2137 { 2138 struct ti_sci_msg_psil_pair *req; 2139 struct ti_sci_msg_hdr *resp; 2140 struct ti_sci_xfer *xfer; 2141 struct ti_sci_info *info; 2142 struct device *dev; 2143 int ret = 0; 2144 2145 if (IS_ERR(handle)) 2146 return PTR_ERR(handle); 2147 if (!handle) 2148 return -EINVAL; 2149 2150 info = handle_to_ti_sci_info(handle); 2151 dev = info->dev; 2152 2153 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR, 2154 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2155 sizeof(*req), sizeof(*resp)); 2156 if (IS_ERR(xfer)) { 2157 ret = PTR_ERR(xfer); 2158 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret); 2159 return ret; 2160 } 2161 req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf; 2162 req->nav_id = nav_id; 2163 req->src_thread = src_thread; 2164 req->dst_thread = dst_thread; 2165 2166 ret = ti_sci_do_xfer(info, xfer); 2167 if (ret) { 2168 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret); 2169 goto fail; 2170 } 2171 2172 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2173 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2174 2175 fail: 2176 ti_sci_put_one_xfer(&info->minfo, xfer); 2177 2178 return ret; 2179 } 2180 2181 /** 2182 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread 2183 * @handle: Pointer to TI SCI handle. 2184 * @nav_id: Device ID of Navigator Subsystem which should be used for 2185 * unpairing 2186 * @src_thread: Source PSI-L thread ID 2187 * @dst_thread: Destination PSI-L thread ID 2188 * 2189 * Return: 0 if all went well, else returns appropriate error value. 2190 */ 2191 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle, 2192 u32 nav_id, u32 src_thread, u32 dst_thread) 2193 { 2194 struct ti_sci_msg_psil_unpair *req; 2195 struct ti_sci_msg_hdr *resp; 2196 struct ti_sci_xfer *xfer; 2197 struct ti_sci_info *info; 2198 struct device *dev; 2199 int ret = 0; 2200 2201 if (IS_ERR(handle)) 2202 return PTR_ERR(handle); 2203 if (!handle) 2204 return -EINVAL; 2205 2206 info = handle_to_ti_sci_info(handle); 2207 dev = info->dev; 2208 2209 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR, 2210 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2211 sizeof(*req), sizeof(*resp)); 2212 if (IS_ERR(xfer)) { 2213 ret = PTR_ERR(xfer); 2214 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret); 2215 return ret; 2216 } 2217 req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf; 2218 req->nav_id = nav_id; 2219 req->src_thread = src_thread; 2220 req->dst_thread = dst_thread; 2221 2222 ret = ti_sci_do_xfer(info, xfer); 2223 if (ret) { 2224 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret); 2225 goto fail; 2226 } 2227 2228 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2229 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2230 2231 fail: 2232 ti_sci_put_one_xfer(&info->minfo, xfer); 2233 2234 return ret; 2235 } 2236 2237 /** 2238 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel 2239 * @handle: Pointer to TI SCI handle. 2240 * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config 2241 * structure 2242 * 2243 * Return: 0 if all went well, else returns appropriate error value. 2244 * 2245 * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for 2246 * more info. 2247 */ 2248 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle, 2249 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params) 2250 { 2251 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req; 2252 struct ti_sci_msg_hdr *resp; 2253 struct ti_sci_xfer *xfer; 2254 struct ti_sci_info *info; 2255 struct device *dev; 2256 int ret = 0; 2257 2258 if (IS_ERR_OR_NULL(handle)) 2259 return -EINVAL; 2260 2261 info = handle_to_ti_sci_info(handle); 2262 dev = info->dev; 2263 2264 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG, 2265 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2266 sizeof(*req), sizeof(*resp)); 2267 if (IS_ERR(xfer)) { 2268 ret = PTR_ERR(xfer); 2269 dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret); 2270 return ret; 2271 } 2272 req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf; 2273 req->valid_params = params->valid_params; 2274 req->nav_id = params->nav_id; 2275 req->index = params->index; 2276 req->tx_pause_on_err = params->tx_pause_on_err; 2277 req->tx_filt_einfo = params->tx_filt_einfo; 2278 req->tx_filt_pswords = params->tx_filt_pswords; 2279 req->tx_atype = params->tx_atype; 2280 req->tx_chan_type = params->tx_chan_type; 2281 req->tx_supr_tdpkt = params->tx_supr_tdpkt; 2282 req->tx_fetch_size = params->tx_fetch_size; 2283 req->tx_credit_count = params->tx_credit_count; 2284 req->txcq_qnum = params->txcq_qnum; 2285 req->tx_priority = params->tx_priority; 2286 req->tx_qos = params->tx_qos; 2287 req->tx_orderid = params->tx_orderid; 2288 req->fdepth = params->fdepth; 2289 req->tx_sched_priority = params->tx_sched_priority; 2290 req->tx_burst_size = params->tx_burst_size; 2291 req->tx_tdtype = params->tx_tdtype; 2292 req->extended_ch_type = params->extended_ch_type; 2293 2294 ret = ti_sci_do_xfer(info, xfer); 2295 if (ret) { 2296 dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret); 2297 goto fail; 2298 } 2299 2300 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2301 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2302 2303 fail: 2304 ti_sci_put_one_xfer(&info->minfo, xfer); 2305 dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret); 2306 return ret; 2307 } 2308 2309 /** 2310 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel 2311 * @handle: Pointer to TI SCI handle. 2312 * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config 2313 * structure 2314 * 2315 * Return: 0 if all went well, else returns appropriate error value. 2316 * 2317 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for 2318 * more info. 2319 */ 2320 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle, 2321 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params) 2322 { 2323 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req; 2324 struct ti_sci_msg_hdr *resp; 2325 struct ti_sci_xfer *xfer; 2326 struct ti_sci_info *info; 2327 struct device *dev; 2328 int ret = 0; 2329 2330 if (IS_ERR_OR_NULL(handle)) 2331 return -EINVAL; 2332 2333 info = handle_to_ti_sci_info(handle); 2334 dev = info->dev; 2335 2336 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG, 2337 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2338 sizeof(*req), sizeof(*resp)); 2339 if (IS_ERR(xfer)) { 2340 ret = PTR_ERR(xfer); 2341 dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret); 2342 return ret; 2343 } 2344 req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf; 2345 req->valid_params = params->valid_params; 2346 req->nav_id = params->nav_id; 2347 req->index = params->index; 2348 req->rx_fetch_size = params->rx_fetch_size; 2349 req->rxcq_qnum = params->rxcq_qnum; 2350 req->rx_priority = params->rx_priority; 2351 req->rx_qos = params->rx_qos; 2352 req->rx_orderid = params->rx_orderid; 2353 req->rx_sched_priority = params->rx_sched_priority; 2354 req->flowid_start = params->flowid_start; 2355 req->flowid_cnt = params->flowid_cnt; 2356 req->rx_pause_on_err = params->rx_pause_on_err; 2357 req->rx_atype = params->rx_atype; 2358 req->rx_chan_type = params->rx_chan_type; 2359 req->rx_ignore_short = params->rx_ignore_short; 2360 req->rx_ignore_long = params->rx_ignore_long; 2361 req->rx_burst_size = params->rx_burst_size; 2362 2363 ret = ti_sci_do_xfer(info, xfer); 2364 if (ret) { 2365 dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret); 2366 goto fail; 2367 } 2368 2369 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2370 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2371 2372 fail: 2373 ti_sci_put_one_xfer(&info->minfo, xfer); 2374 dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret); 2375 return ret; 2376 } 2377 2378 /** 2379 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW 2380 * @handle: Pointer to TI SCI handle. 2381 * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config 2382 * structure 2383 * 2384 * Return: 0 if all went well, else returns appropriate error value. 2385 * 2386 * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for 2387 * more info. 2388 */ 2389 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle, 2390 const struct ti_sci_msg_rm_udmap_flow_cfg *params) 2391 { 2392 struct ti_sci_msg_rm_udmap_flow_cfg_req *req; 2393 struct ti_sci_msg_hdr *resp; 2394 struct ti_sci_xfer *xfer; 2395 struct ti_sci_info *info; 2396 struct device *dev; 2397 int ret = 0; 2398 2399 if (IS_ERR_OR_NULL(handle)) 2400 return -EINVAL; 2401 2402 info = handle_to_ti_sci_info(handle); 2403 dev = info->dev; 2404 2405 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG, 2406 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2407 sizeof(*req), sizeof(*resp)); 2408 if (IS_ERR(xfer)) { 2409 ret = PTR_ERR(xfer); 2410 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret); 2411 return ret; 2412 } 2413 req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf; 2414 req->valid_params = params->valid_params; 2415 req->nav_id = params->nav_id; 2416 req->flow_index = params->flow_index; 2417 req->rx_einfo_present = params->rx_einfo_present; 2418 req->rx_psinfo_present = params->rx_psinfo_present; 2419 req->rx_error_handling = params->rx_error_handling; 2420 req->rx_desc_type = params->rx_desc_type; 2421 req->rx_sop_offset = params->rx_sop_offset; 2422 req->rx_dest_qnum = params->rx_dest_qnum; 2423 req->rx_src_tag_hi = params->rx_src_tag_hi; 2424 req->rx_src_tag_lo = params->rx_src_tag_lo; 2425 req->rx_dest_tag_hi = params->rx_dest_tag_hi; 2426 req->rx_dest_tag_lo = params->rx_dest_tag_lo; 2427 req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel; 2428 req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel; 2429 req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel; 2430 req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel; 2431 req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum; 2432 req->rx_fdq1_qnum = params->rx_fdq1_qnum; 2433 req->rx_fdq2_qnum = params->rx_fdq2_qnum; 2434 req->rx_fdq3_qnum = params->rx_fdq3_qnum; 2435 req->rx_ps_location = params->rx_ps_location; 2436 2437 ret = ti_sci_do_xfer(info, xfer); 2438 if (ret) { 2439 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret); 2440 goto fail; 2441 } 2442 2443 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2444 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL; 2445 2446 fail: 2447 ti_sci_put_one_xfer(&info->minfo, xfer); 2448 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret); 2449 return ret; 2450 } 2451 2452 /** 2453 * ti_sci_cmd_proc_request() - Command to request a physical processor control 2454 * @handle: Pointer to TI SCI handle 2455 * @proc_id: Processor ID this request is for 2456 * 2457 * Return: 0 if all went well, else returns appropriate error value. 2458 */ 2459 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle, 2460 u8 proc_id) 2461 { 2462 struct ti_sci_msg_req_proc_request *req; 2463 struct ti_sci_msg_hdr *resp; 2464 struct ti_sci_info *info; 2465 struct ti_sci_xfer *xfer; 2466 struct device *dev; 2467 int ret = 0; 2468 2469 if (!handle) 2470 return -EINVAL; 2471 if (IS_ERR(handle)) 2472 return PTR_ERR(handle); 2473 2474 info = handle_to_ti_sci_info(handle); 2475 dev = info->dev; 2476 2477 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST, 2478 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2479 sizeof(*req), sizeof(*resp)); 2480 if (IS_ERR(xfer)) { 2481 ret = PTR_ERR(xfer); 2482 dev_err(dev, "Message alloc failed(%d)\n", ret); 2483 return ret; 2484 } 2485 req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf; 2486 req->processor_id = proc_id; 2487 2488 ret = ti_sci_do_xfer(info, xfer); 2489 if (ret) { 2490 dev_err(dev, "Mbox send fail %d\n", ret); 2491 goto fail; 2492 } 2493 2494 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2495 2496 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2497 2498 fail: 2499 ti_sci_put_one_xfer(&info->minfo, xfer); 2500 2501 return ret; 2502 } 2503 2504 /** 2505 * ti_sci_cmd_proc_release() - Command to release a physical processor control 2506 * @handle: Pointer to TI SCI handle 2507 * @proc_id: Processor ID this request is for 2508 * 2509 * Return: 0 if all went well, else returns appropriate error value. 2510 */ 2511 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle, 2512 u8 proc_id) 2513 { 2514 struct ti_sci_msg_req_proc_release *req; 2515 struct ti_sci_msg_hdr *resp; 2516 struct ti_sci_info *info; 2517 struct ti_sci_xfer *xfer; 2518 struct device *dev; 2519 int ret = 0; 2520 2521 if (!handle) 2522 return -EINVAL; 2523 if (IS_ERR(handle)) 2524 return PTR_ERR(handle); 2525 2526 info = handle_to_ti_sci_info(handle); 2527 dev = info->dev; 2528 2529 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE, 2530 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2531 sizeof(*req), sizeof(*resp)); 2532 if (IS_ERR(xfer)) { 2533 ret = PTR_ERR(xfer); 2534 dev_err(dev, "Message alloc failed(%d)\n", ret); 2535 return ret; 2536 } 2537 req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf; 2538 req->processor_id = proc_id; 2539 2540 ret = ti_sci_do_xfer(info, xfer); 2541 if (ret) { 2542 dev_err(dev, "Mbox send fail %d\n", ret); 2543 goto fail; 2544 } 2545 2546 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2547 2548 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2549 2550 fail: 2551 ti_sci_put_one_xfer(&info->minfo, xfer); 2552 2553 return ret; 2554 } 2555 2556 /** 2557 * ti_sci_cmd_proc_handover() - Command to handover a physical processor 2558 * control to a host in the processor's access 2559 * control list. 2560 * @handle: Pointer to TI SCI handle 2561 * @proc_id: Processor ID this request is for 2562 * @host_id: Host ID to get the control of the processor 2563 * 2564 * Return: 0 if all went well, else returns appropriate error value. 2565 */ 2566 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle, 2567 u8 proc_id, u8 host_id) 2568 { 2569 struct ti_sci_msg_req_proc_handover *req; 2570 struct ti_sci_msg_hdr *resp; 2571 struct ti_sci_info *info; 2572 struct ti_sci_xfer *xfer; 2573 struct device *dev; 2574 int ret = 0; 2575 2576 if (!handle) 2577 return -EINVAL; 2578 if (IS_ERR(handle)) 2579 return PTR_ERR(handle); 2580 2581 info = handle_to_ti_sci_info(handle); 2582 dev = info->dev; 2583 2584 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER, 2585 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2586 sizeof(*req), sizeof(*resp)); 2587 if (IS_ERR(xfer)) { 2588 ret = PTR_ERR(xfer); 2589 dev_err(dev, "Message alloc failed(%d)\n", ret); 2590 return ret; 2591 } 2592 req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf; 2593 req->processor_id = proc_id; 2594 req->host_id = host_id; 2595 2596 ret = ti_sci_do_xfer(info, xfer); 2597 if (ret) { 2598 dev_err(dev, "Mbox send fail %d\n", ret); 2599 goto fail; 2600 } 2601 2602 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2603 2604 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2605 2606 fail: 2607 ti_sci_put_one_xfer(&info->minfo, xfer); 2608 2609 return ret; 2610 } 2611 2612 /** 2613 * ti_sci_cmd_proc_set_config() - Command to set the processor boot 2614 * configuration flags 2615 * @handle: Pointer to TI SCI handle 2616 * @proc_id: Processor ID this request is for 2617 * @bootvector: Processor Boot vector (start address) 2618 * @config_flags_set: Configuration flags to be set 2619 * @config_flags_clear: Configuration flags to be cleared. 2620 * 2621 * Return: 0 if all went well, else returns appropriate error value. 2622 */ 2623 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle, 2624 u8 proc_id, u64 bootvector, 2625 u32 config_flags_set, 2626 u32 config_flags_clear) 2627 { 2628 struct ti_sci_msg_req_set_config *req; 2629 struct ti_sci_msg_hdr *resp; 2630 struct ti_sci_info *info; 2631 struct ti_sci_xfer *xfer; 2632 struct device *dev; 2633 int ret = 0; 2634 2635 if (!handle) 2636 return -EINVAL; 2637 if (IS_ERR(handle)) 2638 return PTR_ERR(handle); 2639 2640 info = handle_to_ti_sci_info(handle); 2641 dev = info->dev; 2642 2643 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG, 2644 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2645 sizeof(*req), sizeof(*resp)); 2646 if (IS_ERR(xfer)) { 2647 ret = PTR_ERR(xfer); 2648 dev_err(dev, "Message alloc failed(%d)\n", ret); 2649 return ret; 2650 } 2651 req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf; 2652 req->processor_id = proc_id; 2653 req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK; 2654 req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >> 2655 TI_SCI_ADDR_HIGH_SHIFT; 2656 req->config_flags_set = config_flags_set; 2657 req->config_flags_clear = config_flags_clear; 2658 2659 ret = ti_sci_do_xfer(info, xfer); 2660 if (ret) { 2661 dev_err(dev, "Mbox send fail %d\n", ret); 2662 goto fail; 2663 } 2664 2665 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2666 2667 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2668 2669 fail: 2670 ti_sci_put_one_xfer(&info->minfo, xfer); 2671 2672 return ret; 2673 } 2674 2675 /** 2676 * ti_sci_cmd_proc_set_control() - Command to set the processor boot 2677 * control flags 2678 * @handle: Pointer to TI SCI handle 2679 * @proc_id: Processor ID this request is for 2680 * @control_flags_set: Control flags to be set 2681 * @control_flags_clear: Control flags to be cleared 2682 * 2683 * Return: 0 if all went well, else returns appropriate error value. 2684 */ 2685 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle, 2686 u8 proc_id, u32 control_flags_set, 2687 u32 control_flags_clear) 2688 { 2689 struct ti_sci_msg_req_set_ctrl *req; 2690 struct ti_sci_msg_hdr *resp; 2691 struct ti_sci_info *info; 2692 struct ti_sci_xfer *xfer; 2693 struct device *dev; 2694 int ret = 0; 2695 2696 if (!handle) 2697 return -EINVAL; 2698 if (IS_ERR(handle)) 2699 return PTR_ERR(handle); 2700 2701 info = handle_to_ti_sci_info(handle); 2702 dev = info->dev; 2703 2704 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL, 2705 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2706 sizeof(*req), sizeof(*resp)); 2707 if (IS_ERR(xfer)) { 2708 ret = PTR_ERR(xfer); 2709 dev_err(dev, "Message alloc failed(%d)\n", ret); 2710 return ret; 2711 } 2712 req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf; 2713 req->processor_id = proc_id; 2714 req->control_flags_set = control_flags_set; 2715 req->control_flags_clear = control_flags_clear; 2716 2717 ret = ti_sci_do_xfer(info, xfer); 2718 if (ret) { 2719 dev_err(dev, "Mbox send fail %d\n", ret); 2720 goto fail; 2721 } 2722 2723 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; 2724 2725 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; 2726 2727 fail: 2728 ti_sci_put_one_xfer(&info->minfo, xfer); 2729 2730 return ret; 2731 } 2732 2733 /** 2734 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status 2735 * @handle: Pointer to TI SCI handle 2736 * @proc_id: Processor ID this request is for 2737 * @bv: Processor Boot vector (start address) 2738 * @cfg_flags: Processor specific configuration flags 2739 * @ctrl_flags: Processor specific control flags 2740 * @sts_flags: Processor specific status flags 2741 * 2742 * Return: 0 if all went well, else returns appropriate error value. 2743 */ 2744 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle, 2745 u8 proc_id, u64 *bv, u32 *cfg_flags, 2746 u32 *ctrl_flags, u32 *sts_flags) 2747 { 2748 struct ti_sci_msg_resp_get_status *resp; 2749 struct ti_sci_msg_req_get_status *req; 2750 struct ti_sci_info *info; 2751 struct ti_sci_xfer *xfer; 2752 struct device *dev; 2753 int ret = 0; 2754 2755 if (!handle) 2756 return -EINVAL; 2757 if (IS_ERR(handle)) 2758 return PTR_ERR(handle); 2759 2760 info = handle_to_ti_sci_info(handle); 2761 dev = info->dev; 2762 2763 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS, 2764 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2765 sizeof(*req), sizeof(*resp)); 2766 if (IS_ERR(xfer)) { 2767 ret = PTR_ERR(xfer); 2768 dev_err(dev, "Message alloc failed(%d)\n", ret); 2769 return ret; 2770 } 2771 req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf; 2772 req->processor_id = proc_id; 2773 2774 ret = ti_sci_do_xfer(info, xfer); 2775 if (ret) { 2776 dev_err(dev, "Mbox send fail %d\n", ret); 2777 goto fail; 2778 } 2779 2780 resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf; 2781 2782 if (!ti_sci_is_response_ack(resp)) { 2783 ret = -ENODEV; 2784 } else { 2785 *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) | 2786 (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) & 2787 TI_SCI_ADDR_HIGH_MASK); 2788 *cfg_flags = resp->config_flags; 2789 *ctrl_flags = resp->control_flags; 2790 *sts_flags = resp->status_flags; 2791 } 2792 2793 fail: 2794 ti_sci_put_one_xfer(&info->minfo, xfer); 2795 2796 return ret; 2797 } 2798 2799 /* 2800 * ti_sci_setup_ops() - Setup the operations structures 2801 * @info: pointer to TISCI pointer 2802 */ 2803 static void ti_sci_setup_ops(struct ti_sci_info *info) 2804 { 2805 struct ti_sci_ops *ops = &info->handle.ops; 2806 struct ti_sci_core_ops *core_ops = &ops->core_ops; 2807 struct ti_sci_dev_ops *dops = &ops->dev_ops; 2808 struct ti_sci_clk_ops *cops = &ops->clk_ops; 2809 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops; 2810 struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops; 2811 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops; 2812 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops; 2813 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops; 2814 struct ti_sci_proc_ops *pops = &ops->proc_ops; 2815 2816 core_ops->reboot_device = ti_sci_cmd_core_reboot; 2817 2818 dops->get_device = ti_sci_cmd_get_device; 2819 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive; 2820 dops->idle_device = ti_sci_cmd_idle_device; 2821 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive; 2822 dops->put_device = ti_sci_cmd_put_device; 2823 2824 dops->is_valid = ti_sci_cmd_dev_is_valid; 2825 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt; 2826 dops->is_idle = ti_sci_cmd_dev_is_idle; 2827 dops->is_stop = ti_sci_cmd_dev_is_stop; 2828 dops->is_on = ti_sci_cmd_dev_is_on; 2829 dops->is_transitioning = ti_sci_cmd_dev_is_trans; 2830 dops->set_device_resets = ti_sci_cmd_set_device_resets; 2831 dops->get_device_resets = ti_sci_cmd_get_device_resets; 2832 2833 cops->get_clock = ti_sci_cmd_get_clock; 2834 cops->idle_clock = ti_sci_cmd_idle_clock; 2835 cops->put_clock = ti_sci_cmd_put_clock; 2836 cops->is_auto = ti_sci_cmd_clk_is_auto; 2837 cops->is_on = ti_sci_cmd_clk_is_on; 2838 cops->is_off = ti_sci_cmd_clk_is_off; 2839 2840 cops->set_parent = ti_sci_cmd_clk_set_parent; 2841 cops->get_parent = ti_sci_cmd_clk_get_parent; 2842 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents; 2843 2844 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq; 2845 cops->set_freq = ti_sci_cmd_clk_set_freq; 2846 cops->get_freq = ti_sci_cmd_clk_get_freq; 2847 2848 rm_core_ops->get_range = ti_sci_cmd_get_resource_range; 2849 rm_core_ops->get_range_from_shost = 2850 ti_sci_cmd_get_resource_range_from_shost; 2851 2852 iops->set_irq = ti_sci_cmd_set_irq; 2853 iops->set_event_map = ti_sci_cmd_set_event_map; 2854 iops->free_irq = ti_sci_cmd_free_irq; 2855 iops->free_event_map = ti_sci_cmd_free_event_map; 2856 2857 rops->set_cfg = ti_sci_cmd_rm_ring_cfg; 2858 2859 psilops->pair = ti_sci_cmd_rm_psil_pair; 2860 psilops->unpair = ti_sci_cmd_rm_psil_unpair; 2861 2862 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg; 2863 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg; 2864 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg; 2865 2866 pops->request = ti_sci_cmd_proc_request; 2867 pops->release = ti_sci_cmd_proc_release; 2868 pops->handover = ti_sci_cmd_proc_handover; 2869 pops->set_config = ti_sci_cmd_proc_set_config; 2870 pops->set_control = ti_sci_cmd_proc_set_control; 2871 pops->get_status = ti_sci_cmd_proc_get_status; 2872 } 2873 2874 /** 2875 * ti_sci_get_handle() - Get the TI SCI handle for a device 2876 * @dev: Pointer to device for which we want SCI handle 2877 * 2878 * NOTE: The function does not track individual clients of the framework 2879 * and is expected to be maintained by caller of TI SCI protocol library. 2880 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 2881 * Return: pointer to handle if successful, else: 2882 * -EPROBE_DEFER if the instance is not ready 2883 * -ENODEV if the required node handler is missing 2884 * -EINVAL if invalid conditions are encountered. 2885 */ 2886 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) 2887 { 2888 struct device_node *ti_sci_np; 2889 struct list_head *p; 2890 struct ti_sci_handle *handle = NULL; 2891 struct ti_sci_info *info; 2892 2893 if (!dev) { 2894 pr_err("I need a device pointer\n"); 2895 return ERR_PTR(-EINVAL); 2896 } 2897 ti_sci_np = of_get_parent(dev->of_node); 2898 if (!ti_sci_np) { 2899 dev_err(dev, "No OF information\n"); 2900 return ERR_PTR(-EINVAL); 2901 } 2902 2903 mutex_lock(&ti_sci_list_mutex); 2904 list_for_each(p, &ti_sci_list) { 2905 info = list_entry(p, struct ti_sci_info, node); 2906 if (ti_sci_np == info->dev->of_node) { 2907 handle = &info->handle; 2908 info->users++; 2909 break; 2910 } 2911 } 2912 mutex_unlock(&ti_sci_list_mutex); 2913 of_node_put(ti_sci_np); 2914 2915 if (!handle) 2916 return ERR_PTR(-EPROBE_DEFER); 2917 2918 return handle; 2919 } 2920 EXPORT_SYMBOL_GPL(ti_sci_get_handle); 2921 2922 /** 2923 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle 2924 * @handle: Handle acquired by ti_sci_get_handle 2925 * 2926 * NOTE: The function does not track individual clients of the framework 2927 * and is expected to be maintained by caller of TI SCI protocol library. 2928 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle 2929 * 2930 * Return: 0 is successfully released 2931 * if an error pointer was passed, it returns the error value back, 2932 * if null was passed, it returns -EINVAL; 2933 */ 2934 int ti_sci_put_handle(const struct ti_sci_handle *handle) 2935 { 2936 struct ti_sci_info *info; 2937 2938 if (IS_ERR(handle)) 2939 return PTR_ERR(handle); 2940 if (!handle) 2941 return -EINVAL; 2942 2943 info = handle_to_ti_sci_info(handle); 2944 mutex_lock(&ti_sci_list_mutex); 2945 if (!WARN_ON(!info->users)) 2946 info->users--; 2947 mutex_unlock(&ti_sci_list_mutex); 2948 2949 return 0; 2950 } 2951 EXPORT_SYMBOL_GPL(ti_sci_put_handle); 2952 2953 static void devm_ti_sci_release(struct device *dev, void *res) 2954 { 2955 const struct ti_sci_handle **ptr = res; 2956 const struct ti_sci_handle *handle = *ptr; 2957 int ret; 2958 2959 ret = ti_sci_put_handle(handle); 2960 if (ret) 2961 dev_err(dev, "failed to put handle %d\n", ret); 2962 } 2963 2964 /** 2965 * devm_ti_sci_get_handle() - Managed get handle 2966 * @dev: device for which we want SCI handle for. 2967 * 2968 * NOTE: This releases the handle once the device resources are 2969 * no longer needed. MUST NOT BE released with ti_sci_put_handle. 2970 * The function does not track individual clients of the framework 2971 * and is expected to be maintained by caller of TI SCI protocol library. 2972 * 2973 * Return: 0 if all went fine, else corresponding error. 2974 */ 2975 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) 2976 { 2977 const struct ti_sci_handle **ptr; 2978 const struct ti_sci_handle *handle; 2979 2980 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 2981 if (!ptr) 2982 return ERR_PTR(-ENOMEM); 2983 handle = ti_sci_get_handle(dev); 2984 2985 if (!IS_ERR(handle)) { 2986 *ptr = handle; 2987 devres_add(dev, ptr); 2988 } else { 2989 devres_free(ptr); 2990 } 2991 2992 return handle; 2993 } 2994 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle); 2995 2996 /** 2997 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle 2998 * @np: device node 2999 * @property: property name containing phandle on TISCI node 3000 * 3001 * NOTE: The function does not track individual clients of the framework 3002 * and is expected to be maintained by caller of TI SCI protocol library. 3003 * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle 3004 * Return: pointer to handle if successful, else: 3005 * -EPROBE_DEFER if the instance is not ready 3006 * -ENODEV if the required node handler is missing 3007 * -EINVAL if invalid conditions are encountered. 3008 */ 3009 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, 3010 const char *property) 3011 { 3012 struct ti_sci_handle *handle = NULL; 3013 struct device_node *ti_sci_np; 3014 struct ti_sci_info *info; 3015 struct list_head *p; 3016 3017 if (!np) { 3018 pr_err("I need a device pointer\n"); 3019 return ERR_PTR(-EINVAL); 3020 } 3021 3022 ti_sci_np = of_parse_phandle(np, property, 0); 3023 if (!ti_sci_np) 3024 return ERR_PTR(-ENODEV); 3025 3026 mutex_lock(&ti_sci_list_mutex); 3027 list_for_each(p, &ti_sci_list) { 3028 info = list_entry(p, struct ti_sci_info, node); 3029 if (ti_sci_np == info->dev->of_node) { 3030 handle = &info->handle; 3031 info->users++; 3032 break; 3033 } 3034 } 3035 mutex_unlock(&ti_sci_list_mutex); 3036 of_node_put(ti_sci_np); 3037 3038 if (!handle) 3039 return ERR_PTR(-EPROBE_DEFER); 3040 3041 return handle; 3042 } 3043 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle); 3044 3045 /** 3046 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle 3047 * @dev: Device pointer requesting TISCI handle 3048 * @property: property name containing phandle on TISCI node 3049 * 3050 * NOTE: This releases the handle once the device resources are 3051 * no longer needed. MUST NOT BE released with ti_sci_put_handle. 3052 * The function does not track individual clients of the framework 3053 * and is expected to be maintained by caller of TI SCI protocol library. 3054 * 3055 * Return: 0 if all went fine, else corresponding error. 3056 */ 3057 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, 3058 const char *property) 3059 { 3060 const struct ti_sci_handle *handle; 3061 const struct ti_sci_handle **ptr; 3062 3063 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); 3064 if (!ptr) 3065 return ERR_PTR(-ENOMEM); 3066 handle = ti_sci_get_by_phandle(dev_of_node(dev), property); 3067 3068 if (!IS_ERR(handle)) { 3069 *ptr = handle; 3070 devres_add(dev, ptr); 3071 } else { 3072 devres_free(ptr); 3073 } 3074 3075 return handle; 3076 } 3077 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle); 3078 3079 /** 3080 * ti_sci_get_free_resource() - Get a free resource from TISCI resource. 3081 * @res: Pointer to the TISCI resource 3082 * 3083 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL. 3084 */ 3085 u16 ti_sci_get_free_resource(struct ti_sci_resource *res) 3086 { 3087 unsigned long flags; 3088 u16 set, free_bit; 3089 3090 raw_spin_lock_irqsave(&res->lock, flags); 3091 for (set = 0; set < res->sets; set++) { 3092 struct ti_sci_resource_desc *desc = &res->desc[set]; 3093 int res_count = desc->num + desc->num_sec; 3094 3095 free_bit = find_first_zero_bit(desc->res_map, res_count); 3096 if (free_bit != res_count) { 3097 __set_bit(free_bit, desc->res_map); 3098 raw_spin_unlock_irqrestore(&res->lock, flags); 3099 3100 if (desc->num && free_bit < desc->num) 3101 return desc->start + free_bit; 3102 else 3103 return desc->start_sec + free_bit; 3104 } 3105 } 3106 raw_spin_unlock_irqrestore(&res->lock, flags); 3107 3108 return TI_SCI_RESOURCE_NULL; 3109 } 3110 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource); 3111 3112 /** 3113 * ti_sci_release_resource() - Release a resource from TISCI resource. 3114 * @res: Pointer to the TISCI resource 3115 * @id: Resource id to be released. 3116 */ 3117 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id) 3118 { 3119 unsigned long flags; 3120 u16 set; 3121 3122 raw_spin_lock_irqsave(&res->lock, flags); 3123 for (set = 0; set < res->sets; set++) { 3124 struct ti_sci_resource_desc *desc = &res->desc[set]; 3125 3126 if (desc->num && desc->start <= id && 3127 (desc->start + desc->num) > id) 3128 __clear_bit(id - desc->start, desc->res_map); 3129 else if (desc->num_sec && desc->start_sec <= id && 3130 (desc->start_sec + desc->num_sec) > id) 3131 __clear_bit(id - desc->start_sec, desc->res_map); 3132 } 3133 raw_spin_unlock_irqrestore(&res->lock, flags); 3134 } 3135 EXPORT_SYMBOL_GPL(ti_sci_release_resource); 3136 3137 /** 3138 * ti_sci_get_num_resources() - Get the number of resources in TISCI resource 3139 * @res: Pointer to the TISCI resource 3140 * 3141 * Return: Total number of available resources. 3142 */ 3143 u32 ti_sci_get_num_resources(struct ti_sci_resource *res) 3144 { 3145 u32 set, count = 0; 3146 3147 for (set = 0; set < res->sets; set++) 3148 count += res->desc[set].num + res->desc[set].num_sec; 3149 3150 return count; 3151 } 3152 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources); 3153 3154 /** 3155 * devm_ti_sci_get_resource_sets() - Get a TISCI resources assigned to a device 3156 * @handle: TISCI handle 3157 * @dev: Device pointer to which the resource is assigned 3158 * @dev_id: TISCI device id to which the resource is assigned 3159 * @sub_types: Array of sub_types assigned corresponding to device 3160 * @sets: Number of sub_types 3161 * 3162 * Return: Pointer to ti_sci_resource if all went well else appropriate 3163 * error pointer. 3164 */ 3165 static struct ti_sci_resource * 3166 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle, 3167 struct device *dev, u32 dev_id, u32 *sub_types, 3168 u32 sets) 3169 { 3170 struct ti_sci_resource *res; 3171 bool valid_set = false; 3172 int i, ret, res_count; 3173 3174 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); 3175 if (!res) 3176 return ERR_PTR(-ENOMEM); 3177 3178 res->sets = sets; 3179 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc), 3180 GFP_KERNEL); 3181 if (!res->desc) 3182 return ERR_PTR(-ENOMEM); 3183 3184 for (i = 0; i < res->sets; i++) { 3185 ret = handle->ops.rm_core_ops.get_range(handle, dev_id, 3186 sub_types[i], 3187 &res->desc[i]); 3188 if (ret) { 3189 dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n", 3190 dev_id, sub_types[i]); 3191 memset(&res->desc[i], 0, sizeof(res->desc[i])); 3192 continue; 3193 } 3194 3195 dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n", 3196 dev_id, sub_types[i], res->desc[i].start, 3197 res->desc[i].num, res->desc[i].start_sec, 3198 res->desc[i].num_sec); 3199 3200 valid_set = true; 3201 res_count = res->desc[i].num + res->desc[i].num_sec; 3202 res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count, 3203 GFP_KERNEL); 3204 if (!res->desc[i].res_map) 3205 return ERR_PTR(-ENOMEM); 3206 } 3207 raw_spin_lock_init(&res->lock); 3208 3209 if (valid_set) 3210 return res; 3211 3212 return ERR_PTR(-EINVAL); 3213 } 3214 3215 /** 3216 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device 3217 * @handle: TISCI handle 3218 * @dev: Device pointer to which the resource is assigned 3219 * @dev_id: TISCI device id to which the resource is assigned 3220 * @of_prop: property name by which the resource are represented 3221 * 3222 * Return: Pointer to ti_sci_resource if all went well else appropriate 3223 * error pointer. 3224 */ 3225 struct ti_sci_resource * 3226 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, 3227 struct device *dev, u32 dev_id, char *of_prop) 3228 { 3229 struct ti_sci_resource *res; 3230 u32 *sub_types; 3231 int sets; 3232 3233 sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop, 3234 sizeof(u32)); 3235 if (sets < 0) { 3236 dev_err(dev, "%s resource type ids not available\n", of_prop); 3237 return ERR_PTR(sets); 3238 } 3239 3240 sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL); 3241 if (!sub_types) 3242 return ERR_PTR(-ENOMEM); 3243 3244 of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets); 3245 res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types, 3246 sets); 3247 3248 kfree(sub_types); 3249 return res; 3250 } 3251 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource); 3252 3253 /** 3254 * devm_ti_sci_get_resource() - Get a resource range assigned to the device 3255 * @handle: TISCI handle 3256 * @dev: Device pointer to which the resource is assigned 3257 * @dev_id: TISCI device id to which the resource is assigned 3258 * @sub_type: TISCI resource subytpe representing the resource. 3259 * 3260 * Return: Pointer to ti_sci_resource if all went well else appropriate 3261 * error pointer. 3262 */ 3263 struct ti_sci_resource * 3264 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, 3265 u32 dev_id, u32 sub_type) 3266 { 3267 return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1); 3268 } 3269 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); 3270 3271 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, 3272 void *cmd) 3273 { 3274 struct ti_sci_info *info = reboot_to_ti_sci_info(nb); 3275 const struct ti_sci_handle *handle = &info->handle; 3276 3277 ti_sci_cmd_core_reboot(handle); 3278 3279 /* call fail OR pass, we should not be here in the first place */ 3280 return NOTIFY_BAD; 3281 } 3282 3283 /* Description for K2G */ 3284 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { 3285 .default_host_id = 2, 3286 /* Conservative duration */ 3287 .max_rx_timeout_ms = 1000, 3288 /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ 3289 .max_msgs = 20, 3290 .max_msg_size = 64, 3291 }; 3292 3293 /* Description for AM654 */ 3294 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = { 3295 .default_host_id = 12, 3296 /* Conservative duration */ 3297 .max_rx_timeout_ms = 10000, 3298 /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ 3299 .max_msgs = 20, 3300 .max_msg_size = 60, 3301 }; 3302 3303 static const struct of_device_id ti_sci_of_match[] = { 3304 {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc}, 3305 {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc}, 3306 { /* Sentinel */ }, 3307 }; 3308 MODULE_DEVICE_TABLE(of, ti_sci_of_match); 3309 3310 static int ti_sci_probe(struct platform_device *pdev) 3311 { 3312 struct device *dev = &pdev->dev; 3313 const struct of_device_id *of_id; 3314 const struct ti_sci_desc *desc; 3315 struct ti_sci_xfer *xfer; 3316 struct ti_sci_info *info = NULL; 3317 struct ti_sci_xfers_info *minfo; 3318 struct mbox_client *cl; 3319 int ret = -EINVAL; 3320 int i; 3321 int reboot = 0; 3322 u32 h_id; 3323 3324 of_id = of_match_device(ti_sci_of_match, dev); 3325 if (!of_id) { 3326 dev_err(dev, "OF data missing\n"); 3327 return -EINVAL; 3328 } 3329 desc = of_id->data; 3330 3331 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); 3332 if (!info) 3333 return -ENOMEM; 3334 3335 info->dev = dev; 3336 info->desc = desc; 3337 ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id); 3338 /* if the property is not present in DT, use a default from desc */ 3339 if (ret < 0) { 3340 info->host_id = info->desc->default_host_id; 3341 } else { 3342 if (!h_id) { 3343 dev_warn(dev, "Host ID 0 is reserved for firmware\n"); 3344 info->host_id = info->desc->default_host_id; 3345 } else { 3346 info->host_id = h_id; 3347 } 3348 } 3349 3350 reboot = of_property_read_bool(dev->of_node, 3351 "ti,system-reboot-controller"); 3352 INIT_LIST_HEAD(&info->node); 3353 minfo = &info->minfo; 3354 3355 /* 3356 * Pre-allocate messages 3357 * NEVER allocate more than what we can indicate in hdr.seq 3358 * if we have data description bug, force a fix.. 3359 */ 3360 if (WARN_ON(desc->max_msgs >= 3361 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq))) 3362 return -EINVAL; 3363 3364 minfo->xfer_block = devm_kcalloc(dev, 3365 desc->max_msgs, 3366 sizeof(*minfo->xfer_block), 3367 GFP_KERNEL); 3368 if (!minfo->xfer_block) 3369 return -ENOMEM; 3370 3371 minfo->xfer_alloc_table = devm_bitmap_zalloc(dev, 3372 desc->max_msgs, 3373 GFP_KERNEL); 3374 if (!minfo->xfer_alloc_table) 3375 return -ENOMEM; 3376 3377 /* Pre-initialize the buffer pointer to pre-allocated buffers */ 3378 for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) { 3379 xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size, 3380 GFP_KERNEL); 3381 if (!xfer->xfer_buf) 3382 return -ENOMEM; 3383 3384 xfer->tx_message.buf = xfer->xfer_buf; 3385 init_completion(&xfer->done); 3386 } 3387 3388 ret = ti_sci_debugfs_create(pdev, info); 3389 if (ret) 3390 dev_warn(dev, "Failed to create debug file\n"); 3391 3392 platform_set_drvdata(pdev, info); 3393 3394 cl = &info->cl; 3395 cl->dev = dev; 3396 cl->tx_block = false; 3397 cl->rx_callback = ti_sci_rx_callback; 3398 cl->knows_txdone = true; 3399 3400 spin_lock_init(&minfo->xfer_lock); 3401 sema_init(&minfo->sem_xfer_count, desc->max_msgs); 3402 3403 info->chan_rx = mbox_request_channel_byname(cl, "rx"); 3404 if (IS_ERR(info->chan_rx)) { 3405 ret = PTR_ERR(info->chan_rx); 3406 goto out; 3407 } 3408 3409 info->chan_tx = mbox_request_channel_byname(cl, "tx"); 3410 if (IS_ERR(info->chan_tx)) { 3411 ret = PTR_ERR(info->chan_tx); 3412 goto out; 3413 } 3414 ret = ti_sci_cmd_get_revision(info); 3415 if (ret) { 3416 dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret); 3417 goto out; 3418 } 3419 3420 ti_sci_setup_ops(info); 3421 3422 if (reboot) { 3423 info->nb.notifier_call = tisci_reboot_handler; 3424 info->nb.priority = 128; 3425 3426 ret = register_restart_handler(&info->nb); 3427 if (ret) { 3428 dev_err(dev, "reboot registration fail(%d)\n", ret); 3429 goto out; 3430 } 3431 } 3432 3433 dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", 3434 info->handle.version.abi_major, info->handle.version.abi_minor, 3435 info->handle.version.firmware_revision, 3436 info->handle.version.firmware_description); 3437 3438 mutex_lock(&ti_sci_list_mutex); 3439 list_add_tail(&info->node, &ti_sci_list); 3440 mutex_unlock(&ti_sci_list_mutex); 3441 3442 return of_platform_populate(dev->of_node, NULL, NULL, dev); 3443 out: 3444 if (!IS_ERR(info->chan_tx)) 3445 mbox_free_channel(info->chan_tx); 3446 if (!IS_ERR(info->chan_rx)) 3447 mbox_free_channel(info->chan_rx); 3448 debugfs_remove(info->d); 3449 return ret; 3450 } 3451 3452 static int ti_sci_remove(struct platform_device *pdev) 3453 { 3454 struct ti_sci_info *info; 3455 struct device *dev = &pdev->dev; 3456 int ret = 0; 3457 3458 of_platform_depopulate(dev); 3459 3460 info = platform_get_drvdata(pdev); 3461 3462 if (info->nb.notifier_call) 3463 unregister_restart_handler(&info->nb); 3464 3465 mutex_lock(&ti_sci_list_mutex); 3466 if (info->users) 3467 ret = -EBUSY; 3468 else 3469 list_del(&info->node); 3470 mutex_unlock(&ti_sci_list_mutex); 3471 3472 if (!ret) { 3473 ti_sci_debugfs_destroy(pdev, info); 3474 3475 /* Safe to free channels since no more users */ 3476 mbox_free_channel(info->chan_tx); 3477 mbox_free_channel(info->chan_rx); 3478 } 3479 3480 return ret; 3481 } 3482 3483 static struct platform_driver ti_sci_driver = { 3484 .probe = ti_sci_probe, 3485 .remove = ti_sci_remove, 3486 .driver = { 3487 .name = "ti-sci", 3488 .of_match_table = of_match_ptr(ti_sci_of_match), 3489 }, 3490 }; 3491 module_platform_driver(ti_sci_driver); 3492 3493 MODULE_LICENSE("GPL v2"); 3494 MODULE_DESCRIPTION("TI System Control Interface(SCI) driver"); 3495 MODULE_AUTHOR("Nishanth Menon"); 3496 MODULE_ALIAS("platform:ti-sci"); 3497