// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Samsung Electronics Co., Ltd.
 * Copyright 2020 Google LLC.
 * Copyright 2024 Linaro Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/math.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "exynos-acpm.h"
#include "exynos-acpm-dvfs.h"
#include "exynos-acpm-pmic.h"

/* Sequence number field within the first 32-bit word of every message. */
#define ACPM_PROTOCOL_SEQNUM		GENMASK(21, 16)

/* Total RX polling budget; polled in 20us steps (see acpm_dequeue_by_polling). */
#define ACPM_POLL_TIMEOUT_US		(100 * USEC_PER_MSEC)
#define ACPM_TX_TIMEOUT_US		500000

/* Offset in SRAM of the gs101 init data (struct acpm_shmem). */
#define ACPM_GS101_INITDATA_BASE	0xa000

/**
 * struct acpm_shmem - shared memory configuration information.
 * @reserved: unused fields.
 * @chans: offset to array of struct acpm_chan_shmem.
 * @reserved1: unused fields.
 * @num_chans: number of channels.
 *
 * Layout is fixed by the ACPM firmware; do not reorder or resize fields.
 */
struct acpm_shmem {
	u32 reserved[2];
	u32 chans;
	u32 reserved1[3];
	u32 num_chans;
};

/**
 * struct acpm_chan_shmem - descriptor of a shared memory channel.
 *
 * @id: channel ID.
 * @reserved: unused fields.
 * @rx_rear: rear pointer of APM RX queue (TX for AP).
 * @rx_front: front pointer of APM RX queue (TX for AP).
 * @rx_base: base address of APM RX queue (TX for AP).
 * @reserved1: unused fields.
 * @tx_rear: rear pointer of APM TX queue (RX for AP).
 * @tx_front: front pointer of APM TX queue (RX for AP).
 * @tx_base: base address of APM TX queue (RX for AP).
 * @qlen: queue length. Applies to both TX/RX queues.
 * @mlen: message length. Applies to both TX/RX queues.
 * @reserved2: unused fields.
 * @poll_completion: true when the channel works on polling.
 *
 * Layout is fixed by the ACPM firmware; note the rx_*/tx_* naming is from
 * the APM's point of view, so it is swapped relative to the AP's view.
 */
struct acpm_chan_shmem {
	u32 id;
	u32 reserved[3];
	u32 rx_rear;
	u32 rx_front;
	u32 rx_base;
	u32 reserved1[3];
	u32 tx_rear;
	u32 tx_front;
	u32 tx_base;
	u32 qlen;
	u32 mlen;
	u32 reserved2[2];
	u32 poll_completion;
};

/**
 * struct acpm_queue - exynos acpm queue.
 *
 * @rear: rear address of the queue.
 * @front: front address of the queue.
 * @base: base address of the queue.
 */
struct acpm_queue {
	void __iomem *rear;
	void __iomem *front;
	void __iomem *base;
};

/**
 * struct acpm_rx_data - RX queue data.
 *
 * @cmd: pointer to where the data shall be saved.
 * @n_cmd: number of 32-bit commands.
 * @response: true if the client expects the RX data.
 */
struct acpm_rx_data {
	u32 *cmd;
	size_t n_cmd;
	bool response;
};

#define ACPM_SEQNUM_MAX    64

/**
 * struct acpm_chan - driver internal representation of a channel.
 * @cl: mailbox client.
 * @chan: mailbox channel.
 * @acpm: pointer to driver private data.
 * @tx: TX queue. The enqueue is done by the host.
 *	- front index is written by the host.
 *	- rear index is written by the firmware.
 *
 * @rx: RX queue. The enqueue is done by the firmware.
 *	- front index is written by the firmware.
 *	- rear index is written by the host.
 * @tx_lock: protects TX queue.
 * @rx_lock: protects RX queue.
 * @qlen: queue length. Applies to both TX/RX queues.
 * @mlen: message length. Applies to both TX/RX queues.
 * @seqnum: sequence number of the last message enqueued on TX queue.
 * @id: channel ID.
 * @poll_completion: indicates if the transfer needs to be polled for
 *	completion or interrupt mode is used.
 * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
 * @rx_data: internal buffer used to drain the RX queue.
 */
struct acpm_chan {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct acpm_info *acpm;
	struct acpm_queue tx;
	struct acpm_queue rx;
	struct mutex tx_lock;
	struct mutex rx_lock;

	unsigned int qlen;
	unsigned int mlen;
	u8 seqnum;
	u8 id;
	bool poll_completion;

	/* Valid seqnums are 1..63, mapped to bits 0..62. */
	DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
	struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
};

/**
 * struct acpm_info - driver's private data.
 * @shmem: pointer to the SRAM configuration data.
 * @sram_base: base address of SRAM.
 * @chans: pointer to the ACPM channel parameters retrieved from SRAM.
 * @dev: pointer to the exynos-acpm device.
 * @handle: instance of acpm_handle to send to clients.
 * @num_chans: number of channels available for this controller.
 */
struct acpm_info {
	struct acpm_shmem __iomem *shmem;
	void __iomem *sram_base;
	struct acpm_chan *chans;
	struct device *dev;
	struct acpm_handle handle;
	u32 num_chans;
};

/**
 * struct acpm_match_data - of_device_id data.
 * @initdata_base: offset in SRAM where the channels configuration resides.
 * @acpm_clk_dev_name: base name for the ACPM clocks device that we're registering.
 */
struct acpm_match_data {
	loff_t initdata_base;
	const char *acpm_clk_dev_name;
};

#define client_to_acpm_chan(c)	container_of(c, struct acpm_chan, cl)
#define handle_to_acpm_info(h)	container_of(h, struct acpm_info, handle)

/**
 * acpm_get_saved_rx() - get the response if it was already saved.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer to get response for.
194 * @tx_seqnum: xfer TX sequence number. 195 */ 196 static void acpm_get_saved_rx(struct acpm_chan *achan, 197 const struct acpm_xfer *xfer, u32 tx_seqnum) 198 { 199 const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1]; 200 u32 rx_seqnum; 201 202 if (!rx_data->response) 203 return; 204 205 rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]); 206 207 if (rx_seqnum == tx_seqnum) { 208 memcpy(xfer->rxd, rx_data->cmd, xfer->rxcnt * sizeof(*xfer->rxd)); 209 clear_bit(rx_seqnum - 1, achan->bitmap_seqnum); 210 } 211 } 212 213 /** 214 * acpm_get_rx() - get response from RX queue. 215 * @achan: ACPM channel info. 216 * @xfer: reference to the transfer to get response for. 217 * 218 * Return: 0 on success, -errno otherwise. 219 */ 220 static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer) 221 { 222 u32 rx_front, rx_seqnum, tx_seqnum, seqnum; 223 const void __iomem *base, *addr; 224 struct acpm_rx_data *rx_data; 225 u32 i, val, mlen; 226 bool rx_set = false; 227 228 guard(mutex)(&achan->rx_lock); 229 230 rx_front = readl(achan->rx.front); 231 i = readl(achan->rx.rear); 232 233 tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); 234 235 if (i == rx_front) { 236 acpm_get_saved_rx(achan, xfer, tx_seqnum); 237 return 0; 238 } 239 240 base = achan->rx.base; 241 mlen = achan->mlen; 242 243 /* Drain RX queue. */ 244 do { 245 /* Read RX seqnum. */ 246 addr = base + mlen * i; 247 val = readl(addr); 248 249 rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val); 250 if (!rx_seqnum) 251 return -EIO; 252 /* 253 * mssg seqnum starts with value 1, whereas the driver considers 254 * the first mssg at index 0. 255 */ 256 seqnum = rx_seqnum - 1; 257 rx_data = &achan->rx_data[seqnum]; 258 259 if (rx_data->response) { 260 if (rx_seqnum == tx_seqnum) { 261 __ioread32_copy(xfer->rxd, addr, xfer->rxcnt); 262 rx_set = true; 263 clear_bit(seqnum, achan->bitmap_seqnum); 264 } else { 265 /* 266 * The RX data corresponds to another request. 
267 * Save the data to drain the queue, but don't 268 * clear yet the bitmap. It will be cleared 269 * after the response is copied to the request. 270 */ 271 __ioread32_copy(rx_data->cmd, addr, xfer->rxcnt); 272 } 273 } else { 274 clear_bit(seqnum, achan->bitmap_seqnum); 275 } 276 277 i = (i + 1) % achan->qlen; 278 } while (i != rx_front); 279 280 /* We saved all responses, mark RX empty. */ 281 writel(rx_front, achan->rx.rear); 282 283 /* 284 * If the response was not in this iteration of the queue, check if the 285 * RX data was previously saved. 286 */ 287 if (!rx_set) 288 acpm_get_saved_rx(achan, xfer, tx_seqnum); 289 290 return 0; 291 } 292 293 /** 294 * acpm_dequeue_by_polling() - RX dequeue by polling. 295 * @achan: ACPM channel info. 296 * @xfer: reference to the transfer being waited for. 297 * 298 * Return: 0 on success, -errno otherwise. 299 */ 300 static int acpm_dequeue_by_polling(struct acpm_chan *achan, 301 const struct acpm_xfer *xfer) 302 { 303 struct device *dev = achan->acpm->dev; 304 ktime_t timeout; 305 u32 seqnum; 306 int ret; 307 308 seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); 309 310 timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US); 311 do { 312 ret = acpm_get_rx(achan, xfer); 313 if (ret) 314 return ret; 315 316 if (!test_bit(seqnum - 1, achan->bitmap_seqnum)) 317 return 0; 318 319 /* Determined experimentally. */ 320 udelay(20); 321 } while (ktime_before(ktime_get(), timeout)); 322 323 dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n", 324 achan->id, seqnum, achan->bitmap_seqnum[0]); 325 326 return -ETIME; 327 } 328 329 /** 330 * acpm_wait_for_queue_slots() - wait for queue slots. 331 * 332 * @achan: ACPM channel info. 333 * @next_tx_front: next front index of the TX queue. 334 * 335 * Return: 0 on success, -errno otherwise. 336 */ 337 static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front) 338 { 339 u32 val, ret; 340 341 /* 342 * Wait for RX front to keep up with TX front. 
Make sure there's at 343 * least one element between them. 344 */ 345 ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0, 346 ACPM_TX_TIMEOUT_US); 347 if (ret) { 348 dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n"); 349 return ret; 350 } 351 352 ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0, 353 ACPM_TX_TIMEOUT_US); 354 if (ret) 355 dev_err(achan->acpm->dev, "TX queue is full.\n"); 356 357 return ret; 358 } 359 360 /** 361 * acpm_prepare_xfer() - prepare a transfer before writing the message to the 362 * TX queue. 363 * @achan: ACPM channel info. 364 * @xfer: reference to the transfer being prepared. 365 */ 366 static void acpm_prepare_xfer(struct acpm_chan *achan, 367 const struct acpm_xfer *xfer) 368 { 369 struct acpm_rx_data *rx_data; 370 u32 *txd = (u32 *)xfer->txd; 371 372 /* Prevent chan->seqnum from being re-used */ 373 do { 374 if (++achan->seqnum == ACPM_SEQNUM_MAX) 375 achan->seqnum = 1; 376 } while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum)); 377 378 txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum); 379 380 /* Clear data for upcoming responses */ 381 rx_data = &achan->rx_data[achan->seqnum - 1]; 382 memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd); 383 if (xfer->rxd) 384 rx_data->response = true; 385 386 /* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */ 387 set_bit(achan->seqnum - 1, achan->bitmap_seqnum); 388 } 389 390 /** 391 * acpm_wait_for_message_response - an helper to group all possible ways of 392 * waiting for a synchronous message response. 393 * 394 * @achan: ACPM channel info. 395 * @xfer: reference to the transfer being waited for. 396 * 397 * Return: 0 on success, -errno otherwise. 398 */ 399 static int acpm_wait_for_message_response(struct acpm_chan *achan, 400 const struct acpm_xfer *xfer) 401 { 402 /* Just polling mode supported for now. 
*/ 403 return acpm_dequeue_by_polling(achan, xfer); 404 } 405 406 /** 407 * acpm_do_xfer() - do one transfer. 408 * @handle: pointer to the acpm handle. 409 * @xfer: transfer to initiate and wait for response. 410 * 411 * Return: 0 on success, -errno otherwise. 412 */ 413 int acpm_do_xfer(struct acpm_handle *handle, const struct acpm_xfer *xfer) 414 { 415 struct acpm_info *acpm = handle_to_acpm_info(handle); 416 struct exynos_mbox_msg msg; 417 struct acpm_chan *achan; 418 u32 idx, tx_front; 419 int ret; 420 421 if (xfer->acpm_chan_id >= acpm->num_chans) 422 return -EINVAL; 423 424 achan = &acpm->chans[xfer->acpm_chan_id]; 425 426 if (!xfer->txd || 427 (xfer->txcnt * sizeof(*xfer->txd) > achan->mlen) || 428 (xfer->rxcnt * sizeof(*xfer->rxd) > achan->mlen)) 429 return -EINVAL; 430 431 if (!achan->poll_completion) { 432 dev_err(achan->acpm->dev, "Interrupt mode not supported\n"); 433 return -EOPNOTSUPP; 434 } 435 436 msg.chan_id = xfer->acpm_chan_id; 437 msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL; 438 439 scoped_guard(mutex, &achan->tx_lock) { 440 tx_front = readl(achan->tx.front); 441 idx = (tx_front + 1) % achan->qlen; 442 443 ret = acpm_wait_for_queue_slots(achan, idx); 444 if (ret) 445 return ret; 446 447 acpm_prepare_xfer(achan, xfer); 448 449 /* Write TX command. */ 450 __iowrite32_copy(achan->tx.base + achan->mlen * tx_front, 451 xfer->txd, xfer->txcnt); 452 453 /* Advance TX front. */ 454 writel(idx, achan->tx.front); 455 456 ret = mbox_send_message(achan->chan, (void *)&msg); 457 if (ret < 0) 458 return ret; 459 460 mbox_client_txdone(achan->chan, 0); 461 } 462 463 return acpm_wait_for_message_response(achan, xfer); 464 } 465 466 /** 467 * acpm_chan_shmem_get_params() - get channel parameters and addresses of the 468 * TX/RX queues. 469 * @achan: ACPM channel info. 470 * @chan_shmem: __iomem pointer to a channel described in shared memory. 
471 */ 472 static void acpm_chan_shmem_get_params(struct acpm_chan *achan, 473 struct acpm_chan_shmem __iomem *chan_shmem) 474 { 475 void __iomem *base = achan->acpm->sram_base; 476 struct acpm_queue *rx = &achan->rx; 477 struct acpm_queue *tx = &achan->tx; 478 479 achan->mlen = readl(&chan_shmem->mlen); 480 achan->poll_completion = readl(&chan_shmem->poll_completion); 481 achan->id = readl(&chan_shmem->id); 482 achan->qlen = readl(&chan_shmem->qlen); 483 484 tx->base = base + readl(&chan_shmem->rx_base); 485 tx->rear = base + readl(&chan_shmem->rx_rear); 486 tx->front = base + readl(&chan_shmem->rx_front); 487 488 rx->base = base + readl(&chan_shmem->tx_base); 489 rx->rear = base + readl(&chan_shmem->tx_rear); 490 rx->front = base + readl(&chan_shmem->tx_front); 491 492 dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n", 493 achan->id, achan->poll_completion, achan->mlen, achan->qlen); 494 } 495 496 /** 497 * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM 498 * firmware. 499 * @achan: ACPM channel info. 500 * 501 * Return: 0 on success, -errno otherwise. 502 */ 503 static int acpm_achan_alloc_cmds(struct acpm_chan *achan) 504 { 505 struct device *dev = achan->acpm->dev; 506 struct acpm_rx_data *rx_data; 507 size_t cmd_size, n_cmd; 508 int i; 509 510 if (achan->mlen == 0) 511 return 0; 512 513 cmd_size = sizeof(*(achan->rx_data[0].cmd)); 514 n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size); 515 516 for (i = 0; i < ACPM_SEQNUM_MAX; i++) { 517 rx_data = &achan->rx_data[i]; 518 rx_data->n_cmd = n_cmd; 519 rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL); 520 if (!rx_data->cmd) 521 return -ENOMEM; 522 } 523 524 return 0; 525 } 526 527 /** 528 * acpm_free_mbox_chans() - free mailbox channels. 529 * @acpm: pointer to driver data. 
530 */ 531 static void acpm_free_mbox_chans(struct acpm_info *acpm) 532 { 533 int i; 534 535 for (i = 0; i < acpm->num_chans; i++) 536 if (!IS_ERR_OR_NULL(acpm->chans[i].chan)) 537 mbox_free_channel(acpm->chans[i].chan); 538 } 539 540 /** 541 * acpm_channels_init() - initialize channels based on the configuration data in 542 * the shared memory. 543 * @acpm: pointer to driver data. 544 * 545 * Return: 0 on success, -errno otherwise. 546 */ 547 static int acpm_channels_init(struct acpm_info *acpm) 548 { 549 struct acpm_shmem __iomem *shmem = acpm->shmem; 550 struct acpm_chan_shmem __iomem *chans_shmem; 551 struct device *dev = acpm->dev; 552 int i, ret; 553 554 acpm->num_chans = readl(&shmem->num_chans); 555 acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans), 556 GFP_KERNEL); 557 if (!acpm->chans) 558 return -ENOMEM; 559 560 chans_shmem = acpm->sram_base + readl(&shmem->chans); 561 562 for (i = 0; i < acpm->num_chans; i++) { 563 struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i]; 564 struct acpm_chan *achan = &acpm->chans[i]; 565 struct mbox_client *cl = &achan->cl; 566 567 achan->acpm = acpm; 568 569 acpm_chan_shmem_get_params(achan, chan_shmem); 570 571 ret = acpm_achan_alloc_cmds(achan); 572 if (ret) 573 return ret; 574 575 mutex_init(&achan->rx_lock); 576 mutex_init(&achan->tx_lock); 577 578 cl->dev = dev; 579 580 achan->chan = mbox_request_channel(cl, 0); 581 if (IS_ERR(achan->chan)) { 582 acpm_free_mbox_chans(acpm); 583 return PTR_ERR(achan->chan); 584 } 585 } 586 587 return 0; 588 } 589 590 /** 591 * acpm_setup_ops() - setup the operations structures. 592 * @acpm: pointer to the driver data. 
 */
static void acpm_setup_ops(struct acpm_info *acpm)
{
	struct acpm_dvfs_ops *dvfs_ops = &acpm->handle.ops.dvfs_ops;
	struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;

	dvfs_ops->set_rate = acpm_dvfs_set_rate;
	dvfs_ops->get_rate = acpm_dvfs_get_rate;

	pmic_ops->read_reg = acpm_pmic_read_reg;
	pmic_ops->bulk_read = acpm_pmic_bulk_read;
	pmic_ops->write_reg = acpm_pmic_write_reg;
	pmic_ops->bulk_write = acpm_pmic_bulk_write;
	pmic_ops->update_reg = acpm_pmic_update_reg;
}

/* devm action callback: unregister the ACPM clocks platform device. */
static void acpm_clk_pdev_unregister(void *data)
{
	platform_device_unregister(data);
}

static int acpm_probe(struct platform_device *pdev)
{
	const struct acpm_match_data *match_data;
	struct platform_device *acpm_clk_pdev;
	struct device *dev = &pdev->dev;
	struct device_node *shmem;
	struct acpm_info *acpm;
	resource_size_t size;
	struct resource res;
	int ret;

	acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
	if (!acpm)
		return -ENOMEM;

	/* Resolve the "shmem" phandle to the SRAM region shared with the APM. */
	shmem = of_parse_phandle(dev->of_node, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to get shared memory.\n");

	size = resource_size(&res);
	acpm->sram_base = devm_ioremap(dev, res.start, size);
	if (!acpm->sram_base)
		return dev_err_probe(dev, -ENOMEM,
				     "Failed to ioremap shared memory.\n");

	match_data = of_device_get_match_data(dev);
	if (!match_data)
		return dev_err_probe(dev, -EINVAL,
				     "Failed to get match data.\n");

	/* Init data (struct acpm_shmem) lives at a SoC-specific SRAM offset. */
	acpm->shmem = acpm->sram_base + match_data->initdata_base;
	acpm->dev = dev;

	ret = acpm_channels_init(acpm);
	if (ret)
		return ret;

	acpm_setup_ops(acpm);

	platform_set_drvdata(pdev, acpm);

	acpm_clk_pdev = platform_device_register_data(dev,
						      match_data->acpm_clk_dev_name,
						      PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(acpm_clk_pdev))
		return dev_err_probe(dev, PTR_ERR(acpm_clk_pdev),
				     "Failed to register ACPM clocks device.\n");

	/* Tie the clocks device lifetime to ours via devres. */
	ret = devm_add_action_or_reset(dev, acpm_clk_pdev_unregister,
				       acpm_clk_pdev);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to add devm action.\n");

	return devm_of_platform_populate(dev);
}

/**
 * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle.
 * @handle: Handle acquired by acpm_get_by_phandle.
 *
 * Drops the module and device references taken in acpm_get_by_node().
 */
static void acpm_handle_put(struct acpm_handle *handle)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct device *dev = acpm->dev;

	module_put(dev->driver->owner);
	/* Drop reference taken with of_find_device_by_node(). */
	put_device(dev);
}

/**
 * devm_acpm_release() - devres release method.
 * @dev: pointer to device.
 * @res: pointer to resource.
 */
static void devm_acpm_release(struct device *dev, void *res)
{
	acpm_handle_put(*(struct acpm_handle **)res);
}

/**
 * acpm_get_by_node() - get the ACPM handle using node pointer.
 * @dev: device pointer requesting ACPM handle.
 * @np: ACPM device tree node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 *
 * Note: handle CANNOT be pointer to const
 */
static struct acpm_handle *acpm_get_by_node(struct device *dev,
					    struct device_node *np)
{
	struct platform_device *pdev;
	struct device_link *link;
	struct acpm_info *acpm;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);

	/* drvdata is set at the end of probe: NULL means not probed yet. */
	acpm = platform_get_drvdata(pdev);
	if (!acpm) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	/* Pin the provider module for the lifetime of the handle. */
	if (!try_module_get(pdev->dev.driver->owner)) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	/* Make the consumer depend on us; the link auto-removes with us. */
	link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
	if (!link) {
		dev_err(&pdev->dev,
			"Failed to create device link to consumer %s.\n",
			dev_name(dev));
		/* Unwind in reverse acquisition order. */
		platform_device_put(pdev);
		module_put(pdev->dev.driver->owner);
		return ERR_PTR(-EINVAL);
	}

	return &acpm->handle;
}

/**
 * devm_acpm_get_by_node() - managed get handle using node pointer.
 * @dev: device pointer requesting ACPM handle.
 * @np: ACPM device tree node.
 *
 * The handle is released automatically (via acpm_handle_put()) when @dev
 * is unbound.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
					  struct device_node *np)
{
	struct acpm_handle **ptr, *handle;

	ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	handle = acpm_get_by_node(dev, np);
	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);

static const struct acpm_match_data acpm_gs101 = {
	.initdata_base = ACPM_GS101_INITDATA_BASE,
	.acpm_clk_dev_name = "gs101-acpm-clk",
};

static const struct of_device_id acpm_match[] = {
	{
		.compatible = "google,gs101-acpm-ipc",
		.data = &acpm_gs101,
	},
	{},
};
MODULE_DEVICE_TABLE(of, acpm_match);

static struct platform_driver acpm_driver = {
	.probe = acpm_probe,
	.driver = {
		.name = "exynos-acpm-protocol",
		.of_match_table = acpm_match,
	},
};
module_platform_driver(acpm_driver);

MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
MODULE_LICENSE("GPL");