// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Samsung Electronics Co., Ltd.
 * Copyright 2020 Google LLC.
 * Copyright 2024 Linaro Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "exynos-acpm.h"
#include "exynos-acpm-dvfs.h"
#include "exynos-acpm-pmic.h"

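/*
 * Bits [21:16] of the first 32-bit word of a message carry the sequence
 * number used to match a response with its request.
 */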
#define ACPM_PROTOCOL_SEQNUM		GENMASK(21, 16)

#define ACPM_POLL_TIMEOUT_US		(100 * USEC_PER_MSEC)
#define ACPM_TX_TIMEOUT_US		500000

#define ACPM_GS101_INITDATA_BASE	0xa000

/**
 * struct acpm_shmem - shared memory configuration information.
 * @reserved: unused fields.
 * @chans: offset to array of struct acpm_chan_shmem.
 * @reserved1: unused fields.
 * @num_chans: number of channels.
 */
struct acpm_shmem {
	u32 reserved[2];
	u32 chans;
	u32 reserved1[3];
	u32 num_chans;
};

/**
 * struct acpm_chan_shmem - descriptor of a shared memory channel.
 *
 * @id: channel ID.
 * @reserved: unused fields.
 * @rx_rear: rear pointer of APM RX queue (TX for AP).
 * @rx_front: front pointer of APM RX queue (TX for AP).
 * @rx_base: base address of APM RX queue (TX for AP).
 * @reserved1: unused fields.
 * @tx_rear: rear pointer of APM TX queue (RX for AP).
 * @tx_front: front pointer of APM TX queue (RX for AP).
 * @tx_base: base address of APM TX queue (RX for AP).
 * @qlen: queue length. Applies to both TX/RX queues.
 * @mlen: message length. Applies to both TX/RX queues.
 * @reserved2: unused fields.
 * @poll_completion: true when the channel works on polling.
 */
struct acpm_chan_shmem {
	u32 id;
	u32 reserved[3];
	u32 rx_rear;
	u32 rx_front;
	u32 rx_base;
	u32 reserved1[3];
	u32 tx_rear;
	u32 tx_front;
	u32 tx_base;
	u32 qlen;
	u32 mlen;
	u32 reserved2[2];
	u32 poll_completion;
};

/**
 * struct acpm_queue - exynos acpm queue.
 *
 * @rear: rear address of the queue.
 * @front: front address of the queue.
 * @base: base address of the queue.
 */
struct acpm_queue {
	void __iomem *rear;
	void __iomem *front;
	void __iomem *base;
};

/**
 * struct acpm_rx_data - RX queue data.
 *
 * @cmd: pointer to where the data shall be saved.
 * @n_cmd: number of 32-bit commands.
 * @response: true if the client expects the RX data.
 */
struct acpm_rx_data {
	u32 *cmd;
	size_t n_cmd;
	bool response;
};

#define ACPM_SEQNUM_MAX		64

/**
 * struct acpm_chan - driver internal representation of a channel.
 * @cl: mailbox client.
 * @chan: mailbox channel.
 * @acpm: pointer to driver private data.
 * @tx: TX queue. The enqueue is done by the host.
 *	- front index is written by the host.
 *	- rear index is written by the firmware.
 *
 * @rx: RX queue. The enqueue is done by the firmware.
 *	- front index is written by the firmware.
 *	- rear index is written by the host.
 * @tx_lock: protects TX queue.
 * @rx_lock: protects RX queue.
 * @qlen: queue length. Applies to both TX/RX queues.
 * @mlen: message length. Applies to both TX/RX queues.
 * @seqnum: sequence number of the last message enqueued on TX queue.
 * @id: channel ID.
 * @poll_completion: indicates if the transfer needs to be polled for
 *	completion or interrupt mode is used.
 * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
 * @rx_data: internal buffer used to drain the RX queue.
 */
struct acpm_chan {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct acpm_info *acpm;
	struct acpm_queue tx;
	struct acpm_queue rx;
	struct mutex tx_lock;
	struct mutex rx_lock;

	unsigned int qlen;
	unsigned int mlen;
	u8 seqnum;
	u8 id;
	bool poll_completion;

	DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
	struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
};

/**
 * struct acpm_info - driver's private data.
 * @shmem: pointer to the SRAM configuration data.
 * @sram_base: base address of SRAM.
 * @chans: pointer to the ACPM channel parameters retrieved from SRAM.
 * @dev: pointer to the exynos-acpm device.
 * @handle: instance of acpm_handle to send to clients.
 * @num_chans: number of channels available for this controller.
 */
struct acpm_info {
	struct acpm_shmem __iomem *shmem;
	void __iomem *sram_base;
	struct acpm_chan *chans;
	struct device *dev;
	struct acpm_handle handle;
	u32 num_chans;
};

/**
 * struct acpm_match_data - of_device_id data.
 * @initdata_base: offset in SRAM where the channels configuration resides.
 * @acpm_clk_dev_name: base name for the ACPM clocks device that we're
 *	registering.
 */
struct acpm_match_data {
	loff_t initdata_base;
	const char *acpm_clk_dev_name;
};

#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)

/**
 * acpm_get_saved_rx() - get the response if it was already saved.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer to get response for.
 * @tx_seqnum: xfer TX sequence number.
 */
static void acpm_get_saved_rx(struct acpm_chan *achan,
			      const struct acpm_xfer *xfer, u32 tx_seqnum)
{
	const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1];
	u32 rx_seqnum;

	if (!rx_data->response)
		return;

	rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]);

	if (rx_seqnum == tx_seqnum) {
		memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
		clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
	}
}

/**
 * acpm_get_rx() - get response from RX queue.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer to get response for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
{
	u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
	const void __iomem *base, *addr;
	struct acpm_rx_data *rx_data;
	u32 i, val, mlen;
	bool rx_set = false;

	guard(mutex)(&achan->rx_lock);

	rx_front = readl(achan->rx.front);
	i = readl(achan->rx.rear);

	tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);

	if (i == rx_front) {
		acpm_get_saved_rx(achan, xfer, tx_seqnum);
		return 0;
	}

	base = achan->rx.base;
	mlen = achan->mlen;

	/* Drain RX queue. */
	do {
		/* Read RX seqnum. */
		addr = base + mlen * i;
		val = readl(addr);

		rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
		if (!rx_seqnum)
			return -EIO;
		/*
		 * mssg seqnum starts with value 1, whereas the driver considers
		 * the first mssg at index 0.
		 */
		seqnum = rx_seqnum - 1;
		rx_data = &achan->rx_data[seqnum];

		if (rx_data->response) {
			if (rx_seqnum == tx_seqnum) {
				__ioread32_copy(xfer->rxd, addr,
						xfer->rxlen / 4);
				rx_set = true;
				clear_bit(seqnum, achan->bitmap_seqnum);
			} else {
				/*
				 * The RX data corresponds to another request.
				 * Save the data to drain the queue, but don't
				 * clear the bitmap yet. It will be cleared
				 * after the response is copied to the request.
				 */
				__ioread32_copy(rx_data->cmd, addr,
						xfer->rxlen / 4);
			}
		} else {
			clear_bit(seqnum, achan->bitmap_seqnum);
		}

		i = (i + 1) % achan->qlen;
	} while (i != rx_front);

	/* We saved all responses, mark RX empty. */
	writel(rx_front, achan->rx.rear);

	/*
	 * If the response was not in this iteration of the queue, check if the
	 * RX data was previously saved.
	 */
	if (!rx_set)
		acpm_get_saved_rx(achan, xfer, tx_seqnum);

	return 0;
}

/**
 * acpm_dequeue_by_polling() - RX dequeue by polling.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_dequeue_by_polling(struct acpm_chan *achan,
				   const struct acpm_xfer *xfer)
{
	struct device *dev = achan->acpm->dev;
	ktime_t timeout;
	u32 seqnum;
	int ret;

	seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);

	timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);
	do {
		ret = acpm_get_rx(achan, xfer);
		if (ret)
			return ret;

		if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
			return 0;

		/* Determined experimentally. */
		udelay(20);
	} while (ktime_before(ktime_get(), timeout));

	dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n",
		achan->id, seqnum, achan->bitmap_seqnum[0]);

	return -ETIME;
}

/**
 * acpm_wait_for_queue_slots() - wait for queue slots.
 *
 * @achan: ACPM channel info.
 * @next_tx_front: next front index of the TX queue.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
{
	u32 val;
	int ret;

	/*
	 * Wait for RX front to keep up with TX front. Make sure there's at
	 * least one element between them.
	 */
	ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret) {
		dev_err(achan->acpm->dev, "RX front cannot keep up with TX front.\n");
		return ret;
	}

	ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret)
		dev_err(achan->acpm->dev, "TX queue is full.\n");

	return ret;
}

/**
 * acpm_prepare_xfer() - prepare a transfer before writing the message to the
 * TX queue.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer being prepared.
 */
static void acpm_prepare_xfer(struct acpm_chan *achan,
			      const struct acpm_xfer *xfer)
{
	struct acpm_rx_data *rx_data;
	u32 *txd = (u32 *)xfer->txd;

	/* Prevent chan->seqnum from being re-used */
	do {
		if (++achan->seqnum == ACPM_SEQNUM_MAX)
			achan->seqnum = 1;
	} while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));
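	/*
	 * Note: seqnum 0 is never handed out; acpm_get_rx() treats a message
	 * whose seqnum field reads back as zero as invalid.
	 */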

	txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);

	/* Clear data for upcoming responses */
	rx_data = &achan->rx_data[achan->seqnum - 1];
	memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
	if (xfer->rxd)
		rx_data->response = true;

	/* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
	set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
}

/**
 * acpm_wait_for_message_response() - a helper to group all possible ways of
 * waiting for a synchronous message response.
 *
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_message_response(struct acpm_chan *achan,
					  const struct acpm_xfer *xfer)
{
	/* Just polling mode supported for now. */
	return acpm_dequeue_by_polling(achan, xfer);
}

/**
 * acpm_do_xfer() - do one transfer.
 * @handle: pointer to the acpm handle.
 * @xfer: transfer to initiate and wait for response.
 *
 * Return: 0 on success, -errno otherwise.
 */
int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct exynos_mbox_msg msg;
	struct acpm_chan *achan;
	u32 idx, tx_front;
	int ret;

	if (xfer->acpm_chan_id >= acpm->num_chans)
		return -EINVAL;

	achan = &acpm->chans[xfer->acpm_chan_id];

	if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
		return -EINVAL;

	if (!achan->poll_completion) {
		dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
		return -EOPNOTSUPP;
	}

	msg.chan_id = xfer->acpm_chan_id;
	msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;

	scoped_guard(mutex, &achan->tx_lock) {
		tx_front = readl(achan->tx.front);
		idx = (tx_front + 1) % achan->qlen;
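		/*
		 * One slot is always kept free: the queue counts as full when
		 * advancing the front would make it equal to the rear, which
		 * keeps a full queue distinguishable from an empty one
		 * (front == rear).
		 */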

		ret = acpm_wait_for_queue_slots(achan, idx);
		if (ret)
			return ret;

		acpm_prepare_xfer(achan, xfer);

		/* Write TX command. */
		__iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
				 xfer->txd, xfer->txlen / 4);

		/* Advance TX front. */
		writel(idx, achan->tx.front);

		ret = mbox_send_message(achan->chan, (void *)&msg);
		if (ret < 0)
			return ret;

		mbox_client_txdone(achan->chan, 0);
	}

	return acpm_wait_for_message_response(achan, xfer);
}
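
/*
 * Minimal usage sketch (illustrative only; in-tree users such as
 * exynos-acpm-pmic.c build protocol-specific commands):
 *
 *	u32 cmd[4] = { };
 *	struct acpm_xfer xfer = {
 *		.txd = cmd,
 *		.rxd = cmd,
 *		.txlen = sizeof(cmd),
 *		.rxlen = sizeof(cmd),
 *		.acpm_chan_id = id,
 *	};
 *
 *	ret = acpm_do_xfer(handle, &xfer);
 */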

/**
 * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
 * TX/RX queues.
 * @achan: ACPM channel info.
 * @chan_shmem: __iomem pointer to a channel described in shared memory.
 */
static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
				       struct acpm_chan_shmem __iomem *chan_shmem)
{
	void __iomem *base = achan->acpm->sram_base;
	struct acpm_queue *rx = &achan->rx;
	struct acpm_queue *tx = &achan->tx;

	achan->mlen = readl(&chan_shmem->mlen);
	achan->poll_completion = readl(&chan_shmem->poll_completion);
	achan->id = readl(&chan_shmem->id);
	achan->qlen = readl(&chan_shmem->qlen);

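	/*
	 * Queues in shared memory are named from the APM firmware's point of
	 * view: the AP TX queue is the firmware's RX queue and vice versa,
	 * hence the apparent rx/tx swap below.
	 */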
	tx->base = base + readl(&chan_shmem->rx_base);
	tx->rear = base + readl(&chan_shmem->rx_rear);
	tx->front = base + readl(&chan_shmem->rx_front);

	rx->base = base + readl(&chan_shmem->tx_base);
	rx->rear = base + readl(&chan_shmem->tx_rear);
	rx->front = base + readl(&chan_shmem->tx_front);

	dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
		 achan->id, achan->poll_completion, achan->mlen, achan->qlen);
}

/**
 * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
 * firmware.
 * @achan: ACPM channel info.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
{
	struct device *dev = achan->acpm->dev;
	struct acpm_rx_data *rx_data;
	size_t cmd_size, n_cmd;
	int i;

	if (achan->mlen == 0)
		return 0;

	cmd_size = sizeof(*(achan->rx_data[0].cmd));
	n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);

	for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
		rx_data = &achan->rx_data[i];
		rx_data->n_cmd = n_cmd;
		rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
		if (!rx_data->cmd)
			return -ENOMEM;
	}

	return 0;
}

/**
 * acpm_free_mbox_chans() - free mailbox channels.
 * @acpm: pointer to driver data.
 */
static void acpm_free_mbox_chans(struct acpm_info *acpm)
{
	int i;

	for (i = 0; i < acpm->num_chans; i++)
		if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
			mbox_free_channel(acpm->chans[i].chan);
}

/**
 * acpm_channels_init() - initialize channels based on the configuration data in
 * the shared memory.
 * @acpm: pointer to driver data.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_channels_init(struct acpm_info *acpm)
{
	struct acpm_shmem __iomem *shmem = acpm->shmem;
	struct acpm_chan_shmem __iomem *chans_shmem;
	struct device *dev = acpm->dev;
	int i, ret;

	acpm->num_chans = readl(&shmem->num_chans);
	acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
				   GFP_KERNEL);
	if (!acpm->chans)
		return -ENOMEM;

	chans_shmem = acpm->sram_base + readl(&shmem->chans);

	for (i = 0; i < acpm->num_chans; i++) {
		struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
		struct acpm_chan *achan = &acpm->chans[i];
		struct mbox_client *cl = &achan->cl;

		achan->acpm = acpm;

		acpm_chan_shmem_get_params(achan, chan_shmem);

		ret = acpm_achan_alloc_cmds(achan);
		if (ret)
			return ret;

		mutex_init(&achan->rx_lock);
		mutex_init(&achan->tx_lock);

		cl->dev = dev;

		achan->chan = mbox_request_channel(cl, 0);
		if (IS_ERR(achan->chan)) {
			acpm_free_mbox_chans(acpm);
			return PTR_ERR(achan->chan);
		}
	}

	return 0;
}

/**
 * acpm_setup_ops() - setup the operations structures.
 * @acpm: pointer to the driver data.
 */
static void acpm_setup_ops(struct acpm_info *acpm)
{
	struct acpm_dvfs_ops *dvfs_ops = &acpm->handle.ops.dvfs_ops;
	struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;

	dvfs_ops->set_rate = acpm_dvfs_set_rate;
	dvfs_ops->get_rate = acpm_dvfs_get_rate;

	pmic_ops->read_reg = acpm_pmic_read_reg;
	pmic_ops->bulk_read = acpm_pmic_bulk_read;
	pmic_ops->write_reg = acpm_pmic_write_reg;
	pmic_ops->bulk_write = acpm_pmic_bulk_write;
	pmic_ops->update_reg = acpm_pmic_update_reg;
}

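/**
 * acpm_clk_pdev_unregister() - devm action that unregisters the ACPM clocks
 * device registered at probe time.
 * @data: pointer to the ACPM clocks platform device.
 */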
static void acpm_clk_pdev_unregister(void *data)
{
	platform_device_unregister(data);
}

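/**
 * acpm_probe() - map the SRAM shared memory, initialize the channels and
 * register the devices that depend on the ACPM firmware.
 * @pdev: ACPM platform device.
 *
 * Return: 0 on success, -errno otherwise.
 */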
static int acpm_probe(struct platform_device *pdev)
{
	const struct acpm_match_data *match_data;
	struct platform_device *acpm_clk_pdev;
	struct device *dev = &pdev->dev;
	struct device_node *shmem;
	struct acpm_info *acpm;
	resource_size_t size;
	struct resource res;
	int ret;

	acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
	if (!acpm)
		return -ENOMEM;

	shmem = of_parse_phandle(dev->of_node, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to get shared memory.\n");

	size = resource_size(&res);
	acpm->sram_base = devm_ioremap(dev, res.start, size);
	if (!acpm->sram_base)
		return dev_err_probe(dev, -ENOMEM,
				     "Failed to ioremap shared memory.\n");

	match_data = of_device_get_match_data(dev);
	if (!match_data)
		return dev_err_probe(dev, -EINVAL,
				     "Failed to get match data.\n");

	acpm->shmem = acpm->sram_base + match_data->initdata_base;
	acpm->dev = dev;

	ret = acpm_channels_init(acpm);
	if (ret)
		return ret;

	acpm_setup_ops(acpm);

	platform_set_drvdata(pdev, acpm);

	acpm_clk_pdev = platform_device_register_data(dev,
						      match_data->acpm_clk_dev_name,
						      PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(acpm_clk_pdev))
		return dev_err_probe(dev, PTR_ERR(acpm_clk_pdev),
				     "Failed to register ACPM clocks device.\n");

	ret = devm_add_action_or_reset(dev, acpm_clk_pdev_unregister,
				       acpm_clk_pdev);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to add devm action.\n");

	return devm_of_platform_populate(dev);
}

/**
 * acpm_handle_put() - release the handle acquired by acpm_get_by_node().
 * @handle: Handle acquired by acpm_get_by_node().
 */
static void acpm_handle_put(const struct acpm_handle *handle)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct device *dev = acpm->dev;

	module_put(dev->driver->owner);
	/* Drop reference taken with of_find_device_by_node(). */
	put_device(dev);
}

/**
 * devm_acpm_release() - devres release method.
 * @dev: pointer to device.
 * @res: pointer to resource.
 */
static void devm_acpm_release(struct device *dev, void *res)
{
	acpm_handle_put(*(struct acpm_handle **)res);
}

/**
 * acpm_get_by_node() - get the ACPM handle using node pointer.
 * @dev: device pointer requesting ACPM handle.
 * @np: ACPM device tree node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
static const struct acpm_handle *acpm_get_by_node(struct device *dev,
						  struct device_node *np)
{
	struct platform_device *pdev;
	struct device_link *link;
	struct acpm_info *acpm;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);

	acpm = platform_get_drvdata(pdev);
	if (!acpm) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (!try_module_get(pdev->dev.driver->owner)) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
	if (!link) {
		dev_err(&pdev->dev,
			"Failed to create device link to consumer %s.\n",
			dev_name(dev));
		platform_device_put(pdev);
		module_put(pdev->dev.driver->owner);
		return ERR_PTR(-EINVAL);
	}

	return &acpm->handle;
}

/**
 * devm_acpm_get_by_node() - managed get handle using node pointer.
 * @dev: device pointer requesting ACPM handle.
 * @np: ACPM device tree node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
						struct device_node *np)
{
	const struct acpm_handle **ptr, *handle;

	ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	handle = acpm_get_by_node(dev, np);
	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
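
/*
 * Typical consumer usage (an illustrative sketch; the ops signatures live in
 * <linux/firmware/samsung/exynos-acpm-protocol.h>):
 *
 *	const struct acpm_handle *handle;
 *
 *	handle = devm_acpm_get_by_node(dev, acpm_np);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * The protocol ops are then reachable through the handle, e.g.
 * handle->ops.pmic_ops.read_reg().
 */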

static const struct acpm_match_data acpm_gs101 = {
	.initdata_base = ACPM_GS101_INITDATA_BASE,
	.acpm_clk_dev_name = "gs101-acpm-clk",
};

static const struct of_device_id acpm_match[] = {
	{
		.compatible = "google,gs101-acpm-ipc",
		.data = &acpm_gs101,
	},
	{},
};
MODULE_DEVICE_TABLE(of, acpm_match);

static struct platform_driver acpm_driver = {
	.probe = acpm_probe,
	.driver = {
		.name = "exynos-acpm-protocol",
		.of_match_table = acpm_match,
	},
};
module_platform_driver(acpm_driver);

MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
MODULE_LICENSE("GPL");