// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Samsung Electronics Co., Ltd.
 * Copyright 2020 Google LLC.
 * Copyright 2024 Linaro Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "exynos-acpm.h"
#include "exynos-acpm-pmic.h"

#define ACPM_PROTOCOL_SEQNUM		GENMASK(21, 16)

#define ACPM_POLL_TIMEOUT_US		(100 * USEC_PER_MSEC)
#define ACPM_TX_TIMEOUT_US		500000

#define ACPM_GS101_INITDATA_BASE	0xa000

/**
 * struct acpm_shmem - shared memory configuration information.
 * @reserved: unused fields.
 * @chans: offset to array of struct acpm_chan_shmem.
 * @reserved1: unused fields.
 * @num_chans: number of channels.
 */
struct acpm_shmem {
	u32 reserved[2];
	u32 chans;
	u32 reserved1[3];
	u32 num_chans;
};

/**
 * struct acpm_chan_shmem - descriptor of a shared memory channel.
 *
 * @id: channel ID.
 * @reserved: unused fields.
 * @rx_rear: rear pointer of APM RX queue (TX for AP).
 * @rx_front: front pointer of APM RX queue (TX for AP).
 * @rx_base: base address of APM RX queue (TX for AP).
 * @reserved1: unused fields.
 * @tx_rear: rear pointer of APM TX queue (RX for AP).
 * @tx_front: front pointer of APM TX queue (RX for AP).
 * @tx_base: base address of APM TX queue (RX for AP).
 * @qlen: queue length. Applies to both TX/RX queues.
 * @mlen: message length. Applies to both TX/RX queues.
 * @reserved2: unused fields.
 * @poll_completion: true when the channel works on polling.
 */
struct acpm_chan_shmem {
	u32 id;
	u32 reserved[3];
	u32 rx_rear;
	u32 rx_front;
	u32 rx_base;
	u32 reserved1[3];
	u32 tx_rear;
	u32 tx_front;
	u32 tx_base;
	u32 qlen;
	u32 mlen;
	u32 reserved2[2];
	u32 poll_completion;
};
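
/*
 * Both TX and RX queues are rings of @qlen slots of @mlen bytes each in
 * SRAM. As a sketch of the index math used throughout this driver, the
 * slot for ring index i lives at base + mlen * i, and indices advance as
 * i = (i + 1) % qlen (see acpm_get_rx() and acpm_do_xfer() below).
 */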

/**
 * struct acpm_queue - exynos acpm queue.
 *
 * @rear: rear address of the queue.
 * @front: front address of the queue.
 * @base: base address of the queue.
 */
struct acpm_queue {
	void __iomem *rear;
	void __iomem *front;
	void __iomem *base;
};

/**
 * struct acpm_rx_data - RX queue data.
 *
 * @cmd: pointer to where the data shall be saved.
 * @n_cmd: number of 32-bit commands.
 * @response: true if the client expects the RX data.
 */
struct acpm_rx_data {
	u32 *cmd;
	size_t n_cmd;
	bool response;
};

#define ACPM_SEQNUM_MAX		64

/**
 * struct acpm_chan - driver internal representation of a channel.
 * @cl: mailbox client.
 * @chan: mailbox channel.
 * @acpm: pointer to driver private data.
 * @tx: TX queue. The enqueue is done by the host.
 *	- front index is written by the host.
 *	- rear index is written by the firmware.
 *
 * @rx: RX queue. The enqueue is done by the firmware.
 *	- front index is written by the firmware.
 *	- rear index is written by the host.
 * @tx_lock: protects TX queue.
 * @rx_lock: protects RX queue.
 * @qlen: queue length. Applies to both TX/RX queues.
 * @mlen: message length. Applies to both TX/RX queues.
 * @seqnum: sequence number of the last message enqueued on TX queue.
 * @id: channel ID.
 * @poll_completion: indicates if the transfer needs to be polled for
 *	completion or interrupt mode is used.
 * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
 * @rx_data: internal buffer used to drain the RX queue.
 */
struct acpm_chan {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct acpm_info *acpm;
	struct acpm_queue tx;
	struct acpm_queue rx;
	struct mutex tx_lock;
	struct mutex rx_lock;

	unsigned int qlen;
	unsigned int mlen;
	u8 seqnum;
	u8 id;
	bool poll_completion;

	DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
	struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
};
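
/*
 * Sequence numbers run 1..ACPM_SEQNUM_MAX - 1 and are stamped into bits
 * 21:16 of the first command word; seqnum N maps to bit N - 1 of
 * @bitmap_seqnum. A minimal sketch of the bookkeeping done in
 * acpm_prepare_xfer():
 *
 *	txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);
 *	set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
 *
 * The bit is cleared once the matching response is consumed from the RX
 * queue.
 */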

/**
 * struct acpm_info - driver's private data.
 * @shmem: pointer to the SRAM configuration data.
 * @sram_base: base address of SRAM.
 * @chans: pointer to the ACPM channel parameters retrieved from SRAM.
 * @dev: pointer to the exynos-acpm device.
 * @handle: instance of acpm_handle to send to clients.
 * @num_chans: number of channels available for this controller.
 */
struct acpm_info {
	struct acpm_shmem __iomem *shmem;
	void __iomem *sram_base;
	struct acpm_chan *chans;
	struct device *dev;
	struct acpm_handle handle;
	u32 num_chans;
};

/**
 * struct acpm_match_data - of_device_id data.
 * @initdata_base: offset in SRAM where the channels configuration resides.
 */
struct acpm_match_data {
	loff_t initdata_base;
};

#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)

/**
 * acpm_get_saved_rx() - get the response if it was already saved.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer to get response for.
 * @tx_seqnum: xfer TX sequence number.
 */
static void acpm_get_saved_rx(struct acpm_chan *achan,
			      const struct acpm_xfer *xfer, u32 tx_seqnum)
{
	const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1];
	u32 rx_seqnum;

	if (!rx_data->response)
		return;

	rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]);

	if (rx_seqnum == tx_seqnum) {
		memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
		clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
	}
}

/**
 * acpm_get_rx() - get response from RX queue.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer to get response for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
{
	u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
	const void __iomem *base, *addr;
	struct acpm_rx_data *rx_data;
	u32 i, val, mlen;
	bool rx_set = false;

	guard(mutex)(&achan->rx_lock);

	rx_front = readl(achan->rx.front);
	i = readl(achan->rx.rear);

	tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);

	if (i == rx_front) {
		acpm_get_saved_rx(achan, xfer, tx_seqnum);
		return 0;
	}

	base = achan->rx.base;
	mlen = achan->mlen;

	/* Drain RX queue. */
	do {
		/* Read RX seqnum. */
		addr = base + mlen * i;
		val = readl(addr);

		rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
		if (!rx_seqnum)
			return -EIO;
		/*
		 * Message seqnum starts at 1, whereas the driver indexes
		 * messages from 0.
		 */
		seqnum = rx_seqnum - 1;
		rx_data = &achan->rx_data[seqnum];

		if (rx_data->response) {
			if (rx_seqnum == tx_seqnum) {
				__ioread32_copy(xfer->rxd, addr,
						xfer->rxlen / 4);
				rx_set = true;
				clear_bit(seqnum, achan->bitmap_seqnum);
			} else {
				/*
				 * The RX data corresponds to another request.
				 * Save the data to drain the queue, but don't
				 * clear the bitmap yet. It will be cleared
				 * after the response is copied to the request.
				 */
				__ioread32_copy(rx_data->cmd, addr,
						xfer->rxlen / 4);
			}
		} else {
			clear_bit(seqnum, achan->bitmap_seqnum);
		}

		i = (i + 1) % achan->qlen;
	} while (i != rx_front);

	/* We saved all responses, mark RX empty. */
	writel(rx_front, achan->rx.rear);

	/*
	 * If the response was not in this iteration of the queue, check if the
	 * RX data was previously saved.
	 */
	if (!rx_set)
		acpm_get_saved_rx(achan, xfer, tx_seqnum);

	return 0;
}

/**
 * acpm_dequeue_by_polling() - RX dequeue by polling.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_dequeue_by_polling(struct acpm_chan *achan,
				   const struct acpm_xfer *xfer)
{
	struct device *dev = achan->acpm->dev;
	ktime_t timeout;
	u32 seqnum;
	int ret;

	seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);

	timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);
	do {
		ret = acpm_get_rx(achan, xfer);
		if (ret)
			return ret;

		if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
			return 0;

		/* Determined experimentally. */
		udelay(20);
	} while (ktime_before(ktime_get(), timeout));

	dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n",
		achan->id, seqnum, achan->bitmap_seqnum[0]);

	return -ETIME;
}

/**
 * acpm_wait_for_queue_slots() - wait for queue slots.
 *
 * @achan: ACPM channel info.
 * @next_tx_front: next front index of the TX queue.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
{
	u32 val;
	int ret;

	/*
	 * Wait for RX front to keep up with TX front. Make sure there's at
	 * least one element between them.
	 */
	ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret) {
		dev_err(achan->acpm->dev, "RX front cannot keep up with TX front.\n");
		return ret;
	}

	ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret)
		dev_err(achan->acpm->dev, "TX queue is full.\n");

	return ret;
}

/**
 * acpm_prepare_xfer() - prepare a transfer before writing the message to the
 * TX queue.
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer being prepared.
 */
static void acpm_prepare_xfer(struct acpm_chan *achan,
			      const struct acpm_xfer *xfer)
{
	struct acpm_rx_data *rx_data;
	u32 *txd = (u32 *)xfer->txd;

	/* Prevent chan->seqnum from being re-used */
	do {
		if (++achan->seqnum == ACPM_SEQNUM_MAX)
			achan->seqnum = 1;
	} while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));

	txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);

	/* Clear data for upcoming responses */
	rx_data = &achan->rx_data[achan->seqnum - 1];
	memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
	if (xfer->rxd)
		rx_data->response = true;

	/* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
	set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
}

/**
 * acpm_wait_for_message_response() - a helper to group all possible ways of
 * waiting for a synchronous message response.
 *
 * @achan: ACPM channel info.
 * @xfer: reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_message_response(struct acpm_chan *achan,
					  const struct acpm_xfer *xfer)
{
	/* Just polling mode supported for now. */
	return acpm_dequeue_by_polling(achan, xfer);
}

/**
 * acpm_do_xfer() - do one transfer.
 * @handle: pointer to the acpm handle.
 * @xfer: transfer to initiate and wait for response.
 *
 * Return: 0 on success, -errno otherwise.
 */
int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct exynos_mbox_msg msg;
	struct acpm_chan *achan;
	u32 idx, tx_front;
	int ret;

	if (xfer->acpm_chan_id >= acpm->num_chans)
		return -EINVAL;

	achan = &acpm->chans[xfer->acpm_chan_id];

	if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
		return -EINVAL;

	if (!achan->poll_completion) {
		dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
		return -EOPNOTSUPP;
	}

	msg.chan_id = xfer->acpm_chan_id;
	msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;

	scoped_guard(mutex, &achan->tx_lock) {
		tx_front = readl(achan->tx.front);
		idx = (tx_front + 1) % achan->qlen;

		ret = acpm_wait_for_queue_slots(achan, idx);
		if (ret)
			return ret;

		acpm_prepare_xfer(achan, xfer);

		/* Write TX command. */
		__iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
				 xfer->txd, xfer->txlen / 4);

		/* Advance TX front. */
		writel(idx, achan->tx.front);

		ret = mbox_send_message(achan->chan, (void *)&msg);
		if (ret < 0)
			return ret;

		mbox_client_txdone(achan->chan, 0);
	}

	return acpm_wait_for_message_response(achan, xfer);
}
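
/*
 * A minimal caller sketch, assuming a hypothetical two-word command encoding
 * and channel ID 2 (the real encodings and channel IDs live in the protocol
 * clients, e.g. exynos-acpm-pmic.c):
 *
 *	u32 txd[2] = { }, rxd[2] = { };
 *	struct acpm_xfer xfer = {
 *		.txd = txd, .txlen = sizeof(txd),
 *		.rxd = rxd, .rxlen = sizeof(rxd),
 *		.acpm_chan_id = 2,
 *	};
 *	int err = acpm_do_xfer(handle, &xfer);
 */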

/**
 * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
 * TX/RX queues.
 * @achan: ACPM channel info.
 * @chan_shmem: __iomem pointer to a channel described in shared memory.
 */
static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
				       struct acpm_chan_shmem __iomem *chan_shmem)
{
	void __iomem *base = achan->acpm->sram_base;
	struct acpm_queue *rx = &achan->rx;
	struct acpm_queue *tx = &achan->tx;

	achan->mlen = readl(&chan_shmem->mlen);
	achan->poll_completion = readl(&chan_shmem->poll_completion);
	achan->id = readl(&chan_shmem->id);
	achan->qlen = readl(&chan_shmem->qlen);

	/*
	 * The shared memory descriptor is written from the APM firmware's
	 * point of view, hence the swap: the APM RX queue is the AP TX queue
	 * and vice versa.
	 */
	tx->base = base + readl(&chan_shmem->rx_base);
	tx->rear = base + readl(&chan_shmem->rx_rear);
	tx->front = base + readl(&chan_shmem->rx_front);

	rx->base = base + readl(&chan_shmem->tx_base);
	rx->rear = base + readl(&chan_shmem->tx_rear);
	rx->front = base + readl(&chan_shmem->tx_front);

	dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
		 achan->id, achan->poll_completion, achan->mlen, achan->qlen);
}

/**
 * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
 * firmware.
 * @achan: ACPM channel info.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
{
	struct device *dev = achan->acpm->dev;
	struct acpm_rx_data *rx_data;
	size_t cmd_size, n_cmd;
	int i;

	if (achan->mlen == 0)
		return 0;

	cmd_size = sizeof(*(achan->rx_data[0].cmd));
	n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);

	for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
		rx_data = &achan->rx_data[i];
		rx_data->n_cmd = n_cmd;
		rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
		if (!rx_data->cmd)
			return -ENOMEM;
	}

	return 0;
}
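
/*
 * For example, a channel with mlen = 16 gets ACPM_SEQNUM_MAX response
 * buffers of n_cmd = DIV_ROUND_UP(16, sizeof(u32)) = 4 commands each (the
 * mlen value here is illustrative; the real one comes from SRAM).
 */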

/**
 * acpm_free_mbox_chans() - free mailbox channels.
 * @acpm: pointer to driver data.
 */
static void acpm_free_mbox_chans(struct acpm_info *acpm)
{
	int i;

	for (i = 0; i < acpm->num_chans; i++)
		if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
			mbox_free_channel(acpm->chans[i].chan);
}

/**
 * acpm_channels_init() - initialize channels based on the configuration data
 * in the shared memory.
 * @acpm: pointer to driver data.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_channels_init(struct acpm_info *acpm)
{
	struct acpm_shmem __iomem *shmem = acpm->shmem;
	struct acpm_chan_shmem __iomem *chans_shmem;
	struct device *dev = acpm->dev;
	int i, ret;

	acpm->num_chans = readl(&shmem->num_chans);
	acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
				   GFP_KERNEL);
	if (!acpm->chans)
		return -ENOMEM;

	chans_shmem = acpm->sram_base + readl(&shmem->chans);

	for (i = 0; i < acpm->num_chans; i++) {
		struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
		struct acpm_chan *achan = &acpm->chans[i];
		struct mbox_client *cl = &achan->cl;

		achan->acpm = acpm;

		acpm_chan_shmem_get_params(achan, chan_shmem);

		ret = acpm_achan_alloc_cmds(achan);
		if (ret)
			return ret;

		mutex_init(&achan->rx_lock);
		mutex_init(&achan->tx_lock);

		cl->dev = dev;

		achan->chan = mbox_request_channel(cl, 0);
		if (IS_ERR(achan->chan)) {
			acpm_free_mbox_chans(acpm);
			return PTR_ERR(achan->chan);
		}
	}

	return 0;
}

/**
 * acpm_setup_ops() - setup the operations structures.
 * @acpm: pointer to the driver data.
 */
static void acpm_setup_ops(struct acpm_info *acpm)
{
	struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;

	pmic_ops->read_reg = acpm_pmic_read_reg;
	pmic_ops->bulk_read = acpm_pmic_bulk_read;
	pmic_ops->write_reg = acpm_pmic_write_reg;
	pmic_ops->bulk_write = acpm_pmic_bulk_write;
	pmic_ops->update_reg = acpm_pmic_update_reg;
}

static int acpm_probe(struct platform_device *pdev)
{
	const struct acpm_match_data *match_data;
	struct device *dev = &pdev->dev;
	struct device_node *shmem;
	struct acpm_info *acpm;
	resource_size_t size;
	struct resource res;
	int ret;

	acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
	if (!acpm)
		return -ENOMEM;

	shmem = of_parse_phandle(dev->of_node, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to get shared memory.\n");

	size = resource_size(&res);
	acpm->sram_base = devm_ioremap(dev, res.start, size);
	if (!acpm->sram_base)
		return dev_err_probe(dev, -ENOMEM,
				     "Failed to ioremap shared memory.\n");

	match_data = of_device_get_match_data(dev);
	if (!match_data)
		return dev_err_probe(dev, -EINVAL,
				     "Failed to get match data.\n");

	acpm->shmem = acpm->sram_base + match_data->initdata_base;
	acpm->dev = dev;

	ret = acpm_channels_init(acpm);
	if (ret)
		return ret;

	acpm_setup_ops(acpm);

	platform_set_drvdata(pdev, acpm);

	return devm_of_platform_populate(dev);
}

/**
 * acpm_handle_put() - release the handle acquired by acpm_get_by_node().
 * @handle: handle acquired by acpm_get_by_node().
 */
static void acpm_handle_put(const struct acpm_handle *handle)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct device *dev = acpm->dev;

	module_put(dev->driver->owner);
	/* Drop reference taken with of_find_device_by_node(). */
	put_device(dev);
}

/**
 * devm_acpm_release() - devres release method.
 * @dev: pointer to device.
 * @res: pointer to resource.
 */
static void devm_acpm_release(struct device *dev, void *res)
{
	acpm_handle_put(*(struct acpm_handle **)res);
}

/**
 * acpm_get_by_node() - get the ACPM handle using node pointer.
 * @dev: device pointer requesting ACPM handle.
 * @np: ACPM device tree node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
static const struct acpm_handle *acpm_get_by_node(struct device *dev,
						  struct device_node *np)
{
	struct platform_device *pdev;
	struct device_link *link;
	struct acpm_info *acpm;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);

	acpm = platform_get_drvdata(pdev);
	if (!acpm) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (!try_module_get(pdev->dev.driver->owner)) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
	if (!link) {
		dev_err(&pdev->dev,
			"Failed to create device link to consumer %s.\n",
			dev_name(dev));
		platform_device_put(pdev);
		module_put(pdev->dev.driver->owner);
		return ERR_PTR(-EINVAL);
	}

	return &acpm->handle;
}

/**
 * devm_acpm_get_by_node() - managed get handle using node pointer.
 * @dev: device pointer requesting ACPM handle.
 * @np: ACPM device tree node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
						struct device_node *np)
{
	const struct acpm_handle **ptr, *handle;

	ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	handle = acpm_get_by_node(dev, np);
	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
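
/*
 * A minimal consumer sketch, assuming @np was already resolved to this
 * driver's device tree node (e.g. via a phandle in the consumer node):
 *
 *	const struct acpm_handle *handle;
 *
 *	handle = devm_acpm_get_by_node(dev, np);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * The handle's ops (e.g. handle->ops.pmic_ops) can then be used for the
 * lifetime of @dev; the reference is dropped automatically on detach.
 */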

static const struct acpm_match_data acpm_gs101 = {
	.initdata_base = ACPM_GS101_INITDATA_BASE,
};

static const struct of_device_id acpm_match[] = {
	{
		.compatible = "google,gs101-acpm-ipc",
		.data = &acpm_gs101,
	},
	{},
};
MODULE_DEVICE_TABLE(of, acpm_match);

static struct platform_driver acpm_driver = {
	.probe = acpm_probe,
	.driver = {
		.name = "exynos-acpm-protocol",
		.of_match_table = acpm_match,
	},
};
module_platform_driver(acpm_driver);

MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
MODULE_LICENSE("GPL");