// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Samsung Electronics Co., Ltd.
 * Copyright 2020 Google LLC.
 * Copyright 2024 Linaro Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "exynos-acpm.h"
#include "exynos-acpm-pmic.h"

#define ACPM_PROTOCOL_SEQNUM		GENMASK(21, 16)

/* The unit of counter is 20 us. 5000 * 20 = 100 ms */
#define ACPM_POLL_TIMEOUT		5000
#define ACPM_TX_TIMEOUT_US		500000

#define ACPM_GS101_INITDATA_BASE	0xa000

/**
 * struct acpm_shmem - shared memory configuration information.
 * @reserved:	unused fields.
 * @chans:	offset to array of struct acpm_chan_shmem.
 * @reserved1:	unused fields.
 * @num_chans:	number of channels.
 */
struct acpm_shmem {
	u32 reserved[2];
	u32 chans;
	u32 reserved1[3];
	u32 num_chans;
};

/**
 * struct acpm_chan_shmem - descriptor of a shared memory channel.
 *
 * @id:			channel ID.
 * @reserved:		unused fields.
 * @rx_rear:		rear pointer of APM RX queue (TX for AP).
 * @rx_front:		front pointer of APM RX queue (TX for AP).
 * @rx_base:		base address of APM RX queue (TX for AP).
 * @reserved1:		unused fields.
 * @tx_rear:		rear pointer of APM TX queue (RX for AP).
 * @tx_front:		front pointer of APM TX queue (RX for AP).
 * @tx_base:		base address of APM TX queue (RX for AP).
 * @qlen:		queue length. Applies to both TX/RX queues.
 * @mlen:		message length. Applies to both TX/RX queues.
 * @reserved2:		unused fields.
 * @poll_completion:	true when the channel works on polling.
 */
struct acpm_chan_shmem {
	u32 id;
	u32 reserved[3];
	u32 rx_rear;
	u32 rx_front;
	u32 rx_base;
	u32 reserved1[3];
	u32 tx_rear;
	u32 tx_front;
	u32 tx_base;
	u32 qlen;
	u32 mlen;
	u32 reserved2[2];
	u32 poll_completion;
};
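
/*
 * Note (editorial): the rx_* and tx_* fields above are named from the APM
 * firmware's point of view. acpm_chan_shmem_get_params() below deliberately
 * crosses them over: the firmware's RX queue becomes the host's TX queue and
 * vice versa, as the kernel-doc above already hints.
 */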

/**
 * struct acpm_queue - exynos acpm queue.
 *
 * @rear:	rear address of the queue.
 * @front:	front address of the queue.
 * @base:	base address of the queue.
 */
struct acpm_queue {
	void __iomem *rear;
	void __iomem *front;
	void __iomem *base;
};
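
/*
 * Illustrative sketch (not part of the driver): both queues are ring buffers
 * in SRAM indexed through the pointers above. A queue is empty when
 * front == rear, indices advance modulo the queue length, and the TX path
 * keeps one slot free so that front never wraps onto rear:
 *
 *	next = (readl(q->front) + 1) % achan->qlen;	(slot to claim)
 *	empty = readl(q->front) == readl(q->rear);
 *	full = next == readl(q->rear);			(TX must wait)
 *
 * acpm_do_xfer(), acpm_wait_for_queue_slots() and acpm_get_rx() below
 * implement exactly this arithmetic.
 */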

/**
 * struct acpm_rx_data - RX queue data.
 *
 * @cmd:	pointer to where the data shall be saved.
 * @n_cmd:	number of 32-bit commands.
 * @response:	true if the client expects the RX data.
 */
struct acpm_rx_data {
	u32 *cmd;
	size_t n_cmd;
	bool response;
};

#define ACPM_SEQNUM_MAX    64

/**
 * struct acpm_chan - driver internal representation of a channel.
 * @cl:		mailbox client.
 * @chan:	mailbox channel.
 * @acpm:	pointer to driver private data.
 * @tx:		TX queue. The enqueue is done by the host.
 *			- front index is written by the host.
 *			- rear index is written by the firmware.
 *
 * @rx:		RX queue. The enqueue is done by the firmware.
 *			- front index is written by the firmware.
 *			- rear index is written by the host.
 * @tx_lock:	protects TX queue.
 * @rx_lock:	protects RX queue.
 * @qlen:	queue length. Applies to both TX/RX queues.
 * @mlen:	message length. Applies to both TX/RX queues.
 * @seqnum:	sequence number of the last message enqueued on TX queue.
 * @id:		channel ID.
 * @poll_completion:	indicates if the transfer needs to be polled for
 *			completion or interrupt mode is used.
 * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
 * @rx_data:	internal buffer used to drain the RX queue.
 */
struct acpm_chan {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct acpm_info *acpm;
	struct acpm_queue tx;
	struct acpm_queue rx;
	struct mutex tx_lock;
	struct mutex rx_lock;

	unsigned int qlen;
	unsigned int mlen;
	u8 seqnum;
	u8 id;
	bool poll_completion;

	DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
	struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
};

/**
 * struct acpm_info - driver's private data.
 * @shmem:	pointer to the SRAM configuration data.
 * @sram_base:	base address of SRAM.
 * @chans:	pointer to the ACPM channel parameters retrieved from SRAM.
 * @dev:	pointer to the exynos-acpm device.
 * @handle:	instance of acpm_handle to send to clients.
 * @num_chans:	number of channels available for this controller.
 */
struct acpm_info {
	struct acpm_shmem __iomem *shmem;
	void __iomem *sram_base;
	struct acpm_chan *chans;
	struct device *dev;
	struct acpm_handle handle;
	u32 num_chans;
};

/**
 * struct acpm_match_data - of_device_id data.
 * @initdata_base:	offset in SRAM where the channels configuration resides.
 */
struct acpm_match_data {
	loff_t initdata_base;
};

#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)

/**
 * acpm_get_rx() - get response from RX queue.
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer to get response for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
{
	u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
	const void __iomem *base, *addr;
	struct acpm_rx_data *rx_data;
	u32 i, val, mlen;
	bool rx_set = false;

	guard(mutex)(&achan->rx_lock);

	rx_front = readl(achan->rx.front);
	i = readl(achan->rx.rear);

	/* Bail out if RX is empty. */
	if (i == rx_front)
		return 0;

	base = achan->rx.base;
	mlen = achan->mlen;

	tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);

	/* Drain RX queue. */
	do {
		/* Read RX seqnum. */
		addr = base + mlen * i;
		val = readl(addr);

		rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
		if (!rx_seqnum)
			return -EIO;
		/*
		 * mssg seqnum starts with value 1, whereas the driver considers
		 * the first mssg at index 0.
		 */
		seqnum = rx_seqnum - 1;
		rx_data = &achan->rx_data[seqnum];

		if (rx_data->response) {
			if (rx_seqnum == tx_seqnum) {
				__ioread32_copy(xfer->rxd, addr,
						xfer->rxlen / 4);
				rx_set = true;
				clear_bit(seqnum, achan->bitmap_seqnum);
			} else {
				/*
				 * The RX data corresponds to another request.
				 * Save the data to drain the queue, but don't
				 * clear the bitmap yet. It will be cleared
				 * after the response is copied to the request.
				 */
				__ioread32_copy(rx_data->cmd, addr,
						xfer->rxlen / 4);
			}
		} else {
			clear_bit(seqnum, achan->bitmap_seqnum);
		}

		i = (i + 1) % achan->qlen;
	} while (i != rx_front);

	/* We saved all responses, mark RX empty. */
	writel(rx_front, achan->rx.rear);

	/*
	 * If the response was not in this iteration of the queue, check if the
	 * RX data was previously saved.
	 */
	rx_data = &achan->rx_data[tx_seqnum - 1];
	if (!rx_set && rx_data->response) {
		rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM,
				      rx_data->cmd[0]);

		if (rx_seqnum == tx_seqnum) {
			memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
			clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
		}
	}

	return 0;
}

/**
 * acpm_dequeue_by_polling() - RX dequeue by polling.
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_dequeue_by_polling(struct acpm_chan *achan,
				   const struct acpm_xfer *xfer)
{
	struct device *dev = achan->acpm->dev;
	unsigned int cnt_20us = 0;
	u32 seqnum;
	int ret;

	seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);

	do {
		ret = acpm_get_rx(achan, xfer);
		if (ret)
			return ret;

		if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
			return 0;

		/* Determined experimentally. */
		usleep_range(20, 30);
		cnt_20us++;
	} while (cnt_20us < ACPM_POLL_TIMEOUT);
	dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %u.\n",
		achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);

	return -ETIME;
}
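
/*
 * Worked out from the constants above (an observation, not part of the
 * driver): the loop sleeps 20-30 us per iteration and gives up after
 * ACPM_POLL_TIMEOUT = 5000 iterations, i.e. after roughly 100-150 ms plus
 * the time spent draining the RX queue.
 */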

/**
 * acpm_wait_for_queue_slots() - wait for queue slots.
 *
 * @achan:		ACPM channel info.
 * @next_tx_front:	next front index of the TX queue.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
{
	u32 val;
	int ret;

	/*
	 * Wait for RX front to keep up with TX front. Make sure there's at
	 * least one element between them.
	 */
	ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret) {
		dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n");
		return ret;
	}

	ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret)
		dev_err(achan->acpm->dev, "TX queue is full.\n");

	return ret;
}

/**
 * acpm_prepare_xfer() - prepare a transfer before writing the message to the
 * TX queue.
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer being prepared.
 */
static void acpm_prepare_xfer(struct acpm_chan *achan,
			      const struct acpm_xfer *xfer)
{
	struct acpm_rx_data *rx_data;
	u32 *txd = (u32 *)xfer->txd;

	/* Prevent achan->seqnum from being re-used */
	do {
		if (++achan->seqnum == ACPM_SEQNUM_MAX)
			achan->seqnum = 1;
	} while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));

	txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);

	/* Clear data for upcoming responses */
	rx_data = &achan->rx_data[achan->seqnum - 1];
	memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
	if (xfer->rxd)
		rx_data->response = true;

	/* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
	set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
}
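
/*
 * Illustrative sketch (not part of the driver): the sequence number lives in
 * bits 21:16 of the first command word and is what lets acpm_get_rx() match a
 * response to its request. With ACPM_PROTOCOL_SEQNUM = GENMASK(21, 16):
 *
 *	txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, 5);		(tag request #5)
 *	seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rxd[0]);	(recover 5)
 *
 * Valid seqnums are 1..63; value - 1 indexes both bitmap_seqnum and rx_data,
 * and 0 is reserved so that an all-zeroes RX slot is detected as invalid.
 */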

/**
 * acpm_wait_for_message_response() - a helper to group all possible ways of
 * waiting for a synchronous message response.
 *
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_message_response(struct acpm_chan *achan,
					  const struct acpm_xfer *xfer)
{
	/* Just polling mode supported for now. */
	return acpm_dequeue_by_polling(achan, xfer);
}

/**
 * acpm_do_xfer() - do one transfer.
 * @handle:	pointer to the acpm handle.
 * @xfer:	transfer to initiate and wait for response.
 *
 * Return: 0 on success, -errno otherwise.
 */
int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct exynos_mbox_msg msg;
	struct acpm_chan *achan;
	u32 idx, tx_front;
	int ret;

	if (xfer->acpm_chan_id >= acpm->num_chans)
		return -EINVAL;

	achan = &acpm->chans[xfer->acpm_chan_id];

	if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
		return -EINVAL;

	if (!achan->poll_completion) {
		dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
		return -EOPNOTSUPP;
	}

	scoped_guard(mutex, &achan->tx_lock) {
		tx_front = readl(achan->tx.front);
		idx = (tx_front + 1) % achan->qlen;

		ret = acpm_wait_for_queue_slots(achan, idx);
		if (ret)
			return ret;

		acpm_prepare_xfer(achan, xfer);

		/* Write TX command. */
		__iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
				 xfer->txd, xfer->txlen / 4);

		/* Advance TX front. */
		writel(idx, achan->tx.front);
	}

	msg.chan_id = xfer->acpm_chan_id;
	msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
	ret = mbox_send_message(achan->chan, (void *)&msg);
	if (ret < 0)
		return ret;

	ret = acpm_wait_for_message_response(achan, xfer);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(achan->chan, ret);

	return ret;
}
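
/*
 * Illustrative sketch (not part of the driver): a protocol-layer user such as
 * exynos-acpm-pmic builds an acpm_xfer out of 32-bit command words and lets
 * acpm_do_xfer() tag it with a seqnum, enqueue it, ring the doorbell and poll
 * for the reply. The channel number and command payload below are made up for
 * the example; only the acpm_xfer plumbing is real:
 *
 *	u32 cmd[4] = { 0 };	(request; the driver fills in the seqnum)
 *	u32 rsp[4];		(response lands here because rxd is set)
 *	struct acpm_xfer xfer = {
 *		.txd = cmd,
 *		.rxd = rsp,
 *		.txlen = sizeof(cmd),
 *		.rxlen = sizeof(rsp),
 *		.acpm_chan_id = 2,
 *	};
 *	int ret = acpm_do_xfer(handle, &xfer);
 */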

/**
 * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
 * TX/RX queues.
 * @achan:	ACPM channel info.
 * @chan_shmem:	__iomem pointer to a channel described in shared memory.
 */
static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
				struct acpm_chan_shmem __iomem *chan_shmem)
{
	void __iomem *base = achan->acpm->sram_base;
	struct acpm_queue *rx = &achan->rx;
	struct acpm_queue *tx = &achan->tx;

	achan->mlen = readl(&chan_shmem->mlen);
	achan->poll_completion = readl(&chan_shmem->poll_completion);
	achan->id = readl(&chan_shmem->id);
	achan->qlen = readl(&chan_shmem->qlen);

	tx->base = base + readl(&chan_shmem->rx_base);
	tx->rear = base + readl(&chan_shmem->rx_rear);
	tx->front = base + readl(&chan_shmem->rx_front);

	rx->base = base + readl(&chan_shmem->tx_base);
	rx->rear = base + readl(&chan_shmem->tx_rear);
	rx->front = base + readl(&chan_shmem->tx_front);

	dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
		 achan->id, achan->poll_completion, achan->mlen, achan->qlen);
}

/**
 * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
 * firmware.
 * @achan:	ACPM channel info.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
{
	struct device *dev = achan->acpm->dev;
	struct acpm_rx_data *rx_data;
	size_t cmd_size, n_cmd;
	int i;

	if (achan->mlen == 0)
		return 0;

	cmd_size = sizeof(*(achan->rx_data[0].cmd));
	n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);

	for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
		rx_data = &achan->rx_data[i];
		rx_data->n_cmd = n_cmd;
		rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
		if (!rx_data->cmd)
			return -ENOMEM;
	}

	return 0;
}

/**
 * acpm_free_mbox_chans() - free mailbox channels.
 * @acpm:	pointer to driver data.
 */
static void acpm_free_mbox_chans(struct acpm_info *acpm)
{
	int i;

	for (i = 0; i < acpm->num_chans; i++)
		if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
			mbox_free_channel(acpm->chans[i].chan);
}

/**
 * acpm_channels_init() - initialize channels based on the configuration data in
 * the shared memory.
 * @acpm:	pointer to driver data.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_channels_init(struct acpm_info *acpm)
{
	struct acpm_shmem __iomem *shmem = acpm->shmem;
	struct acpm_chan_shmem __iomem *chans_shmem;
	struct device *dev = acpm->dev;
	int i, ret;

	acpm->num_chans = readl(&shmem->num_chans);
	acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
				   GFP_KERNEL);
	if (!acpm->chans)
		return -ENOMEM;

	chans_shmem = acpm->sram_base + readl(&shmem->chans);

	for (i = 0; i < acpm->num_chans; i++) {
		struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
		struct acpm_chan *achan = &acpm->chans[i];
		struct mbox_client *cl = &achan->cl;

		achan->acpm = acpm;

		acpm_chan_shmem_get_params(achan, chan_shmem);

		ret = acpm_achan_alloc_cmds(achan);
		if (ret)
			return ret;

		mutex_init(&achan->rx_lock);
		mutex_init(&achan->tx_lock);

		cl->dev = dev;

		achan->chan = mbox_request_channel(cl, 0);
		if (IS_ERR(achan->chan)) {
			acpm_free_mbox_chans(acpm);
			return PTR_ERR(achan->chan);
		}
	}

	return 0;
}

/**
 * acpm_setup_ops() - setup the operations structures.
 * @acpm:	pointer to the driver data.
 */
static void acpm_setup_ops(struct acpm_info *acpm)
{
	struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;

	pmic_ops->read_reg = acpm_pmic_read_reg;
	pmic_ops->bulk_read = acpm_pmic_bulk_read;
	pmic_ops->write_reg = acpm_pmic_write_reg;
	pmic_ops->bulk_write = acpm_pmic_bulk_write;
	pmic_ops->update_reg = acpm_pmic_update_reg;
}

static int acpm_probe(struct platform_device *pdev)
{
	const struct acpm_match_data *match_data;
	struct device *dev = &pdev->dev;
	struct device_node *shmem;
	struct acpm_info *acpm;
	resource_size_t size;
	struct resource res;
	int ret;

	acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
	if (!acpm)
		return -ENOMEM;

	shmem = of_parse_phandle(dev->of_node, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to get shared memory.\n");

	size = resource_size(&res);
	acpm->sram_base = devm_ioremap(dev, res.start, size);
	if (!acpm->sram_base)
		return dev_err_probe(dev, -ENOMEM,
				     "Failed to ioremap shared memory.\n");

	match_data = of_device_get_match_data(dev);
	if (!match_data)
		return dev_err_probe(dev, -EINVAL,
				     "Failed to get match data.\n");

	acpm->shmem = acpm->sram_base + match_data->initdata_base;
	acpm->dev = dev;

	ret = acpm_channels_init(acpm);
	if (ret)
		return ret;

	acpm_setup_ops(acpm);

	platform_set_drvdata(pdev, acpm);

	return 0;
}

/**
 * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle().
 * @handle:	Handle acquired by acpm_get_by_phandle().
 */
static void acpm_handle_put(const struct acpm_handle *handle)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct device *dev = acpm->dev;

	module_put(dev->driver->owner);
	/* Drop reference taken with of_find_device_by_node(). */
	put_device(dev);
}

/**
 * devm_acpm_release() - devres release method.
 * @dev: pointer to device.
 * @res: pointer to resource.
 */
static void devm_acpm_release(struct device *dev, void *res)
{
	acpm_handle_put(*(struct acpm_handle **)res);
}

/**
 * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
 * @dev:        device pointer requesting ACPM handle.
 * @property:   property name containing phandle on ACPM node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
						     const char *property)
{
	struct platform_device *pdev;
	struct device_node *acpm_np;
	struct device_link *link;
	struct acpm_info *acpm;

	acpm_np = of_parse_phandle(dev->of_node, property, 0);
	if (!acpm_np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(acpm_np);
	if (!pdev) {
		dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
		of_node_put(acpm_np);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(acpm_np);

	acpm = platform_get_drvdata(pdev);
	if (!acpm) {
		dev_err(dev, "Cannot get drvdata from %s\n",
			dev_name(&pdev->dev));
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (!try_module_get(pdev->dev.driver->owner)) {
		dev_err(dev, "Cannot get module reference.\n");
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
	if (!link) {
		dev_err(&pdev->dev,
			"Failed to create device link to consumer %s.\n",
			dev_name(dev));
		platform_device_put(pdev);
		module_put(pdev->dev.driver->owner);
		return ERR_PTR(-EINVAL);
	}

	return &acpm->handle;
}

/**
 * devm_acpm_get_by_phandle() - managed get handle using phandle.
 * @dev:        device pointer requesting ACPM handle.
 * @property:   property name containing phandle on ACPM node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
						   const char *property)
{
	const struct acpm_handle **ptr, *handle;

	ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	handle = acpm_get_by_phandle(dev, property);
	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
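
/*
 * Illustrative sketch (not part of the driver): a consumer references this
 * controller through a DT phandle and then talks to the firmware via the ops
 * embedded in the returned handle. The property name is made up for the
 * example; the real one is defined by the consumer's binding:
 *
 *	const struct acpm_handle *handle;
 *
 *	handle = devm_acpm_get_by_phandle(dev, "exynos,acpm-ipc");
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * After that, handle->ops.pmic_ops (see
 * <linux/firmware/samsung/exynos-acpm-protocol.h>) exposes read_reg,
 * bulk_read, write_reg, bulk_write and update_reg, all routed through
 * acpm_do_xfer() above.
 */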

static const struct acpm_match_data acpm_gs101 = {
	.initdata_base = ACPM_GS101_INITDATA_BASE,
};

static const struct of_device_id acpm_match[] = {
	{
		.compatible = "google,gs101-acpm-ipc",
		.data = &acpm_gs101,
	},
	{},
};
MODULE_DEVICE_TABLE(of, acpm_match);

static struct platform_driver acpm_driver = {
	.probe	= acpm_probe,
	.driver	= {
		.name = "exynos-acpm-protocol",
		.of_match_table	= acpm_match,
	},
};
module_platform_driver(acpm_driver);

MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
MODULE_LICENSE("GPL");