xref: /linux/drivers/firmware/ti_sci.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments System Control Interface Protocol Driver
4  *
5  * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/
6  *	Nishanth Menon
7  */
8 
9 #define pr_fmt(fmt) "%s: " fmt, __func__
10 
11 #include <linux/bitmap.h>
12 #include <linux/cpu.h>
13 #include <linux/debugfs.h>
14 #include <linux/export.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/mailbox_client.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_platform.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_qos.h>
24 #include <linux/property.h>
25 #include <linux/semaphore.h>
26 #include <linux/slab.h>
27 #include <linux/soc/ti/ti-msgmgr.h>
28 #include <linux/soc/ti/ti_sci_protocol.h>
29 #include <linux/suspend.h>
30 #include <linux/sys_soc.h>
31 #include <linux/reboot.h>
32 
33 #include "ti_sci.h"
34 
35 /* List of all TI SCI devices active in the system */
36 static LIST_HEAD(ti_sci_list);
37 /* Protection for the entire list */
38 static DEFINE_MUTEX(ti_sci_list_mutex);
39 
40 /**
41  * struct ti_sci_xfer - Structure representing a message flow
42  * @tx_message:	Transmit message
43  * @rx_len:	Receive message length
44  * @xfer_buf:	Preallocated buffer to store receive message
45  *		Since we work with a request-ACK protocol, we can
46  *		reuse the same buffer for the rx path as we
47  *		use for the tx path.
48  * @done:	completion event
49  */
50 struct ti_sci_xfer {
51 	struct ti_msgmgr_message tx_message;
52 	u8 rx_len;
53 	u8 *xfer_buf;
54 	struct completion done;
55 };
56 
57 /**
58  * struct ti_sci_xfers_info - Structure to manage transfer information
59  * @sem_xfer_count:	Counting semaphore for managing max simultaneous
60  *			messages.
61  * @xfer_block:		Preallocated Message array
62  * @xfer_alloc_table:	Bitmap table for allocated messages.
63  *			Index of this bitmap table is also used for message
64  *			sequence identifier.
65  * @xfer_lock:		Protection for message allocation
66  */
67 struct ti_sci_xfers_info {
68 	struct semaphore sem_xfer_count;
69 	struct ti_sci_xfer *xfer_block;
70 	unsigned long *xfer_alloc_table;
71 	/* protect transfer allocation */
72 	spinlock_t xfer_lock;
73 };
74 
75 /**
76  * struct ti_sci_desc - Description of SoC integration
77  * @default_host_id:	Host identifier representing the compute entity
78  * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
79  * @max_msgs: Maximum number of messages that can be pending
80  *		  simultaneously in the system
81  * @max_msg_size: Maximum size of data per message that can be handled.
82  */
83 struct ti_sci_desc {
84 	u8 default_host_id;
85 	int max_rx_timeout_ms;
86 	int max_msgs;
87 	int max_msg_size;
88 };
89 
90 /**
91  * struct ti_sci_info - Structure representing a TI SCI instance
92  * @dev:	Device pointer
93  * @desc:	SoC description for this instance
94  * @d:		Debugfs file entry
95  * @debug_region: Memory region where the debug messages are available
96  * @debug_region_size: Debug region size
97  * @debug_buffer: Buffer allocated to copy debug messages.
98  * @handle:	Instance of TI SCI handle to send to clients.
99  * @cl:		Mailbox Client
100  * @chan_tx:	Transmit mailbox channel
101  * @chan_rx:	Receive mailbox channel
102  * @minfo:	Message info
103  * @node:	list head
104  * @host_id:	Host ID
105  * @fw_caps:	FW/SoC low power capabilities
106  * @users:	Number of users of this instance
107  */
108 struct ti_sci_info {
109 	struct device *dev;
110 	const struct ti_sci_desc *desc;
111 	struct dentry *d;
112 	void __iomem *debug_region;
113 	char *debug_buffer;
114 	size_t debug_region_size;
115 	struct ti_sci_handle handle;
116 	struct mbox_client cl;
117 	struct mbox_chan *chan_tx;
118 	struct mbox_chan *chan_rx;
119 	struct ti_sci_xfers_info minfo;
120 	struct list_head node;
121 	u8 host_id;
122 	u64 fw_caps;
123 	/* protected by ti_sci_list_mutex */
124 	int users;
125 };
126 
127 #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
128 #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
129 
130 #ifdef CONFIG_DEBUG_FS
131 
132 /**
133  * ti_sci_debug_show() - Helper to dump the debug log
134  * @s:	sequence file pointer
135  * @unused:	unused.
136  *
137  * Return: 0
138  */
139 static int ti_sci_debug_show(struct seq_file *s, void *unused)
140 {
141 	struct ti_sci_info *info = s->private;
142 
143 	memcpy_fromio(info->debug_buffer, info->debug_region,
144 		      info->debug_region_size);
145 	/*
146 	 * We don't trust firmware to NUL-terminate the last byte (hence
147 	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
148 	 * specific data format for debug messages, we just present the data
149 	 * in the buffer as is - we expect the messages to be self-explanatory.
150 	 */
151 	seq_puts(s, info->debug_buffer);
152 	return 0;
153 }
154 
155 /* Provide the log file operations interface (generates ti_sci_debug_fops) */
156 DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
157 
158 /**
159  * ti_sci_debugfs_create() - Create log debug file
160  * @pdev:	platform device pointer
161  * @info:	Pointer to SCI entity information
162  *
163  * Return: 0 if all went fine, else corresponding error.
164  */
165 static int ti_sci_debugfs_create(struct platform_device *pdev,
166 				 struct ti_sci_info *info)
167 {
168 	struct device *dev = &pdev->dev;
169 	struct resource *res;
170 	char debug_name[50];
171 
172 	/* Debug region is optional */
173 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
174 					   "debug_messages");
175 	info->debug_region = devm_ioremap_resource(dev, res);
176 	if (IS_ERR(info->debug_region))
177 		return 0;
178 	info->debug_region_size = resource_size(res);
179 
180 	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
181 					  sizeof(char), GFP_KERNEL);
182 	if (!info->debug_buffer)
183 		return -ENOMEM;
184 	/* Set up NUL termination */
185 	info->debug_buffer[info->debug_region_size] = 0;
186 
187 	snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
188 		 dev_name(dev));
189 	info->d = debugfs_create_file(debug_name, 0444, NULL, info,
190 				      &ti_sci_debug_fops);
191 	if (IS_ERR(info->d))
192 		return PTR_ERR(info->d);
193 
194 	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
195 		info->debug_region, info->debug_region_size, res);
196 	return 0;
197 }
198 
199 #else /* CONFIG_DEBUG_FS */
200 static inline int ti_sci_debugfs_create(struct platform_device *dev,
201 					struct ti_sci_info *info)
202 {
203 	return 0;
204 }
205 
206 static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
207 					  struct ti_sci_info *info)
208 {
209 }
210 #endif /* CONFIG_DEBUG_FS */
211 
212 /**
213  * ti_sci_dump_header_dbg() - Helper to dump a message header.
214  * @dev:	Device pointer corresponding to the SCI entity
215  * @hdr:	pointer to header.
216  */
217 static inline void ti_sci_dump_header_dbg(struct device *dev,
218 					  struct ti_sci_msg_hdr *hdr)
219 {
220 	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
221 		hdr->type, hdr->host, hdr->seq, hdr->flags);
222 }
223 
224 /**
225  * ti_sci_rx_callback() - mailbox client callback for receive messages
226  * @cl:	client pointer
227  * @m:	mailbox message
228  *
229  * Processes one received message to appropriate transfer information and
230  * signals completion of the transfer.
231  *
232  * NOTE: This function will be invoked in IRQ context, hence it should
233  * do as little work as possible.
234  */
235 static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
236 {
237 	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
238 	struct device *dev = info->dev;
239 	struct ti_sci_xfers_info *minfo = &info->minfo;
240 	struct ti_msgmgr_message *mbox_msg = m;
241 	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
242 	struct ti_sci_xfer *xfer;
243 	u8 xfer_id;
244 
245 	xfer_id = hdr->seq;
246 
247 	/*
248 	 * Are we even expecting this?
249 	 * NOTE: barriers are implicit in the locks used for modifying the bitmap
250 	 */
251 	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
252 		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
253 		return;
254 	}
255 
256 	xfer = &minfo->xfer_block[xfer_id];
257 
258 	/* Is the message of valid length? */
259 	if (mbox_msg->len > info->desc->max_msg_size) {
260 		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
261 			mbox_msg->len, info->desc->max_msg_size);
262 		ti_sci_dump_header_dbg(dev, hdr);
263 		return;
264 	}
265 	if (mbox_msg->len < xfer->rx_len) {
266 		dev_err(dev, "Recv xfer %zu < expected %d length\n",
267 			mbox_msg->len, xfer->rx_len);
268 		ti_sci_dump_header_dbg(dev, hdr);
269 		return;
270 	}
271 
272 	ti_sci_dump_header_dbg(dev, hdr);
273 	/* Take a copy into the rx buffer. */
274 	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
275 	complete(&xfer->done);
276 }
277 
278 /**
279  * ti_sci_get_one_xfer() - Allocate one message
280  * @info:	Pointer to SCI entity information
281  * @msg_type:	Message type
282  * @msg_flags:	Flag to set for the message
283  * @tx_message_size: transmit message size
284  * @rx_message_size: receive message size
285  *
286  * Helper function used by the various command functions that are
287  * exposed to clients of this driver to allocate a message.
288  *
289  * This function can sleep depending on pending requests already in the system
290  * for the SCI entity. Further, this also holds a spinlock to maintain integrity
291  * of internal data structures.
292  *
293  * Return: Valid ti_sci_xfer pointer on success, else ERR_PTR() on failure.
294  */
295 static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
296 					       u16 msg_type, u32 msg_flags,
297 					       size_t tx_message_size,
298 					       size_t rx_message_size)
299 {
300 	struct ti_sci_xfers_info *minfo = &info->minfo;
301 	struct ti_sci_xfer *xfer;
302 	struct ti_sci_msg_hdr *hdr;
303 	unsigned long flags;
304 	unsigned long bit_pos;
305 	u8 xfer_id;
306 	int ret;
307 	int timeout;
308 
309 	/* Ensure we have sane transfer sizes */
310 	if (rx_message_size > info->desc->max_msg_size ||
311 	    tx_message_size > info->desc->max_msg_size ||
312 	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
313 		return ERR_PTR(-ERANGE);
314 
315 	/*
316 	 * Ensure we have only a controlled number of pending messages.
317 	 * Ideally, we might just have to wait for a single message, but be
318 	 * conservative and wait 5 times that.
319 	 */
320 	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
321 	ret = down_timeout(&minfo->sem_xfer_count, timeout);
322 	if (ret < 0)
323 		return ERR_PTR(ret);
324 
325 	/* Keep the locked section as small as possible */
326 	spin_lock_irqsave(&minfo->xfer_lock, flags);
327 	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
328 				      info->desc->max_msgs);
329 	set_bit(bit_pos, minfo->xfer_alloc_table);
330 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
331 
332 	/*
333 	 * We already ensured in probe that the maximum number of messages
334 	 * fits in hdr.seq - NOTE: this gives us predictable O(1) access
335 	 * latencies, BUT it opens us to risk if the remote misbehaves with
336 	 * corrupted message sequence responses. If that happens, we are in
337 	 * trouble anyway.
338 	 */
339 	xfer_id = (u8)bit_pos;
340 
341 	xfer = &minfo->xfer_block[xfer_id];
342 
343 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
344 	xfer->tx_message.len = tx_message_size;
345 	xfer->tx_message.chan_rx = info->chan_rx;
346 	xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
347 	xfer->rx_len = (u8)rx_message_size;
348 
349 	reinit_completion(&xfer->done);
350 
351 	hdr->seq = xfer_id;
352 	hdr->type = msg_type;
353 	hdr->host = info->host_id;
354 	hdr->flags = msg_flags;
355 
356 	return xfer;
357 }
358 
359 /**
360  * ti_sci_put_one_xfer() - Release a message
361  * @minfo:	transfer info pointer
362  * @xfer:	message that was reserved by ti_sci_get_one_xfer
363  *
364  * This holds a spinlock to maintain integrity of internal data structures.
365  */
366 static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
367 				struct ti_sci_xfer *xfer)
368 {
369 	unsigned long flags;
370 	struct ti_sci_msg_hdr *hdr;
371 	u8 xfer_id;
372 
373 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
374 	xfer_id = hdr->seq;
375 
376 	/*
377 	 * Keep the locked section as small as possible
378 	 * NOTE: we might get away with an smp_mb() and no lock here,
379 	 * but just be conservative and symmetric.
380 	 */
381 	spin_lock_irqsave(&minfo->xfer_lock, flags);
382 	clear_bit(xfer_id, minfo->xfer_alloc_table);
383 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
384 
385 	/* Increment the count for the next user to get through */
386 	up(&minfo->sem_xfer_count);
387 }
388 
389 /**
390  * ti_sci_do_xfer() - Do one transfer
391  * @info:	Pointer to SCI entity information
392  * @xfer:	Transfer to initiate and wait for response
393  *
394  * Return: -ETIMEDOUT in case of no response; on a transmit
395  *	   error, the corresponding error; else, if all goes
396  *	   well, 0.
397  */
398 static inline int ti_sci_do_xfer(struct ti_sci_info *info,
399 				 struct ti_sci_xfer *xfer)
400 {
401 	int ret;
402 	int timeout;
403 	struct device *dev = info->dev;
404 	bool done_state = true;
405 
406 	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
407 	if (ret < 0)
408 		return ret;
409 
410 	ret = 0;
411 
412 	if (system_state <= SYSTEM_RUNNING) {
413 		/* And we wait for the response. */
414 		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
415 		if (!wait_for_completion_timeout(&xfer->done, timeout))
416 			ret = -ETIMEDOUT;
417 	} else {
418 		/*
419 		 * If we are !running, we cannot use wait_for_completion_timeout
420 		 * during noirq phase, so we must manually poll the completion.
421 		 */
422 		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
423 					       done_state, 1,
424 					       info->desc->max_rx_timeout_ms * 1000,
425 					       false, &xfer->done);
426 	}
427 
428 	if (ret == -ETIMEDOUT)
429 		dev_err(dev, "Mbox timed out in resp (caller: %pS)\n",
430 			(void *)_RET_IP_);
431 
432 	/*
433 	 * NOTE: we might prefer not to need the mailbox ticker to manage the
434 	 * transfer queueing since the protocol layer queues things by itself.
435 	 * Unfortunately, we have to kick the mailbox framework after we have
436 	 * received our message.
437 	 */
438 	mbox_client_txdone(info->chan_tx, ret);
439 
440 	return ret;
441 }
442 
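/*
 * Every command wrapper below follows the same request/response pattern
 * around ti_sci_do_xfer(). A minimal sketch (illustrative only; msg_type
 * and the request struct are placeholders):
 *
 *	struct ti_sci_msg_hdr *resp;
 *	struct ti_sci_xfer *xfer;
 *	int ret;
 *
 *	xfer = ti_sci_get_one_xfer(info, msg_type,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	... fill the request fields via xfer->xfer_buf ...
 *	ret = ti_sci_do_xfer(info, xfer);
 *	if (!ret) {
 *		resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
 *		ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	}
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 *	return ret;
 */
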
443 /**
444  * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
445  * @info:	Pointer to SCI entity information
446  *
447  * Updates the SCI information in the internal data structure.
448  *
449  * Return: 0 if all went fine, else return appropriate error.
450  */
451 static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
452 {
453 	struct device *dev = info->dev;
454 	struct ti_sci_handle *handle = &info->handle;
455 	struct ti_sci_version_info *ver = &handle->version;
456 	struct ti_sci_msg_resp_version *rev_info;
457 	struct ti_sci_xfer *xfer;
458 	int ret;
459 
460 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
461 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
462 				   sizeof(struct ti_sci_msg_hdr),
463 				   sizeof(*rev_info));
464 	if (IS_ERR(xfer)) {
465 		ret = PTR_ERR(xfer);
466 		dev_err(dev, "Message alloc failed(%d)\n", ret);
467 		return ret;
468 	}
469 
470 	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
471 
472 	ret = ti_sci_do_xfer(info, xfer);
473 	if (ret) {
474 		dev_err(dev, "Mbox send fail %d\n", ret);
475 		goto fail;
476 	}
477 
478 	ver->abi_major = rev_info->abi_major;
479 	ver->abi_minor = rev_info->abi_minor;
480 	ver->firmware_revision = rev_info->firmware_revision;
481 	strscpy(ver->firmware_description, rev_info->firmware_description,
482 		sizeof(ver->firmware_description));
483 
484 fail:
485 	ti_sci_put_one_xfer(&info->minfo, xfer);
486 	return ret;
487 }
488 
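/*
 * After a successful call, a client can read the populated version
 * information straight from the handle, e.g. (hypothetical usage sketch):
 *
 *	const struct ti_sci_version_info *ver = &handle->version;
 *
 *	dev_info(dev, "ABI %d.%d, firmware rev 0x%x '%s'\n",
 *		 ver->abi_major, ver->abi_minor, ver->firmware_revision,
 *		 ver->firmware_description);
 */
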
489 /**
490  * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
491  * @r:	pointer to response buffer
492  *
493  * Return: true if the response was an ACK, else returns false.
494  */
495 static inline bool ti_sci_is_response_ack(void *r)
496 {
497 	struct ti_sci_msg_hdr *hdr = r;
498 
499 	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
500 }
501 
502 /**
503  * ti_sci_set_device_state() - Set device state helper
504  * @handle:	pointer to TI SCI handle
505  * @id:		Device identifier
506  * @flags:	flags to setup for the device
507  * @state:	State to move the device to
508  *
509  * Return: 0 if all went well, else returns appropriate error value.
510  */
511 static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
512 				   u32 id, u32 flags, u8 state)
513 {
514 	struct ti_sci_info *info;
515 	struct ti_sci_msg_req_set_device_state *req;
516 	struct ti_sci_msg_hdr *resp;
517 	struct ti_sci_xfer *xfer;
518 	struct device *dev;
519 	int ret = 0;
520 
521 	if (IS_ERR(handle))
522 		return PTR_ERR(handle);
523 	if (!handle)
524 		return -EINVAL;
525 
526 	info = handle_to_ti_sci_info(handle);
527 	dev = info->dev;
528 
529 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
530 				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
531 				   sizeof(*req), sizeof(*resp));
532 	if (IS_ERR(xfer)) {
533 		ret = PTR_ERR(xfer);
534 		dev_err(dev, "Message alloc failed(%d)\n", ret);
535 		return ret;
536 	}
537 	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
538 	req->id = id;
539 	req->state = state;
540 
541 	ret = ti_sci_do_xfer(info, xfer);
542 	if (ret) {
543 		dev_err(dev, "Mbox send fail %d\n", ret);
544 		goto fail;
545 	}
546 
547 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
548 
549 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
550 
551 fail:
552 	ti_sci_put_one_xfer(&info->minfo, xfer);
553 
554 	return ret;
555 }
556 
557 /**
558  * ti_sci_get_device_state() - Get device state helper
559  * @handle:	Handle to the device
560  * @id:		Device Identifier
561  * @clcnt:	Pointer to Context Loss Count
562  * @resets:	pointer to resets
563  * @p_state:	pointer to the programmed (requested) state
564  * @c_state:	pointer to the current state
565  *
566  * Return: 0 if all went fine, else return appropriate error.
567  */
568 static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
569 				   u32 id,  u32 *clcnt,  u32 *resets,
570 				    u8 *p_state,  u8 *c_state)
571 {
572 	struct ti_sci_info *info;
573 	struct ti_sci_msg_req_get_device_state *req;
574 	struct ti_sci_msg_resp_get_device_state *resp;
575 	struct ti_sci_xfer *xfer;
576 	struct device *dev;
577 	int ret = 0;
578 
579 	if (IS_ERR(handle))
580 		return PTR_ERR(handle);
581 	if (!handle)
582 		return -EINVAL;
583 
584 	if (!clcnt && !resets && !p_state && !c_state)
585 		return -EINVAL;
586 
587 	info = handle_to_ti_sci_info(handle);
588 	dev = info->dev;
589 
590 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
591 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
592 				   sizeof(*req), sizeof(*resp));
593 	if (IS_ERR(xfer)) {
594 		ret = PTR_ERR(xfer);
595 		dev_err(dev, "Message alloc failed(%d)\n", ret);
596 		return ret;
597 	}
598 	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
599 	req->id = id;
600 
601 	ret = ti_sci_do_xfer(info, xfer);
602 	if (ret) {
603 		dev_err(dev, "Mbox send fail %d\n", ret);
604 		goto fail;
605 	}
606 
607 	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
608 	if (!ti_sci_is_response_ack(resp)) {
609 		ret = -ENODEV;
610 		goto fail;
611 	}
612 
613 	if (clcnt)
614 		*clcnt = resp->context_loss_count;
615 	if (resets)
616 		*resets = resp->resets;
617 	if (p_state)
618 		*p_state = resp->programmed_state;
619 	if (c_state)
620 		*c_state = resp->current_state;
621 fail:
622 	ti_sci_put_one_xfer(&info->minfo, xfer);
623 
624 	return ret;
625 }
626 
627 /**
628  * ti_sci_cmd_get_device() - command to request a device managed by TISCI
629  *			     that can be shared with other hosts.
630  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
631  * @id:		Device Identifier
632  *
633  * Request the device - NOTE: the client MUST maintain integrity of the
634  * usage count by balancing get_device with put_device. No refcounting is
635  * managed by the driver for that purpose.
636  *
637  * Return: 0 if all went fine, else return appropriate error.
638  */
639 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
640 {
641 	return ti_sci_set_device_state(handle, id, 0,
642 				       MSG_DEVICE_SW_STATE_ON);
643 }
644 
645 /**
646  * ti_sci_cmd_get_device_exclusive() - command to request a device managed by
647  *				       TISCI that is exclusively owned by the
648  *				       requesting host.
649  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
650  * @id:		Device Identifier
651  *
652  * Request the device - NOTE: the client MUST maintain integrity of the
653  * usage count by balancing get_device with put_device. No refcounting is
654  * managed by the driver for that purpose.
655  *
656  * Return: 0 if all went fine, else return appropriate error.
657  */
658 static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
659 					   u32 id)
660 {
661 	return ti_sci_set_device_state(handle, id,
662 				       MSG_FLAG_DEVICE_EXCLUSIVE,
663 				       MSG_DEVICE_SW_STATE_ON);
664 }
665 
666 /**
667  * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
668  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
669  * @id:		Device Identifier
670  *
671  * Request to idle the device - NOTE: the client MUST maintain integrity
672  * of the usage count by balancing get_device with put_device. No
673  * refcounting is managed by the driver for that purpose.
674  *
675  * Return: 0 if all went fine, else return appropriate error.
676  */
677 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
678 {
679 	return ti_sci_set_device_state(handle, id, 0,
680 				       MSG_DEVICE_SW_STATE_RETENTION);
681 }
682 
683 /**
684  * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
685  *					TISCI that is exclusively owned by
686  *					requesting host.
687  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
688  * @id:		Device Identifier
689  *
690  * Request to idle the device - NOTE: the client MUST maintain integrity
691  * of the usage count by balancing get_device with put_device. No
692  * refcounting is managed by the driver for that purpose.
693  *
694  * Return: 0 if all went fine, else return appropriate error.
695  */
696 static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
697 					    u32 id)
698 {
699 	return ti_sci_set_device_state(handle, id,
700 				       MSG_FLAG_DEVICE_EXCLUSIVE,
701 				       MSG_DEVICE_SW_STATE_RETENTION);
702 }
703 
704 /**
705  * ti_sci_cmd_put_device() - command to release a device managed by TISCI
706  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
707  * @id:		Device Identifier
708  *
709  * Release the device - NOTE: the client MUST maintain integrity of the
710  * usage count by balancing get_device with put_device. No refcounting is
711  * managed by the driver for that purpose.
712  *
713  * Return: 0 if all went fine, else return appropriate error.
714  */
715 static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
716 {
717 	return ti_sci_set_device_state(handle, id,
718 				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
719 }
720 
721 /**
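/*
 * As the NOTEs above spell out, none of these calls are refcounted by
 * the driver; a hypothetical client balances them itself:
 *
 *	ret = ti_sci_cmd_get_device(handle, dev_id);
 *	if (ret)
 *		return ret;
 *	... use the device ...
 *	ti_sci_cmd_put_device(handle, dev_id);
 */
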
722  * ti_sci_cmd_dev_is_valid() - Is the device valid
723  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
724  * @id:		Device Identifier
725  *
726  * Return: 0 if all went fine and the device ID is valid, else return
727  * appropriate error.
728  */
729 static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
730 {
731 	u8 unused;
732 
733 	/* check the device state which will also tell us if the ID is valid */
734 	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
735 }
736 
737 /**
738  * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
739  * @handle:	Pointer to TISCI handle
740  * @id:		Device Identifier
741  * @count:	Pointer to Context Loss counter to populate
742  *
743  * Return: 0 if all went fine, else return appropriate error.
744  */
745 static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
746 				    u32 *count)
747 {
748 	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
749 }
750 
751 /**
752  * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
753  * @handle:	Pointer to TISCI handle
754  * @id:		Device Identifier
755  * @r_state:	true if requested to be idle
756  *
757  * Return: 0 if all went fine, else return appropriate error.
758  */
759 static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
760 				  bool *r_state)
761 {
762 	int ret;
763 	u8 state;
764 
765 	if (!r_state)
766 		return -EINVAL;
767 
768 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
769 	if (ret)
770 		return ret;
771 
772 	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
773 
774 	return 0;
775 }
776 
777 /**
778  * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
779  * @handle:	Pointer to TISCI handle
780  * @id:		Device Identifier
781  * @r_state:	true if requested to be stopped
782  * @curr_state:	true if currently stopped.
783  *
784  * Return: 0 if all went fine, else return appropriate error.
785  */
786 static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
787 				  bool *r_state,  bool *curr_state)
788 {
789 	int ret;
790 	u8 p_state, c_state;
791 
792 	if (!r_state && !curr_state)
793 		return -EINVAL;
794 
795 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &p_state,
796 				      &c_state);
797 	if (ret)
798 		return ret;
799 
800 	if (r_state)
801 		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
802 	if (curr_state)
803 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
804 
805 	return 0;
806 }
807 
808 /**
809  * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
810  * @handle:	Pointer to TISCI handle
811  * @id:		Device Identifier
812  * @r_state:	true if requested to be ON
813  * @curr_state:	true if currently ON and active
814  *
815  * Return: 0 if all went fine, else return appropriate error.
816  */
817 static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
818 				bool *r_state,  bool *curr_state)
819 {
820 	int ret;
821 	u8 p_state, c_state;
822 
823 	if (!r_state && !curr_state)
824 		return -EINVAL;
825 
826 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &p_state,
827 				      &c_state);
828 	if (ret)
829 		return ret;
830 
831 	if (r_state)
832 		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
833 	if (curr_state)
834 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
835 
836 	return 0;
837 }
838 
839 /**
840  * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
841  * @handle:	Pointer to TISCI handle
842  * @id:		Device Identifier
843  * @curr_state:	true if currently transitioning.
844  *
845  * Return: 0 if all went fine, else return appropriate error.
846  */
847 static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
848 				   bool *curr_state)
849 {
850 	int ret;
851 	u8 state;
852 
853 	if (!curr_state)
854 		return -EINVAL;
855 
856 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
857 	if (ret)
858 		return ret;
859 
860 	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
861 
862 	return 0;
863 }
864 
865 /**
866  * ti_sci_cmd_set_device_resets() - command to set resets for device managed
867  *				    by TISCI
868  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
869  * @id:		Device Identifier
870  * @reset_state: Device specific reset bit field
871  *
872  * Return: 0 if all went fine, else return appropriate error.
873  */
874 static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
875 					u32 id, u32 reset_state)
876 {
877 	struct ti_sci_info *info;
878 	struct ti_sci_msg_req_set_device_resets *req;
879 	struct ti_sci_msg_hdr *resp;
880 	struct ti_sci_xfer *xfer;
881 	struct device *dev;
882 	int ret = 0;
883 
884 	if (IS_ERR(handle))
885 		return PTR_ERR(handle);
886 	if (!handle)
887 		return -EINVAL;
888 
889 	info = handle_to_ti_sci_info(handle);
890 	dev = info->dev;
891 
892 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
893 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
894 				   sizeof(*req), sizeof(*resp));
895 	if (IS_ERR(xfer)) {
896 		ret = PTR_ERR(xfer);
897 		dev_err(dev, "Message alloc failed(%d)\n", ret);
898 		return ret;
899 	}
900 	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
901 	req->id = id;
902 	req->resets = reset_state;
903 
904 	ret = ti_sci_do_xfer(info, xfer);
905 	if (ret) {
906 		dev_err(dev, "Mbox send fail %d\n", ret);
907 		goto fail;
908 	}
909 
910 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
911 
912 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
913 
914 fail:
915 	ti_sci_put_one_xfer(&info->minfo, xfer);
916 
917 	return ret;
918 }
919 
920 /**
921  * ti_sci_cmd_get_device_resets() - Get reset state for device managed
922  *				    by TISCI
923  * @handle:		Pointer to TISCI handle
924  * @id:			Device Identifier
925  * @reset_state:	Pointer to reset state to populate
926  *
927  * Return: 0 if all went fine, else return appropriate error.
928  */
929 static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
930 					u32 id, u32 *reset_state)
931 {
932 	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
933 				       NULL);
934 }
935 
936 /**
937  * ti_sci_set_clock_state() - Set clock state helper
938  * @handle:	pointer to TI SCI handle
939  * @dev_id:	Device identifier this request is for
940  * @clk_id:	Clock identifier for the device for this request.
941  *		Each device has its own set of clock inputs. This indexes
942  *		which clock input to modify.
943  * @flags:	Header flags as needed
944  * @state:	State to request for the clock.
945  *
946  * Return: 0 if all went well, else returns appropriate error value.
947  */
948 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
949 				  u32 dev_id, u32 clk_id,
950 				  u32 flags, u8 state)
951 {
952 	struct ti_sci_info *info;
953 	struct ti_sci_msg_req_set_clock_state *req;
954 	struct ti_sci_msg_hdr *resp;
955 	struct ti_sci_xfer *xfer;
956 	struct device *dev;
957 	int ret = 0;
958 
959 	if (IS_ERR(handle))
960 		return PTR_ERR(handle);
961 	if (!handle)
962 		return -EINVAL;
963 
964 	info = handle_to_ti_sci_info(handle);
965 	dev = info->dev;
966 
967 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
968 				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
969 				   sizeof(*req), sizeof(*resp));
970 	if (IS_ERR(xfer)) {
971 		ret = PTR_ERR(xfer);
972 		dev_err(dev, "Message alloc failed(%d)\n", ret);
973 		return ret;
974 	}
975 	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
976 	req->dev_id = dev_id;
977 	if (clk_id < 255) {
978 		req->clk_id = clk_id;
979 	} else {
980 		req->clk_id = 255;
981 		req->clk_id_32 = clk_id;
982 	}
983 	req->request_state = state;
984 
985 	ret = ti_sci_do_xfer(info, xfer);
986 	if (ret) {
987 		dev_err(dev, "Mbox send fail %d\n", ret);
988 		goto fail;
989 	}
990 
991 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
992 
993 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
994 
995 fail:
996 	ti_sci_put_one_xfer(&info->minfo, xfer);
997 
998 	return ret;
999 }
1000 
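/*
 * The clk_id field in these messages is only 8 bits wide; the clock
 * commands below all open-code the same escape: IDs below 255 go into
 * clk_id directly, otherwise clk_id is set to the magic value 255 and
 * the real ID goes into the 32-bit clk_id_32 field. A hypothetical
 * helper capturing the pattern:
 *
 *	static void ti_sci_msg_fill_clk_id(struct ti_sci_msg_req_set_clock_state *req,
 *					   u32 clk_id)
 *	{
 *		if (clk_id < 255) {
 *			req->clk_id = clk_id;
 *		} else {
 *			req->clk_id = 255;
 *			req->clk_id_32 = clk_id;
 *		}
 *	}
 */
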
1001 /**
1002  * ti_sci_cmd_get_clock_state() - Get clock state helper
1003  * @handle:	pointer to TI SCI handle
1004  * @dev_id:	Device identifier this request is for
1005  * @clk_id:	Clock identifier for the device for this request.
1006  *		Each device has its own set of clock inputs. This indexes
1007  *		which clock input to modify.
1008  * @programmed_state:	State requested for clock to move to
1009  * @current_state:	State that the clock is currently in
1010  *
1011  * Return: 0 if all went well, else returns appropriate error value.
1012  */
1013 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
1014 				      u32 dev_id, u32 clk_id,
1015 				      u8 *programmed_state, u8 *current_state)
1016 {
1017 	struct ti_sci_info *info;
1018 	struct ti_sci_msg_req_get_clock_state *req;
1019 	struct ti_sci_msg_resp_get_clock_state *resp;
1020 	struct ti_sci_xfer *xfer;
1021 	struct device *dev;
1022 	int ret = 0;
1023 
1024 	if (IS_ERR(handle))
1025 		return PTR_ERR(handle);
1026 	if (!handle)
1027 		return -EINVAL;
1028 
1029 	if (!programmed_state && !current_state)
1030 		return -EINVAL;
1031 
1032 	info = handle_to_ti_sci_info(handle);
1033 	dev = info->dev;
1034 
1035 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1036 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1037 				   sizeof(*req), sizeof(*resp));
1038 	if (IS_ERR(xfer)) {
1039 		ret = PTR_ERR(xfer);
1040 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1041 		return ret;
1042 	}
1043 	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
1044 	req->dev_id = dev_id;
1045 	if (clk_id < 255) {
1046 		req->clk_id = clk_id;
1047 	} else {
1048 		req->clk_id = 255;
1049 		req->clk_id_32 = clk_id;
1050 	}
1051 
1052 	ret = ti_sci_do_xfer(info, xfer);
1053 	if (ret) {
1054 		dev_err(dev, "Mbox send fail %d\n", ret);
1055 		goto fail;
1056 	}
1057 
1058 	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
1059 
1060 	if (!ti_sci_is_response_ack(resp)) {
1061 		ret = -ENODEV;
1062 		goto fail;
1063 	}
1064 
1065 	if (programmed_state)
1066 		*programmed_state = resp->programmed_state;
1067 	if (current_state)
1068 		*current_state = resp->current_state;
1069 
1070 fail:
1071 	ti_sci_put_one_xfer(&info->minfo, xfer);
1072 
1073 	return ret;
1074 }
1075 
1076 /**
1077  * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
1078  * @handle:	pointer to TI SCI handle
1079  * @dev_id:	Device identifier this request is for
1080  * @clk_id:	Clock identifier for the device for this request.
1081  *		Each device has its own set of clock inputs. This indexes
1082  *		which clock input to modify.
1083  * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
1084  * @can_change_freq: 'true' if frequency change is desired, else 'false'
1085  * @enable_input_term: 'true' if input termination is desired, else 'false'
1086  *
1087  * Return: 0 if all went well, else returns appropriate error value.
1088  */
1089 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1090 				u32 clk_id, bool needs_ssc,
1091 				bool can_change_freq, bool enable_input_term)
1092 {
1093 	u32 flags = 0;
1094 
1095 	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1096 	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1097 	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1098 
1099 	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1100 				      MSG_CLOCK_SW_STATE_REQ);
1101 }
1102 
1103 /**
1104  * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
1105  * @handle:	pointer to TI SCI handle
1106  * @dev_id:	Device identifier this request is for
1107  * @clk_id:	Clock identifier for the device for this request.
1108  *		Each device has its own set of clock inputs. This indexes
1109  *		which clock input to modify.
1110  *
1111  * NOTE: This clock must have been requested by get_clock previously.
1112  *
1113  * Return: 0 if all went well, else returns appropriate error value.
1114  */
1115 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1116 				 u32 dev_id, u32 clk_id)
1117 {
1118 	return ti_sci_set_clock_state(handle, dev_id, clk_id,
1119 				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
1120 				      MSG_CLOCK_SW_STATE_UNREQ);
1121 }
1122 
1123 /**
1124  * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
1125  * @handle:	pointer to TI SCI handle
1126  * @dev_id:	Device identifier this request is for
1127  * @clk_id:	Clock identifier for the device for this request.
1128  *		Each device has its own set of clock inputs. This indexes
1129  *		which clock input to modify.
1130  *
1131  * NOTE: This clock must have been requested by get_clock previously.
1132  *
1133  * Return: 0 if all went well, else returns appropriate error value.
1134  */
1135 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1136 				u32 dev_id, u32 clk_id)
1137 {
1138 	return ti_sci_set_clock_state(handle, dev_id, clk_id,
1139 				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
1140 				      MSG_CLOCK_SW_STATE_AUTO);
1141 }
1142 
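/*
 * A hypothetical clock consumer pairs these calls much like the device
 * get/put calls above:
 *
 *	ret = ti_sci_cmd_get_clock(handle, dev_id, clk_id,
 *				   false, true, false);
 *	if (ret)
 *		return ret;
 *	... clock is requested for us ...
 *	ti_sci_cmd_put_clock(handle, dev_id, clk_id);
 */
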
1143 /**
1144  * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
1145  * @handle:	pointer to TI SCI handle
1146  * @dev_id:	Device identifier this request is for
1147  * @clk_id:	Clock identifier for the device for this request.
1148  *		Each device has its own set of clock inputs. This indexes
1149  *		which clock input to modify.
1150  * @req_state: state indicating if the clock is auto managed
1151  *
1152  * Return: 0 if all went well, else returns appropriate error value.
1153  */
1154 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1155 				  u32 dev_id, u32 clk_id, bool *req_state)
1156 {
1157 	u8 state = 0;
1158 	int ret;
1159 
1160 	if (!req_state)
1161 		return -EINVAL;
1162 
1163 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1164 	if (ret)
1165 		return ret;
1166 
1167 	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1168 	return 0;
1169 }
1170 
1171 /**
1172  * ti_sci_cmd_clk_is_on() - Is the clock ON
1173  * @handle:	pointer to TI SCI handle
1174  * @dev_id:	Device identifier this request is for
1175  * @clk_id:	Clock identifier for the device for this request.
1176  *		Each device has its own set of clock inputs. This indexes
1177  *		which clock input to modify.
1178  * @req_state: state indicating if the clock is managed by us and enabled
1179  * @curr_state: state indicating if the clock is ready for operation
1180  *
1181  * Return: 0 if all went well, else returns appropriate error value.
1182  */
1183 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1184 				u32 clk_id, bool *req_state, bool *curr_state)
1185 {
1186 	u8 c_state = 0, r_state = 0;
1187 	int ret;
1188 
1189 	if (!req_state && !curr_state)
1190 		return -EINVAL;
1191 
1192 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1193 					 &r_state, &c_state);
1194 	if (ret)
1195 		return ret;
1196 
1197 	if (req_state)
1198 		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1199 	if (curr_state)
1200 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1201 	return 0;
1202 }
1203 
1204 /**
1205  * ti_sci_cmd_clk_is_off() - Is the clock OFF
1206  * @handle:	pointer to TI SCI handle
1207  * @dev_id:	Device identifier this request is for
1208  * @clk_id:	Clock identifier for the device for this request.
1209  *		Each device has its own set of clock inputs. This indexes
1210  *		which clock input to modify.
1211  * @req_state: state indicating if the clock is managed by us and disabled
1212  * @curr_state: state indicating if the clock is NOT ready for operation
1213  *
1214  * Return: 0 if all went well, else returns appropriate error value.
1215  */
1216 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1217 				 u32 clk_id, bool *req_state, bool *curr_state)
1218 {
1219 	u8 c_state = 0, r_state = 0;
1220 	int ret;
1221 
1222 	if (!req_state && !curr_state)
1223 		return -EINVAL;
1224 
1225 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1226 					 &r_state, &c_state);
1227 	if (ret)
1228 		return ret;
1229 
1230 	if (req_state)
1231 		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1232 	if (curr_state)
1233 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1234 	return 0;
1235 }
1236 
1237 /**
1238  * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1239  * @handle:	pointer to TI SCI handle
1240  * @dev_id:	Device identifier this request is for
1241  * @clk_id:	Clock identifier for the device for this request.
1242  *		Each device has its own set of clock inputs. This indexes
1243  *		which clock input to modify.
1244  * @parent_id:	Parent clock identifier to set
1245  *
1246  * Return: 0 if all went well, else returns appropriate error value.
1247  */
1248 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1249 				     u32 dev_id, u32 clk_id, u32 parent_id)
1250 {
1251 	struct ti_sci_info *info;
1252 	struct ti_sci_msg_req_set_clock_parent *req;
1253 	struct ti_sci_msg_hdr *resp;
1254 	struct ti_sci_xfer *xfer;
1255 	struct device *dev;
1256 	int ret = 0;
1257 
1258 	if (IS_ERR(handle))
1259 		return PTR_ERR(handle);
1260 	if (!handle)
1261 		return -EINVAL;
1262 
1263 	info = handle_to_ti_sci_info(handle);
1264 	dev = info->dev;
1265 
1266 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1267 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1268 				   sizeof(*req), sizeof(*resp));
1269 	if (IS_ERR(xfer)) {
1270 		ret = PTR_ERR(xfer);
1271 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1272 		return ret;
1273 	}
1274 	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
1275 	req->dev_id = dev_id;
1276 	if (clk_id < 255) {
1277 		req->clk_id = clk_id;
1278 	} else {
1279 		req->clk_id = 255;
1280 		req->clk_id_32 = clk_id;
1281 	}
1282 	if (parent_id < 255) {
1283 		req->parent_id = parent_id;
1284 	} else {
1285 		req->parent_id = 255;
1286 		req->parent_id_32 = parent_id;
1287 	}
1288 
1289 	ret = ti_sci_do_xfer(info, xfer);
1290 	if (ret) {
1291 		dev_err(dev, "Mbox send fail %d\n", ret);
1292 		goto fail;
1293 	}
1294 
1295 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1296 
1297 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1298 
1299 fail:
1300 	ti_sci_put_one_xfer(&info->minfo, xfer);
1301 
1302 	return ret;
1303 }
1304 
1305 /**
1306  * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1307  * @handle:	pointer to TI SCI handle
1308  * @dev_id:	Device identifier this request is for
1309  * @clk_id:	Clock identifier for the device for this request.
1310  *		Each device has its own set of clock inputs. This indexes
1311  *		which clock input to modify.
1312  * @parent_id:	Current clock parent
1313  *
1314  * Return: 0 if all went well, else returns appropriate error value.
1315  */
1316 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1317 				     u32 dev_id, u32 clk_id, u32 *parent_id)
1318 {
1319 	struct ti_sci_info *info;
1320 	struct ti_sci_msg_req_get_clock_parent *req;
1321 	struct ti_sci_msg_resp_get_clock_parent *resp;
1322 	struct ti_sci_xfer *xfer;
1323 	struct device *dev;
1324 	int ret = 0;
1325 
1326 	if (IS_ERR(handle))
1327 		return PTR_ERR(handle);
1328 	if (!handle || !parent_id)
1329 		return -EINVAL;
1330 
1331 	info = handle_to_ti_sci_info(handle);
1332 	dev = info->dev;
1333 
1334 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1335 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1336 				   sizeof(*req), sizeof(*resp));
1337 	if (IS_ERR(xfer)) {
1338 		ret = PTR_ERR(xfer);
1339 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1340 		return ret;
1341 	}
1342 	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
1343 	req->dev_id = dev_id;
1344 	if (clk_id < 255) {
1345 		req->clk_id = clk_id;
1346 	} else {
1347 		req->clk_id = 255;
1348 		req->clk_id_32 = clk_id;
1349 	}
1350 
1351 	ret = ti_sci_do_xfer(info, xfer);
1352 	if (ret) {
1353 		dev_err(dev, "Mbox send fail %d\n", ret);
1354 		goto fail;
1355 	}
1356 
1357 	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
1358 
1359 	if (!ti_sci_is_response_ack(resp)) {
1360 		ret = -ENODEV;
1361 	} else {
1362 		if (resp->parent_id < 255)
1363 			*parent_id = resp->parent_id;
1364 		else
1365 			*parent_id = resp->parent_id_32;
1366 	}
1367 
1368 fail:
1369 	ti_sci_put_one_xfer(&info->minfo, xfer);
1370 
1371 	return ret;
1372 }
1373 
1374 /**
1375  * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1376  * @handle:	pointer to TI SCI handle
1377  * @dev_id:	Device identifier this request is for
1378  * @clk_id:	Clock identifier for the device for this request.
1379  *		Each device has its own set of clock inputs. This indexes
1380  *		which clock input to modify.
1381  * @num_parents: Returns the number of parents of the current clock.
1382  *
1383  * Return: 0 if all went well, else returns appropriate error value.
1384  */
1385 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1386 					  u32 dev_id, u32 clk_id,
1387 					  u32 *num_parents)
1388 {
1389 	struct ti_sci_info *info;
1390 	struct ti_sci_msg_req_get_clock_num_parents *req;
1391 	struct ti_sci_msg_resp_get_clock_num_parents *resp;
1392 	struct ti_sci_xfer *xfer;
1393 	struct device *dev;
1394 	int ret = 0;
1395 
1396 	if (IS_ERR(handle))
1397 		return PTR_ERR(handle);
1398 	if (!handle || !num_parents)
1399 		return -EINVAL;
1400 
1401 	info = handle_to_ti_sci_info(handle);
1402 	dev = info->dev;
1403 
1404 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1405 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1406 				   sizeof(*req), sizeof(*resp));
1407 	if (IS_ERR(xfer)) {
1408 		ret = PTR_ERR(xfer);
1409 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1410 		return ret;
1411 	}
1412 	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
1413 	req->dev_id = dev_id;
1414 	if (clk_id < 255) {
1415 		req->clk_id = clk_id;
1416 	} else {
1417 		req->clk_id = 255;
1418 		req->clk_id_32 = clk_id;
1419 	}
1420 
1421 	ret = ti_sci_do_xfer(info, xfer);
1422 	if (ret) {
1423 		dev_err(dev, "Mbox send fail %d\n", ret);
1424 		goto fail;
1425 	}
1426 
1427 	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
1428 
1429 	if (!ti_sci_is_response_ack(resp)) {
1430 		ret = -ENODEV;
1431 	} else {
1432 		if (resp->num_parents < 255)
1433 			*num_parents = resp->num_parents;
1434 		else
1435 			*num_parents = resp->num_parents_32;
1436 	}
1437 
1438 fail:
1439 	ti_sci_put_one_xfer(&info->minfo, xfer);
1440 
1441 	return ret;
1442 }
1443 
1444 /**
1445  * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1446  * @handle:	pointer to TI SCI handle
1447  * @dev_id:	Device identifier this request is for
1448  * @clk_id:	Clock identifier for the device for this request.
1449  *		Each device has its own set of clock inputs. This indexes
1450  *		which clock input to modify.
1451  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1452  *		allowable programmed frequency and does not account for clock
1453  *		tolerances and jitter.
1454  * @target_freq: The target clock frequency in Hz. A frequency as close
1455  *		to this target as possible will be chosen.
1456  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1457  *		allowable programmed frequency and does not account for clock
1458  *		tolerances and jitter.
1459  * @match_freq:	Frequency match in Hz response.
1460  *
1461  * Return: 0 if all went well, else returns appropriate error value.
1462  */
1463 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1464 					 u32 dev_id, u32 clk_id, u64 min_freq,
1465 					 u64 target_freq, u64 max_freq,
1466 					 u64 *match_freq)
1467 {
1468 	struct ti_sci_info *info;
1469 	struct ti_sci_msg_req_query_clock_freq *req;
1470 	struct ti_sci_msg_resp_query_clock_freq *resp;
1471 	struct ti_sci_xfer *xfer;
1472 	struct device *dev;
1473 	int ret = 0;
1474 
1475 	if (IS_ERR(handle))
1476 		return PTR_ERR(handle);
1477 	if (!handle || !match_freq)
1478 		return -EINVAL;
1479 
1480 	info = handle_to_ti_sci_info(handle);
1481 	dev = info->dev;
1482 
1483 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1484 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1485 				   sizeof(*req), sizeof(*resp));
1486 	if (IS_ERR(xfer)) {
1487 		ret = PTR_ERR(xfer);
1488 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1489 		return ret;
1490 	}
1491 	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
1492 	req->dev_id = dev_id;
1493 	if (clk_id < 255) {
1494 		req->clk_id = clk_id;
1495 	} else {
1496 		req->clk_id = 255;
1497 		req->clk_id_32 = clk_id;
1498 	}
1499 	req->min_freq_hz = min_freq;
1500 	req->target_freq_hz = target_freq;
1501 	req->max_freq_hz = max_freq;
1502 
1503 	ret = ti_sci_do_xfer(info, xfer);
1504 	if (ret) {
1505 		dev_err(dev, "Mbox send fail %d\n", ret);
1506 		goto fail;
1507 	}
1508 
1509 	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
1510 
1511 	if (!ti_sci_is_response_ack(resp))
1512 		ret = -ENODEV;
1513 	else
1514 		*match_freq = resp->freq_hz;
1515 
1516 fail:
1517 	ti_sci_put_one_xfer(&info->minfo, xfer);
1518 
1519 	return ret;
1520 }
1521 
1522 /**
1523  * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1524  * @handle:	pointer to TI SCI handle
1525  * @dev_id:	Device identifier this request is for
1526  * @clk_id:	Clock identifier for the device for this request.
1527  *		Each device has its own set of clock inputs. This indexes
1528  *		which clock input to modify.
1529  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1530  *		allowable programmed frequency and does not account for clock
1531  *		tolerances and jitter.
1532  * @target_freq: The target clock frequency in Hz. A frequency as close
1533  *		to this target as possible will be programmed.
1534  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1535  *		allowable programmed frequency and does not account for clock
1536  *		tolerances and jitter.
1537  *
1538  * Return: 0 if all went well, else returns appropriate error value.
1539  */
1540 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1541 				   u32 dev_id, u32 clk_id, u64 min_freq,
1542 				   u64 target_freq, u64 max_freq)
1543 {
1544 	struct ti_sci_info *info;
1545 	struct ti_sci_msg_req_set_clock_freq *req;
1546 	struct ti_sci_msg_hdr *resp;
1547 	struct ti_sci_xfer *xfer;
1548 	struct device *dev;
1549 	int ret = 0;
1550 
1551 	if (IS_ERR(handle))
1552 		return PTR_ERR(handle);
1553 	if (!handle)
1554 		return -EINVAL;
1555 
1556 	info = handle_to_ti_sci_info(handle);
1557 	dev = info->dev;
1558 
1559 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1560 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1561 				   sizeof(*req), sizeof(*resp));
1562 	if (IS_ERR(xfer)) {
1563 		ret = PTR_ERR(xfer);
1564 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1565 		return ret;
1566 	}
1567 	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
1568 	req->dev_id = dev_id;
1569 	if (clk_id < 255) {
1570 		req->clk_id = clk_id;
1571 	} else {
1572 		req->clk_id = 255;
1573 		req->clk_id_32 = clk_id;
1574 	}
1575 	req->min_freq_hz = min_freq;
1576 	req->target_freq_hz = target_freq;
1577 	req->max_freq_hz = max_freq;
1578 
1579 	ret = ti_sci_do_xfer(info, xfer);
1580 	if (ret) {
1581 		dev_err(dev, "Mbox send fail %d\n", ret);
1582 		goto fail;
1583 	}
1584 
1585 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1586 
1587 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1588 
1589 fail:
1590 	ti_sci_put_one_xfer(&info->minfo, xfer);
1591 
1592 	return ret;
1593 }
1594 
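/*
 * min/target/max express an acceptable frequency window. A hypothetical
 * caller asking for 48 MHz with 0.1% slack on either side:
 *
 *	u64 target = 48000000;
 *
 *	ret = ti_sci_cmd_clk_set_freq(handle, dev_id, clk_id,
 *				      target - target / 1000, target,
 *				      target + target / 1000);
 */
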
1595 /**
1596  * ti_sci_cmd_clk_get_freq() - Get current frequency
1597  * @handle:	pointer to TI SCI handle
1598  * @dev_id:	Device identifier this request is for
1599  * @clk_id:	Clock identifier for the device for this request.
1600  *		Each device has its own set of clock inputs. This indexes
1601  *		which clock input to modify.
1602  * @freq:	Current frequency in Hz
1603  *
1604  * Return: 0 if all went well, else returns appropriate error value.
1605  */
1606 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1607 				   u32 dev_id, u32 clk_id, u64 *freq)
1608 {
1609 	struct ti_sci_info *info;
1610 	struct ti_sci_msg_req_get_clock_freq *req;
1611 	struct ti_sci_msg_resp_get_clock_freq *resp;
1612 	struct ti_sci_xfer *xfer;
1613 	struct device *dev;
1614 	int ret = 0;
1615 
1616 	if (IS_ERR(handle))
1617 		return PTR_ERR(handle);
1618 	if (!handle || !freq)
1619 		return -EINVAL;
1620 
1621 	info = handle_to_ti_sci_info(handle);
1622 	dev = info->dev;
1623 
1624 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1625 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1626 				   sizeof(*req), sizeof(*resp));
1627 	if (IS_ERR(xfer)) {
1628 		ret = PTR_ERR(xfer);
1629 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1630 		return ret;
1631 	}
1632 	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
1633 	req->dev_id = dev_id;
1634 	if (clk_id < 255) {
1635 		req->clk_id = clk_id;
1636 	} else {
1637 		req->clk_id = 255;
1638 		req->clk_id_32 = clk_id;
1639 	}
1640 
1641 	ret = ti_sci_do_xfer(info, xfer);
1642 	if (ret) {
1643 		dev_err(dev, "Mbox send fail %d\n", ret);
1644 		goto fail;
1645 	}
1646 
1647 	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
1648 
1649 	if (!ti_sci_is_response_ack(resp))
1650 		ret = -ENODEV;
1651 	else
1652 		*freq = resp->freq_hz;
1653 
1654 fail:
1655 	ti_sci_put_one_xfer(&info->minfo, xfer);
1656 
1657 	return ret;
1658 }
1659 
1660 /**
1661  * ti_sci_cmd_prepare_sleep() - Prepare system for system suspend
1662  * @handle:		pointer to TI SCI handle
1663  * @mode:		Low power mode to enter
1664  * @ctx_lo:		Low part of address for context save
1665  * @ctx_hi:		High part of address for context save
1666  * @debug_flags:	Debug flags to pass to firmware
1667  *
1668  * Return: 0 if all went well, else returns appropriate error value.
1669  */
1670 static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
1671 				    u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
1672 {
1673 	struct ti_sci_info *info;
1674 	struct ti_sci_msg_req_prepare_sleep *req;
1675 	struct ti_sci_msg_hdr *resp;
1676 	struct ti_sci_xfer *xfer;
1677 	struct device *dev;
1678 	int ret = 0;
1679 
1680 	if (IS_ERR(handle))
1681 		return PTR_ERR(handle);
1682 	if (!handle)
1683 		return -EINVAL;
1684 
1685 	info = handle_to_ti_sci_info(handle);
1686 	dev = info->dev;
1687 
1688 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
1689 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1690 				   sizeof(*req), sizeof(*resp));
1691 	if (IS_ERR(xfer)) {
1692 		ret = PTR_ERR(xfer);
1693 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1694 		return ret;
1695 	}
1696 
1697 	req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf;
1698 	req->mode = mode;
1699 	req->ctx_lo = ctx_lo;
1700 	req->ctx_hi = ctx_hi;
1701 	req->debug_flags = debug_flags;
1702 
1703 	ret = ti_sci_do_xfer(info, xfer);
1704 	if (ret) {
1705 		dev_err(dev, "Mbox send fail %d\n", ret);
1706 		goto fail;
1707 	}
1708 
1709 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1710 
1711 	if (!ti_sci_is_response_ack(resp)) {
1712 		dev_err(dev, "Failed to prepare sleep\n");
1713 		ret = -ENODEV;
1714 	}
1715 
1716 fail:
1717 	ti_sci_put_one_xfer(&info->minfo, xfer);
1718 
1719 	return ret;
1720 }
1721 
1722 /**
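/*
 * A hypothetical caller splits the 64-bit context-save address into the
 * two 32-bit halves this message expects (mode being one of the sleep
 * mode values defined in ti_sci.h):
 *
 *	ret = ti_sci_cmd_prepare_sleep(handle, mode,
 *				       lower_32_bits(ctx_addr),
 *				       upper_32_bits(ctx_addr), 0);
 */
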
1723  * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities
1724  * @handle:		Pointer to TI SCI handle
1725  * @fw_caps:		Each bit in fw_caps indicates one FW/SoC capability
1726  *
1727  * Check if the firmware supports any optional low power modes.
1728  * Old revisions of TIFS (< 08.04) will NACK the request which results in
1729  * -ENODEV being returned.
1730  *
1731  * Return: 0 if all went well, else returns appropriate error value.
1732  */
1733 static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle,
1734 					u64 *fw_caps)
1735 {
1736 	struct ti_sci_info *info;
1737 	struct ti_sci_xfer *xfer;
1738 	struct ti_sci_msg_resp_query_fw_caps *resp;
1739 	struct device *dev;
1740 	int ret = 0;
1741 
1742 	if (IS_ERR(handle))
1743 		return PTR_ERR(handle);
1744 	if (!handle)
1745 		return -EINVAL;
1746 
1747 	info = handle_to_ti_sci_info(handle);
1748 	dev = info->dev;
1749 
1750 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS,
1751 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1752 				   sizeof(struct ti_sci_msg_hdr),
1753 				   sizeof(*resp));
1754 	if (IS_ERR(xfer)) {
1755 		ret = PTR_ERR(xfer);
1756 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1757 		return ret;
1758 	}
1759 
1760 	ret = ti_sci_do_xfer(info, xfer);
1761 	if (ret) {
1762 		dev_err(dev, "Mbox send fail %d\n", ret);
1763 		goto fail;
1764 	}
1765 
1766 	resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf;
1767 
1768 	if (!ti_sci_is_response_ack(resp)) {
1769 		dev_err(dev, "Failed to get capabilities\n");
1770 		ret = -ENODEV;
1771 		goto fail;
1772 	}
1773 
1774 	if (fw_caps)
1775 		*fw_caps = resp->fw_caps;
1776 
1777 fail:
1778 	ti_sci_put_one_xfer(&info->minfo, xfer);
1779 
1780 	return ret;
1781 }
1782 
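/*
 * Illustrative sketch (editor's example): probe code can cache the
 * capability bits and test for DM-managed LPM support, mirroring the check
 * done in ti_sci_setup_ops():
 *
 *	u64 fw_caps;
 *
 *	if (!ti_sci_msg_cmd_query_fw_caps(handle, &fw_caps) &&
 *	    (fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED))
 *		pr_debug("DM managed LPM supported\n");
 */
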
1783 /**
1784  * ti_sci_cmd_set_io_isolation() - Enable IO isolation in LPM
1785  * @handle:		Pointer to TI SCI handle
1786  * @state:		The desired state of the IO isolation
1787  *
1788  * Return: 0 if all went well, else returns appropriate error value.
1789  */
1790 static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle,
1791 				       u8 state)
1792 {
1793 	struct ti_sci_info *info;
1794 	struct ti_sci_msg_req_set_io_isolation *req;
1795 	struct ti_sci_msg_hdr *resp;
1796 	struct ti_sci_xfer *xfer;
1797 	struct device *dev;
1798 	int ret = 0;
1799 
1800 	if (IS_ERR(handle))
1801 		return PTR_ERR(handle);
1802 	if (!handle)
1803 		return -EINVAL;
1804 
1805 	info = handle_to_ti_sci_info(handle);
1806 	dev = info->dev;
1807 
1808 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION,
1809 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1810 				   sizeof(*req), sizeof(*resp));
1811 	if (IS_ERR(xfer)) {
1812 		ret = PTR_ERR(xfer);
1813 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1814 		return ret;
1815 	}
1816 	req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf;
1817 	req->state = state;
1818 
1819 	ret = ti_sci_do_xfer(info, xfer);
1820 	if (ret) {
1821 		dev_err(dev, "Mbox send fail %d\n", ret);
1822 		goto fail;
1823 	}
1824 
1825 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1826 
1827 	if (!ti_sci_is_response_ack(resp)) {
1828 		dev_err(dev, "Failed to set IO isolation\n");
1829 		ret = -ENODEV;
1830 	}
1831 
1832 fail:
1833 	ti_sci_put_one_xfer(&info->minfo, xfer);
1834 
1835 	return ret;
1836 }
1837 
1838 /**
1839  * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM
1840  * @handle:		Pointer to TI SCI handle
1841  * @source:		The wakeup source that woke the SoC from LPM
1842  * @timestamp:		Timestamp of the wakeup event
1843  * @pin:		The pin that has triggered wake up
1844  * @mode:		The last entered low power mode
1845  *
1846  * Return: 0 if all went well, else returns appropriate error value.
1847  */
1848 static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle,
1849 					  u32 *source, u64 *timestamp, u8 *pin, u8 *mode)
1850 {
1851 	struct ti_sci_info *info;
1852 	struct ti_sci_xfer *xfer;
1853 	struct ti_sci_msg_resp_lpm_wake_reason *resp;
1854 	struct device *dev;
1855 	int ret = 0;
1856 
1857 	if (IS_ERR(handle))
1858 		return PTR_ERR(handle);
1859 	if (!handle)
1860 		return -EINVAL;
1861 
1862 	info = handle_to_ti_sci_info(handle);
1863 	dev = info->dev;
1864 
1865 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON,
1866 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1867 				   sizeof(struct ti_sci_msg_hdr),
1868 				   sizeof(*resp));
1869 	if (IS_ERR(xfer)) {
1870 		ret = PTR_ERR(xfer);
1871 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1872 		return ret;
1873 	}
1874 
1875 	ret = ti_sci_do_xfer(info, xfer);
1876 	if (ret) {
1877 		dev_err(dev, "Mbox send fail %d\n", ret);
1878 		goto fail;
1879 	}
1880 
1881 	resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf;
1882 
1883 	if (!ti_sci_is_response_ack(resp)) {
1884 		dev_err(dev, "Failed to get wake reason\n");
1885 		ret = -ENODEV;
1886 		goto fail;
1887 	}
1888 
1889 	if (source)
1890 		*source = resp->wake_source;
1891 	if (timestamp)
1892 		*timestamp = resp->wake_timestamp;
1893 	if (pin)
1894 		*pin = resp->wake_pin;
1895 	if (mode)
1896 		*mode = resp->mode;
1897 
1898 fail:
1899 	ti_sci_put_one_xfer(&info->minfo, xfer);
1900 
1901 	return ret;
1902 }
1903 
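/*
 * Illustrative sketch (editor's example): the resume path can log why the
 * SoC woke up; every output pointer is optional and may be NULL:
 *
 *	u32 source;
 *	u64 ts;
 *
 *	if (!handle->ops.pm_ops.lpm_wake_reason(handle, &source, &ts,
 *						NULL, NULL))
 *		pr_debug("wake source %u at %llu\n", source,
 *			 (unsigned long long)ts);
 */
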
1904 /**
1905  * ti_sci_cmd_set_device_constraint() - Set LPM constraint on behalf of a device
1906  * @handle:	pointer to TI SCI handle
1907  * @id:	Device identifier
1908  * @state:	The desired state of device constraint: set or clear
1909  *
1910  * Return: 0 if all went well, else returns appropriate error value.
1911  */
1912 static int ti_sci_cmd_set_device_constraint(const struct ti_sci_handle *handle,
1913 					    u32 id, u8 state)
1914 {
1915 	struct ti_sci_info *info;
1916 	struct ti_sci_msg_req_lpm_set_device_constraint *req;
1917 	struct ti_sci_msg_hdr *resp;
1918 	struct ti_sci_xfer *xfer;
1919 	struct device *dev;
1920 	int ret = 0;
1921 
1922 	if (IS_ERR(handle))
1923 		return PTR_ERR(handle);
1924 	if (!handle)
1925 		return -EINVAL;
1926 
1927 	info = handle_to_ti_sci_info(handle);
1928 	dev = info->dev;
1929 
1930 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT,
1931 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1932 				   sizeof(*req), sizeof(*resp));
1933 	if (IS_ERR(xfer)) {
1934 		ret = PTR_ERR(xfer);
1935 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1936 		return ret;
1937 	}
1938 	req = (struct ti_sci_msg_req_lpm_set_device_constraint *)xfer->xfer_buf;
1939 	req->id = id;
1940 	req->state = state;
1941 
1942 	ret = ti_sci_do_xfer(info, xfer);
1943 	if (ret) {
1944 		dev_err(dev, "Mbox send fail %d\n", ret);
1945 		goto fail;
1946 	}
1947 
1948 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1949 
1950 	if (!ti_sci_is_response_ack(resp)) {
1951 		dev_err(dev, "Failed to set device constraint\n");
1952 		ret = -ENODEV;
1953 	}
1954 
1955 fail:
1956 	ti_sci_put_one_xfer(&info->minfo, xfer);
1957 
1958 	return ret;
1959 }
1960 
1961 /**
1962  * ti_sci_cmd_set_latency_constraint() - Set LPM resume latency constraint
1963  * @handle:	pointer to TI SCI handle
1964  * @latency:	maximum acceptable latency (in ms) to wake up from LPM
1965  * @state:	The desired state of latency constraint: set or clear
1966  *
1967  * Return: 0 if all went well, else returns appropriate error value.
1968  */
1969 static int ti_sci_cmd_set_latency_constraint(const struct ti_sci_handle *handle,
1970 					     u16 latency, u8 state)
1971 {
1972 	struct ti_sci_info *info;
1973 	struct ti_sci_msg_req_lpm_set_latency_constraint *req;
1974 	struct ti_sci_msg_hdr *resp;
1975 	struct ti_sci_xfer *xfer;
1976 	struct device *dev;
1977 	int ret = 0;
1978 
1979 	if (IS_ERR(handle))
1980 		return PTR_ERR(handle);
1981 	if (!handle)
1982 		return -EINVAL;
1983 
1984 	info = handle_to_ti_sci_info(handle);
1985 	dev = info->dev;
1986 
1987 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT,
1988 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1989 				   sizeof(*req), sizeof(*resp));
1990 	if (IS_ERR(xfer)) {
1991 		ret = PTR_ERR(xfer);
1992 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1993 		return ret;
1994 	}
1995 	req = (struct ti_sci_msg_req_lpm_set_latency_constraint *)xfer->xfer_buf;
1996 	req->latency = latency;
1997 	req->state = state;
1998 
1999 	ret = ti_sci_do_xfer(info, xfer);
2000 	if (ret) {
2001 		dev_err(dev, "Mbox send fail %d\n", ret);
2002 		goto fail;
2003 	}
2004 
2005 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2006 
2007 	if (!ti_sci_is_response_ack(resp)) {
2008 		dev_err(dev, "Failed to set latency constraint\n");
2009 		ret = -ENODEV;
2010 	}
2011 
2012 fail:
2013 	ti_sci_put_one_xfer(&info->minfo, xfer);
2014 
2015 	return ret;
2016 }
2017 
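/*
 * Illustrative sketch (editor's example): before suspend, a client can pin
 * a device and bound the wakeup latency through pm_ops (dev_id and state
 * are placeholders; the set/clear state values are defined by the TISCI LPM
 * constraint interface):
 *
 *	ret = handle->ops.pm_ops.set_device_constraint(handle, dev_id, state);
 *	if (!ret)
 *		ret = handle->ops.pm_ops.set_latency_constraint(handle, 10,
 *								state);
 */
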
2018 /**
2019  * ti_sci_cmd_lpm_abort() - Abort entry to LPM by clearing the selected low power mode
2020  * @dev:	Device pointer corresponding to the SCI entity
2021  *
2022  * Return: 0 if all went well, else returns appropriate error value.
2023  */
2024 static int ti_sci_cmd_lpm_abort(struct device *dev)
2025 {
2026 	struct ti_sci_info *info = dev_get_drvdata(dev);
2027 	struct ti_sci_msg_hdr *req;
2028 	struct ti_sci_msg_hdr *resp;
2029 	struct ti_sci_xfer *xfer;
2030 	int ret = 0;
2031 
2032 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_ABORT,
2033 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2034 				   sizeof(*req), sizeof(*resp));
2035 	if (IS_ERR(xfer)) {
2036 		ret = PTR_ERR(xfer);
2037 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2038 		return ret;
2039 	}
2040 	req = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2041 
2042 	ret = ti_sci_do_xfer(info, xfer);
2043 	if (ret) {
2044 		dev_err(dev, "Mbox send fail %d\n", ret);
2045 		goto fail;
2046 	}
2047 
2048 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2049 
2050 	if (!ti_sci_is_response_ack(resp))
2051 		ret = -ENODEV;
2052 
2053 fail:
2054 	ti_sci_put_one_xfer(&info->minfo, xfer);
2055 
2056 	return ret;
2057 }
2058 
2059 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
2060 {
2061 	struct ti_sci_info *info;
2062 	struct ti_sci_msg_req_reboot *req;
2063 	struct ti_sci_msg_hdr *resp;
2064 	struct ti_sci_xfer *xfer;
2065 	struct device *dev;
2066 	int ret = 0;
2067 
2068 	if (IS_ERR(handle))
2069 		return PTR_ERR(handle);
2070 	if (!handle)
2071 		return -EINVAL;
2072 
2073 	info = handle_to_ti_sci_info(handle);
2074 	dev = info->dev;
2075 
2076 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
2077 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2078 				   sizeof(*req), sizeof(*resp));
2079 	if (IS_ERR(xfer)) {
2080 		ret = PTR_ERR(xfer);
2081 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2082 		return ret;
2083 	}
2084 	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
2085 
2086 	ret = ti_sci_do_xfer(info, xfer);
2087 	if (ret) {
2088 		dev_err(dev, "Mbox send fail %d\n", ret);
2089 		goto fail;
2090 	}
2091 
2092 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2093 
2094 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2098 
2099 fail:
2100 	ti_sci_put_one_xfer(&info->minfo, xfer);
2101 
2102 	return ret;
2103 }
2104 
2105 /**
2106  * ti_sci_get_resource_range - Helper to get a range of resources assigned
2107  *			       to a host. Resource is uniquely identified by
2108  *			       type and subtype.
2109  * @handle:		Pointer to TISCI handle.
2110  * @dev_id:		TISCI device ID.
2111  * @subtype:		Resource assignment subtype that is being requested
2112  *			from the given device.
2113  * @s_host:		Host processor ID to which the resources are allocated
2114  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2115  *			resource range start index and number of resources
2116  *
2117  * Return: 0 if all went fine, else return appropriate error.
2118  */
2119 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
2120 				     u32 dev_id, u8 subtype, u8 s_host,
2121 				     struct ti_sci_resource_desc *desc)
2122 {
2123 	struct ti_sci_msg_resp_get_resource_range *resp;
2124 	struct ti_sci_msg_req_get_resource_range *req;
2125 	struct ti_sci_xfer *xfer;
2126 	struct ti_sci_info *info;
2127 	struct device *dev;
2128 	int ret = 0;
2129 
2130 	if (IS_ERR(handle))
2131 		return PTR_ERR(handle);
2132 	if (!handle || !desc)
2133 		return -EINVAL;
2134 
2135 	info = handle_to_ti_sci_info(handle);
2136 	dev = info->dev;
2137 
2138 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
2139 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2140 				   sizeof(*req), sizeof(*resp));
2141 	if (IS_ERR(xfer)) {
2142 		ret = PTR_ERR(xfer);
2143 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2144 		return ret;
2145 	}
2146 
2147 	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
2148 	req->secondary_host = s_host;
2149 	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
2150 	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
2151 
2152 	ret = ti_sci_do_xfer(info, xfer);
2153 	if (ret) {
2154 		dev_err(dev, "Mbox send fail %d\n", ret);
2155 		goto fail;
2156 	}
2157 
2158 	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
2159 
2160 	if (!ti_sci_is_response_ack(resp)) {
2161 		ret = -ENODEV;
2162 	} else if (!resp->range_num && !resp->range_num_sec) {
2163 		/* Neither of the two resource ranges is valid */
2164 		ret = -ENODEV;
2165 	} else {
2166 		desc->start = resp->range_start;
2167 		desc->num = resp->range_num;
2168 		desc->start_sec = resp->range_start_sec;
2169 		desc->num_sec = resp->range_num_sec;
2170 	}
2171 
2172 fail:
2173 	ti_sci_put_one_xfer(&info->minfo, xfer);
2174 
2175 	return ret;
2176 }
2177 
2178 /**
2179  * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
2180  *				   host that is the same as the TI SCI interface host.
2181  * @handle:		Pointer to TISCI handle.
2182  * @dev_id:		TISCI device ID.
2183  * @subtype:		Resource assignment subtype that is being requested
2184  *			from the given device.
2185  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2186  *			resource range start index and number of resources
2187  *
2188  * Return: 0 if all went fine, else return appropriate error.
2189  */
2190 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
2191 					 u32 dev_id, u8 subtype,
2192 					 struct ti_sci_resource_desc *desc)
2193 {
2194 	return ti_sci_get_resource_range(handle, dev_id, subtype,
2195 					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
2196 					 desc);
2197 }
2198 
2199 /**
2200  * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
2201  *					      assigned to a specified host.
2202  * @handle:		Pointer to TISCI handle.
2203  * @dev_id:		TISCI device ID.
2204  * @subtype:		Resource assignment subtype that is being requested
2205  *			from the given device.
2206  * @s_host:		Host processor ID to which the resources are allocated
2207  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2208  *			resource range start index and number of resources
2209  *
2210  * Return: 0 if all went fine, else return appropriate error.
2211  */
2212 static
2213 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
2214 					     u32 dev_id, u8 subtype, u8 s_host,
2215 					     struct ti_sci_resource_desc *desc)
2216 {
2217 	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
2218 }
2219 
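/*
 * Illustrative sketch (editor's example): a resource-management client can
 * query the range it owns (dev_id and subtype are SoC-specific
 * placeholders):
 *
 *	struct ti_sci_resource_desc desc;
 *
 *	ret = handle->ops.rm_core_ops.get_range(handle, dev_id, subtype,
 *						&desc);
 *	if (!ret)
 *		pr_debug("range: start %u num %u\n", desc.start, desc.num);
 */
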
2220 /**
2221  * ti_sci_manage_irq() - Helper api to configure/release the irq route between
2222  *			 the requested source and destination
2223  * @handle:		Pointer to TISCI handle.
2224  * @valid_params:	Bit fields defining the validity of certain params
2225  * @src_id:		Device ID of the IRQ source
2226  * @src_index:		IRQ source index within the source device
2227  * @dst_id:		Device ID of the IRQ destination
2228  * @dst_host_irq:	IRQ number of the destination device
2229  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2230  * @vint:		Virtual interrupt to be used within the IA
2231  * @global_event:	Global event number to be used for the requesting event
2232  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2233  * @s_host:		Secondary host ID for which the irq/event is being
2234  *			requested.
2235  * @type:		Request type irq set or release.
2236  *
2237  * Return: 0 if all went fine, else return appropriate error.
2238  */
2239 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
2240 			     u32 valid_params, u16 src_id, u16 src_index,
2241 			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
2242 			     u16 global_event, u8 vint_status_bit, u8 s_host,
2243 			     u16 type)
2244 {
2245 	struct ti_sci_msg_req_manage_irq *req;
2246 	struct ti_sci_msg_hdr *resp;
2247 	struct ti_sci_xfer *xfer;
2248 	struct ti_sci_info *info;
2249 	struct device *dev;
2250 	int ret = 0;
2251 
2252 	if (IS_ERR(handle))
2253 		return PTR_ERR(handle);
2254 	if (!handle)
2255 		return -EINVAL;
2256 
2257 	info = handle_to_ti_sci_info(handle);
2258 	dev = info->dev;
2259 
2260 	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2261 				   sizeof(*req), sizeof(*resp));
2262 	if (IS_ERR(xfer)) {
2263 		ret = PTR_ERR(xfer);
2264 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2265 		return ret;
2266 	}
2267 	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
2268 	req->valid_params = valid_params;
2269 	req->src_id = src_id;
2270 	req->src_index = src_index;
2271 	req->dst_id = dst_id;
2272 	req->dst_host_irq = dst_host_irq;
2273 	req->ia_id = ia_id;
2274 	req->vint = vint;
2275 	req->global_event = global_event;
2276 	req->vint_status_bit = vint_status_bit;
2277 	req->secondary_host = s_host;
2278 
2279 	ret = ti_sci_do_xfer(info, xfer);
2280 	if (ret) {
2281 		dev_err(dev, "Mbox send fail %d\n", ret);
2282 		goto fail;
2283 	}
2284 
2285 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2286 
2287 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2288 
2289 fail:
2290 	ti_sci_put_one_xfer(&info->minfo, xfer);
2291 
2292 	return ret;
2293 }
2294 
2295 /**
2296  * ti_sci_set_irq() - Helper api to configure the irq route between the
2297  *		      requested source and destination
2298  * @handle:		Pointer to TISCI handle.
2299  * @valid_params:	Bit fields defining the validity of certain params
2300  * @src_id:		Device ID of the IRQ source
2301  * @src_index:		IRQ source index within the source device
2302  * @dst_id:		Device ID of the IRQ destination
2303  * @dst_host_irq:	IRQ number of the destination device
2304  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2305  * @vint:		Virtual interrupt to be used within the IA
2306  * @global_event:	Global event number to be used for the requesting event
2307  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2308  * @s_host:		Secondary host ID for which the irq/event is being
2309  *			requested.
2310  *
2311  * Return: 0 if all went fine, else return appropriate error.
2312  */
2313 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
2314 			  u16 src_id, u16 src_index, u16 dst_id,
2315 			  u16 dst_host_irq, u16 ia_id, u16 vint,
2316 			  u16 global_event, u8 vint_status_bit, u8 s_host)
2317 {
2318 	pr_debug("IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
2319 		 valid_params, src_id, src_index,
2320 		 dst_id, dst_host_irq, ia_id, vint, global_event,
2321 		 vint_status_bit);
2322 
2323 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
2324 				 dst_id, dst_host_irq, ia_id, vint,
2325 				 global_event, vint_status_bit, s_host,
2326 				 TI_SCI_MSG_SET_IRQ);
2327 }
2328 
2329 /**
2330  * ti_sci_free_irq() - Helper api to free the irq route between the
2331  *			   requested source and destination
2332  * @handle:		Pointer to TISCI handle.
2333  * @valid_params:	Bit fields defining the validity of certain params
2334  * @src_id:		Device ID of the IRQ source
2335  * @src_index:		IRQ source index within the source device
2336  * @dst_id:		Device ID of the IRQ destination
2337  * @dst_host_irq:	IRQ number of the destination device
2338  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2339  * @vint:		Virtual interrupt to be used within the IA
2340  * @global_event:	Global event number to be used for the requesting event
2341  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2342  * @s_host:		Secondary host ID for which the irq/event is being
2343  *			requested.
2344  *
2345  * Return: 0 if all went fine, else return appropriate error.
2346  */
2347 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
2348 			   u16 src_id, u16 src_index, u16 dst_id,
2349 			   u16 dst_host_irq, u16 ia_id, u16 vint,
2350 			   u16 global_event, u8 vint_status_bit, u8 s_host)
2351 {
2352 	pr_debug("IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
2353 		 valid_params, src_id, src_index,
2354 		 dst_id, dst_host_irq, ia_id, vint, global_event,
2355 		 vint_status_bit);
2356 
2357 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
2358 				 dst_id, dst_host_irq, ia_id, vint,
2359 				 global_event, vint_status_bit, s_host,
2360 				 TI_SCI_MSG_FREE_IRQ);
2361 }
2362 
2363 /**
2364  * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
2365  *			  source and destination.
2366  * @handle:		Pointer to TISCI handle.
2367  * @src_id:		Device ID of the IRQ source
2368  * @src_index:		IRQ source index within the source device
2369  * @dst_id:		Device ID of the IRQ destination
2370  * @dst_host_irq:	IRQ number of the destination device
2371  *
2372  * Return: 0 if all went fine, else return appropriate error.
2373  */
2374 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
2375 			      u16 src_index, u16 dst_id, u16 dst_host_irq)
2376 {
2377 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2378 
2379 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
2380 			      dst_host_irq, 0, 0, 0, 0, 0);
2381 }
2382 
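/*
 * Illustrative sketch (editor's example): routing source index 4 of a
 * device directly to host IRQ 32 (IDs are made-up placeholders):
 *
 *	ret = handle->ops.rm_irq_ops.set_irq(handle, src_id, 4, dst_id, 32);
 */
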
2383 /**
2384  * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
2385  *				requested source and Interrupt Aggregator.
2386  * @handle:		Pointer to TISCI handle.
2387  * @src_id:		Device ID of the IRQ source
2388  * @src_index:		IRQ source index within the source device
2389  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2390  * @vint:		Virtual interrupt to be used within the IA
2391  * @global_event:	Global event number to be used for the requesting event
2392  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2393  *
2394  * Return: 0 if all went fine, else return appropriate error.
2395  */
2396 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
2397 				    u16 src_id, u16 src_index, u16 ia_id,
2398 				    u16 vint, u16 global_event,
2399 				    u8 vint_status_bit)
2400 {
2401 	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
2402 			   MSG_FLAG_GLB_EVNT_VALID |
2403 			   MSG_FLAG_VINT_STS_BIT_VALID;
2404 
2405 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
2406 			      ia_id, vint, global_event, vint_status_bit, 0);
2407 }
2408 
2409 /**
2410  * ti_sci_cmd_free_irq() - Free a host irq route between the
2411  *			   requested source and destination.
2412  * @handle:		Pointer to TISCI handle.
2413  * @src_id:		Device ID of the IRQ source
2414  * @src_index:		IRQ source index within the source device
2415  * @dst_id:		Device ID of the IRQ destination
2416  * @dst_host_irq:	IRQ number of the destination device
2417  *
2418  * Return: 0 if all went fine, else return appropriate error.
2419  */
2420 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2421 			       u16 src_index, u16 dst_id, u16 dst_host_irq)
2422 {
2423 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2424 
2425 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2426 			       dst_host_irq, 0, 0, 0, 0, 0);
2427 }
2428 
2429 /**
2430  * ti_sci_cmd_free_event_map() - Free an event map between the requested source
2431  *				 and Interrupt Aggregator.
2432  * @handle:		Pointer to TISCI handle.
2433  * @src_id:		Device ID of the IRQ source
2434  * @src_index:		IRQ source index within the source device
2435  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2436  * @vint:		Virtual interrupt to be used within the IA
2437  * @global_event:	Global event number to be used for the requesting event
2438  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2439  *
2440  * Return: 0 if all went fine, else return appropriate error.
2441  */
2442 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2443 				     u16 src_id, u16 src_index, u16 ia_id,
2444 				     u16 vint, u16 global_event,
2445 				     u8 vint_status_bit)
2446 {
2447 	u32 valid_params = MSG_FLAG_IA_ID_VALID |
2448 			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2449 			   MSG_FLAG_VINT_STS_BIT_VALID;
2450 
2451 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2452 			       ia_id, vint, global_event, vint_status_bit, 0);
2453 }
2454 
2455 /**
2456  * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
2457  * @handle:	Pointer to TI SCI handle.
2458  * @params:	Pointer to ti_sci_msg_rm_ring_cfg ring config structure
2459  *
2460  * Return: 0 if all went well, else returns appropriate error value.
2461  *
2462  * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
2463  * more info.
2464  */
2465 static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
2466 				  const struct ti_sci_msg_rm_ring_cfg *params)
2467 {
2468 	struct ti_sci_msg_rm_ring_cfg_req *req;
2469 	struct ti_sci_msg_hdr *resp;
2470 	struct ti_sci_xfer *xfer;
2471 	struct ti_sci_info *info;
2472 	struct device *dev;
2473 	int ret = 0;
2474 
2475 	if (IS_ERR_OR_NULL(handle))
2476 		return -EINVAL;
2477 
2478 	info = handle_to_ti_sci_info(handle);
2479 	dev = info->dev;
2480 
2481 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2482 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2483 				   sizeof(*req), sizeof(*resp));
2484 	if (IS_ERR(xfer)) {
2485 		ret = PTR_ERR(xfer);
2486 		dev_err(dev, "RM_RA:Message alloc failed(%d)\n", ret);
2487 		return ret;
2488 	}
2489 	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2490 	req->valid_params = params->valid_params;
2491 	req->nav_id = params->nav_id;
2492 	req->index = params->index;
2493 	req->addr_lo = params->addr_lo;
2494 	req->addr_hi = params->addr_hi;
2495 	req->count = params->count;
2496 	req->mode = params->mode;
2497 	req->size = params->size;
2498 	req->order_id = params->order_id;
2499 	req->virtid = params->virtid;
2500 	req->asel = params->asel;
2501 
2502 	ret = ti_sci_do_xfer(info, xfer);
2503 	if (ret) {
2504 		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2505 		goto fail;
2506 	}
2507 
2508 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2509 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2510 
2511 fail:
2512 	ti_sci_put_one_xfer(&info->minfo, xfer);
2513 	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
2514 	return ret;
2515 }
2516 
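/*
 * Illustrative sketch (editor's example): callers fill only the fields they
 * flag in valid_params and leave the rest to firmware defaults (the *_VALID
 * flag names below follow the TI SCI protocol header):
 *
 *	struct ti_sci_msg_rm_ring_cfg cfg = {
 *		.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID |
 *				TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
 *		.nav_id = nav_id,
 *		.index = ring_idx,
 *		.count = 128,
 *		.mode = mode,
 *	};
 *
 *	ret = handle->ops.rm_ring_ops.set_cfg(handle, &cfg);
 */
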
2517 /**
2518  * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2519  * @handle:	Pointer to TI SCI handle.
2520  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2521  *		pairing
2522  * @src_thread:	Source PSI-L thread ID
2523  * @dst_thread: Destination PSI-L thread ID
2524  *
2525  * Return: 0 if all went well, else returns appropriate error value.
2526  */
2527 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2528 				   u32 nav_id, u32 src_thread, u32 dst_thread)
2529 {
2530 	struct ti_sci_msg_psil_pair *req;
2531 	struct ti_sci_msg_hdr *resp;
2532 	struct ti_sci_xfer *xfer;
2533 	struct ti_sci_info *info;
2534 	struct device *dev;
2535 	int ret = 0;
2536 
2537 	if (IS_ERR(handle))
2538 		return PTR_ERR(handle);
2539 	if (!handle)
2540 		return -EINVAL;
2541 
2542 	info = handle_to_ti_sci_info(handle);
2543 	dev = info->dev;
2544 
2545 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2546 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2547 				   sizeof(*req), sizeof(*resp));
2548 	if (IS_ERR(xfer)) {
2549 		dev_err(dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2550 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2551 		return ret;
2552 	}
2553 	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2554 	req->nav_id = nav_id;
2555 	req->src_thread = src_thread;
2556 	req->dst_thread = dst_thread;
2557 
2558 	ret = ti_sci_do_xfer(info, xfer);
2559 	if (ret) {
2560 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2561 		goto fail;
2562 	}
2563 
2564 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2565 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2566 
2567 fail:
2568 	ti_sci_put_one_xfer(&info->minfo, xfer);
2569 
2570 	return ret;
2571 }
2572 
2573 /**
2574  * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2575  * @handle:	Pointer to TI SCI handle.
2576  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2577  *		unpairing
2578  * @src_thread:	Source PSI-L thread ID
2579  * @dst_thread:	Destination PSI-L thread ID
2580  *
2581  * Return: 0 if all went well, else returns appropriate error value.
2582  */
2583 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2584 				     u32 nav_id, u32 src_thread, u32 dst_thread)
2585 {
2586 	struct ti_sci_msg_psil_unpair *req;
2587 	struct ti_sci_msg_hdr *resp;
2588 	struct ti_sci_xfer *xfer;
2589 	struct ti_sci_info *info;
2590 	struct device *dev;
2591 	int ret = 0;
2592 
2593 	if (IS_ERR(handle))
2594 		return PTR_ERR(handle);
2595 	if (!handle)
2596 		return -EINVAL;
2597 
2598 	info = handle_to_ti_sci_info(handle);
2599 	dev = info->dev;
2600 
2601 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2602 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2603 				   sizeof(*req), sizeof(*resp));
2604 	if (IS_ERR(xfer)) {
2605 		ret = PTR_ERR(xfer);
2606 		dev_err(dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2607 		return ret;
2608 	}
2609 	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2610 	req->nav_id = nav_id;
2611 	req->src_thread = src_thread;
2612 	req->dst_thread = dst_thread;
2613 
2614 	ret = ti_sci_do_xfer(info, xfer);
2615 	if (ret) {
2616 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2617 		goto fail;
2618 	}
2619 
2620 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2621 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2622 
2623 fail:
2624 	ti_sci_put_one_xfer(&info->minfo, xfer);
2625 
2626 	return ret;
2627 }
2628 
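/*
 * Illustrative sketch (editor's example): a DMA client pairs its PSI-L
 * threads before use and unpairs them on teardown (thread IDs are
 * placeholders):
 *
 *	ret = handle->ops.rm_psil_ops.pair(handle, nav_id, src_thread,
 *					   dst_thread);
 *	...
 *	handle->ops.rm_psil_ops.unpair(handle, nav_id, src_thread, dst_thread);
 */
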
2629 /**
2630  * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2631  * @handle:	Pointer to TI SCI handle.
2632  * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2633  *		structure
2634  *
2635  * Return: 0 if all went well, else returns appropriate error value.
2636  *
2637  * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2638  * more info.
2639  */
2640 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2641 			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2642 {
2643 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2644 	struct ti_sci_msg_hdr *resp;
2645 	struct ti_sci_xfer *xfer;
2646 	struct ti_sci_info *info;
2647 	struct device *dev;
2648 	int ret = 0;
2649 
2650 	if (IS_ERR_OR_NULL(handle))
2651 		return -EINVAL;
2652 
2653 	info = handle_to_ti_sci_info(handle);
2654 	dev = info->dev;
2655 
2656 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2657 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2658 				   sizeof(*req), sizeof(*resp));
2659 	if (IS_ERR(xfer)) {
2660 		ret = PTR_ERR(xfer);
2661 		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2662 		return ret;
2663 	}
2664 	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2665 	req->valid_params = params->valid_params;
2666 	req->nav_id = params->nav_id;
2667 	req->index = params->index;
2668 	req->tx_pause_on_err = params->tx_pause_on_err;
2669 	req->tx_filt_einfo = params->tx_filt_einfo;
2670 	req->tx_filt_pswords = params->tx_filt_pswords;
2671 	req->tx_atype = params->tx_atype;
2672 	req->tx_chan_type = params->tx_chan_type;
2673 	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2674 	req->tx_fetch_size = params->tx_fetch_size;
2675 	req->tx_credit_count = params->tx_credit_count;
2676 	req->txcq_qnum = params->txcq_qnum;
2677 	req->tx_priority = params->tx_priority;
2678 	req->tx_qos = params->tx_qos;
2679 	req->tx_orderid = params->tx_orderid;
2680 	req->fdepth = params->fdepth;
2681 	req->tx_sched_priority = params->tx_sched_priority;
2682 	req->tx_burst_size = params->tx_burst_size;
2683 	req->tx_tdtype = params->tx_tdtype;
2684 	req->extended_ch_type = params->extended_ch_type;
2685 
2686 	ret = ti_sci_do_xfer(info, xfer);
2687 	if (ret) {
2688 		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2689 		goto fail;
2690 	}
2691 
2692 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2693 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2694 
2695 fail:
2696 	ti_sci_put_one_xfer(&info->minfo, xfer);
2697 	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2698 	return ret;
2699 }
2700 
2701 /**
2702  * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2703  * @handle:	Pointer to TI SCI handle.
2704  * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2705  *		structure
2706  *
2707  * Return: 0 if all went well, else returns appropriate error value.
2708  *
2709  * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2710  * more info.
2711  */
2712 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2713 			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2714 {
2715 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2716 	struct ti_sci_msg_hdr *resp;
2717 	struct ti_sci_xfer *xfer;
2718 	struct ti_sci_info *info;
2719 	struct device *dev;
2720 	int ret = 0;
2721 
2722 	if (IS_ERR_OR_NULL(handle))
2723 		return -EINVAL;
2724 
2725 	info = handle_to_ti_sci_info(handle);
2726 	dev = info->dev;
2727 
2728 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2729 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2730 				   sizeof(*req), sizeof(*resp));
2731 	if (IS_ERR(xfer)) {
2732 		ret = PTR_ERR(xfer);
2733 		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2734 		return ret;
2735 	}
2736 	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2737 	req->valid_params = params->valid_params;
2738 	req->nav_id = params->nav_id;
2739 	req->index = params->index;
2740 	req->rx_fetch_size = params->rx_fetch_size;
2741 	req->rxcq_qnum = params->rxcq_qnum;
2742 	req->rx_priority = params->rx_priority;
2743 	req->rx_qos = params->rx_qos;
2744 	req->rx_orderid = params->rx_orderid;
2745 	req->rx_sched_priority = params->rx_sched_priority;
2746 	req->flowid_start = params->flowid_start;
2747 	req->flowid_cnt = params->flowid_cnt;
2748 	req->rx_pause_on_err = params->rx_pause_on_err;
2749 	req->rx_atype = params->rx_atype;
2750 	req->rx_chan_type = params->rx_chan_type;
2751 	req->rx_ignore_short = params->rx_ignore_short;
2752 	req->rx_ignore_long = params->rx_ignore_long;
2753 	req->rx_burst_size = params->rx_burst_size;
2754 
2755 	ret = ti_sci_do_xfer(info, xfer);
2756 	if (ret) {
2757 		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2758 		goto fail;
2759 	}
2760 
2761 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2762 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2763 
2764 fail:
2765 	ti_sci_put_one_xfer(&info->minfo, xfer);
2766 	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2767 	return ret;
2768 }
2769 
2770 /**
2771  * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2772  * @handle:	Pointer to TI SCI handle.
2773  * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2774  *		structure
2775  *
2776  * Return: 0 if all went well, else returns appropriate error value.
2777  *
2778  * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2779  * more info.
2780  */
2781 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2782 			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2783 {
2784 	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2785 	struct ti_sci_msg_hdr *resp;
2786 	struct ti_sci_xfer *xfer;
2787 	struct ti_sci_info *info;
2788 	struct device *dev;
2789 	int ret = 0;
2790 
2791 	if (IS_ERR_OR_NULL(handle))
2792 		return -EINVAL;
2793 
2794 	info = handle_to_ti_sci_info(handle);
2795 	dev = info->dev;
2796 
2797 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2798 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2799 				   sizeof(*req), sizeof(*resp));
2800 	if (IS_ERR(xfer)) {
2801 		ret = PTR_ERR(xfer);
2802 		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2803 		return ret;
2804 	}
2805 	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2806 	req->valid_params = params->valid_params;
2807 	req->nav_id = params->nav_id;
2808 	req->flow_index = params->flow_index;
2809 	req->rx_einfo_present = params->rx_einfo_present;
2810 	req->rx_psinfo_present = params->rx_psinfo_present;
2811 	req->rx_error_handling = params->rx_error_handling;
2812 	req->rx_desc_type = params->rx_desc_type;
2813 	req->rx_sop_offset = params->rx_sop_offset;
2814 	req->rx_dest_qnum = params->rx_dest_qnum;
2815 	req->rx_src_tag_hi = params->rx_src_tag_hi;
2816 	req->rx_src_tag_lo = params->rx_src_tag_lo;
2817 	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2818 	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2819 	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2820 	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2821 	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2822 	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2823 	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2824 	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2825 	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2826 	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2827 	req->rx_ps_location = params->rx_ps_location;
2828 
2829 	ret = ti_sci_do_xfer(info, xfer);
2830 	if (ret) {
2831 		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2832 		goto fail;
2833 	}
2834 
2835 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2836 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2837 
2838 fail:
2839 	ti_sci_put_one_xfer(&info->minfo, xfer);
2840 	dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2841 	return ret;
2842 }
2843 
2844 /**
2845  * ti_sci_cmd_proc_request() - Command to request control of a physical processor
2846  * @handle:	Pointer to TI SCI handle
2847  * @proc_id:	Processor ID this request is for
2848  *
2849  * Return: 0 if all went well, else returns appropriate error value.
2850  */
2851 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2852 				   u8 proc_id)
2853 {
2854 	struct ti_sci_msg_req_proc_request *req;
2855 	struct ti_sci_msg_hdr *resp;
2856 	struct ti_sci_info *info;
2857 	struct ti_sci_xfer *xfer;
2858 	struct device *dev;
2859 	int ret = 0;
2860 
2861 	if (!handle)
2862 		return -EINVAL;
2863 	if (IS_ERR(handle))
2864 		return PTR_ERR(handle);
2865 
2866 	info = handle_to_ti_sci_info(handle);
2867 	dev = info->dev;
2868 
2869 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2870 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2871 				   sizeof(*req), sizeof(*resp));
2872 	if (IS_ERR(xfer)) {
2873 		ret = PTR_ERR(xfer);
2874 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2875 		return ret;
2876 	}
2877 	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2878 	req->processor_id = proc_id;
2879 
2880 	ret = ti_sci_do_xfer(info, xfer);
2881 	if (ret) {
2882 		dev_err(dev, "Mbox send fail %d\n", ret);
2883 		goto fail;
2884 	}
2885 
2886 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2887 
2888 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2889 
2890 fail:
2891 	ti_sci_put_one_xfer(&info->minfo, xfer);
2892 
2893 	return ret;
2894 }
2895 
2896 /**
2897  * ti_sci_cmd_proc_release() - Command to release control of a physical processor
2898  * @handle:	Pointer to TI SCI handle
2899  * @proc_id:	Processor ID this request is for
2900  *
2901  * Return: 0 if all went well, else returns appropriate error value.
2902  */
2903 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2904 				   u8 proc_id)
2905 {
2906 	struct ti_sci_msg_req_proc_release *req;
2907 	struct ti_sci_msg_hdr *resp;
2908 	struct ti_sci_info *info;
2909 	struct ti_sci_xfer *xfer;
2910 	struct device *dev;
2911 	int ret = 0;
2912 
2913 	if (!handle)
2914 		return -EINVAL;
2915 	if (IS_ERR(handle))
2916 		return PTR_ERR(handle);
2917 
2918 	info = handle_to_ti_sci_info(handle);
2919 	dev = info->dev;
2920 
2921 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2922 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2923 				   sizeof(*req), sizeof(*resp));
2924 	if (IS_ERR(xfer)) {
2925 		ret = PTR_ERR(xfer);
2926 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2927 		return ret;
2928 	}
2929 	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2930 	req->processor_id = proc_id;
2931 
2932 	ret = ti_sci_do_xfer(info, xfer);
2933 	if (ret) {
2934 		dev_err(dev, "Mbox send fail %d\n", ret);
2935 		goto fail;
2936 	}
2937 
2938 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2939 
2940 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2941 
2942 fail:
2943 	ti_sci_put_one_xfer(&info->minfo, xfer);
2944 
2945 	return ret;
2946 }
2947 
2948 /**
2949  * ti_sci_cmd_proc_handover() - Command to hand over control of a physical
2950  *				processor to a host in the processor's access
2951  *				control list.
2952  * @handle:	Pointer to TI SCI handle
2953  * @proc_id:	Processor ID this request is for
2954  * @host_id:	Host ID to get the control of the processor
2955  *
2956  * Return: 0 if all went well, else returns appropriate error value.
2957  */
2958 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2959 				    u8 proc_id, u8 host_id)
2960 {
2961 	struct ti_sci_msg_req_proc_handover *req;
2962 	struct ti_sci_msg_hdr *resp;
2963 	struct ti_sci_info *info;
2964 	struct ti_sci_xfer *xfer;
2965 	struct device *dev;
2966 	int ret = 0;
2967 
2968 	if (!handle)
2969 		return -EINVAL;
2970 	if (IS_ERR(handle))
2971 		return PTR_ERR(handle);
2972 
2973 	info = handle_to_ti_sci_info(handle);
2974 	dev = info->dev;
2975 
2976 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2977 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2978 				   sizeof(*req), sizeof(*resp));
2979 	if (IS_ERR(xfer)) {
2980 		ret = PTR_ERR(xfer);
2981 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2982 		return ret;
2983 	}
2984 	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2985 	req->processor_id = proc_id;
2986 	req->host_id = host_id;
2987 
2988 	ret = ti_sci_do_xfer(info, xfer);
2989 	if (ret) {
2990 		dev_err(dev, "Mbox send fail %d\n", ret);
2991 		goto fail;
2992 	}
2993 
2994 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2995 
2996 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2997 
2998 fail:
2999 	ti_sci_put_one_xfer(&info->minfo, xfer);
3000 
3001 	return ret;
3002 }
3003 
3004 /**
3005  * ti_sci_cmd_proc_set_config() - Command to set the processor boot
3006  *				    configuration flags
3007  * @handle:		Pointer to TI SCI handle
3008  * @proc_id:		Processor ID this request is for
3009  * @bootvector:		Processor Boot vector (start address)
3010  * @config_flags_set:	Configuration flags to be set
3011  * @config_flags_clear:	Configuration flags to be cleared.
3012  *
3013  * Return: 0 if all went well, else returns appropriate error value.
3014  */
3015 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
3016 				      u8 proc_id, u64 bootvector,
3017 				      u32 config_flags_set,
3018 				      u32 config_flags_clear)
3019 {
3020 	struct ti_sci_msg_req_set_config *req;
3021 	struct ti_sci_msg_hdr *resp;
3022 	struct ti_sci_info *info;
3023 	struct ti_sci_xfer *xfer;
3024 	struct device *dev;
3025 	int ret = 0;
3026 
3027 	if (!handle)
3028 		return -EINVAL;
3029 	if (IS_ERR(handle))
3030 		return PTR_ERR(handle);
3031 
3032 	info = handle_to_ti_sci_info(handle);
3033 	dev = info->dev;
3034 
3035 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
3036 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
3037 				   sizeof(*req), sizeof(*resp));
3038 	if (IS_ERR(xfer)) {
3039 		ret = PTR_ERR(xfer);
3040 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3041 		return ret;
3042 	}
3043 	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
3044 	req->processor_id = proc_id;
3045 	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
3046 	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
3047 				TI_SCI_ADDR_HIGH_SHIFT;
3048 	req->config_flags_set = config_flags_set;
3049 	req->config_flags_clear = config_flags_clear;
3050 
3051 	ret = ti_sci_do_xfer(info, xfer);
3052 	if (ret) {
3053 		dev_err(dev, "Mbox send fail %d\n", ret);
3054 		goto fail;
3055 	}
3056 
3057 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
3058 
3059 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
3060 
3061 fail:
3062 	ti_sci_put_one_xfer(&info->minfo, xfer);
3063 
3064 	return ret;
3065 }
3066 
3067 /**
3068  * ti_sci_cmd_proc_set_control() - Command to set the processor boot
3069  *				     control flags
3070  * @handle:			Pointer to TI SCI handle
3071  * @proc_id:			Processor ID this request is for
3072  * @control_flags_set:		Control flags to be set
3073  * @control_flags_clear:	Control flags to be cleared
3074  *
3075  * Return: 0 if all went well, else returns appropriate error value.
3076  */
3077 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
3078 				       u8 proc_id, u32 control_flags_set,
3079 				       u32 control_flags_clear)
3080 {
3081 	struct ti_sci_msg_req_set_ctrl *req;
3082 	struct ti_sci_msg_hdr *resp;
3083 	struct ti_sci_info *info;
3084 	struct ti_sci_xfer *xfer;
3085 	struct device *dev;
3086 	int ret = 0;
3087 
3088 	if (!handle)
3089 		return -EINVAL;
3090 	if (IS_ERR(handle))
3091 		return PTR_ERR(handle);
3092 
3093 	info = handle_to_ti_sci_info(handle);
3094 	dev = info->dev;
3095 
3096 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
3097 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
3098 				   sizeof(*req), sizeof(*resp));
3099 	if (IS_ERR(xfer)) {
3100 		ret = PTR_ERR(xfer);
3101 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3102 		return ret;
3103 	}
3104 	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
3105 	req->processor_id = proc_id;
3106 	req->control_flags_set = control_flags_set;
3107 	req->control_flags_clear = control_flags_clear;
3108 
3109 	ret = ti_sci_do_xfer(info, xfer);
3110 	if (ret) {
3111 		dev_err(dev, "Mbox send fail %d\n", ret);
3112 		goto fail;
3113 	}
3114 
3115 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
3116 
3117 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
3118 
3119 fail:
3120 	ti_sci_put_one_xfer(&info->minfo, xfer);
3121 
3122 	return ret;
3123 }
3124 
3125 /**
3126  * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
3127  * @handle:	Pointer to TI SCI handle
3128  * @proc_id:	Processor ID this request is for
3129  * @bv:		Processor Boot vector (start address)
3130  * @cfg_flags:	Processor specific configuration flags
3131  * @ctrl_flags:	Processor specific control flags
3132  * @sts_flags:	Processor specific status flags
3133  *
3134  * Return: 0 if all went well, else returns appropriate error value.
3135  */
3136 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
3137 				      u8 proc_id, u64 *bv, u32 *cfg_flags,
3138 				      u32 *ctrl_flags, u32 *sts_flags)
3139 {
3140 	struct ti_sci_msg_resp_get_status *resp;
3141 	struct ti_sci_msg_req_get_status *req;
3142 	struct ti_sci_info *info;
3143 	struct ti_sci_xfer *xfer;
3144 	struct device *dev;
3145 	int ret = 0;
3146 
3147 	if (!handle)
3148 		return -EINVAL;
3149 	if (IS_ERR(handle))
3150 		return PTR_ERR(handle);
3151 
3152 	info = handle_to_ti_sci_info(handle);
3153 	dev = info->dev;
3154 
3155 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
3156 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
3157 				   sizeof(*req), sizeof(*resp));
3158 	if (IS_ERR(xfer)) {
3159 		ret = PTR_ERR(xfer);
3160 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3161 		return ret;
3162 	}
3163 	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
3164 	req->processor_id = proc_id;
3165 
3166 	ret = ti_sci_do_xfer(info, xfer);
3167 	if (ret) {
3168 		dev_err(dev, "Mbox send fail %d\n", ret);
3169 		goto fail;
3170 	}
3171 
3172 	resp = (struct ti_sci_msg_resp_get_status *)xfer->xfer_buf;
3173 
3174 	if (!ti_sci_is_response_ack(resp)) {
3175 		ret = -ENODEV;
3176 	} else {
3177 		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
3178 		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
3179 		       TI_SCI_ADDR_HIGH_MASK);
3180 		*cfg_flags = resp->config_flags;
3181 		*ctrl_flags = resp->control_flags;
3182 		*sts_flags = resp->status_flags;
3183 	}
3184 
3185 fail:
3186 	ti_sci_put_one_xfer(&info->minfo, xfer);
3187 
3188 	return ret;
3189 }
3190 
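/*
 * Illustrative sketch (editor's example): a remoteproc-style user typically
 * requests the processor, programs its boot configuration, then releases or
 * hands over control:
 *
 *	ret = handle->ops.proc_ops.request(handle, proc_id);
 *	if (!ret)
 *		ret = handle->ops.proc_ops.set_config(handle, proc_id, bv,
 *						      cfg_set, cfg_clear);
 *	...
 *	handle->ops.proc_ops.release(handle, proc_id);
 */
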
3191 /**
3192  * ti_sci_setup_ops() - Setup the operations structures
3193  * @info:	pointer to TI SCI instance info structure
3194  */
3195 static void ti_sci_setup_ops(struct ti_sci_info *info)
3196 {
3197 	struct ti_sci_ops *ops = &info->handle.ops;
3198 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
3199 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
3200 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
3201 	struct ti_sci_pm_ops *pmops = &ops->pm_ops;
3202 	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
3203 	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
3204 	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
3205 	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
3206 	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
3207 	struct ti_sci_proc_ops *pops = &ops->proc_ops;
3208 
3209 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
3210 
3211 	dops->get_device = ti_sci_cmd_get_device;
3212 	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
3213 	dops->idle_device = ti_sci_cmd_idle_device;
3214 	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
3215 	dops->put_device = ti_sci_cmd_put_device;
3216 
3217 	dops->is_valid = ti_sci_cmd_dev_is_valid;
3218 	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
3219 	dops->is_idle = ti_sci_cmd_dev_is_idle;
3220 	dops->is_stop = ti_sci_cmd_dev_is_stop;
3221 	dops->is_on = ti_sci_cmd_dev_is_on;
3222 	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
3223 	dops->set_device_resets = ti_sci_cmd_set_device_resets;
3224 	dops->get_device_resets = ti_sci_cmd_get_device_resets;
3225 
3226 	cops->get_clock = ti_sci_cmd_get_clock;
3227 	cops->idle_clock = ti_sci_cmd_idle_clock;
3228 	cops->put_clock = ti_sci_cmd_put_clock;
3229 	cops->is_auto = ti_sci_cmd_clk_is_auto;
3230 	cops->is_on = ti_sci_cmd_clk_is_on;
3231 	cops->is_off = ti_sci_cmd_clk_is_off;
3232 
3233 	cops->set_parent = ti_sci_cmd_clk_set_parent;
3234 	cops->get_parent = ti_sci_cmd_clk_get_parent;
3235 	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
3236 
3237 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
3238 	cops->set_freq = ti_sci_cmd_clk_set_freq;
3239 	cops->get_freq = ti_sci_cmd_clk_get_freq;
3240 
3241 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3242 		pr_debug("detected DM managed LPM in fw_caps\n");
3243 		pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason;
3244 		pmops->set_device_constraint = ti_sci_cmd_set_device_constraint;
3245 		pmops->set_latency_constraint = ti_sci_cmd_set_latency_constraint;
3246 	}
3247 
3248 	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
3249 	rm_core_ops->get_range_from_shost =
3250 				ti_sci_cmd_get_resource_range_from_shost;
3251 
3252 	iops->set_irq = ti_sci_cmd_set_irq;
3253 	iops->set_event_map = ti_sci_cmd_set_event_map;
3254 	iops->free_irq = ti_sci_cmd_free_irq;
3255 	iops->free_event_map = ti_sci_cmd_free_event_map;
3256 
3257 	rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
3258 
3259 	psilops->pair = ti_sci_cmd_rm_psil_pair;
3260 	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
3261 
3262 	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
3263 	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
3264 	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
3265 
3266 	pops->request = ti_sci_cmd_proc_request;
3267 	pops->release = ti_sci_cmd_proc_release;
3268 	pops->handover = ti_sci_cmd_proc_handover;
3269 	pops->set_config = ti_sci_cmd_proc_set_config;
3270 	pops->set_control = ti_sci_cmd_proc_set_control;
3271 	pops->get_status = ti_sci_cmd_proc_get_status;
3272 }
3273 
3274 /**
3275  * ti_sci_get_handle() - Get the TI SCI handle for a device
3276  * @dev:	Pointer to device for which we want SCI handle
3277  *
3278  * NOTE: The function does not track individual clients of the framework
3279  * and is expected to be maintained by caller of TI SCI protocol library.
3280  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
3281  * Return: pointer to handle if successful, else:
3282  * -EPROBE_DEFER if the instance is not ready
3283  * -ENODEV if the required node handler is missing
3284  * -EINVAL if invalid conditions are encountered.
3285  */
3286 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
3287 {
3288 	struct device_node *ti_sci_np;
3289 	struct ti_sci_handle *handle = NULL;
3290 	struct ti_sci_info *info;
3291 
3292 	if (!dev) {
3293 		pr_err("I need a device pointer\n");
3294 		return ERR_PTR(-EINVAL);
3295 	}
3296 	ti_sci_np = of_get_parent(dev->of_node);
3297 	if (!ti_sci_np) {
3298 		dev_err(dev, "No OF information\n");
3299 		return ERR_PTR(-EINVAL);
3300 	}
3301 
3302 	mutex_lock(&ti_sci_list_mutex);
3303 	list_for_each_entry(info, &ti_sci_list, node) {
3304 		if (ti_sci_np == info->dev->of_node) {
3305 			handle = &info->handle;
3306 			info->users++;
3307 			break;
3308 		}
3309 	}
3310 	mutex_unlock(&ti_sci_list_mutex);
3311 	of_node_put(ti_sci_np);
3312 
3313 	if (!handle)
3314 		return ERR_PTR(-EPROBE_DEFER);
3315 
3316 	return handle;
3317 }
3318 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
3319 
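/*
 * Illustrative sketch (editor's example): a client device sitting under the
 * TISCI node balances get and put:
 *
 *	const struct ti_sci_handle *handle = ti_sci_get_handle(dev);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ti_sci_put_handle(handle);
 */
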
3320 /**
3321  * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
3322  * @handle:	Handle acquired by ti_sci_get_handle
3323  *
3324  * NOTE: The function does not track individual clients of the framework
3325  * and is expected to be maintained by caller of TI SCI protocol library.
3326  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
3327  *
3328  * Return: 0 if successfully released,
3329  * if an error pointer was passed, it returns the error value back,
3330  * if NULL was passed, it returns -EINVAL.
3331  */
3332 int ti_sci_put_handle(const struct ti_sci_handle *handle)
3333 {
3334 	struct ti_sci_info *info;
3335 
3336 	if (IS_ERR(handle))
3337 		return PTR_ERR(handle);
3338 	if (!handle)
3339 		return -EINVAL;
3340 
3341 	info = handle_to_ti_sci_info(handle);
3342 	mutex_lock(&ti_sci_list_mutex);
3343 	if (!WARN_ON(!info->users))
3344 		info->users--;
3345 	mutex_unlock(&ti_sci_list_mutex);
3346 
3347 	return 0;
3348 }
3349 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
3350 
3351 static void devm_ti_sci_release(struct device *dev, void *res)
3352 {
3353 	const struct ti_sci_handle **ptr = res;
3354 	const struct ti_sci_handle *handle = *ptr;
3355 	int ret;
3356 
3357 	ret = ti_sci_put_handle(handle);
3358 	if (ret)
3359 		dev_err(dev, "failed to put handle %d\n", ret);
3360 }
3361 
3362 /**
3363  * devm_ti_sci_get_handle() - Managed get handle
3364  * @dev:	device for which we want the SCI handle.
3365  *
3366  * NOTE: This releases the handle once the device resources are
3367  * no longer needed. The handle MUST NOT be released with ti_sci_put_handle.
3368  * The function does not track individual clients of the framework
3369  * and is expected to be maintained by caller of TI SCI protocol library.
3370  *
3371  * Return: pointer to handle if successful, else corresponding error pointer.
3372  */
3373 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
3374 {
3375 	const struct ti_sci_handle **ptr;
3376 	const struct ti_sci_handle *handle;
3377 
3378 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3379 	if (!ptr)
3380 		return ERR_PTR(-ENOMEM);
3381 	handle = ti_sci_get_handle(dev);
3382 
3383 	if (!IS_ERR(handle)) {
3384 		*ptr = handle;
3385 		devres_add(dev, ptr);
3386 	} else {
3387 		devres_free(ptr);
3388 	}
3389 
3390 	return handle;
3391 }
3392 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
3393 
3394 /**
3395  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
3396  * @np:		device node
3397  * @property:	property name containing phandle on TISCI node
3398  *
3399  * NOTE: The function does not track individual clients of the framework
3400  * and is expected to be maintained by caller of TI SCI protocol library.
3401  * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
3402  * Return: pointer to handle if successful, else:
3403  * -EPROBE_DEFER if the instance is not ready
3404  * -ENODEV if the required node handler is missing
3405  * -EINVAL if invalid conditions are encountered.
3406  */
3407 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
3408 						  const char *property)
3409 {
3410 	struct ti_sci_handle *handle = NULL;
3411 	struct device_node *ti_sci_np;
3412 	struct ti_sci_info *info;
3413 
3414 	if (!np) {
3415 		pr_err("I need a device pointer\n");
3416 		return ERR_PTR(-EINVAL);
3417 	}
3418 
3419 	ti_sci_np = of_parse_phandle(np, property, 0);
3420 	if (!ti_sci_np)
3421 		return ERR_PTR(-ENODEV);
3422 
3423 	mutex_lock(&ti_sci_list_mutex);
3424 	list_for_each_entry(info, &ti_sci_list, node) {
3425 		if (ti_sci_np == info->dev->of_node) {
3426 			handle = &info->handle;
3427 			info->users++;
3428 			break;
3429 		}
3430 	}
3431 	mutex_unlock(&ti_sci_list_mutex);
3432 	of_node_put(ti_sci_np);
3433 
3434 	if (!handle)
3435 		return ERR_PTR(-EPROBE_DEFER);
3436 
3437 	return handle;
3438 }
3439 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
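
/*
 * Illustrative DT usage (node names and the property name are examples
 * only): the client node references the TISCI node through a phandle,
 * which is then resolved by property name:
 *
 *	dmsc: system-controller {
 *		compatible = "ti,k2g-sci";
 *	};
 *
 *	client {
 *		ti,sci = <&dmsc>;
 *	};
 *
 *	handle = ti_sci_get_by_phandle(client_np, "ti,sci");
 */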
3440 
3441 /**
3442  * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
3443  * @dev:	Device pointer requesting TISCI handle
3444  * @property:	property name containing phandle on TISCI node
3445  *
3446  * NOTE: This releases the handle once the device resources are no
3447  * longer needed; the handle MUST NOT be released with ti_sci_put_handle().
3448  * The function does not track individual clients of the framework and is
3449  * expected to be maintained by the caller of the TI SCI protocol library.
3450  *
3451  * Return: pointer to handle if successful, else corresponding error pointer.
3452  */
3453 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
3454 						       const char *property)
3455 {
3456 	const struct ti_sci_handle *handle;
3457 	const struct ti_sci_handle **ptr;
3458 
3459 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3460 	if (!ptr)
3461 		return ERR_PTR(-ENOMEM);
3462 	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
3463 
3464 	if (!IS_ERR(handle)) {
3465 		*ptr = handle;
3466 		devres_add(dev, ptr);
3467 	} else {
3468 		devres_free(ptr);
3469 	}
3470 
3471 	return handle;
3472 }
3473 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
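
/*
 * Illustrative usage (not part of this driver): the managed variant looks
 * up the phandle in the device's own DT node and ties the handle lifetime
 * to the device. With the example property above:
 *
 *	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */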
3474 
3475 /**
3476  * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
3477  * @res:	Pointer to the TISCI resource
3478  *
3479  * Return: a free resource number if one is available, else TI_SCI_RESOURCE_NULL.
3480  */
3481 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3482 {
3483 	unsigned long flags;
3484 	u16 set, free_bit;
3485 
3486 	raw_spin_lock_irqsave(&res->lock, flags);
3487 	for (set = 0; set < res->sets; set++) {
3488 		struct ti_sci_resource_desc *desc = &res->desc[set];
3489 		int res_count = desc->num + desc->num_sec;
3490 
3491 		free_bit = find_first_zero_bit(desc->res_map, res_count);
3492 		if (free_bit != res_count) {
3493 			__set_bit(free_bit, desc->res_map);
3494 			raw_spin_unlock_irqrestore(&res->lock, flags);
3495 
3496 			if (desc->num && free_bit < desc->num)
3497 				return desc->start + free_bit;
3498 			else
3499 				return desc->start_sec + free_bit;
3500 		}
3501 	}
3502 	raw_spin_unlock_irqrestore(&res->lock, flags);
3503 
3504 	return TI_SCI_RESOURCE_NULL;
3505 }
3506 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
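
/*
 * Illustrative usage (not part of this driver): allocation is expected to
 * be paired with ti_sci_release_resource(), and TI_SCI_RESOURCE_NULL
 * signals exhaustion. A minimal sketch, assuming res was built by one of
 * the devm_ti_sci_get_*resource() helpers below:
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;
 *
 * and once the resource is no longer in use:
 *
 *	ti_sci_release_resource(res, id);
 */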
3507 
3508 /**
3509  * ti_sci_release_resource() - Release a resource from TISCI resource.
3510  * @res:	Pointer to the TISCI resource
3511  * @id:		Resource id to be released.
3512  */
3513 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3514 {
3515 	unsigned long flags;
3516 	u16 set;
3517 
3518 	raw_spin_lock_irqsave(&res->lock, flags);
3519 	for (set = 0; set < res->sets; set++) {
3520 		struct ti_sci_resource_desc *desc = &res->desc[set];
3521 
3522 		if (desc->num && desc->start <= id &&
3523 		    (desc->start + desc->num) > id)
3524 			__clear_bit(id - desc->start, desc->res_map);
3525 		else if (desc->num_sec && desc->start_sec <= id &&
3526 			 (desc->start_sec + desc->num_sec) > id)
3527 			__clear_bit(id - desc->start_sec, desc->res_map);
3528 	}
3529 	raw_spin_unlock_irqrestore(&res->lock, flags);
3530 }
3531 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
3532 
3533 /**
3534  * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
3535  * @res:	Pointer to the TISCI resource
3536  *
3537  * Return: Total number of available resources.
3538  */
3539 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3540 {
3541 	u32 set, count = 0;
3542 
3543 	for (set = 0; set < res->sets; set++)
3544 		count += res->desc[set].num + res->desc[set].num_sec;
3545 
3546 	return count;
3547 }
3548 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3549 
3550 /**
3551  * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
3552  * @handle:	TISCI handle
3553  * @dev:	Device pointer to which the resource is assigned
3554  * @dev_id:	TISCI device id to which the resource is assigned
3555  * @sub_types:	Array of resource sub_types assigned to the device
3556  * @sets:	Number of sub_types
3557  *
3558  * Return: Pointer to ti_sci_resource if all went well else appropriate
3559  *	   error pointer.
3560  */
3561 static struct ti_sci_resource *
3562 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
3563 			      struct device *dev, u32 dev_id, u32 *sub_types,
3564 			      u32 sets)
3565 {
3566 	struct ti_sci_resource *res;
3567 	bool valid_set = false;
3568 	int i, ret, res_count;
3569 
3570 	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3571 	if (!res)
3572 		return ERR_PTR(-ENOMEM);
3573 
3574 	res->sets = sets;
3575 	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3576 				 GFP_KERNEL);
3577 	if (!res->desc)
3578 		return ERR_PTR(-ENOMEM);
3579 
3580 	for (i = 0; i < res->sets; i++) {
3581 		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3582 							sub_types[i],
3583 							&res->desc[i]);
3584 		if (ret) {
3585 			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3586 				dev_id, sub_types[i]);
3587 			memset(&res->desc[i], 0, sizeof(res->desc[i]));
3588 			continue;
3589 		}
3590 
3591 		dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
3592 			dev_id, sub_types[i], res->desc[i].start,
3593 			res->desc[i].num, res->desc[i].start_sec,
3594 			res->desc[i].num_sec);
3595 
3596 		valid_set = true;
3597 		res_count = res->desc[i].num + res->desc[i].num_sec;
3598 		res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count,
3599 							  GFP_KERNEL);
3600 		if (!res->desc[i].res_map)
3601 			return ERR_PTR(-ENOMEM);
3602 	}
3603 	raw_spin_lock_init(&res->lock);
3604 
3605 	if (valid_set)
3606 		return res;
3607 
3608 	return ERR_PTR(-EINVAL);
3609 }
3610 
3611 /**
3612  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3613  * @handle:	TISCI handle
3614  * @dev:	Device pointer to which the resource is assigned
3615  * @dev_id:	TISCI device id to which the resource is assigned
3616  * @of_prop:	property name by which the resources are represented
3617  *
3618  * Return: Pointer to ti_sci_resource if all went well else appropriate
3619  *	   error pointer.
3620  */
3621 struct ti_sci_resource *
3622 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3623 			    struct device *dev, u32 dev_id, char *of_prop)
3624 {
3625 	struct ti_sci_resource *res;
3626 	u32 *sub_types;
3627 	int sets;
3628 
3629 	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3630 					       sizeof(u32));
3631 	if (sets < 0) {
3632 		dev_err(dev, "%s resource type ids not available\n", of_prop);
3633 		return ERR_PTR(sets);
3634 	}
3635 
3636 	sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
3637 	if (!sub_types)
3638 		return ERR_PTR(-ENOMEM);
3639 
3640 	of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
3641 	res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
3642 					    sets);
3643 
3644 	kfree(sub_types);
3645 	return res;
3646 }
3647 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
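
/*
 * Illustrative usage (not part of this driver; the property name is only
 * an example): a client reads its assigned resource subtypes from DT and
 * builds the allocator in one call:
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					  "ti,sci-rm-range-girq");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * where the client's DT node carries something like:
 *
 *	ti,sci-rm-range-girq = <0x1>;
 */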
3648 
3649 /**
3650  * devm_ti_sci_get_resource() - Get a resource range assigned to the device
3651  * @handle:	TISCI handle
3652  * @dev:	Device pointer to which the resource is assigned
3653  * @dev_id:	TISCI device id to which the resource is assigned
3654  * @sub_type:	TISCI resource subtype representing the resource.
3655  *
3656  * Return: Pointer to ti_sci_resource if all went well else appropriate
3657  *	   error pointer.
3658  */
3659 struct ti_sci_resource *
3660 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
3661 			 u32 dev_id, u32 sub_type)
3662 {
3663 	return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
3664 }
3665 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
3666 
3667 static int tisci_reboot_handler(struct sys_off_data *data)
3668 {
3669 	struct ti_sci_info *info = data->cb_data;
3670 	const struct ti_sci_handle *handle = &info->handle;
3671 
3672 	ti_sci_cmd_core_reboot(handle);
3673 
3674 	/* Whether the call fails or passes, we should not be here in the first place */
3675 	return NOTIFY_BAD;
3676 }
3677 
3678 static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
3679 {
3680 	/*
3681 	 * Map and validate the target Linux suspend state to TISCI LPM.
3682 	 * Default is to let Device Manager select the low power mode.
3683 	 */
3684 	switch (pm_suspend_target_state) {
3685 	case PM_SUSPEND_MEM:
3686 		if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3687 			/*
3688 			 * For the DM_MANAGED mode the context is reserved for
3689 			 * internal use and can be 0
3690 			 */
3691 			return ti_sci_cmd_prepare_sleep(&info->handle,
3692 							TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED,
3693 							0, 0, 0);
3694 		} else {
3695 			/* DM Managed is not supported by the firmware. */
3696 			dev_err(info->dev, "Suspend to memory is not supported by the firmware\n");
3697 			return -EOPNOTSUPP;
3698 		}
3700 	default:
3701 		/*
3702 		 * Do not fail if we don't have action to take for a
3703 		 * specific suspend mode.
3704 		 */
3705 		return 0;
3706 	}
3707 }
3708 
3709 static int __maybe_unused ti_sci_suspend(struct device *dev)
3710 {
3711 	struct ti_sci_info *info = dev_get_drvdata(dev);
3712 	struct device *cpu_dev, *cpu_dev_max = NULL;
3713 	s32 val, cpu_lat = 0;
3714 	u16 cpu_lat_ms;
3715 	int i, ret;
3716 
3717 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3718 		for_each_possible_cpu(i) {
3719 			cpu_dev = get_cpu_device(i);
3720 			val = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_RESUME_LATENCY);
3721 			if (val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) {
3722 				cpu_lat = max(cpu_lat, val);
3723 				cpu_dev_max = cpu_dev;
3724 			}
3725 		}
3726 		if (cpu_dev_max) {
3727 			/*
3728 			 * PM QoS latency unit is usecs, device manager uses msecs.
3729 			 * Convert to msecs and round down for device manager.
3730 			 */
3731 			cpu_lat_ms = cpu_lat / USEC_PER_MSEC;
3732 			dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__,
3733 				cpu_lat_ms);
3734 			ret = ti_sci_cmd_set_latency_constraint(&info->handle,
3735 								cpu_lat_ms,
3736 								TISCI_MSG_CONSTRAINT_SET);
3737 			if (ret)
3738 				return ret;
3739 		}
3740 	}
3741 
3742 	ret = ti_sci_prepare_system_suspend(info);
3743 	if (ret)
3744 		return ret;
3745 
3746 	return 0;
3747 }
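
/*
 * Illustrative only (not part of this driver): the CPU latency bound
 * consumed above can come from any resume-latency PM QoS request, e.g. a
 * hypothetical driver constraining CPU0 resume latency to 2000 usecs:
 *
 *	struct dev_pm_qos_request req;
 *
 *	dev_pm_qos_add_request(get_cpu_device(0), &req,
 *			       DEV_PM_QOS_RESUME_LATENCY, 2000);
 *
 * which ti_sci_suspend() would then forward to the device manager as a
 * 2 ms constraint (rounded down from usecs).
 */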
3748 
3749 static int __maybe_unused ti_sci_suspend_noirq(struct device *dev)
3750 {
3751 	struct ti_sci_info *info = dev_get_drvdata(dev);
3752 	int ret = 0;
3753 
3754 	ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
3755 	if (ret)
3756 		return ret;
3757 
3758 	return 0;
3759 }
3760 
3761 static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
3762 {
3763 	struct ti_sci_info *info = dev_get_drvdata(dev);
3764 	int ret = 0;
3765 	u32 source;
3766 	u64 time;
3767 	u8 pin;
3768 	u8 mode;
3769 
3770 	ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
3771 	if (ret)
3772 		return ret;
3773 
3774 	ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
3775 	/* Do not fail to resume on error as the wake reason is not critical */
3776 	if (!ret)
3777 		dev_info(dev, "ti_sci: wakeup source:0x%x, pin:0x%x, mode:0x%x\n",
3778 			 source, pin, mode);
3779 
3780 	return 0;
3781 }
3782 
3783 static void __maybe_unused ti_sci_pm_complete(struct device *dev)
3784 {
3785 	struct ti_sci_info *info = dev_get_drvdata(dev);
3786 
3787 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT) {
3788 		if (ti_sci_cmd_lpm_abort(dev))
3789 			dev_err(dev, "LPM clear selection failed.\n");
3790 	}
3791 }
3792 
3793 static const struct dev_pm_ops ti_sci_pm_ops = {
3794 #ifdef CONFIG_PM_SLEEP
3795 	.suspend = ti_sci_suspend,
3796 	.suspend_noirq = ti_sci_suspend_noirq,
3797 	.resume_noirq = ti_sci_resume_noirq,
3798 	.complete = ti_sci_pm_complete,
3799 #endif
3800 };
3801 
3802 /* Description for K2G */
3803 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3804 	.default_host_id = 2,
3805 	/* Conservative duration */
3806 	.max_rx_timeout_ms = 1000,
3807 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3808 	.max_msgs = 20,
3809 	.max_msg_size = 64,
3810 };
3811 
3812 /* Description for AM654 */
3813 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3814 	.default_host_id = 12,
3815 	/* Conservative duration */
3816 	.max_rx_timeout_ms = 10000,
3817 	/* Limited by MBOX_TX_QUEUE_LEN. AM654 can handle up to 128 messages! */
3818 	.max_msgs = 20,
3819 	.max_msg_size = 60,
3820 };
3821 
3822 static const struct of_device_id ti_sci_of_match[] = {
3823 	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
3824 	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
3825 	{ /* Sentinel */ },
3826 };
3827 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
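
/*
 * Illustrative DT node for the controller itself (label, mailbox cells and
 * host ID are examples only); the driver requires "rx"/"tx" mailbox
 * channels and honors an optional "ti,host-id" override:
 *
 *	dmsc: system-controller {
 *		compatible = "ti,k2g-sci";
 *		mbox-names = "rx", "tx";
 *		mboxes = <&msgmgr 5 2>, <&msgmgr 0 0>;
 *		ti,host-id = <2>;
 *	};
 */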
3828 
3829 static int ti_sci_probe(struct platform_device *pdev)
3830 {
3831 	struct device *dev = &pdev->dev;
3832 	const struct ti_sci_desc *desc;
3833 	struct ti_sci_xfer *xfer;
3834 	struct ti_sci_info *info = NULL;
3835 	struct ti_sci_xfers_info *minfo;
3836 	struct mbox_client *cl;
3837 	int ret = -EINVAL;
3838 	int i;
3839 	u32 h_id;
3840 
3841 	desc = device_get_match_data(dev);
3842 
3843 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3844 	if (!info)
3845 		return -ENOMEM;
3846 
3847 	info->dev = dev;
3848 	info->desc = desc;
3849 	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
3850 	/* if the property is not present in DT, use a default from desc */
3851 	if (ret < 0) {
3852 		info->host_id = info->desc->default_host_id;
3853 	} else {
3854 		if (!h_id) {
3855 			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
3856 			info->host_id = info->desc->default_host_id;
3857 		} else {
3858 			info->host_id = h_id;
3859 		}
3860 	}
3861 
3862 	INIT_LIST_HEAD(&info->node);
3863 	minfo = &info->minfo;
3864 
3865 	/*
3866 	 * Pre-allocate messages. NEVER allocate more than what we can
3867 	 * identify in hdr.seq (a u8, i.e. at most 256 distinct messages);
3868 	 * if the SoC description gets this wrong, force a fix.
3869 	 */
3870 	if (WARN_ON(desc->max_msgs >=
3871 		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
3872 		return -EINVAL;
3873 
3874 	minfo->xfer_block = devm_kcalloc(dev,
3875 					 desc->max_msgs,
3876 					 sizeof(*minfo->xfer_block),
3877 					 GFP_KERNEL);
3878 	if (!minfo->xfer_block)
3879 		return -ENOMEM;
3880 
3881 	minfo->xfer_alloc_table = devm_bitmap_zalloc(dev,
3882 						     desc->max_msgs,
3883 						     GFP_KERNEL);
3884 	if (!minfo->xfer_alloc_table)
3885 		return -ENOMEM;
3886 
3887 	/* Pre-initialize the buffer pointer to pre-allocated buffers */
3888 	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
3889 		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
3890 					      GFP_KERNEL);
3891 		if (!xfer->xfer_buf)
3892 			return -ENOMEM;
3893 
3894 		xfer->tx_message.buf = xfer->xfer_buf;
3895 		init_completion(&xfer->done);
3896 	}
3897 
3898 	ret = ti_sci_debugfs_create(pdev, info);
3899 	if (ret)
3900 		dev_warn(dev, "Failed to create debug file\n");
3901 
3902 	platform_set_drvdata(pdev, info);
3903 
3904 	cl = &info->cl;
3905 	cl->dev = dev;
3906 	cl->tx_block = false;
3907 	cl->rx_callback = ti_sci_rx_callback;
3908 	cl->knows_txdone = true;
3909 
3910 	spin_lock_init(&minfo->xfer_lock);
3911 	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
3912 
3913 	info->chan_rx = mbox_request_channel_byname(cl, "rx");
3914 	if (IS_ERR(info->chan_rx)) {
3915 		ret = PTR_ERR(info->chan_rx);
3916 		goto out;
3917 	}
3918 
3919 	info->chan_tx = mbox_request_channel_byname(cl, "tx");
3920 	if (IS_ERR(info->chan_tx)) {
3921 		ret = PTR_ERR(info->chan_tx);
3922 		goto out;
3923 	}
3924 	ret = ti_sci_cmd_get_revision(info);
3925 	if (ret) {
3926 		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
3927 		goto out;
3928 	}
3929 
3930 	ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
3931 	dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n",
3932 		info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
3933 		info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
3934 		info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
3935 		info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : ""
3936 	);
3937 
3938 	ti_sci_setup_ops(info);
3939 
3940 	ret = devm_register_restart_handler(dev, tisci_reboot_handler, info);
3941 	if (ret) {
3942 		dev_err(dev, "reboot registration fail(%d)\n", ret);
3943 		goto out;
3944 	}
3945 
3946 	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
3947 		 info->handle.version.abi_major, info->handle.version.abi_minor,
3948 		 info->handle.version.firmware_revision,
3949 		 info->handle.version.firmware_description);
3950 
3951 	mutex_lock(&ti_sci_list_mutex);
3952 	list_add_tail(&info->node, &ti_sci_list);
3953 	mutex_unlock(&ti_sci_list_mutex);
3954 
3955 	return of_platform_populate(dev->of_node, NULL, NULL, dev);
3956 out:
3957 	if (!IS_ERR(info->chan_tx))
3958 		mbox_free_channel(info->chan_tx);
3959 	if (!IS_ERR(info->chan_rx))
3960 		mbox_free_channel(info->chan_rx);
3961 	debugfs_remove(info->d);
3962 	return ret;
3963 }
3964 
3965 static struct platform_driver ti_sci_driver = {
3966 	.probe = ti_sci_probe,
3967 	.driver = {
3968 		   .name = "ti-sci",
3969 		   .of_match_table = ti_sci_of_match,
3970 		   .suppress_bind_attrs = true,
3971 		   .pm = &ti_sci_pm_ops,
3972 	},
3973 };
3974 module_platform_driver(ti_sci_driver);
3975 
3976 MODULE_LICENSE("GPL v2");
3977 MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
3978 MODULE_AUTHOR("Nishanth Menon");
3979 MODULE_ALIAS("platform:ti-sci");
3980