xref: /linux/drivers/firmware/ti_sci.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments System Control Interface Protocol Driver
4  *
5  * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
6  *	Nishanth Menon
7  */
8 
9 #define pr_fmt(fmt) "%s: " fmt, __func__
10 
11 #include <linux/bitmap.h>
12 #include <linux/cpu.h>
13 #include <linux/debugfs.h>
14 #include <linux/export.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/mailbox_client.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_platform.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_qos.h>
24 #include <linux/property.h>
25 #include <linux/semaphore.h>
26 #include <linux/slab.h>
27 #include <linux/soc/ti/ti-msgmgr.h>
28 #include <linux/soc/ti/ti_sci_protocol.h>
29 #include <linux/suspend.h>
30 #include <linux/sys_soc.h>
31 #include <linux/reboot.h>
32 
33 #include "ti_sci.h"
34 
35 /* List of all TI SCI devices active in system */
36 static LIST_HEAD(ti_sci_list);
37 /* Protection for the entire list */
38 static DEFINE_MUTEX(ti_sci_list_mutex);
39 
40 /**
41  * struct ti_sci_xfer - Structure representing a message flow
42  * @tx_message:	Transmit message
43  * @rx_len:	Receive message length
44  * @xfer_buf:	Preallocated buffer to store receive message
45  *		Since we work with a request-ACK protocol, we can
46  *		reuse the same buffer for the rx path as we
47  *		use for the tx path.
48  * @done:	completion event
49  */
50 struct ti_sci_xfer {
51 	struct ti_msgmgr_message tx_message;
52 	u8 rx_len;
53 	u8 *xfer_buf;
54 	struct completion done;
55 };
56 
57 /**
58  * struct ti_sci_xfers_info - Structure to manage transfer information
59  * @sem_xfer_count:	Counting semaphore for managing max simultaneous
60  *			messages.
61  * @xfer_block:		Preallocated Message array
62  * @xfer_alloc_table:	Bitmap table for allocated messages.
63  *			Index of this bitmap table is also used for message
64  *			sequence identifier.
65  * @xfer_lock:		Protection for message allocation
66  */
67 struct ti_sci_xfers_info {
68 	struct semaphore sem_xfer_count;
69 	struct ti_sci_xfer *xfer_block;
70 	unsigned long *xfer_alloc_table;
71 	/* protect transfer allocation */
72 	spinlock_t xfer_lock;
73 };
74 
75 /**
76  * struct ti_sci_desc - Description of SoC integration
77  * @default_host_id:	Host identifier representing the compute entity
78  * @max_rx_timeout_ms:	Timeout for communication with SoC (in milliseconds)
79  * @max_msgs: Maximum number of messages that can be pending
80  *		  simultaneously in the system
81  * @max_msg_size: Maximum size of data per message that can be handled.
82  */
83 struct ti_sci_desc {
84 	u8 default_host_id;
85 	int max_rx_timeout_ms;
86 	int max_msgs;
87 	int max_msg_size;
88 };
89 
90 /**
91  * struct ti_sci_info - Structure representing a TI SCI instance
92  * @dev:	Device pointer
93  * @desc:	SoC description for this instance
94  * @d:		Debugfs file entry
95  * @debug_region: Memory region where the debug messages are available
96  * @debug_region_size: Debug region size
97  * @debug_buffer: Buffer allocated to copy debug messages.
98  * @handle:	Instance of TI SCI handle to send to clients.
99  * @cl:		Mailbox Client
100  * @chan_tx:	Transmit mailbox channel
101  * @chan_rx:	Receive mailbox channel
102  * @minfo:	Message info
103  * @node:	list head
104  * @host_id:	Host ID
105  * @fw_caps:	FW/SoC low power capabilities
106  * @users:	Number of users of this instance
107  */
108 struct ti_sci_info {
109 	struct device *dev;
110 	const struct ti_sci_desc *desc;
111 	struct dentry *d;
112 	void __iomem *debug_region;
113 	char *debug_buffer;
114 	size_t debug_region_size;
115 	struct ti_sci_handle handle;
116 	struct mbox_client cl;
117 	struct mbox_chan *chan_tx;
118 	struct mbox_chan *chan_rx;
119 	struct ti_sci_xfers_info minfo;
120 	struct list_head node;
121 	u8 host_id;
122 	u64 fw_caps;
123 	/* protected by ti_sci_list_mutex */
124 	int users;
125 };
126 
127 #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
128 #define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
129 
130 #ifdef CONFIG_DEBUG_FS
131 
132 /**
133  * ti_sci_debug_show() - Helper to dump the debug log
134  * @s:	sequence file pointer
135  * @unused:	unused.
136  *
137  * Return: 0
138  */
139 static int ti_sci_debug_show(struct seq_file *s, void *unused)
140 {
141 	struct ti_sci_info *info = s->private;
142 
143 	memcpy_fromio(info->debug_buffer, info->debug_region,
144 		      info->debug_region_size);
145 	/*
146 	 * We don't trust firmware to NUL-terminate the buffer (hence
147 	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
148 	 * specific data format for debug messages, we just present the data
149 	 * in the buffer as is - we expect the messages to be self-explanatory.
150 	 */
151 	seq_puts(s, info->debug_buffer);
152 	return 0;
153 }
154 
155 /* Provide the log file operations interface */
156 DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
157 
158 /**
159  * ti_sci_debugfs_create() - Create log debug file
160  * @pdev:	platform device pointer
161  * @info:	Pointer to SCI entity information
162  *
163  * Return: 0 if all went fine, else corresponding error.
164  */
165 static int ti_sci_debugfs_create(struct platform_device *pdev,
166 				 struct ti_sci_info *info)
167 {
168 	struct device *dev = &pdev->dev;
169 	struct resource *res;
170 	char debug_name[50];
171 
172 	/* Debug region is optional */
173 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
174 					   "debug_messages");
175 	info->debug_region = devm_ioremap_resource(dev, res);
176 	if (IS_ERR(info->debug_region))
177 		return 0;
178 	info->debug_region_size = resource_size(res);
179 
180 	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
181 					  sizeof(char), GFP_KERNEL);
182 	if (!info->debug_buffer)
183 		return -ENOMEM;
184 	/* Set up NUL termination */
185 	info->debug_buffer[info->debug_region_size] = 0;
186 
187 	snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
188 		 dev_name(dev));
189 	info->d = debugfs_create_file(debug_name, 0444, NULL, info,
190 				      &ti_sci_debug_fops);
191 	if (IS_ERR(info->d))
192 		return PTR_ERR(info->d);
193 
194 	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
195 		info->debug_region, info->debug_region_size, res);
196 	return 0;
197 }
198 
199 #else /* CONFIG_DEBUG_FS */
200 static inline int ti_sci_debugfs_create(struct platform_device *dev,
201 					struct ti_sci_info *info)
202 {
203 	return 0;
204 }
205 
206 static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
207 					  struct ti_sci_info *info)
208 {
209 }
210 #endif /* CONFIG_DEBUG_FS */
211 
212 /**
213  * ti_sci_dump_header_dbg() - Helper to dump a message header.
214  * @dev:	Device pointer corresponding to the SCI entity
215  * @hdr:	pointer to header.
216  */
217 static inline void ti_sci_dump_header_dbg(struct device *dev,
218 					  struct ti_sci_msg_hdr *hdr)
219 {
220 	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
221 		hdr->type, hdr->host, hdr->seq, hdr->flags);
222 }
223 
224 /**
225  * ti_sci_rx_callback() - mailbox client callback for receive messages
226  * @cl:	client pointer
227  * @m:	mailbox message
228  *
229  * Matches one received message to the appropriate transfer information
230  * and signals completion of the transfer.
231  *
232  * NOTE: This function will be invoked in IRQ context, hence should be
233  * as efficient as possible.
234  */
235 static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
236 {
237 	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
238 	struct device *dev = info->dev;
239 	struct ti_sci_xfers_info *minfo = &info->minfo;
240 	struct ti_msgmgr_message *mbox_msg = m;
241 	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
242 	struct ti_sci_xfer *xfer;
243 	u8 xfer_id;
244 
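	/*
	 * The firmware echoes back the sequence number from our request;
	 * it doubles as the index of this xfer's slot in xfer_block.
	 */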
245 	xfer_id = hdr->seq;
246 
247 	/*
248 	 * Are we even expecting this?
249 	 * NOTE: barriers were implicit in locks used for modifying the bitmap
250 	 */
251 	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
252 		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
253 		return;
254 	}
255 
256 	xfer = &minfo->xfer_block[xfer_id];
257 
258 	/* Is the message of valid length? */
259 	if (mbox_msg->len > info->desc->max_msg_size) {
260 		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
261 			mbox_msg->len, info->desc->max_msg_size);
262 		ti_sci_dump_header_dbg(dev, hdr);
263 		return;
264 	}
265 	if (mbox_msg->len < xfer->rx_len) {
266 		dev_err(dev, "Recv xfer %zu < expected %d length\n",
267 			mbox_msg->len, xfer->rx_len);
268 		ti_sci_dump_header_dbg(dev, hdr);
269 		return;
270 	}
271 
272 	ti_sci_dump_header_dbg(dev, hdr);
273 	/* Take a copy to the rx buffer.. */
274 	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
275 	complete(&xfer->done);
276 }
277 
278 /**
279  * ti_sci_get_one_xfer() - Allocate one message
280  * @info:	Pointer to SCI entity information
281  * @msg_type:	Message type
282  * @msg_flags:	Flag to set for the message
283  * @tx_message_size: transmit message size
284  * @rx_message_size: receive message size
285  *
286  * Helper function which is used by various command functions that are
287  * exposed to clients of this driver for allocating a message traffic event.
288  *
289  * This function can sleep depending on pending requests already in the system
290  * for the SCI entity. Further, this also holds a spinlock to maintain integrity
291  * of internal data structures.
292  *
293  * Return: Valid xfer pointer if all went fine, else corresponding ERR_PTR.
294  */
295 static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
296 					       u16 msg_type, u32 msg_flags,
297 					       size_t tx_message_size,
298 					       size_t rx_message_size)
299 {
300 	struct ti_sci_xfers_info *minfo = &info->minfo;
301 	struct ti_sci_xfer *xfer;
302 	struct ti_sci_msg_hdr *hdr;
303 	unsigned long flags;
304 	unsigned long bit_pos;
305 	u8 xfer_id;
306 	int ret;
307 	int timeout;
308 
309 	/* Ensure we have sane transfer sizes */
310 	if (rx_message_size > info->desc->max_msg_size ||
311 	    tx_message_size > info->desc->max_msg_size ||
312 	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
313 		return ERR_PTR(-ERANGE);
314 
315 	/*
316 	 * Ensure we have only a controlled number of pending messages.
317 	 * Ideally, we might just have to wait for a single message; be
318 	 * conservative and wait 5 times that.
319 	 */
320 	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
321 	ret = down_timeout(&minfo->sem_xfer_count, timeout);
322 	if (ret < 0)
323 		return ERR_PTR(ret);
324 
325 	/* Keep the locked section as small as possible */
326 	spin_lock_irqsave(&minfo->xfer_lock, flags);
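	/*
	 * The counting semaphore taken above guarantees at least one free
	 * slot at this point (it is sized to max_msgs at probe time), so
	 * find_first_zero_bit() below always finds a bit < max_msgs.
	 */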
327 	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
328 				      info->desc->max_msgs);
329 	set_bit(bit_pos, minfo->xfer_alloc_table);
330 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
331 
332 	/*
333 	 * We already ensured in probe that the max number of messages
334 	 * fits in hdr.seq - NOTE: this improves access latencies
335 	 * to predictable O(1) access, BUT, it opens us to risk if the
336 	 * remote misbehaves with corrupted message sequence responses.
337 	 * If that happens, we are going to be messed up anyway.
338 	 */
339 	xfer_id = (u8)bit_pos;
340 
341 	xfer = &minfo->xfer_block[xfer_id];
342 
343 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
344 	xfer->tx_message.len = tx_message_size;
345 	xfer->tx_message.chan_rx = info->chan_rx;
346 	xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
347 	xfer->rx_len = (u8)rx_message_size;
348 
349 	reinit_completion(&xfer->done);
350 
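	/* Our slot index becomes the sequence number; the rx callback relies on this */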
351 	hdr->seq = xfer_id;
352 	hdr->type = msg_type;
353 	hdr->host = info->host_id;
354 	hdr->flags = msg_flags;
355 
356 	return xfer;
357 }
358 
359 /**
360  * ti_sci_put_one_xfer() - Release a message
361  * @minfo:	transfer info pointer
362  * @xfer:	message that was reserved by ti_sci_get_one_xfer
363  *
364  * This holds a spinlock to maintain integrity of internal data structures.
365  */
366 static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
367 				struct ti_sci_xfer *xfer)
368 {
369 	unsigned long flags;
370 	struct ti_sci_msg_hdr *hdr;
371 	u8 xfer_id;
372 
373 	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
374 	xfer_id = hdr->seq;
375 
376 	/*
377 	 * Keep the locked section as small as possible
378 	 * NOTE: we might escape with smp_mb and no lock here..
379 	 * but just be conservative and symmetric.
380 	 */
381 	spin_lock_irqsave(&minfo->xfer_lock, flags);
382 	clear_bit(xfer_id, minfo->xfer_alloc_table);
383 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
384 
385 	/* Increment the count for the next user to get through */
386 	up(&minfo->sem_xfer_count);
387 }
388 
389 /**
390  * ti_sci_do_xfer() - Do one transfer
391  * @info:	Pointer to SCI entity information
392  * @xfer:	Transfer to initiate and wait for response
393  *
394  * Return: -ETIMEDOUT in case of no response; the corresponding
395  *	   error in case of a transmit error; else, if all goes
396  *	   well, 0.
397  */
398 static inline int ti_sci_do_xfer(struct ti_sci_info *info,
399 				 struct ti_sci_xfer *xfer)
400 {
401 	int ret;
402 	int timeout;
403 	struct device *dev = info->dev;
404 	bool done_state = true;
405 
406 	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
407 	if (ret < 0)
408 		return ret;
409 
410 	ret = 0;
411 
412 	if (system_state <= SYSTEM_RUNNING) {
413 		/* And we wait for the response. */
414 		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
415 		if (!wait_for_completion_timeout(&xfer->done, timeout))
416 			ret = -ETIMEDOUT;
417 	} else {
418 		/*
419 		 * If we are !running, we cannot use wait_for_completion_timeout
420 		 * during noirq phase, so we must manually poll the completion.
421 		 */
422 		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
423 					       done_state, 1,
424 					       info->desc->max_rx_timeout_ms * 1000,
425 					       false, &xfer->done);
426 	}
427 
428 	if (ret == -ETIMEDOUT)
429 		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
430 			(void *)_RET_IP_);
431 
432 	/*
433 	 * NOTE: we might prefer not to need the mailbox ticker to manage the
434 	 * transfer queueing since the protocol layer queues things by itself.
435 	 * Unfortunately, we have to kick the mailbox framework after we have
436 	 * received our message.
437 	 */
438 	mbox_client_txdone(info->chan_tx, ret);
439 
440 	return ret;
441 }
442 
443 /**
444  * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
445  * @info:	Pointer to SCI entity information
446  *
447  * Updates the SCI information in the internal data structure.
448  *
449  * Return: 0 if all went fine, else return appropriate error.
450  */
451 static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
452 {
453 	struct device *dev = info->dev;
454 	struct ti_sci_handle *handle = &info->handle;
455 	struct ti_sci_version_info *ver = &handle->version;
456 	struct ti_sci_msg_resp_version *rev_info;
457 	struct ti_sci_xfer *xfer;
458 	int ret;
459 
460 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
461 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
462 				   sizeof(struct ti_sci_msg_hdr),
463 				   sizeof(*rev_info));
464 	if (IS_ERR(xfer)) {
465 		ret = PTR_ERR(xfer);
466 		dev_err(dev, "Message alloc failed(%d)\n", ret);
467 		return ret;
468 	}
469 
470 	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
471 
472 	ret = ti_sci_do_xfer(info, xfer);
473 	if (ret) {
474 		dev_err(dev, "Mbox send fail %d\n", ret);
475 		goto fail;
476 	}
477 
478 	ver->abi_major = rev_info->abi_major;
479 	ver->abi_minor = rev_info->abi_minor;
480 	ver->firmware_revision = rev_info->firmware_revision;
481 	strscpy(ver->firmware_description, rev_info->firmware_description,
482 		sizeof(ver->firmware_description));
483 
484 fail:
485 	ti_sci_put_one_xfer(&info->minfo, xfer);
486 	return ret;
487 }
488 
489 /**
490  * ti_sci_is_response_ack() - Generic ACK/NACK message check
491  * @r:	pointer to response buffer
492  *
493  * Return: true if the response was an ACK, else returns false.
494  */
495 static inline bool ti_sci_is_response_ack(void *r)
496 {
497 	struct ti_sci_msg_hdr *hdr = r;
498 
499 	return !!(hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK);
500 }
501 
502 /**
503  * ti_sci_set_device_state() - Set device state helper
504  * @handle:	pointer to TI SCI handle
505  * @id:		Device identifier
506  * @flags:	flags to setup for the device
507  * @state:	State to move the device to
508  *
509  * Return: 0 if all went well, else returns appropriate error value.
510  */
511 static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
512 				   u32 id, u32 flags, u8 state)
513 {
514 	struct ti_sci_info *info;
515 	struct ti_sci_msg_req_set_device_state *req;
516 	struct ti_sci_msg_hdr *resp;
517 	struct ti_sci_xfer *xfer;
518 	struct device *dev;
519 	int ret = 0;
520 
521 	if (IS_ERR(handle))
522 		return PTR_ERR(handle);
523 	if (!handle)
524 		return -EINVAL;
525 
526 	info = handle_to_ti_sci_info(handle);
527 	dev = info->dev;
528 
529 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
530 				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
531 				   sizeof(*req), sizeof(*resp));
532 	if (IS_ERR(xfer)) {
533 		ret = PTR_ERR(xfer);
534 		dev_err(dev, "Message alloc failed(%d)\n", ret);
535 		return ret;
536 	}
537 	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
538 	req->id = id;
539 	req->state = state;
540 
541 	ret = ti_sci_do_xfer(info, xfer);
542 	if (ret) {
543 		dev_err(dev, "Mbox send fail %d\n", ret);
544 		goto fail;
545 	}
546 
547 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
548 
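	/* A NACKed response is reported as -ENODEV */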
549 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
550 
551 fail:
552 	ti_sci_put_one_xfer(&info->minfo, xfer);
553 
554 	return ret;
555 }
556 
557 /**
558  * ti_sci_get_device_state() - Get device state helper
559  * @handle:	Handle to the device
560  * @id:		Device Identifier
561  * @clcnt:	Pointer to Context Loss Count
562  * @resets:	pointer to resets
563  * @p_state:	pointer to p_state
564  * @c_state:	pointer to c_state
565  *
566  * Return: 0 if all went fine, else return appropriate error.
567  */
568 static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
569 				   u32 id,  u32 *clcnt,  u32 *resets,
570 				    u8 *p_state,  u8 *c_state)
571 {
572 	struct ti_sci_info *info;
573 	struct ti_sci_msg_req_get_device_state *req;
574 	struct ti_sci_msg_resp_get_device_state *resp;
575 	struct ti_sci_xfer *xfer;
576 	struct device *dev;
577 	int ret = 0;
578 
579 	if (IS_ERR(handle))
580 		return PTR_ERR(handle);
581 	if (!handle)
582 		return -EINVAL;
583 
584 	if (!clcnt && !resets && !p_state && !c_state)
585 		return -EINVAL;
586 
587 	info = handle_to_ti_sci_info(handle);
588 	dev = info->dev;
589 
590 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
591 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
592 				   sizeof(*req), sizeof(*resp));
593 	if (IS_ERR(xfer)) {
594 		ret = PTR_ERR(xfer);
595 		dev_err(dev, "Message alloc failed(%d)\n", ret);
596 		return ret;
597 	}
598 	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
599 	req->id = id;
600 
601 	ret = ti_sci_do_xfer(info, xfer);
602 	if (ret) {
603 		dev_err(dev, "Mbox send fail %d\n", ret);
604 		goto fail;
605 	}
606 
607 	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
608 	if (!ti_sci_is_response_ack(resp)) {
609 		ret = -ENODEV;
610 		goto fail;
611 	}
612 
613 	if (clcnt)
614 		*clcnt = resp->context_loss_count;
615 	if (resets)
616 		*resets = resp->resets;
617 	if (p_state)
618 		*p_state = resp->programmed_state;
619 	if (c_state)
620 		*c_state = resp->current_state;
621 fail:
622 	ti_sci_put_one_xfer(&info->minfo, xfer);
623 
624 	return ret;
625 }
626 
627 /**
628  * ti_sci_cmd_get_device() - command to request a device managed by TISCI
629  *			     that can be shared with other hosts.
630  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
631  * @id:		Device Identifier
632  *
633  * Request the device - NOTE: the client MUST maintain integrity of
634  * usage count by balancing get_device with put_device. No refcounting is
635  * managed by the driver for that purpose.
636  *
637  * Return: 0 if all went fine, else return appropriate error.
638  */
639 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
640 {
641 	return ti_sci_set_device_state(handle, id, 0,
642 				       MSG_DEVICE_SW_STATE_ON);
643 }
644 
645 /**
646  * ti_sci_cmd_get_device_exclusive() - command to request a device managed by
647  *				       TISCI that is exclusively owned by the
648  *				       requesting host.
649  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
650  * @id:		Device Identifier
651  *
652  * Request the device - NOTE: the client MUST maintain integrity of
653  * usage count by balancing get_device with put_device. No refcounting is
654  * managed by the driver for that purpose.
655  *
656  * Return: 0 if all went fine, else return appropriate error.
657  */
658 static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
659 					   u32 id)
660 {
661 	return ti_sci_set_device_state(handle, id,
662 				       MSG_FLAG_DEVICE_EXCLUSIVE,
663 				       MSG_DEVICE_SW_STATE_ON);
664 }
665 
666 /**
667  * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
668  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
669  * @id:		Device Identifier
670  *
671  * Request to idle the device - NOTE: the client MUST maintain integrity of
672  * usage count by balancing get_device with put_device. No refcounting is
673  * managed by the driver for that purpose.
674  *
675  * Return: 0 if all went fine, else return appropriate error.
676  */
677 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
678 {
679 	return ti_sci_set_device_state(handle, id, 0,
680 				       MSG_DEVICE_SW_STATE_RETENTION);
681 }
682 
683 /**
684  * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
685  *					TISCI that is exclusively owned by
686  *					requesting host.
687  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
688  * @id:		Device Identifier
689  *
690  * Request to idle the device - NOTE: the client MUST maintain integrity of
691  * usage count by balancing get_device with put_device. No refcounting is
692  * managed by the driver for that purpose.
693  *
694  * Return: 0 if all went fine, else return appropriate error.
695  */
696 static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
697 					    u32 id)
698 {
699 	return ti_sci_set_device_state(handle, id,
700 				       MSG_FLAG_DEVICE_EXCLUSIVE,
701 				       MSG_DEVICE_SW_STATE_RETENTION);
702 }
703 
704 /**
705  * ti_sci_cmd_put_device() - command to release a device managed by TISCI
706  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
707  * @id:		Device Identifier
708  *
709  * Release the device - NOTE: the client MUST maintain integrity of
710  * usage count by balancing get_device with put_device. No refcounting is
711  * managed by the driver for that purpose.
712  *
713  * Return: 0 if all went fine, else return appropriate error.
714  */
715 static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
716 {
717 	return ti_sci_set_device_state(handle, id,
718 				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
719 }
720 
721 /**
722  * ti_sci_cmd_dev_is_valid() - Is the device valid
723  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
724  * @id:		Device Identifier
725  *
726  * Return: 0 if all went fine and the device ID is valid, else return
727  * appropriate error.
728  */
729 static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
730 {
731 	u8 unused;
732 
733 	/* check the device state which will also tell us if the ID is valid */
734 	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
735 }
736 
737 /**
738  * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
739  * @handle:	Pointer to TISCI handle
740  * @id:		Device Identifier
741  * @count:	Pointer to Context Loss counter to populate
742  *
743  * Return: 0 if all went fine, else return appropriate error.
744  */
745 static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
746 				    u32 *count)
747 {
748 	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
749 }
750 
751 /**
752  * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
753  * @handle:	Pointer to TISCI handle
754  * @id:		Device Identifier
755  * @r_state:	true if requested to be idle
756  *
757  * Return: 0 if all went fine, else return appropriate error.
758  */
759 static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
760 				  bool *r_state)
761 {
762 	int ret;
763 	u8 state;
764 
765 	if (!r_state)
766 		return -EINVAL;
767 
768 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
769 	if (ret)
770 		return ret;
771 
772 	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
773 
774 	return 0;
775 }
776 
777 /**
778  * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
779  * @handle:	Pointer to TISCI handle
780  * @id:		Device Identifier
781  * @r_state:	true if requested to be stopped
782  * @curr_state:	true if currently stopped.
783  *
784  * Return: 0 if all went fine, else return appropriate error.
785  */
786 static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
787 				  bool *r_state,  bool *curr_state)
788 {
789 	int ret;
790 	u8 p_state, c_state;
791 
792 	if (!r_state && !curr_state)
793 		return -EINVAL;
794 
795 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &p_state,
796 				      &c_state);
797 	if (ret)
798 		return ret;
799 
800 	if (r_state)
801 		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
802 	if (curr_state)
803 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
804 
805 	return 0;
806 }
807 
808 /**
809  * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
810  * @handle:	Pointer to TISCI handle
811  * @id:		Device Identifier
812  * @r_state:	true if requested to be ON
813  * @curr_state:	true if currently ON and active
814  *
815  * Return: 0 if all went fine, else return appropriate error.
816  */
817 static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
818 				bool *r_state,  bool *curr_state)
819 {
820 	int ret;
821 	u8 p_state, c_state;
822 
823 	if (!r_state && !curr_state)
824 		return -EINVAL;
825 
826 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &p_state,
827 				      &c_state);
828 	if (ret)
829 		return ret;
830 
831 	if (r_state)
832 		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
833 	if (curr_state)
834 		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
835 
836 	return 0;
837 }
838 
839 /**
840  * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
841  * @handle:	Pointer to TISCI handle
842  * @id:		Device Identifier
843  * @curr_state:	true if currently transitioning.
844  *
845  * Return: 0 if all went fine, else return appropriate error.
846  */
847 static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
848 				   bool *curr_state)
849 {
850 	int ret;
851 	u8 state;
852 
853 	if (!curr_state)
854 		return -EINVAL;
855 
856 	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
857 	if (ret)
858 		return ret;
859 
860 	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
861 
862 	return 0;
863 }
864 
865 /**
866  * ti_sci_cmd_set_device_resets() - command to set resets for device managed
867  *				    by TISCI
868  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
869  * @id:		Device Identifier
870  * @reset_state: Device specific reset bit field
871  *
872  * Return: 0 if all went fine, else return appropriate error.
873  */
874 static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
875 					u32 id, u32 reset_state)
876 {
877 	struct ti_sci_info *info;
878 	struct ti_sci_msg_req_set_device_resets *req;
879 	struct ti_sci_msg_hdr *resp;
880 	struct ti_sci_xfer *xfer;
881 	struct device *dev;
882 	int ret = 0;
883 
884 	if (IS_ERR(handle))
885 		return PTR_ERR(handle);
886 	if (!handle)
887 		return -EINVAL;
888 
889 	info = handle_to_ti_sci_info(handle);
890 	dev = info->dev;
891 
892 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
893 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
894 				   sizeof(*req), sizeof(*resp));
895 	if (IS_ERR(xfer)) {
896 		ret = PTR_ERR(xfer);
897 		dev_err(dev, "Message alloc failed(%d)\n", ret);
898 		return ret;
899 	}
900 	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
901 	req->id = id;
902 	req->resets = reset_state;
903 
904 	ret = ti_sci_do_xfer(info, xfer);
905 	if (ret) {
906 		dev_err(dev, "Mbox send fail %d\n", ret);
907 		goto fail;
908 	}
909 
910 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
911 
912 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
913 
914 fail:
915 	ti_sci_put_one_xfer(&info->minfo, xfer);
916 
917 	return ret;
918 }
919 
920 /**
921  * ti_sci_cmd_get_device_resets() - Get reset state for device managed
922  *				    by TISCI
923  * @handle:		Pointer to TISCI handle
924  * @id:			Device Identifier
925  * @reset_state:	Pointer to reset state to populate
926  *
927  * Return: 0 if all went fine, else return appropriate error.
928  */
929 static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
930 					u32 id, u32 *reset_state)
931 {
932 	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
933 				       NULL);
934 }
935 
936 /**
937  * ti_sci_set_clock_state() - Set clock state helper
938  * @handle:	pointer to TI SCI handle
939  * @dev_id:	Device identifier this request is for
940  * @clk_id:	Clock identifier for the device for this request.
941  *		Each device has its own set of clock inputs. This indexes
942  *		which clock input to modify.
943  * @flags:	Header flags as needed
944  * @state:	State to request for the clock.
945  *
946  * Return: 0 if all went well, else returns appropriate error value.
947  */
948 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
949 				  u32 dev_id, u32 clk_id,
950 				  u32 flags, u8 state)
951 {
952 	struct ti_sci_info *info;
953 	struct ti_sci_msg_req_set_clock_state *req;
954 	struct ti_sci_msg_hdr *resp;
955 	struct ti_sci_xfer *xfer;
956 	struct device *dev;
957 	int ret = 0;
958 
959 	if (IS_ERR(handle))
960 		return PTR_ERR(handle);
961 	if (!handle)
962 		return -EINVAL;
963 
964 	info = handle_to_ti_sci_info(handle);
965 	dev = info->dev;
966 
967 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
968 				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
969 				   sizeof(*req), sizeof(*resp));
970 	if (IS_ERR(xfer)) {
971 		ret = PTR_ERR(xfer);
972 		dev_err(dev, "Message alloc failed(%d)\n", ret);
973 		return ret;
974 	}
975 	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
976 	req->dev_id = dev_id;
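	/*
	 * IDs below 255 fit the legacy u8 clk_id field; 255 is a sentinel
	 * telling firmware to take the ID from the 32-bit clk_id_32
	 * extension field instead (used on SoCs with more than 255 clocks).
	 */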
977 	if (clk_id < 255) {
978 		req->clk_id = clk_id;
979 	} else {
980 		req->clk_id = 255;
981 		req->clk_id_32 = clk_id;
982 	}
983 	req->request_state = state;
984 
985 	ret = ti_sci_do_xfer(info, xfer);
986 	if (ret) {
987 		dev_err(dev, "Mbox send fail %d\n", ret);
988 		goto fail;
989 	}
990 
991 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
992 
993 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
994 
995 fail:
996 	ti_sci_put_one_xfer(&info->minfo, xfer);
997 
998 	return ret;
999 }
1000 
1001 /**
1002  * ti_sci_cmd_get_clock_state() - Get clock state helper
1003  * @handle:	pointer to TI SCI handle
1004  * @dev_id:	Device identifier this request is for
1005  * @clk_id:	Clock identifier for the device for this request.
1006  *		Each device has its own set of clock inputs. This indexes
1007  *		which clock input to modify.
1008  * @programmed_state:	State requested for clock to move to
1009  * @current_state:	State that the clock is currently in
1010  *
1011  * Return: 0 if all went well, else returns appropriate error value.
1012  */
1013 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
1014 				      u32 dev_id, u32 clk_id,
1015 				      u8 *programmed_state, u8 *current_state)
1016 {
1017 	struct ti_sci_info *info;
1018 	struct ti_sci_msg_req_get_clock_state *req;
1019 	struct ti_sci_msg_resp_get_clock_state *resp;
1020 	struct ti_sci_xfer *xfer;
1021 	struct device *dev;
1022 	int ret = 0;
1023 
1024 	if (IS_ERR(handle))
1025 		return PTR_ERR(handle);
1026 	if (!handle)
1027 		return -EINVAL;
1028 
1029 	if (!programmed_state && !current_state)
1030 		return -EINVAL;
1031 
1032 	info = handle_to_ti_sci_info(handle);
1033 	dev = info->dev;
1034 
1035 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1036 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1037 				   sizeof(*req), sizeof(*resp));
1038 	if (IS_ERR(xfer)) {
1039 		ret = PTR_ERR(xfer);
1040 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1041 		return ret;
1042 	}
1043 	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
1044 	req->dev_id = dev_id;
1045 	if (clk_id < 255) {
1046 		req->clk_id = clk_id;
1047 	} else {
1048 		req->clk_id = 255;
1049 		req->clk_id_32 = clk_id;
1050 	}
1051 
1052 	ret = ti_sci_do_xfer(info, xfer);
1053 	if (ret) {
1054 		dev_err(dev, "Mbox send fail %d\n", ret);
1055 		goto fail;
1056 	}
1057 
1058 	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
1059 
1060 	if (!ti_sci_is_response_ack(resp)) {
1061 		ret = -ENODEV;
1062 		goto fail;
1063 	}
1064 
1065 	if (programmed_state)
1066 		*programmed_state = resp->programmed_state;
1067 	if (current_state)
1068 		*current_state = resp->current_state;
1069 
1070 fail:
1071 	ti_sci_put_one_xfer(&info->minfo, xfer);
1072 
1073 	return ret;
1074 }
1075 
1076 /**
1077  * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
1078  * @handle:	pointer to TI SCI handle
1079  * @dev_id:	Device identifier this request is for
1080  * @clk_id:	Clock identifier for the device for this request.
1081  *		Each device has its own set of clock inputs. This indexes
1082  *		which clock input to modify.
1083  * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
1084  * @can_change_freq: 'true' if frequency change is desired, else 'false'
1085  * @enable_input_term: 'true' if input termination is desired, else 'false'
1086  *
1087  * Return: 0 if all went well, else returns appropriate error value.
1088  */
1089 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1090 				u32 clk_id, bool needs_ssc,
1091 				bool can_change_freq, bool enable_input_term)
1092 {
1093 	u32 flags = 0;
1094 
1095 	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1096 	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1097 	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1098 
1099 	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1100 				      MSG_CLOCK_SW_STATE_REQ);
1101 }
1102 
1103 /**
1104  * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
1105  * @handle:	pointer to TI SCI handle
1106  * @dev_id:	Device identifier this request is for
1107  * @clk_id:	Clock identifier for the device for this request.
1108  *		Each device has its own set of clock inputs. This indexes
1109  *		which clock input to modify.
1110  *
1111  * NOTE: This clock must have been requested by get_clock previously.
1112  *
1113  * Return: 0 if all went well, else returns appropriate error value.
1114  */
1115 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1116 				 u32 dev_id, u32 clk_id)
1117 {
1118 	return ti_sci_set_clock_state(handle, dev_id, clk_id,
1119 				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
1120 				      MSG_CLOCK_SW_STATE_UNREQ);
1121 }
1122 
1123 /**
1124  * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
1125  * @handle:	pointer to TI SCI handle
1126  * @dev_id:	Device identifier this request is for
1127  * @clk_id:	Clock identifier for the device for this request.
1128  *		Each device has its own set of clock inputs. This indexes
1129  *		which clock input to modify.
1130  *
1131  * NOTE: This clock must have been requested by get_clock previously.
1132  *
1133  * Return: 0 if all went well, else returns appropriate error value.
1134  */
1135 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1136 				u32 dev_id, u32 clk_id)
1137 {
1138 	return ti_sci_set_clock_state(handle, dev_id, clk_id,
1139 				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
1140 				      MSG_CLOCK_SW_STATE_AUTO);
1141 }
1142 
1143 /**
1144  * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
1145  * @handle:	pointer to TI SCI handle
1146  * @dev_id:	Device identifier this request is for
1147  * @clk_id:	Clock identifier for the device for this request.
1148  *		Each device has its own set of clock inputs. This indexes
1149  *		which clock input to modify.
1150  * @req_state: state indicating if the clock is auto managed
1151  *
1152  * Return: 0 if all went well, else returns appropriate error value.
1153  */
1154 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1155 				  u32 dev_id, u32 clk_id, bool *req_state)
1156 {
1157 	u8 state = 0;
1158 	int ret;
1159 
1160 	if (!req_state)
1161 		return -EINVAL;
1162 
1163 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1164 	if (ret)
1165 		return ret;
1166 
1167 	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1168 	return 0;
1169 }
1170 
1171 /**
1172  * ti_sci_cmd_clk_is_on() - Is the clock ON
1173  * @handle:	pointer to TI SCI handle
1174  * @dev_id:	Device identifier this request is for
1175  * @clk_id:	Clock identifier for the device for this request.
1176  *		Each device has its own set of clock inputs. This indexes
1177  *		which clock input to modify.
1178  * @req_state: state indicating if the clock is managed by us and enabled
1179  * @curr_state: state indicating if the clock is ready for operation
1180  *
1181  * Return: 0 if all went well, else returns appropriate error value.
1182  */
1183 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1184 				u32 clk_id, bool *req_state, bool *curr_state)
1185 {
1186 	u8 c_state = 0, r_state = 0;
1187 	int ret;
1188 
1189 	if (!req_state && !curr_state)
1190 		return -EINVAL;
1191 
1192 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1193 					 &r_state, &c_state);
1194 	if (ret)
1195 		return ret;
1196 
1197 	if (req_state)
1198 		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1199 	if (curr_state)
1200 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1201 	return 0;
1202 }
1203 
1204 /**
1205  * ti_sci_cmd_clk_is_off() - Is the clock OFF
1206  * @handle:	pointer to TI SCI handle
1207  * @dev_id:	Device identifier this request is for
1208  * @clk_id:	Clock identifier for the device for this request.
1209  *		Each device has its own set of clock inputs. This indexes
1210  *		which clock input to modify.
1211  * @req_state: state indicating if the clock is managed by us and disabled
1212  * @curr_state: state indicating if the clock is NOT ready for operation
1213  *
1214  * Return: 0 if all went well, else returns appropriate error value.
1215  */
1216 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1217 				 u32 clk_id, bool *req_state, bool *curr_state)
1218 {
1219 	u8 c_state = 0, r_state = 0;
1220 	int ret;
1221 
1222 	if (!req_state && !curr_state)
1223 		return -EINVAL;
1224 
1225 	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1226 					 &r_state, &c_state);
1227 	if (ret)
1228 		return ret;
1229 
1230 	if (req_state)
1231 		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1232 	if (curr_state)
1233 		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1234 	return 0;
1235 }
1236 
1237 /**
1238  * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1239  * @handle:	pointer to TI SCI handle
1240  * @dev_id:	Device identifier this request is for
1241  * @clk_id:	Clock identifier for the device for this request.
1242  *		Each device has its own set of clock inputs. This indexes
1243  *		which clock input to modify.
1244  * @parent_id:	Parent clock identifier to set
1245  *
1246  * Return: 0 if all went well, else returns appropriate error value.
1247  */
1248 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1249 				     u32 dev_id, u32 clk_id, u32 parent_id)
1250 {
1251 	struct ti_sci_info *info;
1252 	struct ti_sci_msg_req_set_clock_parent *req;
1253 	struct ti_sci_msg_hdr *resp;
1254 	struct ti_sci_xfer *xfer;
1255 	struct device *dev;
1256 	int ret = 0;
1257 
1258 	if (IS_ERR(handle))
1259 		return PTR_ERR(handle);
1260 	if (!handle)
1261 		return -EINVAL;
1262 
1263 	info = handle_to_ti_sci_info(handle);
1264 	dev = info->dev;
1265 
1266 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1267 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1268 				   sizeof(*req), sizeof(*resp));
1269 	if (IS_ERR(xfer)) {
1270 		ret = PTR_ERR(xfer);
1271 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1272 		return ret;
1273 	}
1274 	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
1275 	req->dev_id = dev_id;
1276 	if (clk_id < 255) {
1277 		req->clk_id = clk_id;
1278 	} else {
1279 		req->clk_id = 255;
1280 		req->clk_id_32 = clk_id;
1281 	}
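	/* parent_id uses the same 255-sentinel / 32-bit extension scheme as clk_id */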
1282 	if (parent_id < 255) {
1283 		req->parent_id = parent_id;
1284 	} else {
1285 		req->parent_id = 255;
1286 		req->parent_id_32 = parent_id;
1287 	}
1288 
1289 	ret = ti_sci_do_xfer(info, xfer);
1290 	if (ret) {
1291 		dev_err(dev, "Mbox send fail %d\n", ret);
1292 		goto fail;
1293 	}
1294 
1295 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1296 
1297 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1298 
1299 fail:
1300 	ti_sci_put_one_xfer(&info->minfo, xfer);
1301 
1302 	return ret;
1303 }
1304 
1305 /**
1306  * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1307  * @handle:	pointer to TI SCI handle
1308  * @dev_id:	Device identifier this request is for
1309  * @clk_id:	Clock identifier for the device for this request.
1310  *		Each device has its own set of clock inputs. This indexes
1311  *		which clock input to modify.
1312  * @parent_id:	Current clock parent
1313  *
1314  * Return: 0 if all went well, else returns appropriate error value.
1315  */
1316 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1317 				     u32 dev_id, u32 clk_id, u32 *parent_id)
1318 {
1319 	struct ti_sci_info *info;
1320 	struct ti_sci_msg_req_get_clock_parent *req;
1321 	struct ti_sci_msg_resp_get_clock_parent *resp;
1322 	struct ti_sci_xfer *xfer;
1323 	struct device *dev;
1324 	int ret = 0;
1325 
1326 	if (IS_ERR(handle))
1327 		return PTR_ERR(handle);
1328 	if (!handle || !parent_id)
1329 		return -EINVAL;
1330 
1331 	info = handle_to_ti_sci_info(handle);
1332 	dev = info->dev;
1333 
1334 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1335 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1336 				   sizeof(*req), sizeof(*resp));
1337 	if (IS_ERR(xfer)) {
1338 		ret = PTR_ERR(xfer);
1339 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1340 		return ret;
1341 	}
1342 	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
1343 	req->dev_id = dev_id;
1344 	if (clk_id < 255) {
1345 		req->clk_id = clk_id;
1346 	} else {
1347 		req->clk_id = 255;
1348 		req->clk_id_32 = clk_id;
1349 	}
1350 
1351 	ret = ti_sci_do_xfer(info, xfer);
1352 	if (ret) {
1353 		dev_err(dev, "Mbox send fail %d\n", ret);
1354 		goto fail;
1355 	}
1356 
1357 	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
1358 
1359 	if (!ti_sci_is_response_ack(resp)) {
1360 		ret = -ENODEV;
1361 	} else {
1362 		if (resp->parent_id < 255)
1363 			*parent_id = resp->parent_id;
1364 		else
1365 			*parent_id = resp->parent_id_32;
1366 	}
1367 
1368 fail:
1369 	ti_sci_put_one_xfer(&info->minfo, xfer);
1370 
1371 	return ret;
1372 }
1373 
1374 /**
1375  * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1376  * @handle:	pointer to TI SCI handle
1377  * @dev_id:	Device identifier this request is for
1378  * @clk_id:	Clock identifier for the device for this request.
1379  *		Each device has its own set of clock inputs. This indexes
1380  *		which clock input to modify.
1381  * @num_parents: Returns the number of parents of the current clock.
1382  *
1383  * Return: 0 if all went well, else returns appropriate error value.
1384  */
1385 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1386 					  u32 dev_id, u32 clk_id,
1387 					  u32 *num_parents)
1388 {
1389 	struct ti_sci_info *info;
1390 	struct ti_sci_msg_req_get_clock_num_parents *req;
1391 	struct ti_sci_msg_resp_get_clock_num_parents *resp;
1392 	struct ti_sci_xfer *xfer;
1393 	struct device *dev;
1394 	int ret = 0;
1395 
1396 	if (IS_ERR(handle))
1397 		return PTR_ERR(handle);
1398 	if (!handle || !num_parents)
1399 		return -EINVAL;
1400 
1401 	info = handle_to_ti_sci_info(handle);
1402 	dev = info->dev;
1403 
1404 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1405 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1406 				   sizeof(*req), sizeof(*resp));
1407 	if (IS_ERR(xfer)) {
1408 		ret = PTR_ERR(xfer);
1409 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1410 		return ret;
1411 	}
1412 	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
1413 	req->dev_id = dev_id;
1414 	if (clk_id < 255) {
1415 		req->clk_id = clk_id;
1416 	} else {
1417 		req->clk_id = 255;
1418 		req->clk_id_32 = clk_id;
1419 	}
1420 
1421 	ret = ti_sci_do_xfer(info, xfer);
1422 	if (ret) {
1423 		dev_err(dev, "Mbox send fail %d\n", ret);
1424 		goto fail;
1425 	}
1426 
1427 	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
1428 
1429 	if (!ti_sci_is_response_ack(resp)) {
1430 		ret = -ENODEV;
1431 	} else {
1432 		if (resp->num_parents < 255)
1433 			*num_parents = resp->num_parents;
1434 		else
1435 			*num_parents = resp->num_parents_32;
1436 	}
1437 
1438 fail:
1439 	ti_sci_put_one_xfer(&info->minfo, xfer);
1440 
1441 	return ret;
1442 }
1443 
1444 /**
1445  * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1446  * @handle:	pointer to TI SCI handle
1447  * @dev_id:	Device identifier this request is for
1448  * @clk_id:	Clock identifier for the device for this request.
1449  *		Each device has its own set of clock inputs. This indexes
1450  *		which clock input to modify.
1451  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1452  *		allowable programmed frequency and does not account for clock
1453  *		tolerances and jitter.
1454  * @target_freq: The target clock frequency in Hz. A frequency will be
1455  *		programmed as close to this target frequency as possible.
1456  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1457  *		allowable programmed frequency and does not account for clock
1458  *		tolerances and jitter.
1459  * @match_freq:	Matched frequency in Hz returned in the response.
1460  *
1461  * Return: 0 if all went well, else returns appropriate error value.
1462  */
1463 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1464 					 u32 dev_id, u32 clk_id, u64 min_freq,
1465 					 u64 target_freq, u64 max_freq,
1466 					 u64 *match_freq)
1467 {
1468 	struct ti_sci_info *info;
1469 	struct ti_sci_msg_req_query_clock_freq *req;
1470 	struct ti_sci_msg_resp_query_clock_freq *resp;
1471 	struct ti_sci_xfer *xfer;
1472 	struct device *dev;
1473 	int ret = 0;
1474 
1475 	if (IS_ERR(handle))
1476 		return PTR_ERR(handle);
1477 	if (!handle || !match_freq)
1478 		return -EINVAL;
1479 
1480 	info = handle_to_ti_sci_info(handle);
1481 	dev = info->dev;
1482 
1483 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1484 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1485 				   sizeof(*req), sizeof(*resp));
1486 	if (IS_ERR(xfer)) {
1487 		ret = PTR_ERR(xfer);
1488 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1489 		return ret;
1490 	}
1491 	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
1492 	req->dev_id = dev_id;
1493 	if (clk_id < 255) {
1494 		req->clk_id = clk_id;
1495 	} else {
1496 		req->clk_id = 255;
1497 		req->clk_id_32 = clk_id;
1498 	}
1499 	req->min_freq_hz = min_freq;
1500 	req->target_freq_hz = target_freq;
1501 	req->max_freq_hz = max_freq;
1502 
1503 	ret = ti_sci_do_xfer(info, xfer);
1504 	if (ret) {
1505 		dev_err(dev, "Mbox send fail %d\n", ret);
1506 		goto fail;
1507 	}
1508 
1509 	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
1510 
1511 	if (!ti_sci_is_response_ack(resp))
1512 		ret = -ENODEV;
1513 	else
1514 		*match_freq = resp->freq_hz;
1515 
1516 fail:
1517 	ti_sci_put_one_xfer(&info->minfo, xfer);
1518 
1519 	return ret;
1520 }
1521 
1522 /**
1523  * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1524  * @handle:	pointer to TI SCI handle
1525  * @dev_id:	Device identifier this request is for
1526  * @clk_id:	Clock identifier for the device for this request.
1527  *		Each device has its own set of clock inputs. This indexes
1528  *		which clock input to modify.
1529  * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
1530  *		allowable programmed frequency and does not account for clock
1531  *		tolerances and jitter.
1532  * @target_freq: The target clock frequency in Hz. A frequency will be
1533  *		programmed as close to this target frequency as possible.
1534  * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
1535  *		allowable programmed frequency and does not account for clock
1536  *		tolerances and jitter.
1537  *
1538  * Return: 0 if all went well, else returns appropriate error value.
1539  */
1540 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1541 				   u32 dev_id, u32 clk_id, u64 min_freq,
1542 				   u64 target_freq, u64 max_freq)
1543 {
1544 	struct ti_sci_info *info;
1545 	struct ti_sci_msg_req_set_clock_freq *req;
1546 	struct ti_sci_msg_hdr *resp;
1547 	struct ti_sci_xfer *xfer;
1548 	struct device *dev;
1549 	int ret = 0;
1550 
1551 	if (IS_ERR(handle))
1552 		return PTR_ERR(handle);
1553 	if (!handle)
1554 		return -EINVAL;
1555 
1556 	info = handle_to_ti_sci_info(handle);
1557 	dev = info->dev;
1558 
1559 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1560 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1561 				   sizeof(*req), sizeof(*resp));
1562 	if (IS_ERR(xfer)) {
1563 		ret = PTR_ERR(xfer);
1564 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1565 		return ret;
1566 	}
1567 	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
1568 	req->dev_id = dev_id;
1569 	if (clk_id < 255) {
1570 		req->clk_id = clk_id;
1571 	} else {
1572 		req->clk_id = 255;
1573 		req->clk_id_32 = clk_id;
1574 	}
1575 	req->min_freq_hz = min_freq;
1576 	req->target_freq_hz = target_freq;
1577 	req->max_freq_hz = max_freq;
1578 
1579 	ret = ti_sci_do_xfer(info, xfer);
1580 	if (ret) {
1581 		dev_err(dev, "Mbox send fail %d\n", ret);
1582 		goto fail;
1583 	}
1584 
1585 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1586 
1587 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1588 
1589 fail:
1590 	ti_sci_put_one_xfer(&info->minfo, xfer);
1591 
1592 	return ret;
1593 }
1594 
1595 /**
1596  * ti_sci_cmd_clk_get_freq() - Get current frequency
1597  * @handle:	pointer to TI SCI handle
1598  * @dev_id:	Device identifier this request is for
1599  * @clk_id:	Clock identifier for the device for this request.
1600  *		Each device has its own set of clock inputs. This indexes
1601  *		which clock input to modify.
1602  * @freq:	Current frequency in Hz
1603  *
1604  * Return: 0 if all went well, else returns appropriate error value.
1605  */
1606 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1607 				   u32 dev_id, u32 clk_id, u64 *freq)
1608 {
1609 	struct ti_sci_info *info;
1610 	struct ti_sci_msg_req_get_clock_freq *req;
1611 	struct ti_sci_msg_resp_get_clock_freq *resp;
1612 	struct ti_sci_xfer *xfer;
1613 	struct device *dev;
1614 	int ret = 0;
1615 
1616 	if (IS_ERR(handle))
1617 		return PTR_ERR(handle);
1618 	if (!handle || !freq)
1619 		return -EINVAL;
1620 
1621 	info = handle_to_ti_sci_info(handle);
1622 	dev = info->dev;
1623 
1624 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1625 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1626 				   sizeof(*req), sizeof(*resp));
1627 	if (IS_ERR(xfer)) {
1628 		ret = PTR_ERR(xfer);
1629 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1630 		return ret;
1631 	}
1632 	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
1633 	req->dev_id = dev_id;
1634 	if (clk_id < 255) {
1635 		req->clk_id = clk_id;
1636 	} else {
1637 		req->clk_id = 255;
1638 		req->clk_id_32 = clk_id;
1639 	}
1640 
1641 	ret = ti_sci_do_xfer(info, xfer);
1642 	if (ret) {
1643 		dev_err(dev, "Mbox send fail %d\n", ret);
1644 		goto fail;
1645 	}
1646 
1647 	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
1648 
1649 	if (!ti_sci_is_response_ack(resp))
1650 		ret = -ENODEV;
1651 	else
1652 		*freq = resp->freq_hz;
1653 
1654 fail:
1655 	ti_sci_put_one_xfer(&info->minfo, xfer);
1656 
1657 	return ret;
1658 }
1659 
1660 /**
1661  * ti_sci_cmd_prepare_sleep() - Prepare the system for suspend
1662  * @handle:		pointer to TI SCI handle
1663  * @mode:		Low power mode to enter
1664  * @ctx_lo:		Low part of address for context save
1665  * @ctx_hi:		High part of address for context save
1666  * @debug_flags:	Debug flags to pass to firmware
1667  *
1668  * Return: 0 if all went well, else returns appropriate error value.
1669  */
1670 static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
1671 				    u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
1672 {
1673 	struct ti_sci_info *info;
1674 	struct ti_sci_msg_req_prepare_sleep *req;
1675 	struct ti_sci_msg_hdr *resp;
1676 	struct ti_sci_xfer *xfer;
1677 	struct device *dev;
1678 	int ret = 0;
1679 
1680 	if (IS_ERR(handle))
1681 		return PTR_ERR(handle);
1682 	if (!handle)
1683 		return -EINVAL;
1684 
1685 	info = handle_to_ti_sci_info(handle);
1686 	dev = info->dev;
1687 
1688 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
1689 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1690 				   sizeof(*req), sizeof(*resp));
1691 	if (IS_ERR(xfer)) {
1692 		ret = PTR_ERR(xfer);
1693 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1694 		return ret;
1695 	}
1696 
1697 	req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf;
1698 	req->mode = mode;
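	/* ctx_lo/ctx_hi together form the 64-bit address of the context save area */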
1699 	req->ctx_lo = ctx_lo;
1700 	req->ctx_hi = ctx_hi;
1701 	req->debug_flags = debug_flags;
1702 
1703 	ret = ti_sci_do_xfer(info, xfer);
1704 	if (ret) {
1705 		dev_err(dev, "Mbox send fail %d\n", ret);
1706 		goto fail;
1707 	}
1708 
1709 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1710 
1711 	if (!ti_sci_is_response_ack(resp)) {
1712 		dev_err(dev, "Failed to prepare sleep\n");
1713 		ret = -ENODEV;
1714 	}
1715 
1716 fail:
1717 	ti_sci_put_one_xfer(&info->minfo, xfer);
1718 
1719 	return ret;
1720 }
1721 
1722 /**
1723  * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities
1724  * @handle:		Pointer to TI SCI handle
1725  * @fw_caps:		Each bit in fw_caps indicates one FW/SoC capability
1726  *
1727  * Check if the firmware supports any optional low power modes.
1728  * Old revisions of TIFS (< 08.04) will NACK the request, which results in
1729  * -ENODEV being returned.
1730  *
1731  * Return: 0 if all went well, else returns appropriate error value.
1732  */
1733 static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle,
1734 					u64 *fw_caps)
1735 {
1736 	struct ti_sci_info *info;
1737 	struct ti_sci_xfer *xfer;
1738 	struct ti_sci_msg_resp_query_fw_caps *resp;
1739 	struct device *dev;
1740 	int ret = 0;
1741 
1742 	if (IS_ERR(handle))
1743 		return PTR_ERR(handle);
1744 	if (!handle)
1745 		return -EINVAL;
1746 
1747 	info = handle_to_ti_sci_info(handle);
1748 	dev = info->dev;
1749 
1750 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS,
1751 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1752 				   sizeof(struct ti_sci_msg_hdr),
1753 				   sizeof(*resp));
1754 	if (IS_ERR(xfer)) {
1755 		ret = PTR_ERR(xfer);
1756 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1757 		return ret;
1758 	}
1759 
1760 	ret = ti_sci_do_xfer(info, xfer);
1761 	if (ret) {
1762 		dev_err(dev, "Mbox send fail %d\n", ret);
1763 		goto fail;
1764 	}
1765 
1766 	resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf;
1767 
1768 	if (!ti_sci_is_response_ack(resp)) {
1769 		dev_err(dev, "Failed to get capabilities\n");
1770 		ret = -ENODEV;
1771 		goto fail;
1772 	}
1773 
1774 	if (fw_caps)
1775 		*fw_caps = resp->fw_caps;
1776 
1777 fail:
1778 	ti_sci_put_one_xfer(&info->minfo, xfer);
1779 
1780 	return ret;
1781 }
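
/*
 * Illustrative sketch (assumed caller-side code): the returned bitmask can
 * gate optional low power support, e.g. the DM managed LPM capability that
 * ti_sci_setup_ops() checks below:
 *
 *	u64 caps;
 *
 *	if (!ti_sci_msg_cmd_query_fw_caps(handle, &caps) &&
 *	    (caps & MSG_FLAG_CAPS_LPM_DM_MANAGED))
 *		... register the LPM constraint ops ...
 */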
1782 
1783 /**
1784  * ti_sci_cmd_set_io_isolation() - Enable IO isolation in LPM
1785  * @handle:		Pointer to TI SCI handle
1786  * @state:		The desired state of the IO isolation
1787  *
1788  * Return: 0 if all went well, else returns appropriate error value.
1789  */
1790 static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle,
1791 				       u8 state)
1792 {
1793 	struct ti_sci_info *info;
1794 	struct ti_sci_msg_req_set_io_isolation *req;
1795 	struct ti_sci_msg_hdr *resp;
1796 	struct ti_sci_xfer *xfer;
1797 	struct device *dev;
1798 	int ret = 0;
1799 
1800 	if (IS_ERR(handle))
1801 		return PTR_ERR(handle);
1802 	if (!handle)
1803 		return -EINVAL;
1804 
1805 	info = handle_to_ti_sci_info(handle);
1806 	dev = info->dev;
1807 
1808 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION,
1809 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1810 				   sizeof(*req), sizeof(*resp));
1811 	if (IS_ERR(xfer)) {
1812 		ret = PTR_ERR(xfer);
1813 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1814 		return ret;
1815 	}
1816 	req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf;
1817 	req->state = state;
1818 
1819 	ret = ti_sci_do_xfer(info, xfer);
1820 	if (ret) {
1821 		dev_err(dev, "Mbox send fail %d\n", ret);
1822 		goto fail;
1823 	}
1824 
1825 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1826 
1827 	if (!ti_sci_is_response_ack(resp)) {
1828 		dev_err(dev, "Failed to set IO isolation\n");
1829 		ret = -ENODEV;
1830 	}
1831 
1832 fail:
1833 	ti_sci_put_one_xfer(&info->minfo, xfer);
1834 
1835 	return ret;
1836 }
1837 
1838 /**
1839  * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM
1840  * @handle:		Pointer to TI SCI handle
1841  * @source:		The wakeup source that woke the SoC from LPM
1842  * @timestamp:		Timestamp of the wakeup event
1843  * @pin:		The pin that has triggered wake up
1844  * @mode:		The last entered low power mode
1845  *
1846  * Return: 0 if all went well, else returns appropriate error value.
1847  */
1848 static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle,
1849 					  u32 *source, u64 *timestamp, u8 *pin, u8 *mode)
1850 {
1851 	struct ti_sci_info *info;
1852 	struct ti_sci_xfer *xfer;
1853 	struct ti_sci_msg_resp_lpm_wake_reason *resp;
1854 	struct device *dev;
1855 	int ret = 0;
1856 
1857 	if (IS_ERR(handle))
1858 		return PTR_ERR(handle);
1859 	if (!handle)
1860 		return -EINVAL;
1861 
1862 	info = handle_to_ti_sci_info(handle);
1863 	dev = info->dev;
1864 
1865 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON,
1866 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1867 				   sizeof(struct ti_sci_msg_hdr),
1868 				   sizeof(*resp));
1869 	if (IS_ERR(xfer)) {
1870 		ret = PTR_ERR(xfer);
1871 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1872 		return ret;
1873 	}
1874 
1875 	ret = ti_sci_do_xfer(info, xfer);
1876 	if (ret) {
1877 		dev_err(dev, "Mbox send fail %d\n", ret);
1878 		goto fail;
1879 	}
1880 
1881 	resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf;
1882 
1883 	if (!ti_sci_is_response_ack(resp)) {
1884 		dev_err(dev, "Failed to get wake reason\n");
1885 		ret = -ENODEV;
1886 		goto fail;
1887 	}
1888 
1889 	if (source)
1890 		*source = resp->wake_source;
1891 	if (timestamp)
1892 		*timestamp = resp->wake_timestamp;
1893 	if (pin)
1894 		*pin = resp->wake_pin;
1895 	if (mode)
1896 		*mode = resp->mode;
1897 
1898 fail:
1899 	ti_sci_put_one_xfer(&info->minfo, xfer);
1900 
1901 	return ret;
1902 }
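
/*
 * Illustrative sketch (assumed caller-side code): resume handling can log
 * why the SoC left LPM; any output pointer that is not of interest may be
 * passed as NULL:
 *
 *	u32 src;
 *	u8 pin, mode;
 *
 *	if (!ti_sci_msg_cmd_lpm_wake_reason(handle, &src, NULL, &pin, &mode))
 *		dev_dbg(dev, "wake: src %u pin %u mode %u\n", src, pin, mode);
 */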
1903 
1904 /**
1905  * ti_sci_cmd_set_device_constraint() - Set LPM constraint on behalf of a device
1906  * @handle:	pointer to TI SCI handle
1907  * @id:	Device identifier
1908  * @state:	The desired state of device constraint: set or clear
1909  *
1910  * Return: 0 if all went well, else returns appropriate error value.
1911  */
1912 static int ti_sci_cmd_set_device_constraint(const struct ti_sci_handle *handle,
1913 					    u32 id, u8 state)
1914 {
1915 	struct ti_sci_info *info;
1916 	struct ti_sci_msg_req_lpm_set_device_constraint *req;
1917 	struct ti_sci_msg_hdr *resp;
1918 	struct ti_sci_xfer *xfer;
1919 	struct device *dev;
1920 	int ret = 0;
1921 
1922 	if (IS_ERR(handle))
1923 		return PTR_ERR(handle);
1924 	if (!handle)
1925 		return -EINVAL;
1926 
1927 	info = handle_to_ti_sci_info(handle);
1928 	dev = info->dev;
1929 
1930 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT,
1931 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1932 				   sizeof(*req), sizeof(*resp));
1933 	if (IS_ERR(xfer)) {
1934 		ret = PTR_ERR(xfer);
1935 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1936 		return ret;
1937 	}
1938 	req = (struct ti_sci_msg_req_lpm_set_device_constraint *)xfer->xfer_buf;
1939 	req->id = id;
1940 	req->state = state;
1941 
1942 	ret = ti_sci_do_xfer(info, xfer);
1943 	if (ret) {
1944 		dev_err(dev, "Mbox send fail %d\n", ret);
1945 		goto fail;
1946 	}
1947 
1948 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1949 
1950 	if (!ti_sci_is_response_ack(resp)) {
1951 		dev_err(dev, "Failed to set device constraint\n");
1952 		ret = -ENODEV;
1953 	}
1954 
1955 fail:
1956 	ti_sci_put_one_xfer(&info->minfo, xfer);
1957 
1958 	return ret;
1959 }
1960 
1961 /**
1962  * ti_sci_cmd_set_latency_constraint() - Set LPM resume latency constraint
1963  * @handle:	pointer to TI SCI handle
1964  * @latency:	maximum acceptable latency (in ms) to wake up from LPM
1965  * @state:	The desired state of latency constraint: set or clear
1966  *
1967  * Return: 0 if all went well, else returns appropriate error value.
1968  */
1969 static int ti_sci_cmd_set_latency_constraint(const struct ti_sci_handle *handle,
1970 					     u16 latency, u8 state)
1971 {
1972 	struct ti_sci_info *info;
1973 	struct ti_sci_msg_req_lpm_set_latency_constraint *req;
1974 	struct ti_sci_msg_hdr *resp;
1975 	struct ti_sci_xfer *xfer;
1976 	struct device *dev;
1977 	int ret = 0;
1978 
1979 	if (IS_ERR(handle))
1980 		return PTR_ERR(handle);
1981 	if (!handle)
1982 		return -EINVAL;
1983 
1984 	info = handle_to_ti_sci_info(handle);
1985 	dev = info->dev;
1986 
1987 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT,
1988 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1989 				   sizeof(*req), sizeof(*resp));
1990 	if (IS_ERR(xfer)) {
1991 		ret = PTR_ERR(xfer);
1992 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1993 		return ret;
1994 	}
1995 	req = (struct ti_sci_msg_req_lpm_set_latency_constraint *)xfer->xfer_buf;
1996 	req->latency = latency;
1997 	req->state = state;
1998 
1999 	ret = ti_sci_do_xfer(info, xfer);
2000 	if (ret) {
2001 		dev_err(dev, "Mbox send fail %d\n", ret);
2002 		goto fail;
2003 	}
2004 
2005 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2006 
2007 	if (!ti_sci_is_response_ack(resp)) {
2008 		dev_err(dev, "Failed to set latency constraint\n");
2009 		ret = -ENODEV;
2010 	}
2011 
2012 fail:
2013 	ti_sci_put_one_xfer(&info->minfo, xfer);
2014 
2015 	return ret;
2016 }
2017 
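/**
 * ti_sci_cmd_core_reboot() - Command to request a system reset
 * @handle:	Pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */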
2018 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
2019 {
2020 	struct ti_sci_info *info;
2021 	struct ti_sci_msg_req_reboot *req;
2022 	struct ti_sci_msg_hdr *resp;
2023 	struct ti_sci_xfer *xfer;
2024 	struct device *dev;
2025 	int ret = 0;
2026 
2027 	if (IS_ERR(handle))
2028 		return PTR_ERR(handle);
2029 	if (!handle)
2030 		return -EINVAL;
2031 
2032 	info = handle_to_ti_sci_info(handle);
2033 	dev = info->dev;
2034 
2035 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
2036 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2037 				   sizeof(*req), sizeof(*resp));
2038 	if (IS_ERR(xfer)) {
2039 		ret = PTR_ERR(xfer);
2040 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2041 		return ret;
2042 	}
2043 	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
2044 
2045 	ret = ti_sci_do_xfer(info, xfer);
2046 	if (ret) {
2047 		dev_err(dev, "Mbox send fail %d\n", ret);
2048 		goto fail;
2049 	}
2050 
2051 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2052 
2053 	if (!ti_sci_is_response_ack(resp))
2054 		ret = -ENODEV;
2055 	else
2056 		ret = 0;
2057 
2058 fail:
2059 	ti_sci_put_one_xfer(&info->minfo, xfer);
2060 
2061 	return ret;
2062 }
2063 
2064 /**
2065  * ti_sci_get_resource_range() - Helper to get a range of resources assigned
2066  *			       to a host. Resource is uniquely identified by
2067  *			       type and subtype.
2068  * @handle:		Pointer to TISCI handle.
2069  * @dev_id:		TISCI device ID.
2070  * @subtype:		Resource assignment subtype that is being requested
2071  *			from the given device.
2072  * @s_host:		Host processor ID to which the resources are allocated
2073  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2074  *			resource range start index and number of resources
2075  *
2076  * Return: 0 if all went fine, else return appropriate error.
2077  */
2078 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
2079 				     u32 dev_id, u8 subtype, u8 s_host,
2080 				     struct ti_sci_resource_desc *desc)
2081 {
2082 	struct ti_sci_msg_resp_get_resource_range *resp;
2083 	struct ti_sci_msg_req_get_resource_range *req;
2084 	struct ti_sci_xfer *xfer;
2085 	struct ti_sci_info *info;
2086 	struct device *dev;
2087 	int ret = 0;
2088 
2089 	if (IS_ERR(handle))
2090 		return PTR_ERR(handle);
2091 	if (!handle || !desc)
2092 		return -EINVAL;
2093 
2094 	info = handle_to_ti_sci_info(handle);
2095 	dev = info->dev;
2096 
2097 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
2098 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2099 				   sizeof(*req), sizeof(*resp));
2100 	if (IS_ERR(xfer)) {
2101 		ret = PTR_ERR(xfer);
2102 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2103 		return ret;
2104 	}
2105 
2106 	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
2107 	req->secondary_host = s_host;
2108 	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
2109 	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
2110 
2111 	ret = ti_sci_do_xfer(info, xfer);
2112 	if (ret) {
2113 		dev_err(dev, "Mbox send fail %d\n", ret);
2114 		goto fail;
2115 	}
2116 
2117 	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
2118 
2119 	if (!ti_sci_is_response_ack(resp)) {
2120 		ret = -ENODEV;
2121 	} else if (!resp->range_num && !resp->range_num_sec) {
2122 		/* Neither of the two resource ranges is valid */
2123 		ret = -ENODEV;
2124 	} else {
2125 		desc->start = resp->range_start;
2126 		desc->num = resp->range_num;
2127 		desc->start_sec = resp->range_start_sec;
2128 		desc->num_sec = resp->range_num_sec;
2129 	}
2130 
2131 fail:
2132 	ti_sci_put_one_xfer(&info->minfo, xfer);
2133 
2134 	return ret;
2135 }
2136 
2137 /**
2138  * ti_sci_cmd_get_resource_range() - Get a range of resources assigned to the
2139  *				   host that is the same as the TI SCI interface host.
2140  * @handle:		Pointer to TISCI handle.
2141  * @dev_id:		TISCI device ID.
2142  * @subtype:		Resource assignment subtype that is being requested
2143  *			from the given device.
2144  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2145  *			resource range start index and number of resources
2146  *
2147  * Return: 0 if all went fine, else return appropriate error.
2148  */
2149 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
2150 					 u32 dev_id, u8 subtype,
2151 					 struct ti_sci_resource_desc *desc)
2152 {
2153 	return ti_sci_get_resource_range(handle, dev_id, subtype,
2154 					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
2155 					 desc);
2156 }
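
/*
 * Illustrative sketch (assumed caller-side code): the query fills a
 * ti_sci_resource_desc whose start/num (and start_sec/num_sec) pairs
 * describe the ranges assigned to this host:
 *
 *	struct ti_sci_resource_desc desc;
 *
 *	if (!ti_sci_cmd_get_resource_range(handle, dev_id, subtype, &desc))
 *		pr_debug("primary range: [%u, %u)\n",
 *			 desc.start, desc.start + desc.num);
 */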
2157 
2158 /**
2159  * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
2160  *					      assigned to a specified host.
2161  * @handle:		Pointer to TISCI handle.
2162  * @dev_id:		TISCI device ID.
2163  * @subtype:		Resource assignment subtype that is being requested
2164  *			from the given device.
2165  * @s_host:		Host processor ID to which the resources are allocated
2166  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2167  *			resource range start index and number of resources
2168  *
2169  * Return: 0 if all went fine, else return appropriate error.
2170  */
2171 static
2172 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
2173 					     u32 dev_id, u8 subtype, u8 s_host,
2174 					     struct ti_sci_resource_desc *desc)
2175 {
2176 	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
2177 }
2178 
2179 /**
2180  * ti_sci_manage_irq() - Helper API to configure/release the irq route between
2181  *			 the requested source and destination
2182  * @handle:		Pointer to TISCI handle.
2183  * @valid_params:	Bit fields defining the validity of certain params
2184  * @src_id:		Device ID of the IRQ source
2185  * @src_index:		IRQ source index within the source device
2186  * @dst_id:		Device ID of the IRQ destination
2187  * @dst_host_irq:	IRQ number of the destination device
2188  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2189  * @vint:		Virtual interrupt to be used within the IA
2190  * @global_event:	Global event number to be used for the requesting event
2191  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2192  * @s_host:		Secondary host ID for which the irq/event is being
2193  *			requested.
2194  * @type:		Request type irq set or release.
2195  *
2196  * Return: 0 if all went fine, else return appropriate error.
2197  */
2198 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
2199 			     u32 valid_params, u16 src_id, u16 src_index,
2200 			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
2201 			     u16 global_event, u8 vint_status_bit, u8 s_host,
2202 			     u16 type)
2203 {
2204 	struct ti_sci_msg_req_manage_irq *req;
2205 	struct ti_sci_msg_hdr *resp;
2206 	struct ti_sci_xfer *xfer;
2207 	struct ti_sci_info *info;
2208 	struct device *dev;
2209 	int ret = 0;
2210 
2211 	if (IS_ERR(handle))
2212 		return PTR_ERR(handle);
2213 	if (!handle)
2214 		return -EINVAL;
2215 
2216 	info = handle_to_ti_sci_info(handle);
2217 	dev = info->dev;
2218 
2219 	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2220 				   sizeof(*req), sizeof(*resp));
2221 	if (IS_ERR(xfer)) {
2222 		ret = PTR_ERR(xfer);
2223 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2224 		return ret;
2225 	}
2226 	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
2227 	req->valid_params = valid_params;
2228 	req->src_id = src_id;
2229 	req->src_index = src_index;
2230 	req->dst_id = dst_id;
2231 	req->dst_host_irq = dst_host_irq;
2232 	req->ia_id = ia_id;
2233 	req->vint = vint;
2234 	req->global_event = global_event;
2235 	req->vint_status_bit = vint_status_bit;
2236 	req->secondary_host = s_host;
2237 
2238 	ret = ti_sci_do_xfer(info, xfer);
2239 	if (ret) {
2240 		dev_err(dev, "Mbox send fail %d\n", ret);
2241 		goto fail;
2242 	}
2243 
2244 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2245 
2246 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2247 
2248 fail:
2249 	ti_sci_put_one_xfer(&info->minfo, xfer);
2250 
2251 	return ret;
2252 }
2253 
2254 /**
2255  * ti_sci_set_irq() - Helper API to configure the irq route between the
2256  *		      requested source and destination
2257  * @handle:		Pointer to TISCI handle.
2258  * @valid_params:	Bit fields defining the validity of certain params
2259  * @src_id:		Device ID of the IRQ source
2260  * @src_index:		IRQ source index within the source device
2261  * @dst_id:		Device ID of the IRQ destination
2262  * @dst_host_irq:	IRQ number of the destination device
2263  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2264  * @vint:		Virtual interrupt to be used within the IA
2265  * @global_event:	Global event number to be used for the requesting event
2266  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2267  * @s_host:		Secondary host ID for which the irq/event is being
2268  *			requested.
2269  *
2270  * Return: 0 if all went fine, else return appropriate error.
2271  */
2272 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
2273 			  u16 src_id, u16 src_index, u16 dst_id,
2274 			  u16 dst_host_irq, u16 ia_id, u16 vint,
2275 			  u16 global_event, u8 vint_status_bit, u8 s_host)
2276 {
2277 	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
2278 		 __func__, valid_params, src_id, src_index,
2279 		 dst_id, dst_host_irq, ia_id, vint, global_event,
2280 		 vint_status_bit);
2281 
2282 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
2283 				 dst_id, dst_host_irq, ia_id, vint,
2284 				 global_event, vint_status_bit, s_host,
2285 				 TI_SCI_MSG_SET_IRQ);
2286 }
2287 
2288 /**
2289  * ti_sci_free_irq() - Helper API to free the irq route between the
2290  *			   requested source and destination
2291  * @handle:		Pointer to TISCI handle.
2292  * @valid_params:	Bit fields defining the validity of certain params
2293  * @src_id:		Device ID of the IRQ source
2294  * @src_index:		IRQ source index within the source device
2295  * @dst_id:		Device ID of the IRQ destination
2296  * @dst_host_irq:	IRQ number of the destination device
2297  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2298  * @vint:		Virtual interrupt to be used within the IA
2299  * @global_event:	Global event number to be used for the requesting event
2300  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2301  * @s_host:		Secondary host ID for which the irq/event is being
2302  *			requested.
2303  *
2304  * Return: 0 if all went fine, else return appropriate error.
2305  */
2306 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
2307 			   u16 src_id, u16 src_index, u16 dst_id,
2308 			   u16 dst_host_irq, u16 ia_id, u16 vint,
2309 			   u16 global_event, u8 vint_status_bit, u8 s_host)
2310 {
2311 	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
2312 		 __func__, valid_params, src_id, src_index,
2313 		 dst_id, dst_host_irq, ia_id, vint, global_event,
2314 		 vint_status_bit);
2315 
2316 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
2317 				 dst_id, dst_host_irq, ia_id, vint,
2318 				 global_event, vint_status_bit, s_host,
2319 				 TI_SCI_MSG_FREE_IRQ);
2320 }
2321 
2322 /**
2323  * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
2324  *			  source and destination.
2325  * @handle:		Pointer to TISCI handle.
2326  * @src_id:		Device ID of the IRQ source
2327  * @src_index:		IRQ source index within the source device
2328  * @dst_id:		Device ID of the IRQ destination
2329  * @dst_host_irq:	IRQ number of the destination device
2330  *
2331  * Return: 0 if all went fine, else return appropriate error.
2332  */
2333 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
2334 			      u16 src_index, u16 dst_id, u16 dst_host_irq)
2335 {
2336 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2337 
2338 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
2339 			      dst_host_irq, 0, 0, 0, 0, 0);
2340 }
2341 
2342 /**
2343  * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
2344  *				requested source and Interrupt Aggregator.
2345  * @handle:		Pointer to TISCI handle.
2346  * @src_id:		Device ID of the IRQ source
2347  * @src_index:		IRQ source index within the source device
2348  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2349  * @vint:		Virtual interrupt to be used within the IA
2350  * @global_event:	Global event number to be used for the requesting event
2351  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2352  *
2353  * Return: 0 if all went fine, else return appropriate error.
2354  */
2355 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
2356 				    u16 src_id, u16 src_index, u16 ia_id,
2357 				    u16 vint, u16 global_event,
2358 				    u8 vint_status_bit)
2359 {
2360 	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
2361 			   MSG_FLAG_GLB_EVNT_VALID |
2362 			   MSG_FLAG_VINT_STS_BIT_VALID;
2363 
2364 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
2365 			      ia_id, vint, global_event, vint_status_bit, 0);
2366 }
2367 
2368 /**
2369  * ti_sci_cmd_free_irq() - Free a host irq route between the
2370  *			   requested source and destination.
2371  * @handle:		Pointer to TISCI handle.
2372  * @src_id:		Device ID of the IRQ source
2373  * @src_index:		IRQ source index within the source device
2374  * @dst_id:		Device ID of the IRQ destination
2375  * @dst_host_irq:	IRQ number of the destination device
2376  *
2377  * Return: 0 if all went fine, else return appropriate error.
2378  */
2379 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2380 			       u16 src_index, u16 dst_id, u16 dst_host_irq)
2381 {
2382 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2383 
2384 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2385 			       dst_host_irq, 0, 0, 0, 0, 0);
2386 }
2387 
2388 /**
2389  * ti_sci_cmd_free_event_map() - Free an event map between the requested source
2390  *				 and Interrupt Aggregator.
2391  * @handle:		Pointer to TISCI handle.
2392  * @src_id:		Device ID of the IRQ source
2393  * @src_index:		IRQ source index within the source device
2394  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2395  * @vint:		Virtual interrupt to be used within the IA
2396  * @global_event:	Global event number to be used for the requesting event
2397  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2398  *
2399  * Return: 0 if all went fine, else return appropriate error.
2400  */
2401 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2402 				     u16 src_id, u16 src_index, u16 ia_id,
2403 				     u16 vint, u16 global_event,
2404 				     u8 vint_status_bit)
2405 {
2406 	u32 valid_params = MSG_FLAG_IA_ID_VALID |
2407 			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2408 			   MSG_FLAG_VINT_STS_BIT_VALID;
2409 
2410 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2411 			       ia_id, vint, global_event, vint_status_bit, 0);
2412 }
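
/*
 * Note on valid_params (descriptive, based on the helpers above):
 * TI_SCI_MSG_SET_IRQ and TI_SCI_MSG_FREE_IRQ share one request layout, and
 * each MSG_FLAG_*_VALID bit tells firmware which optional fields of
 * ti_sci_msg_req_manage_irq to honor. A direct host IRQ route therefore
 * sets only the DST_ID/DST_HOST_IRQ bits, while an event map sets the
 * IA_ID/VINT/GLB_EVNT/VINT_STS_BIT group.
 */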
2413 
2414 /**
2415  * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
2416  * @handle:	Pointer to TI SCI handle.
2417  * @params:	Pointer to ti_sci_msg_rm_ring_cfg ring config structure
2418  *
2419  * Return: 0 if all went well, else returns appropriate error value.
2420  *
2421  * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
2422  * more info.
2423  */
2424 static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
2425 				  const struct ti_sci_msg_rm_ring_cfg *params)
2426 {
2427 	struct ti_sci_msg_rm_ring_cfg_req *req;
2428 	struct ti_sci_msg_hdr *resp;
2429 	struct ti_sci_xfer *xfer;
2430 	struct ti_sci_info *info;
2431 	struct device *dev;
2432 	int ret = 0;
2433 
2434 	if (IS_ERR_OR_NULL(handle))
2435 		return -EINVAL;
2436 
2437 	info = handle_to_ti_sci_info(handle);
2438 	dev = info->dev;
2439 
2440 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2441 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2442 				   sizeof(*req), sizeof(*resp));
2443 	if (IS_ERR(xfer)) {
2444 		ret = PTR_ERR(xfer);
2445 		dev_err(dev, "RM_RA:Message alloc failed(%d)\n", ret);
2446 		return ret;
2447 	}
2448 	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2449 	req->valid_params = params->valid_params;
2450 	req->nav_id = params->nav_id;
2451 	req->index = params->index;
2452 	req->addr_lo = params->addr_lo;
2453 	req->addr_hi = params->addr_hi;
2454 	req->count = params->count;
2455 	req->mode = params->mode;
2456 	req->size = params->size;
2457 	req->order_id = params->order_id;
2458 	req->virtid = params->virtid;
2459 	req->asel = params->asel;
2460 
2461 	ret = ti_sci_do_xfer(info, xfer);
2462 	if (ret) {
2463 		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2464 		goto fail;
2465 	}
2466 
2467 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2468 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2469 
2470 fail:
2471 	ti_sci_put_one_xfer(&info->minfo, xfer);
2472 	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
2473 	return ret;
2474 }
2475 
2476 /**
2477  * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2478  * @handle:	Pointer to TI SCI handle.
2479  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2480  *		pairing
2481  * @src_thread:	Source PSI-L thread ID
2482  * @dst_thread: Destination PSI-L thread ID
2483  *
2484  * Return: 0 if all went well, else returns appropriate error value.
2485  */
2486 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2487 				   u32 nav_id, u32 src_thread, u32 dst_thread)
2488 {
2489 	struct ti_sci_msg_psil_pair *req;
2490 	struct ti_sci_msg_hdr *resp;
2491 	struct ti_sci_xfer *xfer;
2492 	struct ti_sci_info *info;
2493 	struct device *dev;
2494 	int ret = 0;
2495 
2496 	if (IS_ERR(handle))
2497 		return PTR_ERR(handle);
2498 	if (!handle)
2499 		return -EINVAL;
2500 
2501 	info = handle_to_ti_sci_info(handle);
2502 	dev = info->dev;
2503 
2504 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2505 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2506 				   sizeof(*req), sizeof(*resp));
2507 	if (IS_ERR(xfer)) {
2508 		ret = PTR_ERR(xfer);
2509 		dev_err(dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2510 		return ret;
2511 	}
2512 	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2513 	req->nav_id = nav_id;
2514 	req->src_thread = src_thread;
2515 	req->dst_thread = dst_thread;
2516 
2517 	ret = ti_sci_do_xfer(info, xfer);
2518 	if (ret) {
2519 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2520 		goto fail;
2521 	}
2522 
2523 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2524 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2525 
2526 fail:
2527 	ti_sci_put_one_xfer(&info->minfo, xfer);
2528 
2529 	return ret;
2530 }
2531 
2532 /**
2533  * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2534  * @handle:	Pointer to TI SCI handle.
2535  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2536  *		unpairing
2537  * @src_thread:	Source PSI-L thread ID
2538  * @dst_thread:	Destination PSI-L thread ID
2539  *
2540  * Return: 0 if all went well, else returns appropriate error value.
2541  */
2542 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2543 				     u32 nav_id, u32 src_thread, u32 dst_thread)
2544 {
2545 	struct ti_sci_msg_psil_unpair *req;
2546 	struct ti_sci_msg_hdr *resp;
2547 	struct ti_sci_xfer *xfer;
2548 	struct ti_sci_info *info;
2549 	struct device *dev;
2550 	int ret = 0;
2551 
2552 	if (IS_ERR(handle))
2553 		return PTR_ERR(handle);
2554 	if (!handle)
2555 		return -EINVAL;
2556 
2557 	info = handle_to_ti_sci_info(handle);
2558 	dev = info->dev;
2559 
2560 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2561 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2562 				   sizeof(*req), sizeof(*resp));
2563 	if (IS_ERR(xfer)) {
2564 		ret = PTR_ERR(xfer);
2565 		dev_err(dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2566 		return ret;
2567 	}
2568 	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2569 	req->nav_id = nav_id;
2570 	req->src_thread = src_thread;
2571 	req->dst_thread = dst_thread;
2572 
2573 	ret = ti_sci_do_xfer(info, xfer);
2574 	if (ret) {
2575 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2576 		goto fail;
2577 	}
2578 
2579 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2580 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2581 
2582 fail:
2583 	ti_sci_put_one_xfer(&info->minfo, xfer);
2584 
2585 	return ret;
2586 }
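
/*
 * Illustrative sketch (assumed caller-side code): DMA setup typically pairs
 * a PSI-L source thread to a destination thread before use and unpairs the
 * same threads on teardown:
 *
 *	ret = ti_sci_cmd_rm_psil_pair(handle, nav_id, src_thread, dst_thread);
 *	if (ret)
 *		return ret;
 *	...
 *	ti_sci_cmd_rm_psil_unpair(handle, nav_id, src_thread, dst_thread);
 */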
2587 
2588 /**
2589  * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2590  * @handle:	Pointer to TI SCI handle.
2591  * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2592  *		structure
2593  *
2594  * Return: 0 if all went well, else returns appropriate error value.
2595  *
2596  * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2597  * more info.
2598  */
2599 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2600 			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2601 {
2602 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2603 	struct ti_sci_msg_hdr *resp;
2604 	struct ti_sci_xfer *xfer;
2605 	struct ti_sci_info *info;
2606 	struct device *dev;
2607 	int ret = 0;
2608 
2609 	if (IS_ERR_OR_NULL(handle))
2610 		return -EINVAL;
2611 
2612 	info = handle_to_ti_sci_info(handle);
2613 	dev = info->dev;
2614 
2615 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2616 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2617 				   sizeof(*req), sizeof(*resp));
2618 	if (IS_ERR(xfer)) {
2619 		ret = PTR_ERR(xfer);
2620 		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2621 		return ret;
2622 	}
2623 	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2624 	req->valid_params = params->valid_params;
2625 	req->nav_id = params->nav_id;
2626 	req->index = params->index;
2627 	req->tx_pause_on_err = params->tx_pause_on_err;
2628 	req->tx_filt_einfo = params->tx_filt_einfo;
2629 	req->tx_filt_pswords = params->tx_filt_pswords;
2630 	req->tx_atype = params->tx_atype;
2631 	req->tx_chan_type = params->tx_chan_type;
2632 	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2633 	req->tx_fetch_size = params->tx_fetch_size;
2634 	req->tx_credit_count = params->tx_credit_count;
2635 	req->txcq_qnum = params->txcq_qnum;
2636 	req->tx_priority = params->tx_priority;
2637 	req->tx_qos = params->tx_qos;
2638 	req->tx_orderid = params->tx_orderid;
2639 	req->fdepth = params->fdepth;
2640 	req->tx_sched_priority = params->tx_sched_priority;
2641 	req->tx_burst_size = params->tx_burst_size;
2642 	req->tx_tdtype = params->tx_tdtype;
2643 	req->extended_ch_type = params->extended_ch_type;
2644 
2645 	ret = ti_sci_do_xfer(info, xfer);
2646 	if (ret) {
2647 		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2648 		goto fail;
2649 	}
2650 
2651 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2652 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2653 
2654 fail:
2655 	ti_sci_put_one_xfer(&info->minfo, xfer);
2656 	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2657 	return ret;
2658 }
2659 
2660 /**
2661  * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2662  * @handle:	Pointer to TI SCI handle.
2663  * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2664  *		structure
2665  *
2666  * Return: 0 if all went well, else returns appropriate error value.
2667  *
2668  * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2669  * more info.
2670  */
2671 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2672 			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2673 {
2674 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2675 	struct ti_sci_msg_hdr *resp;
2676 	struct ti_sci_xfer *xfer;
2677 	struct ti_sci_info *info;
2678 	struct device *dev;
2679 	int ret = 0;
2680 
2681 	if (IS_ERR_OR_NULL(handle))
2682 		return -EINVAL;
2683 
2684 	info = handle_to_ti_sci_info(handle);
2685 	dev = info->dev;
2686 
2687 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2688 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2689 				   sizeof(*req), sizeof(*resp));
2690 	if (IS_ERR(xfer)) {
2691 		ret = PTR_ERR(xfer);
2692 		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2693 		return ret;
2694 	}
2695 	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2696 	req->valid_params = params->valid_params;
2697 	req->nav_id = params->nav_id;
2698 	req->index = params->index;
2699 	req->rx_fetch_size = params->rx_fetch_size;
2700 	req->rxcq_qnum = params->rxcq_qnum;
2701 	req->rx_priority = params->rx_priority;
2702 	req->rx_qos = params->rx_qos;
2703 	req->rx_orderid = params->rx_orderid;
2704 	req->rx_sched_priority = params->rx_sched_priority;
2705 	req->flowid_start = params->flowid_start;
2706 	req->flowid_cnt = params->flowid_cnt;
2707 	req->rx_pause_on_err = params->rx_pause_on_err;
2708 	req->rx_atype = params->rx_atype;
2709 	req->rx_chan_type = params->rx_chan_type;
2710 	req->rx_ignore_short = params->rx_ignore_short;
2711 	req->rx_ignore_long = params->rx_ignore_long;
2712 	req->rx_burst_size = params->rx_burst_size;
2713 
2714 	ret = ti_sci_do_xfer(info, xfer);
2715 	if (ret) {
2716 		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2717 		goto fail;
2718 	}
2719 
2720 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2721 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2722 
2723 fail:
2724 	ti_sci_put_one_xfer(&info->minfo, xfer);
2725 	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2726 	return ret;
2727 }
2728 
2729 /**
2730  * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2731  * @handle:	Pointer to TI SCI handle.
2732  * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2733  *		structure
2734  *
2735  * Return: 0 if all went well, else returns appropriate error value.
2736  *
2737  * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2738  * more info.
2739  */
2740 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2741 			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2742 {
2743 	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2744 	struct ti_sci_msg_hdr *resp;
2745 	struct ti_sci_xfer *xfer;
2746 	struct ti_sci_info *info;
2747 	struct device *dev;
2748 	int ret = 0;
2749 
2750 	if (IS_ERR_OR_NULL(handle))
2751 		return -EINVAL;
2752 
2753 	info = handle_to_ti_sci_info(handle);
2754 	dev = info->dev;
2755 
2756 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2757 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2758 				   sizeof(*req), sizeof(*resp));
2759 	if (IS_ERR(xfer)) {
2760 		ret = PTR_ERR(xfer);
2761 		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2762 		return ret;
2763 	}
2764 	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2765 	req->valid_params = params->valid_params;
2766 	req->nav_id = params->nav_id;
2767 	req->flow_index = params->flow_index;
2768 	req->rx_einfo_present = params->rx_einfo_present;
2769 	req->rx_psinfo_present = params->rx_psinfo_present;
2770 	req->rx_error_handling = params->rx_error_handling;
2771 	req->rx_desc_type = params->rx_desc_type;
2772 	req->rx_sop_offset = params->rx_sop_offset;
2773 	req->rx_dest_qnum = params->rx_dest_qnum;
2774 	req->rx_src_tag_hi = params->rx_src_tag_hi;
2775 	req->rx_src_tag_lo = params->rx_src_tag_lo;
2776 	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2777 	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2778 	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2779 	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2780 	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2781 	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2782 	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2783 	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2784 	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2785 	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2786 	req->rx_ps_location = params->rx_ps_location;
2787 
2788 	ret = ti_sci_do_xfer(info, xfer);
2789 	if (ret) {
2790 		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2791 		goto fail;
2792 	}
2793 
2794 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2795 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2796 
2797 fail:
2798 	ti_sci_put_one_xfer(&info->minfo, xfer);
2799 	dev_dbg(dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2800 	return ret;
2801 }
2802 
2803 /**
2804  * ti_sci_cmd_proc_request() - Command to request a physical processor control
2805  * @handle:	Pointer to TI SCI handle
2806  * @proc_id:	Processor ID this request is for
2807  *
2808  * Return: 0 if all went well, else returns appropriate error value.
2809  */
2810 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2811 				   u8 proc_id)
2812 {
2813 	struct ti_sci_msg_req_proc_request *req;
2814 	struct ti_sci_msg_hdr *resp;
2815 	struct ti_sci_info *info;
2816 	struct ti_sci_xfer *xfer;
2817 	struct device *dev;
2818 	int ret = 0;
2819 
2820 	if (!handle)
2821 		return -EINVAL;
2822 	if (IS_ERR(handle))
2823 		return PTR_ERR(handle);
2824 
2825 	info = handle_to_ti_sci_info(handle);
2826 	dev = info->dev;
2827 
2828 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2829 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2830 				   sizeof(*req), sizeof(*resp));
2831 	if (IS_ERR(xfer)) {
2832 		ret = PTR_ERR(xfer);
2833 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2834 		return ret;
2835 	}
2836 	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2837 	req->processor_id = proc_id;
2838 
2839 	ret = ti_sci_do_xfer(info, xfer);
2840 	if (ret) {
2841 		dev_err(dev, "Mbox send fail %d\n", ret);
2842 		goto fail;
2843 	}
2844 
2845 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2846 
2847 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2848 
2849 fail:
2850 	ti_sci_put_one_xfer(&info->minfo, xfer);
2851 
2852 	return ret;
2853 }
2854 
2855 /**
2856  * ti_sci_cmd_proc_release() - Command to release a physical processor control
2857  * @handle:	Pointer to TI SCI handle
2858  * @proc_id:	Processor ID this request is for
2859  *
2860  * Return: 0 if all went well, else returns appropriate error value.
2861  */
2862 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2863 				   u8 proc_id)
2864 {
2865 	struct ti_sci_msg_req_proc_release *req;
2866 	struct ti_sci_msg_hdr *resp;
2867 	struct ti_sci_info *info;
2868 	struct ti_sci_xfer *xfer;
2869 	struct device *dev;
2870 	int ret = 0;
2871 
2872 	if (!handle)
2873 		return -EINVAL;
2874 	if (IS_ERR(handle))
2875 		return PTR_ERR(handle);
2876 
2877 	info = handle_to_ti_sci_info(handle);
2878 	dev = info->dev;
2879 
2880 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2881 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2882 				   sizeof(*req), sizeof(*resp));
2883 	if (IS_ERR(xfer)) {
2884 		ret = PTR_ERR(xfer);
2885 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2886 		return ret;
2887 	}
2888 	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2889 	req->processor_id = proc_id;
2890 
2891 	ret = ti_sci_do_xfer(info, xfer);
2892 	if (ret) {
2893 		dev_err(dev, "Mbox send fail %d\n", ret);
2894 		goto fail;
2895 	}
2896 
2897 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2898 
2899 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2900 
2901 fail:
2902 	ti_sci_put_one_xfer(&info->minfo, xfer);
2903 
2904 	return ret;
2905 }
2906 
2907 /**
2908  * ti_sci_cmd_proc_handover() - Command to handover a physical processor
2909  *				control to a host in the processor's access
2910  *				control list.
2911  * @handle:	Pointer to TI SCI handle
2912  * @proc_id:	Processor ID this request is for
2913  * @host_id:	Host ID to get the control of the processor
2914  *
2915  * Return: 0 if all went well, else returns appropriate error value.
2916  */
2917 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2918 				    u8 proc_id, u8 host_id)
2919 {
2920 	struct ti_sci_msg_req_proc_handover *req;
2921 	struct ti_sci_msg_hdr *resp;
2922 	struct ti_sci_info *info;
2923 	struct ti_sci_xfer *xfer;
2924 	struct device *dev;
2925 	int ret = 0;
2926 
2927 	if (!handle)
2928 		return -EINVAL;
2929 	if (IS_ERR(handle))
2930 		return PTR_ERR(handle);
2931 
2932 	info = handle_to_ti_sci_info(handle);
2933 	dev = info->dev;
2934 
2935 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2936 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2937 				   sizeof(*req), sizeof(*resp));
2938 	if (IS_ERR(xfer)) {
2939 		ret = PTR_ERR(xfer);
2940 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2941 		return ret;
2942 	}
2943 	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2944 	req->processor_id = proc_id;
2945 	req->host_id = host_id;
2946 
2947 	ret = ti_sci_do_xfer(info, xfer);
2948 	if (ret) {
2949 		dev_err(dev, "Mbox send fail %d\n", ret);
2950 		goto fail;
2951 	}
2952 
2953 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2954 
2955 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2956 
2957 fail:
2958 	ti_sci_put_one_xfer(&info->minfo, xfer);
2959 
2960 	return ret;
2961 }
2962 
2963 /**
2964  * ti_sci_cmd_proc_set_config() - Command to set the processor boot
2965  *				    configuration flags
2966  * @handle:		Pointer to TI SCI handle
2967  * @proc_id:		Processor ID this request is for
2968  * @bootvector:		Processor Boot vector (start address)
2969  * @config_flags_set:	Configuration flags to be set
2970  * @config_flags_clear:	Configuration flags to be cleared.
2971  *
2972  * Return: 0 if all went well, else returns appropriate error value.
2973  */
2974 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
2975 				      u8 proc_id, u64 bootvector,
2976 				      u32 config_flags_set,
2977 				      u32 config_flags_clear)
2978 {
2979 	struct ti_sci_msg_req_set_config *req;
2980 	struct ti_sci_msg_hdr *resp;
2981 	struct ti_sci_info *info;
2982 	struct ti_sci_xfer *xfer;
2983 	struct device *dev;
2984 	int ret = 0;
2985 
2986 	if (!handle)
2987 		return -EINVAL;
2988 	if (IS_ERR(handle))
2989 		return PTR_ERR(handle);
2990 
2991 	info = handle_to_ti_sci_info(handle);
2992 	dev = info->dev;
2993 
2994 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
2995 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2996 				   sizeof(*req), sizeof(*resp));
2997 	if (IS_ERR(xfer)) {
2998 		ret = PTR_ERR(xfer);
2999 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3000 		return ret;
3001 	}
3002 	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
3003 	req->processor_id = proc_id;
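	/* Split the 64-bit boot vector into the low/high message fields */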
3004 	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
3005 	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
3006 				TI_SCI_ADDR_HIGH_SHIFT;
3007 	req->config_flags_set = config_flags_set;
3008 	req->config_flags_clear = config_flags_clear;
3009 
3010 	ret = ti_sci_do_xfer(info, xfer);
3011 	if (ret) {
3012 		dev_err(dev, "Mbox send fail %d\n", ret);
3013 		goto fail;
3014 	}
3015 
3016 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
3017 
3018 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
3019 
3020 fail:
3021 	ti_sci_put_one_xfer(&info->minfo, xfer);
3022 
3023 	return ret;
3024 }
3025 
3026 /**
3027  * ti_sci_cmd_proc_set_control() - Command to set the processor boot
3028  *				     control flags
3029  * @handle:			Pointer to TI SCI handle
3030  * @proc_id:			Processor ID this request is for
3031  * @control_flags_set:		Control flags to be set
3032  * @control_flags_clear:	Control flags to be cleared
3033  *
3034  * Return: 0 if all went well, else returns appropriate error value.
3035  */
3036 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
3037 				       u8 proc_id, u32 control_flags_set,
3038 				       u32 control_flags_clear)
3039 {
3040 	struct ti_sci_msg_req_set_ctrl *req;
3041 	struct ti_sci_msg_hdr *resp;
3042 	struct ti_sci_info *info;
3043 	struct ti_sci_xfer *xfer;
3044 	struct device *dev;
3045 	int ret = 0;
3046 
3047 	if (!handle)
3048 		return -EINVAL;
3049 	if (IS_ERR(handle))
3050 		return PTR_ERR(handle);
3051 
3052 	info = handle_to_ti_sci_info(handle);
3053 	dev = info->dev;
3054 
3055 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
3056 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
3057 				   sizeof(*req), sizeof(*resp));
3058 	if (IS_ERR(xfer)) {
3059 		ret = PTR_ERR(xfer);
3060 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3061 		return ret;
3062 	}
3063 	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
3064 	req->processor_id = proc_id;
3065 	req->control_flags_set = control_flags_set;
3066 	req->control_flags_clear = control_flags_clear;
3067 
3068 	ret = ti_sci_do_xfer(info, xfer);
3069 	if (ret) {
3070 		dev_err(dev, "Mbox send fail %d\n", ret);
3071 		goto fail;
3072 	}
3073 
3074 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
3075 
3076 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
3077 
3078 fail:
3079 	ti_sci_put_one_xfer(&info->minfo, xfer);
3080 
3081 	return ret;
3082 }
3083 
3084 /**
3085  * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
3086  * @handle:	Pointer to TI SCI handle
3087  * @proc_id:	Processor ID this request is for
3088  * @bv:		Processor Boot vector (start address)
3089  * @cfg_flags:	Processor specific configuration flags
3090  * @ctrl_flags:	Processor specific control flags
3091  * @sts_flags:	Processor specific status flags
3092  *
3093  * Return: 0 if all went well, else returns appropriate error value.
3094  */
3095 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
3096 				      u8 proc_id, u64 *bv, u32 *cfg_flags,
3097 				      u32 *ctrl_flags, u32 *sts_flags)
3098 {
3099 	struct ti_sci_msg_resp_get_status *resp;
3100 	struct ti_sci_msg_req_get_status *req;
3101 	struct ti_sci_info *info;
3102 	struct ti_sci_xfer *xfer;
3103 	struct device *dev;
3104 	int ret = 0;
3105 
3106 	if (!handle)
3107 		return -EINVAL;
3108 	if (IS_ERR(handle))
3109 		return PTR_ERR(handle);
3110 
3111 	info = handle_to_ti_sci_info(handle);
3112 	dev = info->dev;
3113 
3114 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
3115 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
3116 				   sizeof(*req), sizeof(*resp));
3117 	if (IS_ERR(xfer)) {
3118 		ret = PTR_ERR(xfer);
3119 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3120 		return ret;
3121 	}
3122 	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
3123 	req->processor_id = proc_id;
3124 
3125 	ret = ti_sci_do_xfer(info, xfer);
3126 	if (ret) {
3127 		dev_err(dev, "Mbox send fail %d\n", ret);
3128 		goto fail;
3129 	}
3130 
3131 	resp = (struct ti_sci_msg_resp_get_status *)xfer->xfer_buf;
3132 
3133 	if (!ti_sci_is_response_ack(resp)) {
3134 		ret = -ENODEV;
3135 	} else {
3136 		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
3137 		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
3138 		       TI_SCI_ADDR_HIGH_MASK);
3139 		*cfg_flags = resp->config_flags;
3140 		*ctrl_flags = resp->control_flags;
3141 		*sts_flags = resp->status_flags;
3142 	}
3143 
3144 fail:
3145 	ti_sci_put_one_xfer(&info->minfo, xfer);
3146 
3147 	return ret;
3148 }
3149 
3150 /**
3151  * ti_sci_setup_ops() - Setup the operations structures
3152  * @info:	pointer to TISCI instance info
3153  */
3154 static void ti_sci_setup_ops(struct ti_sci_info *info)
3155 {
3156 	struct ti_sci_ops *ops = &info->handle.ops;
3157 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
3158 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
3159 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
3160 	struct ti_sci_pm_ops *pmops = &ops->pm_ops;
3161 	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
3162 	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
3163 	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
3164 	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
3165 	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
3166 	struct ti_sci_proc_ops *pops = &ops->proc_ops;
3167 
3168 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
3169 
3170 	dops->get_device = ti_sci_cmd_get_device;
3171 	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
3172 	dops->idle_device = ti_sci_cmd_idle_device;
3173 	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
3174 	dops->put_device = ti_sci_cmd_put_device;
3175 
3176 	dops->is_valid = ti_sci_cmd_dev_is_valid;
3177 	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
3178 	dops->is_idle = ti_sci_cmd_dev_is_idle;
3179 	dops->is_stop = ti_sci_cmd_dev_is_stop;
3180 	dops->is_on = ti_sci_cmd_dev_is_on;
3181 	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
3182 	dops->set_device_resets = ti_sci_cmd_set_device_resets;
3183 	dops->get_device_resets = ti_sci_cmd_get_device_resets;
3184 
3185 	cops->get_clock = ti_sci_cmd_get_clock;
3186 	cops->idle_clock = ti_sci_cmd_idle_clock;
3187 	cops->put_clock = ti_sci_cmd_put_clock;
3188 	cops->is_auto = ti_sci_cmd_clk_is_auto;
3189 	cops->is_on = ti_sci_cmd_clk_is_on;
3190 	cops->is_off = ti_sci_cmd_clk_is_off;
3191 
3192 	cops->set_parent = ti_sci_cmd_clk_set_parent;
3193 	cops->get_parent = ti_sci_cmd_clk_get_parent;
3194 	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
3195 
3196 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
3197 	cops->set_freq = ti_sci_cmd_clk_set_freq;
3198 	cops->get_freq = ti_sci_cmd_clk_get_freq;
3199 
3200 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3201 		pr_debug("detected DM managed LPM in fw_caps\n");
3202 		pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason;
3203 		pmops->set_device_constraint = ti_sci_cmd_set_device_constraint;
3204 		pmops->set_latency_constraint = ti_sci_cmd_set_latency_constraint;
3205 	}
3206 
3207 	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
3208 	rm_core_ops->get_range_from_shost =
3209 				ti_sci_cmd_get_resource_range_from_shost;
3210 
3211 	iops->set_irq = ti_sci_cmd_set_irq;
3212 	iops->set_event_map = ti_sci_cmd_set_event_map;
3213 	iops->free_irq = ti_sci_cmd_free_irq;
3214 	iops->free_event_map = ti_sci_cmd_free_event_map;
3215 
3216 	rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
3217 
3218 	psilops->pair = ti_sci_cmd_rm_psil_pair;
3219 	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
3220 
3221 	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
3222 	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
3223 	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
3224 
3225 	pops->request = ti_sci_cmd_proc_request;
3226 	pops->release = ti_sci_cmd_proc_release;
3227 	pops->handover = ti_sci_cmd_proc_handover;
3228 	pops->set_config = ti_sci_cmd_proc_set_config;
3229 	pops->set_control = ti_sci_cmd_proc_set_control;
3230 	pops->get_status = ti_sci_cmd_proc_get_status;
3231 }
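
/*
 * Illustrative sketch (assumed client-side code): once ti_sci_setup_ops()
 * has populated the handle, clients reach these commands through the ops
 * tables rather than calling the static functions directly, e.g. the clock
 * frequency query wired up above:
 *
 *	u64 freq;
 *
 *	ret = handle->ops.clk_ops.get_freq(handle, dev_id, clk_id, &freq);
 */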
3232 
3233 /**
3234  * ti_sci_get_handle() - Get the TI SCI handle for a device
3235  * @dev:	Pointer to device for which we want SCI handle
3236  *
3237  * NOTE: The function does not track individual clients of the framework
3238  * and is expected to be maintained by the caller of the TI SCI protocol library.
3239  * ti_sci_put_handle() must be balanced with a successful ti_sci_get_handle().
3240  * Return: pointer to handle if successful, else:
3241  * -EPROBE_DEFER if the instance is not ready
3242  * -ENODEV if the required node handler is missing
3243  * -EINVAL if invalid conditions are encountered.
3244  */
3245 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
3246 {
3247 	struct device_node *ti_sci_np;
3248 	struct ti_sci_handle *handle = NULL;
3249 	struct ti_sci_info *info;
3250 
3251 	if (!dev) {
3252 		pr_err("I need a device pointer\n");
3253 		return ERR_PTR(-EINVAL);
3254 	}
3255 	ti_sci_np = of_get_parent(dev->of_node);
3256 	if (!ti_sci_np) {
3257 		dev_err(dev, "No OF information\n");
3258 		return ERR_PTR(-EINVAL);
3259 	}
3260 
3261 	mutex_lock(&ti_sci_list_mutex);
3262 	list_for_each_entry(info, &ti_sci_list, node) {
3263 		if (ti_sci_np == info->dev->of_node) {
3264 			handle = &info->handle;
3265 			info->users++;
3266 			break;
3267 		}
3268 	}
3269 	mutex_unlock(&ti_sci_list_mutex);
3270 	of_node_put(ti_sci_np);
3271 
3272 	if (!handle)
3273 		return ERR_PTR(-EPROBE_DEFER);
3274 
3275 	return handle;
3276 }
3277 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
3278 
3279 /**
3280  * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
3281  * @handle:	Handle acquired by ti_sci_get_handle
3282  *
3283  * NOTE: The function does not track individual clients of the framework
3284  * and is expected to be maintained by the caller of the TI SCI protocol library.
3285  * ti_sci_put_handle() must be balanced with a successful ti_sci_get_handle().
3286  *
3287  * Return: 0 if successfully released;
3288  * if an error pointer was passed, the error value is returned back;
3289  * if NULL was passed, -EINVAL is returned.
3290  */
3291 int ti_sci_put_handle(const struct ti_sci_handle *handle)
3292 {
3293 	struct ti_sci_info *info;
3294 
3295 	if (IS_ERR(handle))
3296 		return PTR_ERR(handle);
3297 	if (!handle)
3298 		return -EINVAL;
3299 
3300 	info = handle_to_ti_sci_info(handle);
3301 	mutex_lock(&ti_sci_list_mutex);
3302 	if (!WARN_ON(!info->users))
3303 		info->users--;
3304 	mutex_unlock(&ti_sci_list_mutex);
3305 
3306 	return 0;
3307 }
3308 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
3309 
3310 static void devm_ti_sci_release(struct device *dev, void *res)
3311 {
3312 	const struct ti_sci_handle **ptr = res;
3313 	const struct ti_sci_handle *handle = *ptr;
3314 	int ret;
3315 
3316 	ret = ti_sci_put_handle(handle);
3317 	if (ret)
3318 		dev_err(dev, "failed to put handle %d\n", ret);
3319 }
3320 
3321 /**
3322  * devm_ti_sci_get_handle() - Managed get handle
3323  * @dev:	device for which we want SCI handle.
3324  *
3325  * NOTE: This releases the handle once the device resources are
3326  * no longer needed. It MUST NOT be released with ti_sci_put_handle().
3327  * The function does not track individual clients of the framework
3328  * and is expected to be maintained by the caller of the TI SCI protocol library.
3329  *
3330  * Return: pointer to handle if successful, else corresponding error pointer.
3331  */
3332 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
3333 {
3334 	const struct ti_sci_handle **ptr;
3335 	const struct ti_sci_handle *handle;
3336 
3337 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3338 	if (!ptr)
3339 		return ERR_PTR(-ENOMEM);
3340 	handle = ti_sci_get_handle(dev);
3341 
3342 	if (!IS_ERR(handle)) {
3343 		*ptr = handle;
3344 		devres_add(dev, ptr);
3345 	} else {
3346 		devres_free(ptr);
3347 	}
3348 
3349 	return handle;
3350 }
3351 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
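
/*
 * Illustrative probe-time usage (assumed client driver code): the managed
 * variant drops the reference automatically on driver detach, so no
 * explicit ti_sci_put_handle() call is needed:
 *
 *	const struct ti_sci_handle *h = devm_ti_sci_get_handle(&pdev->dev);
 *
 *	if (IS_ERR(h))
 *		return PTR_ERR(h);
 */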
3352 
3353 /**
3354  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
3355  * @np:		device node
3356  * @property:	property name containing phandle on TISCI node
3357  *
3358  * NOTE: The function does not track individual clients of the framework
3359  * and is expected to be maintained by the caller of the TI SCI protocol library.
3360  * ti_sci_put_handle() must be balanced with a successful ti_sci_get_by_phandle().
3361  * Return: pointer to handle if successful, else:
3362  * -EPROBE_DEFER if the instance is not ready
3363  * -ENODEV if the required node handler is missing
3364  * -EINVAL if invalid conditions are encountered.
3365  */
3366 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
3367 						  const char *property)
3368 {
3369 	struct ti_sci_handle *handle = NULL;
3370 	struct device_node *ti_sci_np;
3371 	struct ti_sci_info *info;
3372 
3373 	if (!np) {
3374 		pr_err("I need a device node pointer\n");
3375 		return ERR_PTR(-EINVAL);
3376 	}
3377 
3378 	ti_sci_np = of_parse_phandle(np, property, 0);
3379 	if (!ti_sci_np)
3380 		return ERR_PTR(-ENODEV);
3381 
3382 	mutex_lock(&ti_sci_list_mutex);
3383 	list_for_each_entry(info, &ti_sci_list, node) {
3384 		if (ti_sci_np == info->dev->of_node) {
3385 			handle = &info->handle;
3386 			info->users++;
3387 			break;
3388 		}
3389 	}
3390 	mutex_unlock(&ti_sci_list_mutex);
3391 	of_node_put(ti_sci_np);
3392 
3393 	if (!handle)
3394 		return ERR_PTR(-EPROBE_DEFER);
3395 
3396 	return handle;
3397 }
3398 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
3399 
3400 /**
3401  * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
3402  * @dev:	Device pointer requesting TISCI handle
3403  * @property:	property name containing phandle on TISCI node
3404  *
3405  * NOTE: This releases the handle once the device resources are
3406  * no longer needed. It MUST NOT be released with ti_sci_put_handle().
3407  * The function does not track individual clients of the framework
3408  * and is expected to be maintained by the caller of the TI SCI protocol library.
3409  *
3410  * Return: 0 if all went fine, else corresponding error.
3411  */
3412 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
3413 						       const char *property)
3414 {
3415 	const struct ti_sci_handle *handle;
3416 	const struct ti_sci_handle **ptr;
3417 
3418 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3419 	if (!ptr)
3420 		return ERR_PTR(-ENOMEM);
3421 	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
3422 
3423 	if (!IS_ERR(handle)) {
3424 		*ptr = handle;
3425 		devres_add(dev, ptr);
3426 	} else {
3427 		devres_free(ptr);
3428 	}
3429 
3430 	return handle;
3431 }
3432 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
3433 
3434 /**
3435  * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
3436  * @res:	Pointer to the TISCI resource
3437  *
3438  * Return: resource number if allocation succeeded, else TI_SCI_RESOURCE_NULL.
3439  */
3440 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3441 {
3442 	unsigned long flags;
3443 	u16 set, free_bit;
3444 
3445 	raw_spin_lock_irqsave(&res->lock, flags);
3446 	for (set = 0; set < res->sets; set++) {
3447 		struct ti_sci_resource_desc *desc = &res->desc[set];
3448 		int res_count = desc->num + desc->num_sec;
3449 
3450 		free_bit = find_first_zero_bit(desc->res_map, res_count);
3451 		if (free_bit != res_count) {
3452 			__set_bit(free_bit, desc->res_map);
3453 			raw_spin_unlock_irqrestore(&res->lock, flags);
3454 
3455 			if (desc->num && free_bit < desc->num)
3456 				return desc->start + free_bit;
3457 			else
3458 				return desc->start_sec + free_bit;
3459 		}
3460 	}
3461 	raw_spin_unlock_irqrestore(&res->lock, flags);
3462 
3463 	return TI_SCI_RESOURCE_NULL;
3464 }
3465 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
3466 
3467 /**
3468  * ti_sci_release_resource() - Release a resource from TISCI resource.
3469  * @res:	Pointer to the TISCI resource
3470  * @id:		Resource id to be released.
3471  */
3472 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3473 {
3474 	unsigned long flags;
3475 	u16 set;
3476 
3477 	raw_spin_lock_irqsave(&res->lock, flags);
3478 	for (set = 0; set < res->sets; set++) {
3479 		struct ti_sci_resource_desc *desc = &res->desc[set];
3480 
3481 		if (desc->num && desc->start <= id &&
3482 		    (desc->start + desc->num) > id)
3483 			__clear_bit(id - desc->start, desc->res_map);
3484 		else if (desc->num_sec && desc->start_sec <= id &&
3485 			 (desc->start_sec + desc->num_sec) > id)
3486 			__clear_bit(id - desc->start_sec, desc->res_map);
3487 	}
3488 	raw_spin_unlock_irqrestore(&res->lock, flags);
3489 }
3490 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
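
/*
 * Usage sketch (illustration only): the typical allocate/release pairing on
 * a struct ti_sci_resource initialized by the devm_ helpers below. A return
 * of TI_SCI_RESOURCE_NULL means every set in the resource is exhausted.
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;
 *	...
 *	ti_sci_release_resource(res, id);
 */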
3491 
3492 /**
3493  * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
3494  * @res:	Pointer to the TISCI resource
3495  *
3496  * Return: Total number of available resources.
3497  */
3498 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3499 {
3500 	u32 set, count = 0;
3501 
3502 	for (set = 0; set < res->sets; set++)
3503 		count += res->desc[set].num + res->desc[set].num_sec;
3504 
3505 	return count;
3506 }
3507 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3508 
3509 /**
3510  * devm_ti_sci_get_resource_sets() - Get TISCI resource sets assigned to a device
3511  * @handle:	TISCI handle
3512  * @dev:	Device pointer to which the resource is assigned
3513  * @dev_id:	TISCI device id to which the resource is assigned
3514  * @sub_types:	Array of sub_types assigned corresponding to device
3515  * @sets:	Number of sub_types
3516  *
3517  * Return: Pointer to ti_sci_resource if all went well else appropriate
3518  *	   error pointer.
3519  */
3520 static struct ti_sci_resource *
3521 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
3522 			      struct device *dev, u32 dev_id, u32 *sub_types,
3523 			      u32 sets)
3524 {
3525 	struct ti_sci_resource *res;
3526 	bool valid_set = false;
3527 	int i, ret, res_count;
3528 
3529 	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3530 	if (!res)
3531 		return ERR_PTR(-ENOMEM);
3532 
3533 	res->sets = sets;
3534 	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3535 				 GFP_KERNEL);
3536 	if (!res->desc)
3537 		return ERR_PTR(-ENOMEM);
3538 
3539 	for (i = 0; i < res->sets; i++) {
3540 		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3541 							sub_types[i],
3542 							&res->desc[i]);
3543 		if (ret) {
3544 			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3545 				dev_id, sub_types[i]);
3546 			memset(&res->desc[i], 0, sizeof(res->desc[i]));
3547 			continue;
3548 		}
3549 
3550 		dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
3551 			dev_id, sub_types[i], res->desc[i].start,
3552 			res->desc[i].num, res->desc[i].start_sec,
3553 			res->desc[i].num_sec);
3554 
3555 		valid_set = true;
3556 		res_count = res->desc[i].num + res->desc[i].num_sec;
3557 		res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count,
3558 							  GFP_KERNEL);
3559 		if (!res->desc[i].res_map)
3560 			return ERR_PTR(-ENOMEM);
3561 	}
3562 	raw_spin_lock_init(&res->lock);
3563 
3564 	if (valid_set)
3565 		return res;
3566 
3567 	return ERR_PTR(-EINVAL);
3568 }
3569 
3570 /**
3571  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3572  * @handle:	TISCI handle
3573  * @dev:	Device pointer to which the resource is assigned
3574  * @dev_id:	TISCI device id to which the resource is assigned
3575  * @of_prop:	property name by which the resources are represented
3576  *
3577  * Return: Pointer to ti_sci_resource if all went well else appropriate
3578  *	   error pointer.
3579  */
3580 struct ti_sci_resource *
3581 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3582 			    struct device *dev, u32 dev_id, char *of_prop)
3583 {
3584 	struct ti_sci_resource *res;
3585 	u32 *sub_types;
3586 	int sets;
3587 
3588 	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3589 					       sizeof(u32));
3590 	if (sets < 0) {
3591 		dev_err(dev, "%s resource type ids not available\n", of_prop);
3592 		return ERR_PTR(sets);
3593 	}
3594 
3595 	sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
3596 	if (!sub_types)
3597 		return ERR_PTR(-ENOMEM);
3598 
3599 	of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
3600 	res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
3601 					    sets);
3602 
3603 	kfree(sub_types);
3604 	return res;
3605 }
3606 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
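
/*
 * Usage sketch (illustration only): assuming the device node carries a
 * property listing the assigned resource sub-types, e.g.
 *
 *	ti,sci-rm-range-vint = <0x0a>, <0x0b>;
 *
 * a client maps all listed sub-types in one call (for a single known
 * sub-type, devm_ti_sci_get_resource() below skips the DT lookup):
 *
 *	res = devm_ti_sci_get_of_resource(sci, dev, dev_id,
 *					  "ti,sci-rm-range-vint");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */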
3607 
3608 /**
3609  * devm_ti_sci_get_resource() - Get a resource range assigned to the device
3610  * @handle:	TISCI handle
3611  * @dev:	Device pointer to which the resource is assigned
3612  * @dev_id:	TISCI device id to which the resource is assigned
3613  * @sub_type:	TISCI resource subtype representing the resource.
3614  *
3615  * Return: Pointer to ti_sci_resource if all went well else appropriate
3616  *	   error pointer.
3617  */
3618 struct ti_sci_resource *
3619 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
3620 			 u32 dev_id, u32 sub_type)
3621 {
3622 	return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
3623 }
3624 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
3625 
3626 static int tisci_reboot_handler(struct sys_off_data *data)
3627 {
3628 	struct ti_sci_info *info = data->cb_data;
3629 	const struct ti_sci_handle *handle = &info->handle;
3630 
3631 	ti_sci_cmd_core_reboot(handle);
3632 
3633 	/* Whether the call fails or passes, we should not be here in the first place */
3634 	return NOTIFY_BAD;
3635 }
3636 
3637 static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
3638 {
3639 	/*
3640 	 * Map and validate the target Linux suspend state to TISCI LPM.
3641 	 * Default is to let Device Manager select the low power mode.
3642 	 */
3643 	switch (pm_suspend_target_state) {
3644 	case PM_SUSPEND_MEM:
3645 		if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3646 			/*
3647 			 * For the DM_MANAGED mode the context is reserved for
3648 			 * internal use and can be 0
3649 			 */
3650 			return ti_sci_cmd_prepare_sleep(&info->handle,
3651 							TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED,
3652 							0, 0, 0);
3653 		} else {
3654 			/* DM Managed is not supported by the firmware. */
3655 			dev_err(info->dev, "Suspend to memory is not supported by the firmware\n");
3656 			return -EOPNOTSUPP;
3657 		}
3658 		break;
3659 	default:
3660 		/*
3661 		 * Do not fail if we don't have action to take for a
3662 		 * specific suspend mode.
3663 		 */
3664 		return 0;
3665 	}
3666 }
3667 
3668 static int __maybe_unused ti_sci_suspend(struct device *dev)
3669 {
3670 	struct ti_sci_info *info = dev_get_drvdata(dev);
3671 	struct device *cpu_dev, *cpu_dev_max = NULL;
3672 	s32 val, cpu_lat = 0;
3673 	int i, ret;
3674 
3675 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3676 		for_each_possible_cpu(i) {
3677 			cpu_dev = get_cpu_device(i);
3678 			val = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_RESUME_LATENCY);
3679 			if (val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) {
3680 				cpu_lat = max(cpu_lat, val);
3681 				cpu_dev_max = cpu_dev;
3682 			}
3683 		}
3684 		if (cpu_dev_max) {
3685 			dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%d\n", __func__, cpu_lat);
3686 			ret = ti_sci_cmd_set_latency_constraint(&info->handle,
3687 								cpu_lat, TISCI_MSG_CONSTRAINT_SET);
3688 			if (ret)
3689 				return ret;
3690 		}
3691 	}
3692 
3693 	ret = ti_sci_prepare_system_suspend(info);
3694 	if (ret)
3695 		return ret;
3696 
3697 	return 0;
3698 }
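
/*
 * Illustration only: the latency aggregated above comes from per-CPU
 * resume-latency PM QoS requests that any driver may post; the value below
 * (2000 us) is a made-up example.
 *
 *	struct dev_pm_qos_request req;
 *
 *	dev_pm_qos_add_request(get_cpu_device(0), &req,
 *			       DEV_PM_QOS_RESUME_LATENCY, 2000);
 *	...
 *	dev_pm_qos_remove_request(&req);
 */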
3699 
3700 static int __maybe_unused ti_sci_suspend_noirq(struct device *dev)
3701 {
3702 	struct ti_sci_info *info = dev_get_drvdata(dev);
3703 	int ret = 0;
3704 
3705 	ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
3706 	if (ret)
3707 		return ret;
3708 
3709 	return 0;
3710 }
3711 
3712 static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
3713 {
3714 	struct ti_sci_info *info = dev_get_drvdata(dev);
3715 	int ret = 0;
3716 	u32 source;
3717 	u64 time;
3718 	u8 pin;
3719 	u8 mode;
3720 
3721 	ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
3722 	if (ret)
3723 		return ret;
3724 
3725 	ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
3726 	/* Do not fail to resume on error as the wake reason is not critical */
3727 	if (!ret)
3728 		dev_info(dev, "ti_sci: wakeup source:0x%x, pin:0x%x, mode:0x%x\n",
3729 			 source, pin, mode);
3730 
3731 	return 0;
3732 }
3733 
3734 static const struct dev_pm_ops ti_sci_pm_ops = {
3735 #ifdef CONFIG_PM_SLEEP
3736 	.suspend = ti_sci_suspend,
3737 	.suspend_noirq = ti_sci_suspend_noirq,
3738 	.resume_noirq = ti_sci_resume_noirq,
3739 #endif
3740 };
3741 
3742 /* Description for K2G */
3743 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3744 	.default_host_id = 2,
3745 	/* Conservative duration */
3746 	.max_rx_timeout_ms = 1000,
3747 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3748 	.max_msgs = 20,
3749 	.max_msg_size = 64,
3750 };
3751 
3752 /* Description for AM654 */
3753 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3754 	.default_host_id = 12,
3755 	/* Conservative duration */
3756 	.max_rx_timeout_ms = 10000,
3757 	/* Limited by MBOX_TX_QUEUE_LEN. */
3758 	.max_msgs = 20,
3759 	.max_msg_size = 60,
3760 };
3761 
3762 static const struct of_device_id ti_sci_of_match[] = {
3763 	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
3764 	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
3765 	{ /* Sentinel */ },
3766 };
3767 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
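
/*
 * Device-tree sketch (illustration only, modeled on the ti,k2g-sci binding):
 * the "rx"/"tx" mailbox channels requested by name in probe below and the
 * optional "ti,host-id" override both come from the firmware node. Unit
 * address and proxy indices are example values.
 *
 *	dmsc: system-controller@44083000 {
 *		compatible = "ti,am654-sci";
 *		ti,host-id = <12>;
 *		mbox-names = "rx", "tx";
 *		mboxes = <&secure_proxy_main 11>, <&secure_proxy_main 13>;
 *	};
 */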
3768 
3769 static int ti_sci_probe(struct platform_device *pdev)
3770 {
3771 	struct device *dev = &pdev->dev;
3772 	const struct ti_sci_desc *desc;
3773 	struct ti_sci_xfer *xfer;
3774 	struct ti_sci_info *info = NULL;
3775 	struct ti_sci_xfers_info *minfo;
3776 	struct mbox_client *cl;
3777 	int ret = -EINVAL;
3778 	int i;
3779 	u32 h_id;
3780 
3781 	desc = device_get_match_data(dev);
3782 
3783 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3784 	if (!info)
3785 		return -ENOMEM;
3786 
3787 	info->dev = dev;
3788 	info->desc = desc;
3789 	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
3790 	/* if the property is not present in DT, use a default from desc */
3791 	if (ret < 0) {
3792 		info->host_id = info->desc->default_host_id;
3793 	} else {
3794 		if (!h_id) {
3795 			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
3796 			info->host_id = info->desc->default_host_id;
3797 		} else {
3798 			info->host_id = h_id;
3799 		}
3800 	}
3801 
3802 	INIT_LIST_HEAD(&info->node);
3803 	minfo = &info->minfo;
3804 
3805 	/*
3806 	 * Pre-allocate messages.
3807 	 * NEVER allocate more than we can indicate in hdr.seq;
3808 	 * if the data description has a bug, force a fix.
3809 	 */
3810 	if (WARN_ON(desc->max_msgs >=
3811 		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
3812 		return -EINVAL;
3813 
3814 	minfo->xfer_block = devm_kcalloc(dev,
3815 					 desc->max_msgs,
3816 					 sizeof(*minfo->xfer_block),
3817 					 GFP_KERNEL);
3818 	if (!minfo->xfer_block)
3819 		return -ENOMEM;
3820 
3821 	minfo->xfer_alloc_table = devm_bitmap_zalloc(dev,
3822 						     desc->max_msgs,
3823 						     GFP_KERNEL);
3824 	if (!minfo->xfer_alloc_table)
3825 		return -ENOMEM;
3826 
3827 	/* Pre-initialize the buffer pointer to pre-allocated buffers */
3828 	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
3829 		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
3830 					      GFP_KERNEL);
3831 		if (!xfer->xfer_buf)
3832 			return -ENOMEM;
3833 
3834 		xfer->tx_message.buf = xfer->xfer_buf;
3835 		init_completion(&xfer->done);
3836 	}
3837 
3838 	ret = ti_sci_debugfs_create(pdev, info);
3839 	if (ret)
3840 		dev_warn(dev, "Failed to create debug file\n");
3841 
3842 	platform_set_drvdata(pdev, info);
3843 
3844 	cl = &info->cl;
3845 	cl->dev = dev;
3846 	cl->tx_block = false;
3847 	cl->rx_callback = ti_sci_rx_callback;
3848 	cl->knows_txdone = true;
3849 
3850 	spin_lock_init(&minfo->xfer_lock);
3851 	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
3852 
3853 	info->chan_rx = mbox_request_channel_byname(cl, "rx");
3854 	if (IS_ERR(info->chan_rx)) {
3855 		ret = PTR_ERR(info->chan_rx);
3856 		goto out;
3857 	}
3858 
3859 	info->chan_tx = mbox_request_channel_byname(cl, "tx");
3860 	if (IS_ERR(info->chan_tx)) {
3861 		ret = PTR_ERR(info->chan_tx);
3862 		goto out;
3863 	}
3864 	ret = ti_sci_cmd_get_revision(info);
3865 	if (ret) {
3866 		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
3867 		goto out;
3868 	}
3869 
3870 	ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
3871 	dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n",
3872 		info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
3873 		info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
3874 		info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : ""
3875 	);
3876 
3877 	ti_sci_setup_ops(info);
3878 
3879 	ret = devm_register_restart_handler(dev, tisci_reboot_handler, info);
3880 	if (ret) {
3881 		dev_err(dev, "reboot registration failed (%d)\n", ret);
3882 		goto out;
3883 	}
3884 
3885 	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
3886 		 info->handle.version.abi_major, info->handle.version.abi_minor,
3887 		 info->handle.version.firmware_revision,
3888 		 info->handle.version.firmware_description);
3889 
3890 	mutex_lock(&ti_sci_list_mutex);
3891 	list_add_tail(&info->node, &ti_sci_list);
3892 	mutex_unlock(&ti_sci_list_mutex);
3893 
3894 	return of_platform_populate(dev->of_node, NULL, NULL, dev);
3895 out:
3896 	if (!IS_ERR(info->chan_tx))
3897 		mbox_free_channel(info->chan_tx);
3898 	if (!IS_ERR(info->chan_rx))
3899 		mbox_free_channel(info->chan_rx);
3900 	debugfs_remove(info->d);
3901 	return ret;
3902 }
3903 
3904 static struct platform_driver ti_sci_driver = {
3905 	.probe = ti_sci_probe,
3906 	.driver = {
3907 		   .name = "ti-sci",
3908 		   .of_match_table = ti_sci_of_match,
3909 		   .suppress_bind_attrs = true,
3910 		   .pm = &ti_sci_pm_ops,
3911 	},
3912 };
3913 module_platform_driver(ti_sci_driver);
3914 
3915 MODULE_LICENSE("GPL v2");
3916 MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
3917 MODULE_AUTHOR("Nishanth Menon");
3918 MODULE_ALIAS("platform:ti-sci");
3919