xref: /linux/drivers/firmware/ti_sci.c (revision 208eed95fc710827b100266c9450ae84d46727bd)
// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/property.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/suspend.h>
#include <linux/sys_soc.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message.
 *		Since we work with a request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting semaphore for managing the maximum number of
 *			simultaneous messages.
 * @xfer_block:		Preallocated message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			The index into this bitmap table is also used as the
 *			message sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in milliseconds)
 * @max_msgs:	Maximum number of messages that can be pending
 *		simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @fw_caps:	FW/SoC low power capabilities
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	u64 fw_caps;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:	sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust firmware to NUL-terminate the last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self-explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50];

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Set up NUL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
		 dev_name(dev));
	info->d = debugfs_create_file(debug_name, 0444, NULL, info,
				      &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer (max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain integrity
 * of internal data structures.
 *
 * Return: Valid &struct ti_sci_xfer if all went fine, else corresponding
 * error pointer.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message; be
	 * conservative and wait 5 times that.
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
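	/*
	 * NOTE: sem_xfer_count is expected to be initialized to max_msgs at
	 * probe time (outside this excerpt), which bounds the number of
	 * in-flight allocations, so the find_first_zero_bit() above is
	 * guaranteed to find a free slot within max_msgs bits.
	 */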

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyway.
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->tx_message.chan_rx = info->chan_rx;
	xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible.
	 * NOTE: we might escape with smp_mb and no lock here,
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: 0 if all went well, -ETIMEDOUT if no response was received,
 *	   else the corresponding error on transmit failure.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	bool response_expected = !!(hdr->flags & (TI_SCI_FLAG_REQ_ACK_ON_PROCESSED |
						  TI_SCI_FLAG_REQ_ACK_ON_RECEIVED));
	int ret;
	int timeout;
	struct device *dev = info->dev;
	bool done_state = true;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	if (response_expected && system_state <= SYSTEM_RUNNING) {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout))
			ret = -ETIMEDOUT;
	} else if (response_expected) {
		/*
		 * If we are !running, we cannot use wait_for_completion_timeout
		 * during noirq phase, so we must manually poll the completion.
		 */
		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
					       done_state, 1,
					       info->desc->max_rx_timeout_ms * 1000,
					       false, &xfer->done);
	}

	if (ret == -ETIMEDOUT)
		dev_err(dev, "Mbox timed out in resp(caller: %pS)\n",
			(void *)_RET_IP_);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}
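
/*
 * For reference: every command helper below follows the same message life
 * cycle. A sketch of the pattern (ti_sci_msg_req_xyz is a placeholder type,
 * not part of the driver):
 *
 *	xfer = ti_sci_get_one_xfer(info, msg_type, flags,
 *				   sizeof(*req), sizeof(*resp));
 *	req = (struct ti_sci_msg_req_xyz *)xfer->xfer_buf;
 *	... fill request fields ...
 *	ret = ti_sci_do_xfer(info, xfer);
 *	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
 *	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 */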

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strscpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;
fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:		Pointer to TISCI handle
 * @id:			Device Identifier
 * @reset_state:	Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
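	/*
	 * Clock IDs below 255 fit the legacy 8-bit clk_id field; 255 is a
	 * sentinel telling firmware to take the full ID from the 32-bit
	 * clk_id_32 field instead. The same encoding is repeated in the
	 * other clock helpers below.
	 */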
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents of the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
1597 
1598 /**
1599  * ti_sci_cmd_clk_get_freq() - Get current frequency
1600  * @handle:	pointer to TI SCI handle
1601  * @dev_id:	Device identifier this request is for
1602  * @clk_id:	Clock identifier for the device for this request.
1603  *		Each device has it's own set of clock inputs. This indexes
1604  *		which clock input to modify.
1605  * @freq:	Currently frequency in Hz
1606  *
1607  * Return: 0 if all went well, else returns appropriate error value.
1608  */
ti_sci_cmd_clk_get_freq(const struct ti_sci_handle * handle,u32 dev_id,u32 clk_id,u64 * freq)1609 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1610 				   u32 dev_id, u32 clk_id, u64 *freq)
1611 {
1612 	struct ti_sci_info *info;
1613 	struct ti_sci_msg_req_get_clock_freq *req;
1614 	struct ti_sci_msg_resp_get_clock_freq *resp;
1615 	struct ti_sci_xfer *xfer;
1616 	struct device *dev;
1617 	int ret = 0;
1618 
1619 	if (IS_ERR(handle))
1620 		return PTR_ERR(handle);
1621 	if (!handle || !freq)
1622 		return -EINVAL;
1623 
1624 	info = handle_to_ti_sci_info(handle);
1625 	dev = info->dev;
1626 
1627 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1628 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1629 				   sizeof(*req), sizeof(*resp));
1630 	if (IS_ERR(xfer)) {
1631 		ret = PTR_ERR(xfer);
1632 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1633 		return ret;
1634 	}
1635 	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
1636 	req->dev_id = dev_id;
1637 	if (clk_id < 255) {
1638 		req->clk_id = clk_id;
1639 	} else {
1640 		req->clk_id = 255;
1641 		req->clk_id_32 = clk_id;
1642 	}
1643 
1644 	ret = ti_sci_do_xfer(info, xfer);
1645 	if (ret) {
1646 		dev_err(dev, "Mbox send fail %d\n", ret);
1647 		goto fail;
1648 	}
1649 
1650 	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
1651 
1652 	if (!ti_sci_is_response_ack(resp))
1653 		ret = -ENODEV;
1654 	else
1655 		*freq = resp->freq_hz;
1656 
1657 fail:
1658 	ti_sci_put_one_xfer(&info->minfo, xfer);
1659 
1660 	return ret;
1661 }
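
/*
 * A minimal usage sketch (illustrative, not part of this driver):
 * clients reach the two calls above through the exported clock ops,
 * assuming a handle obtained via ti_sci_get_by_phandle() and
 * placeholder dev_id/clk_id/target/slack values:
 *
 *	u64 cur;
 *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
 *
 *	if (!cops->get_freq(handle, dev_id, clk_id, &cur) && cur != target)
 *		cops->set_freq(handle, dev_id, clk_id, target - slack,
 *			       target, target + slack);
 */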
1662 
1663 /**
1664  * ti_sci_cmd_prepare_sleep() - Prepare system for system suspend
1665  * @handle:		pointer to TI SCI handle
1666  * @mode:		Low power mode to enter
1667  * @ctx_lo:		Low part of address for context save
1668  * @ctx_hi:		High part of address for context save
1669  * @debug_flags:	Debug flags to pass to firmware
1670  *
1671  * Return: 0 if all went well, else returns appropriate error value.
1672  */
1673 static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
1674 				    u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
1675 {
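	/*
	 * Partial-IO entry is expected to power the SoC down as soon as
	 * firmware accepts the message, so no response can be delivered;
	 * use a fire-and-forget message for that mode and request a full
	 * ACK for every other sleep mode.
	 */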
1676 	u32 msg_flags = mode == TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO ?
1677 			TI_SCI_FLAG_REQ_GENERIC_NORESPONSE :
1678 			TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
1679 	struct ti_sci_info *info;
1680 	struct ti_sci_msg_req_prepare_sleep *req;
1681 	struct ti_sci_msg_hdr *resp;
1682 	struct ti_sci_xfer *xfer;
1683 	struct device *dev;
1684 	int ret = 0;
1685 
1686 	if (IS_ERR(handle))
1687 		return PTR_ERR(handle);
1688 	if (!handle)
1689 		return -EINVAL;
1690 
1691 	info = handle_to_ti_sci_info(handle);
1692 	dev = info->dev;
1693 
1694 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
1695 				   msg_flags,
1696 				   sizeof(*req), sizeof(*resp));
1697 	if (IS_ERR(xfer)) {
1698 		ret = PTR_ERR(xfer);
1699 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1700 		return ret;
1701 	}
1702 
1703 	req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf;
1704 	req->mode = mode;
1705 	req->ctx_lo = ctx_lo;
1706 	req->ctx_hi = ctx_hi;
1707 	req->debug_flags = debug_flags;
1708 
1709 	ret = ti_sci_do_xfer(info, xfer);
1710 	if (ret) {
1711 		dev_err(dev, "Mbox send fail %d\n", ret);
1712 		goto fail;
1713 	}
1714 
1715 	if (msg_flags == TI_SCI_FLAG_REQ_ACK_ON_PROCESSED) {
1716 		resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1717 		if (!ti_sci_is_response_ack(resp)) {
1718 			dev_err(dev, "Failed to prepare sleep\n");
1719 			ret = -ENODEV;
1720 		}
1721 	}
1722 
1723 fail:
1724 	ti_sci_put_one_xfer(&info->minfo, xfer);
1725 
1726 	return ret;
1727 }
1728 
1729 /**
1730  * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities
1731  * @handle:		Pointer to TI SCI handle
1732  * @fw_caps:		Each bit in fw_caps indicates one FW/SoC capability
1733  *
1734  * Check if the firmware supports any optional low power modes.
1735  * Old revisions of TIFS (< 08.04) will NACK the request, which results
1736  * in -ENODEV being returned.
1737  *
1738  * Return: 0 if all went well, else returns appropriate error value.
1739  */
1740 static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle,
1741 					u64 *fw_caps)
1742 {
1743 	struct ti_sci_info *info;
1744 	struct ti_sci_xfer *xfer;
1745 	struct ti_sci_msg_resp_query_fw_caps *resp;
1746 	struct device *dev;
1747 	int ret = 0;
1748 
1749 	if (IS_ERR(handle))
1750 		return PTR_ERR(handle);
1751 	if (!handle)
1752 		return -EINVAL;
1753 
1754 	info = handle_to_ti_sci_info(handle);
1755 	dev = info->dev;
1756 
1757 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS,
1758 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1759 				   sizeof(struct ti_sci_msg_hdr),
1760 				   sizeof(*resp));
1761 	if (IS_ERR(xfer)) {
1762 		ret = PTR_ERR(xfer);
1763 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1764 		return ret;
1765 	}
1766 
1767 	ret = ti_sci_do_xfer(info, xfer);
1768 	if (ret) {
1769 		dev_err(dev, "Mbox send fail %d\n", ret);
1770 		goto fail;
1771 	}
1772 
1773 	resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf;
1774 
1775 	if (!ti_sci_is_response_ack(resp)) {
1776 		dev_err(dev, "Failed to get capabilities\n");
1777 		ret = -ENODEV;
1778 		goto fail;
1779 	}
1780 
1781 	if (fw_caps)
1782 		*fw_caps = resp->fw_caps;
1783 
1784 fail:
1785 	ti_sci_put_one_xfer(&info->minfo, xfer);
1786 
1787 	return ret;
1788 }
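
/*
 * A probe-time sketch (illustrative), mirroring how this driver caches
 * the result in info->fw_caps: a NACK from old TIFS is treated as "no
 * optional low power modes" rather than as a fatal error:
 *
 *	if (ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps)) {
 *		dev_dbg(info->dev, "no optional LPM capabilities\n");
 *		info->fw_caps = 0;
 *	}
 */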
1789 
1790 /**
1791  * ti_sci_cmd_set_io_isolation() - Enable IO isolation in LPM
1792  * @handle:		Pointer to TI SCI handle
1793  * @state:		The desired state of the IO isolation
1794  *
1795  * Return: 0 if all went well, else returns appropriate error value.
1796  */
1797 static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle,
1798 				       u8 state)
1799 {
1800 	struct ti_sci_info *info;
1801 	struct ti_sci_msg_req_set_io_isolation *req;
1802 	struct ti_sci_msg_hdr *resp;
1803 	struct ti_sci_xfer *xfer;
1804 	struct device *dev;
1805 	int ret = 0;
1806 
1807 	if (IS_ERR(handle))
1808 		return PTR_ERR(handle);
1809 	if (!handle)
1810 		return -EINVAL;
1811 
1812 	info = handle_to_ti_sci_info(handle);
1813 	dev = info->dev;
1814 
1815 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION,
1816 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1817 				   sizeof(*req), sizeof(*resp));
1818 	if (IS_ERR(xfer)) {
1819 		ret = PTR_ERR(xfer);
1820 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1821 		return ret;
1822 	}
1823 	req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf;
1824 	req->state = state;
1825 
1826 	ret = ti_sci_do_xfer(info, xfer);
1827 	if (ret) {
1828 		dev_err(dev, "Mbox send fail %d\n", ret);
1829 		goto fail;
1830 	}
1831 
1832 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1833 
1834 	if (!ti_sci_is_response_ack(resp)) {
1835 		dev_err(dev, "Failed to set IO isolation\n");
1836 		ret = -ENODEV;
1837 	}
1838 
1839 fail:
1840 	ti_sci_put_one_xfer(&info->minfo, xfer);
1841 
1842 	return ret;
1843 }
1844 
1845 /**
1846  * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM
1847  * @handle:		Pointer to TI SCI handle
1848  * @source:		The wakeup source that woke the SoC from LPM
1849  * @timestamp:		Timestamp of the wakeup event
1850  * @pin:		The pin that has triggered wake up
1851  * @mode:		The last entered low power mode
1852  *
1853  * Return: 0 if all went well, else returns appropriate error value.
1854  */
1855 static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle,
1856 					  u32 *source, u64 *timestamp, u8 *pin, u8 *mode)
1857 {
1858 	struct ti_sci_info *info;
1859 	struct ti_sci_xfer *xfer;
1860 	struct ti_sci_msg_resp_lpm_wake_reason *resp;
1861 	struct device *dev;
1862 	int ret = 0;
1863 
1864 	if (IS_ERR(handle))
1865 		return PTR_ERR(handle);
1866 	if (!handle)
1867 		return -EINVAL;
1868 
1869 	info = handle_to_ti_sci_info(handle);
1870 	dev = info->dev;
1871 
1872 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON,
1873 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1874 				   sizeof(struct ti_sci_msg_hdr),
1875 				   sizeof(*resp));
1876 	if (IS_ERR(xfer)) {
1877 		ret = PTR_ERR(xfer);
1878 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1879 		return ret;
1880 	}
1881 
1882 	ret = ti_sci_do_xfer(info, xfer);
1883 	if (ret) {
1884 		dev_err(dev, "Mbox send fail %d\n", ret);
1885 		goto fail;
1886 	}
1887 
1888 	resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf;
1889 
1890 	if (!ti_sci_is_response_ack(resp)) {
1891 		dev_err(dev, "Failed to get wake reason\n");
1892 		ret = -ENODEV;
1893 		goto fail;
1894 	}
1895 
1896 	if (source)
1897 		*source = resp->wake_source;
1898 	if (timestamp)
1899 		*timestamp = resp->wake_timestamp;
1900 	if (pin)
1901 		*pin = resp->wake_pin;
1902 	if (mode)
1903 		*mode = resp->mode;
1904 
1905 fail:
1906 	ti_sci_put_one_xfer(&info->minfo, xfer);
1907 
1908 	return ret;
1909 }
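
/*
 * Illustrative resume-path sketch: all four output pointers are
 * optional, so a caller interested only in the wakeup source and pin
 * can pass NULL for the rest:
 *
 *	u32 src;
 *	u8 pin;
 *
 *	if (!ti_sci_msg_cmd_lpm_wake_reason(handle, &src, NULL, &pin, NULL))
 *		dev_dbg(dev, "woken by source %u (pin %u)\n", src, pin);
 */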
1910 
1911 /**
1912  * ti_sci_cmd_set_device_constraint() - Set LPM constraint on behalf of a device
1913  * @handle:	pointer to TI SCI handle
1914  * @id:	Device identifier
1915  * @state:	The desired state of device constraint: set or clear
1916  *
1917  * Return: 0 if all went well, else returns appropriate error value.
1918  */
1919 static int ti_sci_cmd_set_device_constraint(const struct ti_sci_handle *handle,
1920 					    u32 id, u8 state)
1921 {
1922 	struct ti_sci_info *info;
1923 	struct ti_sci_msg_req_lpm_set_device_constraint *req;
1924 	struct ti_sci_msg_hdr *resp;
1925 	struct ti_sci_xfer *xfer;
1926 	struct device *dev;
1927 	int ret = 0;
1928 
1929 	if (IS_ERR(handle))
1930 		return PTR_ERR(handle);
1931 	if (!handle)
1932 		return -EINVAL;
1933 
1934 	info = handle_to_ti_sci_info(handle);
1935 	dev = info->dev;
1936 
1937 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT,
1938 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1939 				   sizeof(*req), sizeof(*resp));
1940 	if (IS_ERR(xfer)) {
1941 		ret = PTR_ERR(xfer);
1942 		dev_err(dev, "Message alloc failed(%d)\n", ret);
1943 		return ret;
1944 	}
1945 	req = (struct ti_sci_msg_req_lpm_set_device_constraint *)xfer->xfer_buf;
1946 	req->id = id;
1947 	req->state = state;
1948 
1949 	ret = ti_sci_do_xfer(info, xfer);
1950 	if (ret) {
1951 		dev_err(dev, "Mbox send fail %d\n", ret);
1952 		goto fail;
1953 	}
1954 
1955 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1956 
1957 	if (!ti_sci_is_response_ack(resp)) {
1958 		dev_err(dev, "Failed to set device constraint\n");
1959 		ret = -ENODEV;
1960 	}
1961 
1962 fail:
1963 	ti_sci_put_one_xfer(&info->minfo, xfer);
1964 
1965 	return ret;
1966 }
1967 
1968 /**
1969  * ti_sci_cmd_set_latency_constraint() - Set LPM resume latency constraint
1970  * @handle:	pointer to TI SCI handle
1971  * @latency:	maximum acceptable latency (in ms) to wake up from LPM
1972  * @state:	The desired state of latency constraint: set or clear
1973  *
1974  * Return: 0 if all went well, else returns appropriate error value.
1975  */
1976 static int ti_sci_cmd_set_latency_constraint(const struct ti_sci_handle *handle,
1977 					     u16 latency, u8 state)
1978 {
1979 	struct ti_sci_info *info;
1980 	struct ti_sci_msg_req_lpm_set_latency_constraint *req;
1981 	struct ti_sci_msg_hdr *resp;
1982 	struct ti_sci_xfer *xfer;
1983 	struct device *dev;
1984 	int ret = 0;
1985 
1986 	if (IS_ERR(handle))
1987 		return PTR_ERR(handle);
1988 	if (!handle)
1989 		return -EINVAL;
1990 
1991 	info = handle_to_ti_sci_info(handle);
1992 	dev = info->dev;
1993 
1994 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT,
1995 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1996 				   sizeof(*req), sizeof(*resp));
1997 	if (IS_ERR(xfer)) {
1998 		ret = PTR_ERR(xfer);
1999 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2000 		return ret;
2001 	}
2002 	req = (struct ti_sci_msg_req_lpm_set_latency_constraint *)xfer->xfer_buf;
2003 	req->latency = latency;
2004 	req->state = state;
2005 
2006 	ret = ti_sci_do_xfer(info, xfer);
2007 	if (ret) {
2008 		dev_err(dev, "Mbox send fail %d\n", ret);
2009 		goto fail;
2010 	}
2011 
2012 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2013 
2014 	if (!ti_sci_is_response_ack(resp)) {
2015 		dev_err(dev, "Failed to set device constraint\n");
2016 		ret = -ENODEV;
2017 	}
2018 
2019 fail:
2020 	ti_sci_put_one_xfer(&info->minfo, xfer);
2021 
2022 	return ret;
2023 }
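
/*
 * Illustrative sketch, assuming the TISCI_MSG_CONSTRAINT_SET/CLR values
 * from the protocol header: a client that cannot tolerate more than
 * 2 ms of resume latency would set, and later clear, the constraint:
 *
 *	pm_ops->set_latency_constraint(handle, 2, TISCI_MSG_CONSTRAINT_SET);
 *	...
 *	pm_ops->set_latency_constraint(handle, 2, TISCI_MSG_CONSTRAINT_CLR);
 */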
2024 
2025 /**
2026  * ti_sci_cmd_lpm_abort() - Abort LPM entry by clearing the selected low power mode
2027  * @dev:	Device pointer corresponding to the SCI entity
2028  *
2029  * Return: 0 if all went well, else returns appropriate error value.
2030  */
2031 static int ti_sci_cmd_lpm_abort(struct device *dev)
2032 {
2033 	struct ti_sci_info *info = dev_get_drvdata(dev);
2034 	struct ti_sci_msg_hdr *req;
2035 	struct ti_sci_msg_hdr *resp;
2036 	struct ti_sci_xfer *xfer;
2037 	int ret = 0;
2038 
2039 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_ABORT,
2040 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2041 				   sizeof(*req), sizeof(*resp));
2042 	if (IS_ERR(xfer)) {
2043 		ret = PTR_ERR(xfer);
2044 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2045 		return ret;
2046 	}
2047 	req = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2048 
2049 	ret = ti_sci_do_xfer(info, xfer);
2050 	if (ret) {
2051 		dev_err(dev, "Mbox send fail %d\n", ret);
2052 		goto fail;
2053 	}
2054 
2055 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2056 
2057 	if (!ti_sci_is_response_ack(resp))
2058 		ret = -ENODEV;
2059 
2060 fail:
2061 	ti_sci_put_one_xfer(&info->minfo, xfer);
2062 
2063 	return ret;
2064 }
2065 
2066 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
2067 {
2068 	struct ti_sci_info *info;
2069 	struct ti_sci_msg_req_reboot *req;
2070 	struct ti_sci_msg_hdr *resp;
2071 	struct ti_sci_xfer *xfer;
2072 	struct device *dev;
2073 	int ret = 0;
2074 
2075 	if (IS_ERR(handle))
2076 		return PTR_ERR(handle);
2077 	if (!handle)
2078 		return -EINVAL;
2079 
2080 	info = handle_to_ti_sci_info(handle);
2081 	dev = info->dev;
2082 
2083 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
2084 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2085 				   sizeof(*req), sizeof(*resp));
2086 	if (IS_ERR(xfer)) {
2087 		ret = PTR_ERR(xfer);
2088 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2089 		return ret;
2090 	}
2091 	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
2092 
2093 	ret = ti_sci_do_xfer(info, xfer);
2094 	if (ret) {
2095 		dev_err(dev, "Mbox send fail %d\n", ret);
2096 		goto fail;
2097 	}
2098 
2099 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2100 
2101 	if (!ti_sci_is_response_ack(resp))
2102 		ret = -ENODEV;
2103 	else
2104 		ret = 0;
2105 
2106 fail:
2107 	ti_sci_put_one_xfer(&info->minfo, xfer);
2108 
2109 	return ret;
2110 }
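
/*
 * Illustrative sketch: the system reboot path reaches the command above
 * through the core ops hooked up in ti_sci_setup_ops() below:
 *
 *	handle->ops.core_ops.reboot_device(handle);
 */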
2111 
2112 /**
2113  * ti_sci_get_resource_range - Helper to get a range of resources assigned
2114  *			       to a host. Resource is uniquely identified by
2115  *			       type and subtype.
2116  * @handle:		Pointer to TISCI handle.
2117  * @dev_id:		TISCI device ID.
2118  * @subtype:		Resource assignment subtype that is being requested
2119  *			from the given device.
2120  * @s_host:		Host processor ID to which the resources are allocated
2121  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2122  *			resource range start index and number of resources
2123  *
2124  * Return: 0 if all went fine, else return appropriate error.
2125  */
2126 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
2127 				     u32 dev_id, u8 subtype, u8 s_host,
2128 				     struct ti_sci_resource_desc *desc)
2129 {
2130 	struct ti_sci_msg_resp_get_resource_range *resp;
2131 	struct ti_sci_msg_req_get_resource_range *req;
2132 	struct ti_sci_xfer *xfer;
2133 	struct ti_sci_info *info;
2134 	struct device *dev;
2135 	int ret = 0;
2136 
2137 	if (IS_ERR(handle))
2138 		return PTR_ERR(handle);
2139 	if (!handle || !desc)
2140 		return -EINVAL;
2141 
2142 	info = handle_to_ti_sci_info(handle);
2143 	dev = info->dev;
2144 
2145 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
2146 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2147 				   sizeof(*req), sizeof(*resp));
2148 	if (IS_ERR(xfer)) {
2149 		ret = PTR_ERR(xfer);
2150 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2151 		return ret;
2152 	}
2153 
2154 	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
2155 	req->secondary_host = s_host;
2156 	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
2157 	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
2158 
2159 	ret = ti_sci_do_xfer(info, xfer);
2160 	if (ret) {
2161 		dev_err(dev, "Mbox send fail %d\n", ret);
2162 		goto fail;
2163 	}
2164 
2165 	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
2166 
2167 	if (!ti_sci_is_response_ack(resp)) {
2168 		ret = -ENODEV;
2169 	} else if (!resp->range_num && !resp->range_num_sec) {
2170 		/* Neither of the two resource ranges is valid */
2171 		ret = -ENODEV;
2172 	} else {
2173 		desc->start = resp->range_start;
2174 		desc->num = resp->range_num;
2175 		desc->start_sec = resp->range_start_sec;
2176 		desc->num_sec = resp->range_num_sec;
2177 	}
2178 
2179 fail:
2180 	ti_sci_put_one_xfer(&info->minfo, xfer);
2181 
2182 	return ret;
2183 }
2184 
2185 /**
2186  * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
2187  *				   that is the same as the TI SCI interface host.
2188  * @handle:		Pointer to TISCI handle.
2189  * @dev_id:		TISCI device ID.
2190  * @subtype:		Resource assignment subtype that is being requested
2191  *			from the given device.
2192  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2193  *			resource range start index and number of resources
2194  *
2195  * Return: 0 if all went fine, else return appropriate error.
2196  */
2197 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
2198 					 u32 dev_id, u8 subtype,
2199 					 struct ti_sci_resource_desc *desc)
2200 {
2201 	return ti_sci_get_resource_range(handle, dev_id, subtype,
2202 					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
2203 					 desc);
2204 }
2205 
2206 /**
2207  * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
2208  *					      assigned to a specified host.
2209  * @handle:		Pointer to TISCI handle.
2210  * @dev_id:		TISCI device ID.
2211  * @subtype:		Resource assignment subtype that is being requested
2212  *			from the given device.
2213  * @s_host:		Host processor ID to which the resources are allocated
2214  * @desc:		Pointer to ti_sci_resource_desc to be updated with the
2215  *			resource range start index and number of resources
2216  *
2217  * Return: 0 if all went fine, else return appropriate error.
2218  */
2219 static
2220 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
2221 					     u32 dev_id, u8 subtype, u8 s_host,
2222 					     struct ti_sci_resource_desc *desc)
2223 {
2224 	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
2225 }
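
/*
 * Illustrative sketch: a resource-management client queries its own
 * allocation through the rm_core ops and walks the primary range:
 *
 *	struct ti_sci_resource_desc desc;
 *
 *	if (!handle->ops.rm_core_ops.get_range(handle, dev_id, subtype, &desc))
 *		dev_dbg(dev, "resources [%u..%u)\n",
 *			desc.start, desc.start + desc.num);
 */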
2226 
2227 /**
2228  * ti_sci_manage_irq() - Helper api to configure/release the irq route between
2229  *			 the requested source and destination
2230  * @handle:		Pointer to TISCI handle.
2231  * @valid_params:	Bit fields defining the validity of certain params
2232  * @src_id:		Device ID of the IRQ source
2233  * @src_index:		IRQ source index within the source device
2234  * @dst_id:		Device ID of the IRQ destination
2235  * @dst_host_irq:	IRQ number of the destination device
2236  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2237  * @vint:		Virtual interrupt to be used within the IA
2238  * @global_event:	Global event number to be used for the requesting event
2239  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2240  * @s_host:		Secondary host ID for which the irq/event is being
2241  *			requested.
2242  * @type:		Request type irq set or release.
2243  *
2244  * Return: 0 if all went fine, else return appropriate error.
2245  */
2246 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
2247 			     u32 valid_params, u16 src_id, u16 src_index,
2248 			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
2249 			     u16 global_event, u8 vint_status_bit, u8 s_host,
2250 			     u16 type)
2251 {
2252 	struct ti_sci_msg_req_manage_irq *req;
2253 	struct ti_sci_msg_hdr *resp;
2254 	struct ti_sci_xfer *xfer;
2255 	struct ti_sci_info *info;
2256 	struct device *dev;
2257 	int ret = 0;
2258 
2259 	if (IS_ERR(handle))
2260 		return PTR_ERR(handle);
2261 	if (!handle)
2262 		return -EINVAL;
2263 
2264 	info = handle_to_ti_sci_info(handle);
2265 	dev = info->dev;
2266 
2267 	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2268 				   sizeof(*req), sizeof(*resp));
2269 	if (IS_ERR(xfer)) {
2270 		ret = PTR_ERR(xfer);
2271 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2272 		return ret;
2273 	}
2274 	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
2275 	req->valid_params = valid_params;
2276 	req->src_id = src_id;
2277 	req->src_index = src_index;
2278 	req->dst_id = dst_id;
2279 	req->dst_host_irq = dst_host_irq;
2280 	req->ia_id = ia_id;
2281 	req->vint = vint;
2282 	req->global_event = global_event;
2283 	req->vint_status_bit = vint_status_bit;
2284 	req->secondary_host = s_host;
2285 
2286 	ret = ti_sci_do_xfer(info, xfer);
2287 	if (ret) {
2288 		dev_err(dev, "Mbox send fail %d\n", ret);
2289 		goto fail;
2290 	}
2291 
2292 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2293 
2294 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2295 
2296 fail:
2297 	ti_sci_put_one_xfer(&info->minfo, xfer);
2298 
2299 	return ret;
2300 }
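
/*
 * The valid_params bitmask tells firmware which of the fields above to
 * honour; the wrappers below populate it for the two supported shapes:
 * a direct host IRQ route (dst_id/dst_host_irq) or an event route
 * through an Interrupt Aggregator (ia_id/vint/global_event/
 * vint_status_bit).
 */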
2301 
2302 /**
2303  * ti_sci_set_irq() - Helper api to configure the irq route between the
2304  *		      requested source and destination
2305  * @handle:		Pointer to TISCI handle.
2306  * @valid_params:	Bit fields defining the validity of certain params
2307  * @src_id:		Device ID of the IRQ source
2308  * @src_index:		IRQ source index within the source device
2309  * @dst_id:		Device ID of the IRQ destination
2310  * @dst_host_irq:	IRQ number of the destination device
2311  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2312  * @vint:		Virtual interrupt to be used within the IA
2313  * @global_event:	Global event number to be used for the requesting event
2314  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2315  * @s_host:		Secondary host ID for which the irq/event is being
2316  *			requested.
2317  *
2318  * Return: 0 if all went fine, else return appropriate error.
2319  */
2320 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
2321 			  u16 src_id, u16 src_index, u16 dst_id,
2322 			  u16 dst_host_irq, u16 ia_id, u16 vint,
2323 			  u16 global_event, u8 vint_status_bit, u8 s_host)
2324 {
2325 	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
2326 		 __func__, valid_params, src_id, src_index,
2327 		 dst_id, dst_host_irq, ia_id, vint, global_event,
2328 		 vint_status_bit);
2329 
2330 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
2331 				 dst_id, dst_host_irq, ia_id, vint,
2332 				 global_event, vint_status_bit, s_host,
2333 				 TI_SCI_MSG_SET_IRQ);
2334 }
2335 
2336 /**
2337  * ti_sci_free_irq() - Helper api to free the irq route between the
2338  *			   requested source and destination
2339  * @handle:		Pointer to TISCI handle.
2340  * @valid_params:	Bit fields defining the validity of certain params
2341  * @src_id:		Device ID of the IRQ source
2342  * @src_index:		IRQ source index within the source device
2343  * @dst_id:		Device ID of the IRQ destination
2344  * @dst_host_irq:	IRQ number of the destination device
2345  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2346  * @vint:		Virtual interrupt to be used within the IA
2347  * @global_event:	Global event number to be used for the requesting event
2348  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2349  * @s_host:		Secondary host ID for which the irq/event is being
2350  *			requested.
2351  *
2352  * Return: 0 if all went fine, else return appropriate error.
2353  */
2354 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
2355 			   u16 src_id, u16 src_index, u16 dst_id,
2356 			   u16 dst_host_irq, u16 ia_id, u16 vint,
2357 			   u16 global_event, u8 vint_status_bit, u8 s_host)
2358 {
2359 	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
2360 		 __func__, valid_params, src_id, src_index,
2361 		 dst_id, dst_host_irq, ia_id, vint, global_event,
2362 		 vint_status_bit);
2363 
2364 	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
2365 				 dst_id, dst_host_irq, ia_id, vint,
2366 				 global_event, vint_status_bit, s_host,
2367 				 TI_SCI_MSG_FREE_IRQ);
2368 }
2369 
2370 /**
2371  * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
2372  *			  source and destination.
2373  * @handle:		Pointer to TISCI handle.
2374  * @src_id:		Device ID of the IRQ source
2375  * @src_index:		IRQ source index within the source device
2376  * @dst_id:		Device ID of the IRQ destination
2377  * @dst_host_irq:	IRQ number of the destination device
2378  *
2379  * Return: 0 if all went fine, else return appropriate error.
2380  */
2381 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
2382 			      u16 src_index, u16 dst_id, u16 dst_host_irq)
2383 {
2384 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2385 
2386 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
2387 			      dst_host_irq, 0, 0, 0, 0, 0);
2388 }
2389 
2390 /**
2391  * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
2392  *				requested source and Interrupt Aggregator.
2393  * @handle:		Pointer to TISCI handle.
2394  * @src_id:		Device ID of the IRQ source
2395  * @src_index:		IRQ source index within the source device
2396  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2397  * @vint:		Virtual interrupt to be used within the IA
2398  * @global_event:	Global event number to be used for the requesting event
2399  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2400  *
2401  * Return: 0 if all went fine, else return appropriate error.
2402  */
2403 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
2404 				    u16 src_id, u16 src_index, u16 ia_id,
2405 				    u16 vint, u16 global_event,
2406 				    u8 vint_status_bit)
2407 {
2408 	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
2409 			   MSG_FLAG_GLB_EVNT_VALID |
2410 			   MSG_FLAG_VINT_STS_BIT_VALID;
2411 
2412 	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
2413 			      ia_id, vint, global_event, vint_status_bit, 0);
2414 }
2415 
2416 /**
2417  * ti_sci_cmd_free_irq() - Free a host irq route between the
2418  *			   requested source and destination.
2419  * @handle:		Pointer to TISCI handle.
2420  * @src_id:		Device ID of the IRQ source
2421  * @src_index:		IRQ source index within the source device
2422  * @dst_id:		Device ID of the IRQ destination
2423  * @dst_host_irq:	IRQ number of the destination device
2424  *
2425  * Return: 0 if all went fine, else return appropriate error.
2426  */
2427 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2428 			       u16 src_index, u16 dst_id, u16 dst_host_irq)
2429 {
2430 	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2431 
2432 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2433 			       dst_host_irq, 0, 0, 0, 0, 0);
2434 }
2435 
2436 /**
2437  * ti_sci_cmd_free_event_map() - Free an event map between the requested source
2438  *				 and Interrupt Aggregator.
2439  * @handle:		Pointer to TISCI handle.
2440  * @src_id:		Device ID of the IRQ source
2441  * @src_index:		IRQ source index within the source device
2442  * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
2443  * @vint:		Virtual interrupt to be used within the IA
2444  * @global_event:	Global event number to be used for the requesting event
2445  * @vint_status_bit:	Virtual interrupt status bit to be used for the event
2446  *
2447  * Return: 0 if all went fine, else return appropriate error.
2448  */
2449 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2450 				     u16 src_id, u16 src_index, u16 ia_id,
2451 				     u16 vint, u16 global_event,
2452 				     u8 vint_status_bit)
2453 {
2454 	u32 valid_params = MSG_FLAG_IA_ID_VALID |
2455 			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2456 			   MSG_FLAG_VINT_STS_BIT_VALID;
2457 
2458 	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2459 			       ia_id, vint, global_event, vint_status_bit, 0);
2460 }
2461 
2462 /**
2463  * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
2464  * @handle:	Pointer to TI SCI handle.
2465  * @params:	Pointer to ti_sci_msg_rm_ring_cfg ring config structure
2466  *
2467  * Return: 0 if all went well, else returns appropriate error value.
2468  *
2469  * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
2470  * more info.
2471  */
2472 static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
2473 				  const struct ti_sci_msg_rm_ring_cfg *params)
2474 {
2475 	struct ti_sci_msg_rm_ring_cfg_req *req;
2476 	struct ti_sci_msg_hdr *resp;
2477 	struct ti_sci_xfer *xfer;
2478 	struct ti_sci_info *info;
2479 	struct device *dev;
2480 	int ret = 0;
2481 
2482 	if (IS_ERR_OR_NULL(handle))
2483 		return -EINVAL;
2484 
2485 	info = handle_to_ti_sci_info(handle);
2486 	dev = info->dev;
2487 
2488 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2489 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2490 				   sizeof(*req), sizeof(*resp));
2491 	if (IS_ERR(xfer)) {
2492 		ret = PTR_ERR(xfer);
2493 		dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
2494 		return ret;
2495 	}
2496 	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2497 	req->valid_params = params->valid_params;
2498 	req->nav_id = params->nav_id;
2499 	req->index = params->index;
2500 	req->addr_lo = params->addr_lo;
2501 	req->addr_hi = params->addr_hi;
2502 	req->count = params->count;
2503 	req->mode = params->mode;
2504 	req->size = params->size;
2505 	req->order_id = params->order_id;
2506 	req->virtid = params->virtid;
2507 	req->asel = params->asel;
2508 
2509 	ret = ti_sci_do_xfer(info, xfer);
2510 	if (ret) {
2511 		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2512 		goto fail;
2513 	}
2514 
2515 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2516 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2517 
2518 fail:
2519 	ti_sci_put_one_xfer(&info->minfo, xfer);
2520 	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
2521 	return ret;
2522 }
2523 
2524 /**
2525  * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2526  * @handle:	Pointer to TI SCI handle.
2527  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2528  *		pairing
2529  * @src_thread:	Source PSI-L thread ID
2530  * @dst_thread: Destination PSI-L thread ID
2531  *
2532  * Return: 0 if all went well, else returns appropriate error value.
2533  */
2534 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2535 				   u32 nav_id, u32 src_thread, u32 dst_thread)
2536 {
2537 	struct ti_sci_msg_psil_pair *req;
2538 	struct ti_sci_msg_hdr *resp;
2539 	struct ti_sci_xfer *xfer;
2540 	struct ti_sci_info *info;
2541 	struct device *dev;
2542 	int ret = 0;
2543 
2544 	if (IS_ERR(handle))
2545 		return PTR_ERR(handle);
2546 	if (!handle)
2547 		return -EINVAL;
2548 
2549 	info = handle_to_ti_sci_info(handle);
2550 	dev = info->dev;
2551 
2552 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2553 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2554 				   sizeof(*req), sizeof(*resp));
2555 	if (IS_ERR(xfer)) {
2556 		ret = PTR_ERR(xfer);
2557 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2558 		return ret;
2559 	}
2560 	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2561 	req->nav_id = nav_id;
2562 	req->src_thread = src_thread;
2563 	req->dst_thread = dst_thread;
2564 
2565 	ret = ti_sci_do_xfer(info, xfer);
2566 	if (ret) {
2567 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2568 		goto fail;
2569 	}
2570 
2571 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2572 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2573 
2574 fail:
2575 	ti_sci_put_one_xfer(&info->minfo, xfer);
2576 
2577 	return ret;
2578 }
2579 
2580 /**
2581  * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2582  * @handle:	Pointer to TI SCI handle.
2583  * @nav_id:	Device ID of Navigator Subsystem which should be used for
2584  *		unpairing
2585  * @src_thread:	Source PSI-L thread ID
2586  * @dst_thread:	Destination PSI-L thread ID
2587  *
2588  * Return: 0 if all went well, else returns appropriate error value.
2589  */
2590 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2591 				     u32 nav_id, u32 src_thread, u32 dst_thread)
2592 {
2593 	struct ti_sci_msg_psil_unpair *req;
2594 	struct ti_sci_msg_hdr *resp;
2595 	struct ti_sci_xfer *xfer;
2596 	struct ti_sci_info *info;
2597 	struct device *dev;
2598 	int ret = 0;
2599 
2600 	if (IS_ERR(handle))
2601 		return PTR_ERR(handle);
2602 	if (!handle)
2603 		return -EINVAL;
2604 
2605 	info = handle_to_ti_sci_info(handle);
2606 	dev = info->dev;
2607 
2608 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2609 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2610 				   sizeof(*req), sizeof(*resp));
2611 	if (IS_ERR(xfer)) {
2612 		ret = PTR_ERR(xfer);
2613 		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2614 		return ret;
2615 	}
2616 	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2617 	req->nav_id = nav_id;
2618 	req->src_thread = src_thread;
2619 	req->dst_thread = dst_thread;
2620 
2621 	ret = ti_sci_do_xfer(info, xfer);
2622 	if (ret) {
2623 		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2624 		goto fail;
2625 	}
2626 
2627 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2628 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2629 
2630 fail:
2631 	ti_sci_put_one_xfer(&info->minfo, xfer);
2632 
2633 	return ret;
2634 }
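
/*
 * Illustrative sketch: DMA client code brackets channel usage with a
 * pair/unpair of the PSI-L threads via the ops installed below:
 *
 *	ret = handle->ops.rm_psil_ops.pair(handle, nav_id, src, dst);
 *	if (ret)
 *		return ret;
 *	...
 *	handle->ops.rm_psil_ops.unpair(handle, nav_id, src, dst);
 */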
2635 
2636 /**
2637  * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2638  * @handle:	Pointer to TI SCI handle.
2639  * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2640  *		structure
2641  *
2642  * Return: 0 if all went well, else returns appropriate error value.
2643  *
2644  * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2645  * more info.
2646  */
2647 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2648 			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2649 {
2650 	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2651 	struct ti_sci_msg_hdr *resp;
2652 	struct ti_sci_xfer *xfer;
2653 	struct ti_sci_info *info;
2654 	struct device *dev;
2655 	int ret = 0;
2656 
2657 	if (IS_ERR_OR_NULL(handle))
2658 		return -EINVAL;
2659 
2660 	info = handle_to_ti_sci_info(handle);
2661 	dev = info->dev;
2662 
2663 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2664 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2665 				   sizeof(*req), sizeof(*resp));
2666 	if (IS_ERR(xfer)) {
2667 		ret = PTR_ERR(xfer);
2668 		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2669 		return ret;
2670 	}
2671 	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2672 	req->valid_params = params->valid_params;
2673 	req->nav_id = params->nav_id;
2674 	req->index = params->index;
2675 	req->tx_pause_on_err = params->tx_pause_on_err;
2676 	req->tx_filt_einfo = params->tx_filt_einfo;
2677 	req->tx_filt_pswords = params->tx_filt_pswords;
2678 	req->tx_atype = params->tx_atype;
2679 	req->tx_chan_type = params->tx_chan_type;
2680 	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2681 	req->tx_fetch_size = params->tx_fetch_size;
2682 	req->tx_credit_count = params->tx_credit_count;
2683 	req->txcq_qnum = params->txcq_qnum;
2684 	req->tx_priority = params->tx_priority;
2685 	req->tx_qos = params->tx_qos;
2686 	req->tx_orderid = params->tx_orderid;
2687 	req->fdepth = params->fdepth;
2688 	req->tx_sched_priority = params->tx_sched_priority;
2689 	req->tx_burst_size = params->tx_burst_size;
2690 	req->tx_tdtype = params->tx_tdtype;
2691 	req->extended_ch_type = params->extended_ch_type;
2692 
2693 	ret = ti_sci_do_xfer(info, xfer);
2694 	if (ret) {
2695 		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2696 		goto fail;
2697 	}
2698 
2699 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2700 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2701 
2702 fail:
2703 	ti_sci_put_one_xfer(&info->minfo, xfer);
2704 	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2705 	return ret;
2706 }
2707 
2708 /**
2709  * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2710  * @handle:	Pointer to TI SCI handle.
2711  * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2712  *		structure
2713  *
2714  * Return: 0 if all went well, else returns appropriate error value.
2715  *
2716  * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2717  * more info.
2718  */
2719 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2720 			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2721 {
2722 	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2723 	struct ti_sci_msg_hdr *resp;
2724 	struct ti_sci_xfer *xfer;
2725 	struct ti_sci_info *info;
2726 	struct device *dev;
2727 	int ret = 0;
2728 
2729 	if (IS_ERR_OR_NULL(handle))
2730 		return -EINVAL;
2731 
2732 	info = handle_to_ti_sci_info(handle);
2733 	dev = info->dev;
2734 
2735 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2736 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2737 				   sizeof(*req), sizeof(*resp));
2738 	if (IS_ERR(xfer)) {
2739 		ret = PTR_ERR(xfer);
2740 		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2741 		return ret;
2742 	}
2743 	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2744 	req->valid_params = params->valid_params;
2745 	req->nav_id = params->nav_id;
2746 	req->index = params->index;
2747 	req->rx_fetch_size = params->rx_fetch_size;
2748 	req->rxcq_qnum = params->rxcq_qnum;
2749 	req->rx_priority = params->rx_priority;
2750 	req->rx_qos = params->rx_qos;
2751 	req->rx_orderid = params->rx_orderid;
2752 	req->rx_sched_priority = params->rx_sched_priority;
2753 	req->flowid_start = params->flowid_start;
2754 	req->flowid_cnt = params->flowid_cnt;
2755 	req->rx_pause_on_err = params->rx_pause_on_err;
2756 	req->rx_atype = params->rx_atype;
2757 	req->rx_chan_type = params->rx_chan_type;
2758 	req->rx_ignore_short = params->rx_ignore_short;
2759 	req->rx_ignore_long = params->rx_ignore_long;
2760 	req->rx_burst_size = params->rx_burst_size;
2761 
2762 	ret = ti_sci_do_xfer(info, xfer);
2763 	if (ret) {
2764 		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2765 		goto fail;
2766 	}
2767 
2768 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2769 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2770 
2771 fail:
2772 	ti_sci_put_one_xfer(&info->minfo, xfer);
2773 	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2774 	return ret;
2775 }
2776 
2777 /**
2778  * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2779  * @handle:	Pointer to TI SCI handle.
2780  * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2781  *		structure
2782  *
2783  * Return: 0 if all went well, else returns appropriate error value.
2784  *
2785  * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2786  * more info.
2787  */
2788 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2789 			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2790 {
2791 	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2792 	struct ti_sci_msg_hdr *resp;
2793 	struct ti_sci_xfer *xfer;
2794 	struct ti_sci_info *info;
2795 	struct device *dev;
2796 	int ret = 0;
2797 
2798 	if (IS_ERR_OR_NULL(handle))
2799 		return -EINVAL;
2800 
2801 	info = handle_to_ti_sci_info(handle);
2802 	dev = info->dev;
2803 
2804 	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2805 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2806 				   sizeof(*req), sizeof(*resp));
2807 	if (IS_ERR(xfer)) {
2808 		ret = PTR_ERR(xfer);
2809 		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2810 		return ret;
2811 	}
2812 	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2813 	req->valid_params = params->valid_params;
2814 	req->nav_id = params->nav_id;
2815 	req->flow_index = params->flow_index;
2816 	req->rx_einfo_present = params->rx_einfo_present;
2817 	req->rx_psinfo_present = params->rx_psinfo_present;
2818 	req->rx_error_handling = params->rx_error_handling;
2819 	req->rx_desc_type = params->rx_desc_type;
2820 	req->rx_sop_offset = params->rx_sop_offset;
2821 	req->rx_dest_qnum = params->rx_dest_qnum;
2822 	req->rx_src_tag_hi = params->rx_src_tag_hi;
2823 	req->rx_src_tag_lo = params->rx_src_tag_lo;
2824 	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2825 	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2826 	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2827 	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2828 	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2829 	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2830 	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2831 	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2832 	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2833 	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2834 	req->rx_ps_location = params->rx_ps_location;
2835 
2836 	ret = ti_sci_do_xfer(info, xfer);
2837 	if (ret) {
2838 		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2839 		goto fail;
2840 	}
2841 
2842 	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2843 	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2844 
2845 fail:
2846 	ti_sci_put_one_xfer(&info->minfo, xfer);
2847 	dev_dbg(dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2848 	return ret;
2849 }
2850 
2851 /**
2852  * ti_sci_cmd_proc_request() - Command to request control of a physical processor
2853  * @handle:	Pointer to TI SCI handle
2854  * @proc_id:	Processor ID this request is for
2855  *
2856  * Return: 0 if all went well, else returns appropriate error value.
2857  */
2858 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2859 				   u8 proc_id)
2860 {
2861 	struct ti_sci_msg_req_proc_request *req;
2862 	struct ti_sci_msg_hdr *resp;
2863 	struct ti_sci_info *info;
2864 	struct ti_sci_xfer *xfer;
2865 	struct device *dev;
2866 	int ret = 0;
2867 
2868 	if (!handle)
2869 		return -EINVAL;
2870 	if (IS_ERR(handle))
2871 		return PTR_ERR(handle);
2872 
2873 	info = handle_to_ti_sci_info(handle);
2874 	dev = info->dev;
2875 
2876 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2877 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2878 				   sizeof(*req), sizeof(*resp));
2879 	if (IS_ERR(xfer)) {
2880 		ret = PTR_ERR(xfer);
2881 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2882 		return ret;
2883 	}
2884 	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2885 	req->processor_id = proc_id;
2886 
2887 	ret = ti_sci_do_xfer(info, xfer);
2888 	if (ret) {
2889 		dev_err(dev, "Mbox send fail %d\n", ret);
2890 		goto fail;
2891 	}
2892 
2893 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2894 
2895 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2896 
2897 fail:
2898 	ti_sci_put_one_xfer(&info->minfo, xfer);
2899 
2900 	return ret;
2901 }
2902 
2903 /**
2904  * ti_sci_cmd_proc_release() - Command to release control of a physical processor
2905  * @handle:	Pointer to TI SCI handle
2906  * @proc_id:	Processor ID this request is for
2907  *
2908  * Return: 0 if all went well, else returns appropriate error value.
2909  */
2910 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2911 				   u8 proc_id)
2912 {
2913 	struct ti_sci_msg_req_proc_release *req;
2914 	struct ti_sci_msg_hdr *resp;
2915 	struct ti_sci_info *info;
2916 	struct ti_sci_xfer *xfer;
2917 	struct device *dev;
2918 	int ret = 0;
2919 
2920 	if (!handle)
2921 		return -EINVAL;
2922 	if (IS_ERR(handle))
2923 		return PTR_ERR(handle);
2924 
2925 	info = handle_to_ti_sci_info(handle);
2926 	dev = info->dev;
2927 
2928 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2929 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2930 				   sizeof(*req), sizeof(*resp));
2931 	if (IS_ERR(xfer)) {
2932 		ret = PTR_ERR(xfer);
2933 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2934 		return ret;
2935 	}
2936 	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2937 	req->processor_id = proc_id;
2938 
2939 	ret = ti_sci_do_xfer(info, xfer);
2940 	if (ret) {
2941 		dev_err(dev, "Mbox send fail %d\n", ret);
2942 		goto fail;
2943 	}
2944 
2945 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2946 
2947 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2948 
2949 fail:
2950 	ti_sci_put_one_xfer(&info->minfo, xfer);
2951 
2952 	return ret;
2953 }
2954 
2955 /**
2956  * ti_sci_cmd_proc_handover() - Command to handover a physical processor
2957  *				control to a host in the processor's access
2958  *				control list.
2959  * @handle:	Pointer to TI SCI handle
2960  * @proc_id:	Processor ID this request is for
2961  * @host_id:	Host ID to get the control of the processor
2962  *
2963  * Return: 0 if all went well, else returns appropriate error value.
2964  */
2965 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2966 				    u8 proc_id, u8 host_id)
2967 {
2968 	struct ti_sci_msg_req_proc_handover *req;
2969 	struct ti_sci_msg_hdr *resp;
2970 	struct ti_sci_info *info;
2971 	struct ti_sci_xfer *xfer;
2972 	struct device *dev;
2973 	int ret = 0;
2974 
2975 	if (!handle)
2976 		return -EINVAL;
2977 	if (IS_ERR(handle))
2978 		return PTR_ERR(handle);
2979 
2980 	info = handle_to_ti_sci_info(handle);
2981 	dev = info->dev;
2982 
2983 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2984 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2985 				   sizeof(*req), sizeof(*resp));
2986 	if (IS_ERR(xfer)) {
2987 		ret = PTR_ERR(xfer);
2988 		dev_err(dev, "Message alloc failed(%d)\n", ret);
2989 		return ret;
2990 	}
2991 	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2992 	req->processor_id = proc_id;
2993 	req->host_id = host_id;
2994 
2995 	ret = ti_sci_do_xfer(info, xfer);
2996 	if (ret) {
2997 		dev_err(dev, "Mbox send fail %d\n", ret);
2998 		goto fail;
2999 	}
3000 
3001 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
3002 
3003 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
3004 
3005 fail:
3006 	ti_sci_put_one_xfer(&info->minfo, xfer);
3007 
3008 	return ret;
3009 }
3010 
3011 /**
3012  * ti_sci_cmd_proc_set_config() - Command to set the processor boot
3013  *				    configuration flags
3014  * @handle:		Pointer to TI SCI handle
3015  * @proc_id:		Processor ID this request is for
3016  * @bootvector:		Processor Boot vector (start address)
3017  * @config_flags_set:	Configuration flags to be set
3018  * @config_flags_clear:	Configuration flags to be cleared.
3019  *
3020  * Return: 0 if all went well, else returns appropriate error value.
3021  */
3022 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
3023 				      u8 proc_id, u64 bootvector,
3024 				      u32 config_flags_set,
3025 				      u32 config_flags_clear)
3026 {
3027 	struct ti_sci_msg_req_set_config *req;
3028 	struct ti_sci_msg_hdr *resp;
3029 	struct ti_sci_info *info;
3030 	struct ti_sci_xfer *xfer;
3031 	struct device *dev;
3032 	int ret = 0;
3033 
3034 	if (!handle)
3035 		return -EINVAL;
3036 	if (IS_ERR(handle))
3037 		return PTR_ERR(handle);
3038 
3039 	info = handle_to_ti_sci_info(handle);
3040 	dev = info->dev;
3041 
3042 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
3043 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
3044 				   sizeof(*req), sizeof(*resp));
3045 	if (IS_ERR(xfer)) {
3046 		ret = PTR_ERR(xfer);
3047 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3048 		return ret;
3049 	}
3050 	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
3051 	req->processor_id = proc_id;
3052 	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
3053 	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
3054 				TI_SCI_ADDR_HIGH_SHIFT;
3055 	req->config_flags_set = config_flags_set;
3056 	req->config_flags_clear = config_flags_clear;
3057 
3058 	ret = ti_sci_do_xfer(info, xfer);
3059 	if (ret) {
3060 		dev_err(dev, "Mbox send fail %d\n", ret);
3061 		goto fail;
3062 	}
3063 
3064 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
3065 
3066 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
3067 
3068 fail:
3069 	ti_sci_put_one_xfer(&info->minfo, xfer);
3070 
3071 	return ret;
3072 }
3073 
3074 /**
3075  * ti_sci_cmd_proc_set_control() - Command to set the processor boot
3076  *				     control flags
3077  * @handle:			Pointer to TI SCI handle
3078  * @proc_id:			Processor ID this request is for
3079  * @control_flags_set:		Control flags to be set
3080  * @control_flags_clear:	Control flags to be cleared
3081  *
3082  * Return: 0 if all went well, else returns appropriate error value.
3083  */
3084 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
3085 				       u8 proc_id, u32 control_flags_set,
3086 				       u32 control_flags_clear)
3087 {
3088 	struct ti_sci_msg_req_set_ctrl *req;
3089 	struct ti_sci_msg_hdr *resp;
3090 	struct ti_sci_info *info;
3091 	struct ti_sci_xfer *xfer;
3092 	struct device *dev;
3093 	int ret = 0;
3094 
3095 	if (!handle)
3096 		return -EINVAL;
3097 	if (IS_ERR(handle))
3098 		return PTR_ERR(handle);
3099 
3100 	info = handle_to_ti_sci_info(handle);
3101 	dev = info->dev;
3102 
3103 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
3104 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
3105 				   sizeof(*req), sizeof(*resp));
3106 	if (IS_ERR(xfer)) {
3107 		ret = PTR_ERR(xfer);
3108 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3109 		return ret;
3110 	}
3111 	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
3112 	req->processor_id = proc_id;
3113 	req->control_flags_set = control_flags_set;
3114 	req->control_flags_clear = control_flags_clear;
3115 
3116 	ret = ti_sci_do_xfer(info, xfer);
3117 	if (ret) {
3118 		dev_err(dev, "Mbox send fail %d\n", ret);
3119 		goto fail;
3120 	}
3121 
3122 	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
3123 
3124 	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
3125 
3126 fail:
3127 	ti_sci_put_one_xfer(&info->minfo, xfer);
3128 
3129 	return ret;
3130 }
3131 
3132 /**
3133  * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
3134  * @handle:	Pointer to TI SCI handle
3135  * @proc_id:	Processor ID this request is for
3136  * @bv:		Processor Boot vector (start address)
3137  * @cfg_flags:	Processor specific configuration flags
3138  * @ctrl_flags:	Processor specific control flags
3139  * @sts_flags:	Processor specific status flags
3140  *
3141  * Return: 0 if all went well, else returns appropriate error value.
3142  */
3143 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
3144 				      u8 proc_id, u64 *bv, u32 *cfg_flags,
3145 				      u32 *ctrl_flags, u32 *sts_flags)
3146 {
3147 	struct ti_sci_msg_resp_get_status *resp;
3148 	struct ti_sci_msg_req_get_status *req;
3149 	struct ti_sci_info *info;
3150 	struct ti_sci_xfer *xfer;
3151 	struct device *dev;
3152 	int ret = 0;
3153 
3154 	if (!handle)
3155 		return -EINVAL;
3156 	if (IS_ERR(handle))
3157 		return PTR_ERR(handle);
3158 
3159 	info = handle_to_ti_sci_info(handle);
3160 	dev = info->dev;
3161 
3162 	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
3163 				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
3164 				   sizeof(*req), sizeof(*resp));
3165 	if (IS_ERR(xfer)) {
3166 		ret = PTR_ERR(xfer);
3167 		dev_err(dev, "Message alloc failed(%d)\n", ret);
3168 		return ret;
3169 	}
3170 	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
3171 	req->processor_id = proc_id;
3172 
3173 	ret = ti_sci_do_xfer(info, xfer);
3174 	if (ret) {
3175 		dev_err(dev, "Mbox send fail %d\n", ret);
3176 		goto fail;
3177 	}
3178 
3179 	resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
3180 
3181 	if (!ti_sci_is_response_ack(resp)) {
3182 		ret = -ENODEV;
3183 	} else {
3184 		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
3185 		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
3186 		       TI_SCI_ADDR_HIGH_MASK);
3187 		*cfg_flags = resp->config_flags;
3188 		*ctrl_flags = resp->control_flags;
3189 		*sts_flags = resp->status_flags;
3190 	}
3191 
3192 fail:
3193 	ti_sci_put_one_xfer(&info->minfo, xfer);
3194 
3195 	return ret;
3196 }
3197 
3198 /*
3199  * ti_sci_setup_ops() - Setup the operations structures
3200  * @info:	pointer to TISCI instance
3201  */
3202 static void ti_sci_setup_ops(struct ti_sci_info *info)
3203 {
3204 	struct ti_sci_ops *ops = &info->handle.ops;
3205 	struct ti_sci_core_ops *core_ops = &ops->core_ops;
3206 	struct ti_sci_dev_ops *dops = &ops->dev_ops;
3207 	struct ti_sci_clk_ops *cops = &ops->clk_ops;
3208 	struct ti_sci_pm_ops *pmops = &ops->pm_ops;
3209 	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
3210 	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
3211 	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
3212 	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
3213 	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
3214 	struct ti_sci_proc_ops *pops = &ops->proc_ops;
3215 
3216 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
3217 
3218 	dops->get_device = ti_sci_cmd_get_device;
3219 	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
3220 	dops->idle_device = ti_sci_cmd_idle_device;
3221 	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
3222 	dops->put_device = ti_sci_cmd_put_device;
3223 
3224 	dops->is_valid = ti_sci_cmd_dev_is_valid;
3225 	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
3226 	dops->is_idle = ti_sci_cmd_dev_is_idle;
3227 	dops->is_stop = ti_sci_cmd_dev_is_stop;
3228 	dops->is_on = ti_sci_cmd_dev_is_on;
3229 	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
3230 	dops->set_device_resets = ti_sci_cmd_set_device_resets;
3231 	dops->get_device_resets = ti_sci_cmd_get_device_resets;
3232 
3233 	cops->get_clock = ti_sci_cmd_get_clock;
3234 	cops->idle_clock = ti_sci_cmd_idle_clock;
3235 	cops->put_clock = ti_sci_cmd_put_clock;
3236 	cops->is_auto = ti_sci_cmd_clk_is_auto;
3237 	cops->is_on = ti_sci_cmd_clk_is_on;
3238 	cops->is_off = ti_sci_cmd_clk_is_off;
3239 
3240 	cops->set_parent = ti_sci_cmd_clk_set_parent;
3241 	cops->get_parent = ti_sci_cmd_clk_get_parent;
3242 	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
3243 
3244 	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
3245 	cops->set_freq = ti_sci_cmd_clk_set_freq;
3246 	cops->get_freq = ti_sci_cmd_clk_get_freq;
3247 
3248 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3249 		pr_debug("detected DM managed LPM in fw_caps\n");
3250 		pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason;
3251 		pmops->set_device_constraint = ti_sci_cmd_set_device_constraint;
3252 		pmops->set_latency_constraint = ti_sci_cmd_set_latency_constraint;
3253 	}
3254 
3255 	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
3256 	rm_core_ops->get_range_from_shost =
3257 				ti_sci_cmd_get_resource_range_from_shost;
3258 
3259 	iops->set_irq = ti_sci_cmd_set_irq;
3260 	iops->set_event_map = ti_sci_cmd_set_event_map;
3261 	iops->free_irq = ti_sci_cmd_free_irq;
3262 	iops->free_event_map = ti_sci_cmd_free_event_map;
3263 
3264 	rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
3265 
3266 	psilops->pair = ti_sci_cmd_rm_psil_pair;
3267 	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
3268 
3269 	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
3270 	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
3271 	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
3272 
3273 	pops->request = ti_sci_cmd_proc_request;
3274 	pops->release = ti_sci_cmd_proc_release;
3275 	pops->handover = ti_sci_cmd_proc_handover;
3276 	pops->set_config = ti_sci_cmd_proc_set_config;
3277 	pops->set_control = ti_sci_cmd_proc_set_control;
3278 	pops->get_status = ti_sci_cmd_proc_get_status;
3279 }
3280 
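/*
 * Example: an illustrative sketch of how clients use the populated ops
 * instead of calling the ti_sci_cmd_*() helpers directly; "MY_DEV_ID" is
 * a hypothetical TISCI device ID.
 *
 *	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
 *	int ret;
 *
 *	ret = dops->get_device(handle, MY_DEV_ID);
 *	if (!ret)
 *		ret = dops->put_device(handle, MY_DEV_ID);
 */
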
3281 /**
3282  * ti_sci_get_handle() - Get the TI SCI handle for a device
3283  * @dev:	Pointer to device for which we want SCI handle
3284  *
3285  * NOTE: The function does not track individual clients of the framework
3286  * and is expected to be maintained by caller of TI SCI protocol library.
3287  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
3288  * Return: pointer to handle if successful, else:
3289  * -EPROBE_DEFER if the instance is not ready
3290  * -ENODEV if the required node handler is missing
3291  * -EINVAL if invalid conditions are encountered.
3292  */
3293 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
3294 {
3295 	struct device_node *ti_sci_np;
3296 	struct ti_sci_handle *handle = NULL;
3297 	struct ti_sci_info *info;
3298 
3299 	if (!dev) {
3300 		pr_err("I need a device pointer\n");
3301 		return ERR_PTR(-EINVAL);
3302 	}
3303 	ti_sci_np = of_get_parent(dev->of_node);
3304 	if (!ti_sci_np) {
3305 		dev_err(dev, "No OF information\n");
3306 		return ERR_PTR(-EINVAL);
3307 	}
3308 
3309 	mutex_lock(&ti_sci_list_mutex);
3310 	list_for_each_entry(info, &ti_sci_list, node) {
3311 		if (ti_sci_np == info->dev->of_node) {
3312 			handle = &info->handle;
3313 			info->users++;
3314 			break;
3315 		}
3316 	}
3317 	mutex_unlock(&ti_sci_list_mutex);
3318 	of_node_put(ti_sci_np);
3319 
3320 	if (!handle)
3321 		return ERR_PTR(-EPROBE_DEFER);
3322 
3323 	return handle;
3324 }
3325 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
3326 
3327 /**
3328  * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
3329  * @handle:	Handle acquired by ti_sci_get_handle
3330  *
3331  * NOTE: The function does not track individual clients of the framework
3332  * and is expected to be maintained by caller of TI SCI protocol library.
3333  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
3334  *
3335  * Return: 0 if successfully released;
3336  * if an error pointer was passed, the error value is returned back;
3337  * if NULL was passed, -EINVAL is returned.
3338  */
3339 int ti_sci_put_handle(const struct ti_sci_handle *handle)
3340 {
3341 	struct ti_sci_info *info;
3342 
3343 	if (IS_ERR(handle))
3344 		return PTR_ERR(handle);
3345 	if (!handle)
3346 		return -EINVAL;
3347 
3348 	info = handle_to_ti_sci_info(handle);
3349 	mutex_lock(&ti_sci_list_mutex);
3350 	if (!WARN_ON(!info->users))
3351 		info->users--;
3352 	mutex_unlock(&ti_sci_list_mutex);
3353 
3354 	return 0;
3355 }
3356 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
3357 
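/*
 * Example: an illustrative sketch of the balanced get/put pattern the
 * NOTEs above describe; error handling is abbreviated.
 *
 *	const struct ti_sci_handle *handle;
 *
 *	handle = ti_sci_get_handle(dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ti_sci_put_handle(handle);
 */
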
3358 static void devm_ti_sci_release(struct device *dev, void *res)
3359 {
3360 	const struct ti_sci_handle **ptr = res;
3361 	const struct ti_sci_handle *handle = *ptr;
3362 	int ret;
3363 
3364 	ret = ti_sci_put_handle(handle);
3365 	if (ret)
3366 		dev_err(dev, "failed to put handle %d\n", ret);
3367 }
3368 
3369 /**
3370  * devm_ti_sci_get_handle() - Managed get handle
3371  * @dev:	device for which we want the SCI handle.
3372  *
3373  * NOTE: This releases the handle once the device resources are
3374  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
3375  * The function does not track individual clients of the framework
3376  * and is expected to be maintained by caller of TI SCI protocol library.
3377  *
3378  * Return: pointer to handle if successful, else corresponding error pointer.
3379  */
3380 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
3381 {
3382 	const struct ti_sci_handle **ptr;
3383 	const struct ti_sci_handle *handle;
3384 
3385 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3386 	if (!ptr)
3387 		return ERR_PTR(-ENOMEM);
3388 	handle = ti_sci_get_handle(dev);
3389 
3390 	if (!IS_ERR(handle)) {
3391 		*ptr = handle;
3392 		devres_add(dev, ptr);
3393 	} else {
3394 		devres_free(ptr);
3395 	}
3396 
3397 	return handle;
3398 }
3399 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
3400 
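/*
 * Example: an illustrative sketch of the managed variant in a client's
 * probe(); no explicit put is needed on the error or removal paths, as
 * the handle is released automatically when the device is unbound.
 *
 *	handle = devm_ti_sci_get_handle(&pdev->dev);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */
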
3401 /**
3402  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
3403  * @np:		device node
3404  * @property:	property name containing phandle on TISCI node
3405  *
3406  * NOTE: The function does not track individual clients of the framework
3407  * and is expected to be maintained by caller of TI SCI protocol library.
3408  * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
3409  * Return: pointer to handle if successful, else:
3410  * -EPROBE_DEFER if the instance is not ready
3411  * -ENODEV if the required node handler is missing
3412  * -EINVAL if invalid conditions are encountered.
3413  */
3414 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
3415 						  const char *property)
3416 {
3417 	struct ti_sci_handle *handle = NULL;
3418 	struct device_node *ti_sci_np;
3419 	struct ti_sci_info *info;
3420 
3421 	if (!np) {
3422 		pr_err("I need a device node pointer\n");
3423 		return ERR_PTR(-EINVAL);
3424 	}
3425 
3426 	ti_sci_np = of_parse_phandle(np, property, 0);
3427 	if (!ti_sci_np)
3428 		return ERR_PTR(-ENODEV);
3429 
3430 	mutex_lock(&ti_sci_list_mutex);
3431 	list_for_each_entry(info, &ti_sci_list, node) {
3432 		if (ti_sci_np == info->dev->of_node) {
3433 			handle = &info->handle;
3434 			info->users++;
3435 			break;
3436 		}
3437 	}
3438 	mutex_unlock(&ti_sci_list_mutex);
3439 	of_node_put(ti_sci_np);
3440 
3441 	if (!handle)
3442 		return ERR_PTR(-EPROBE_DEFER);
3443 
3444 	return handle;
3445 }
3446 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
3447 
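/*
 * Example: an illustrative sketch assuming a client node that references
 * the TISCI node through a "ti,sci" phandle property (the property name
 * is defined by the client binding, not by this driver):
 *
 *	client: client@0 {
 *		ti,sci = <&dmsc>;
 *	};
 *
 *	handle = ti_sci_get_by_phandle(np, "ti,sci");
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */
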
3448 /**
3449  * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
3450  * @dev:	Device pointer requesting TISCI handle
3451  * @property:	property name containing phandle on TISCI node
3452  *
3453  * NOTE: This releases the handle once the device resources are
3454  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
3455  * The function does not track individual clients of the framework
3456  * and is expected to be maintained by caller of TI SCI protocol library.
3457  *
3458  * Return: pointer to handle if successful, else corresponding error pointer.
3459  */
3460 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
3461 						       const char *property)
3462 {
3463 	const struct ti_sci_handle *handle;
3464 	const struct ti_sci_handle **ptr;
3465 
3466 	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3467 	if (!ptr)
3468 		return ERR_PTR(-ENOMEM);
3469 	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
3470 
3471 	if (!IS_ERR(handle)) {
3472 		*ptr = handle;
3473 		devres_add(dev, ptr);
3474 	} else {
3475 		devres_free(ptr);
3476 	}
3477 
3478 	return handle;
3479 }
3480 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
3481 
3482 /**
3483  * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
3484  * @res:	Pointer to the TISCI resource
3485  *
3486  * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
3487  */
3488 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3489 {
3490 	unsigned long flags;
3491 	u16 set, free_bit;
3492 
3493 	raw_spin_lock_irqsave(&res->lock, flags);
3494 	for (set = 0; set < res->sets; set++) {
3495 		struct ti_sci_resource_desc *desc = &res->desc[set];
3496 		int res_count = desc->num + desc->num_sec;
3497 
3498 		free_bit = find_first_zero_bit(desc->res_map, res_count);
3499 		if (free_bit != res_count) {
3500 			__set_bit(free_bit, desc->res_map);
3501 			raw_spin_unlock_irqrestore(&res->lock, flags);
3502 
3503 			if (desc->num && free_bit < desc->num)
3504 				return desc->start + free_bit;
3505 			else
3506 				return desc->start_sec + free_bit;
3507 		}
3508 	}
3509 	raw_spin_unlock_irqrestore(&res->lock, flags);
3510 
3511 	return TI_SCI_RESOURCE_NULL;
3512 }
3513 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
3514 
3515 /**
3516  * ti_sci_release_resource() - Release a resource from TISCI resource.
3517  * @res:	Pointer to the TISCI resource
3518  * @id:		Resource id to be released.
3519  */
3520 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3521 {
3522 	unsigned long flags;
3523 	u16 set;
3524 
3525 	raw_spin_lock_irqsave(&res->lock, flags);
3526 	for (set = 0; set < res->sets; set++) {
3527 		struct ti_sci_resource_desc *desc = &res->desc[set];
3528 
3529 		if (desc->num && desc->start <= id &&
3530 		    (desc->start + desc->num) > id)
3531 			__clear_bit(id - desc->start, desc->res_map);
3532 		else if (desc->num_sec && desc->start_sec <= id &&
3533 			 (desc->start_sec + desc->num_sec) > id)
3534 			__clear_bit(id - desc->start_sec, desc->res_map);
3535 	}
3536 	raw_spin_unlock_irqrestore(&res->lock, flags);
3537 }
3538 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
3539 
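/*
 * Example: an illustrative sketch pairing the allocate/release helpers
 * above; TI_SCI_RESOURCE_NULL signals that the range is exhausted.
 *
 *	u16 irq = ti_sci_get_free_resource(res);
 *
 *	if (irq == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;
 *	...
 *	ti_sci_release_resource(res, irq);
 */
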
3540 /**
3541  * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
3542  * @res:	Pointer to the TISCI resource
3543  *
3544  * Return: Total number of available resources.
3545  */
3546 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3547 {
3548 	u32 set, count = 0;
3549 
3550 	for (set = 0; set < res->sets; set++)
3551 		count += res->desc[set].num + res->desc[set].num_sec;
3552 
3553 	return count;
3554 }
3555 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3556 
3557 /**
3558  * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
3559  * @handle:	TISCI handle
3560  * @dev:	Device pointer to which the resource is assigned
3561  * @dev_id:	TISCI device id to which the resource is assigned
3562  * @sub_types:	Array of sub_types assigned corresponding to device
3563  * @sets:	Number of sub_types
3564  *
3565  * Return: Pointer to ti_sci_resource if all went well else appropriate
3566  *	   error pointer.
3567  */
3568 static struct ti_sci_resource *
3569 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
3570 			      struct device *dev, u32 dev_id, u32 *sub_types,
3571 			      u32 sets)
3572 {
3573 	struct ti_sci_resource *res;
3574 	bool valid_set = false;
3575 	int i, ret, res_count;
3576 
3577 	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3578 	if (!res)
3579 		return ERR_PTR(-ENOMEM);
3580 
3581 	res->sets = sets;
3582 	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3583 				 GFP_KERNEL);
3584 	if (!res->desc)
3585 		return ERR_PTR(-ENOMEM);
3586 
3587 	for (i = 0; i < res->sets; i++) {
3588 		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3589 							sub_types[i],
3590 							&res->desc[i]);
3591 		if (ret) {
3592 			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3593 				dev_id, sub_types[i]);
3594 			memset(&res->desc[i], 0, sizeof(res->desc[i]));
3595 			continue;
3596 		}
3597 
3598 		dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
3599 			dev_id, sub_types[i], res->desc[i].start,
3600 			res->desc[i].num, res->desc[i].start_sec,
3601 			res->desc[i].num_sec);
3602 
3603 		valid_set = true;
3604 		res_count = res->desc[i].num + res->desc[i].num_sec;
3605 		res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count,
3606 							  GFP_KERNEL);
3607 		if (!res->desc[i].res_map)
3608 			return ERR_PTR(-ENOMEM);
3609 	}
3610 	raw_spin_lock_init(&res->lock);
3611 
3612 	if (valid_set)
3613 		return res;
3614 
3615 	return ERR_PTR(-EINVAL);
3616 }
3617 
3618 /**
3619  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3620  * @handle:	TISCI handle
3621  * @dev:	Device pointer to which the resource is assigned
3622  * @dev_id:	TISCI device id to which the resource is assigned
3623  * @of_prop:	property name by which the resources are represented
3624  *
3625  * Return: Pointer to ti_sci_resource if all went well else appropriate
3626  *	   error pointer.
3627  */
3628 struct ti_sci_resource *
3629 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3630 			    struct device *dev, u32 dev_id, char *of_prop)
3631 {
3632 	struct ti_sci_resource *res;
3633 	u32 *sub_types;
3634 	int sets;
3635 
3636 	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3637 					       sizeof(u32));
3638 	if (sets < 0) {
3639 		dev_err(dev, "%s resource type ids not available\n", of_prop);
3640 		return ERR_PTR(sets);
3641 	}
3642 
3643 	sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
3644 	if (!sub_types)
3645 		return ERR_PTR(-ENOMEM);
3646 
3647 	of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
3648 	res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
3649 					    sets);
3650 
3651 	kfree(sub_types);
3652 	return res;
3653 }
3654 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
3655 
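/*
 * Example: an illustrative sketch assuming a client binding that lists
 * its resource sub-types in a u32 array property; the property name
 * below is hypothetical.
 *
 *	DT:	ti,sci-rm-range-example = <0x0a>, <0x0b>;
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					  "ti,sci-rm-range-example");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */
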
3656 /**
3657  * devm_ti_sci_get_resource() - Get a resource range assigned to the device
3658  * @handle:	TISCI handle
3659  * @dev:	Device pointer to which the resource is assigned
3660  * @dev_id:	TISCI device id to which the resource is assigned
3661  * @sub_type:	TISCI resource subtype representing the resource.
3662  *
3663  * Return: Pointer to ti_sci_resource if all went well else appropriate
3664  *	   error pointer.
3665  */
3666 struct ti_sci_resource *
3667 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
3668 			 u32 dev_id, u32 sub_type)
3669 {
3670 	return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
3671 }
3672 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
3673 
3674 /*
3675  * Iterate over all device nodes that have a wakeup-source property and check
3676  * if one of the possible phandles points to a Partial-IO system state. If one
3677  * does, resolve the device node to an actual device and check whether wakeup
3678  * is enabled.
3679  */
3680 static bool ti_sci_partial_io_wakeup_enabled(struct ti_sci_info *info)
3681 {
3682 	struct device_node *wakeup_node = NULL;
3683 
3684 	for_each_node_with_property(wakeup_node, "wakeup-source") {
3685 		struct of_phandle_iterator it;
3686 		int err;
3687 
3688 		of_for_each_phandle(&it, err, wakeup_node, "wakeup-source", NULL, 0) {
3689 			struct platform_device *pdev;
3690 			bool may_wakeup;
3691 
3692 			/*
3693 			 * Continue if idle-state-name is not "off-wake". The
3694 			 * return value is the index of the matched string,
3695 			 * which is 0 when "off-wake" is present.
3696 			 */
3697 			if (of_property_match_string(it.node, "idle-state-name", "off-wake"))
3698 				continue;
3699 
3700 			pdev = of_find_device_by_node(wakeup_node);
3701 			if (!pdev)
3702 				continue;
3703 
3704 			may_wakeup = device_may_wakeup(&pdev->dev);
3705 			put_device(&pdev->dev);
3706 
3707 			if (may_wakeup) {
3708 				dev_dbg(info->dev, "%pOF identified as wakeup source for Partial-IO\n",
3709 					wakeup_node);
3710 				of_node_put(it.node);
3711 				of_node_put(wakeup_node);
3712 				return true;
3713 			}
3714 		}
3715 	}
3716 
3717 	return false;
3718 }
3719 
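/*
 * Example: an illustrative DT sketch for the lookup above, assuming a
 * wakeup-capable device whose "wakeup-source" phandle points at a
 * Partial-IO system state node (all node names here are hypothetical):
 *
 *	partial_io: io-state {
 *		idle-state-name = "off-wake";
 *	};
 *
 *	uart0: serial@1000000 {
 *		wakeup-source = <&partial_io>;
 *	};
 */
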
3720 static int ti_sci_sys_off_handler(struct sys_off_data *data)
3721 {
3722 	struct ti_sci_info *info = data->cb_data;
3723 	const struct ti_sci_handle *handle = &info->handle;
3724 	bool enter_partial_io = ti_sci_partial_io_wakeup_enabled(info);
3725 	int ret;
3726 
3727 	if (!enter_partial_io)
3728 		return NOTIFY_DONE;
3729 
3730 	dev_info(info->dev, "Entering Partial-IO because a powered wakeup-enabled device was found.\n");
3731 
3732 	ret = ti_sci_cmd_prepare_sleep(handle, TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO, 0, 0, 0);
3733 	if (ret) {
3734 		dev_err(info->dev,
3735 			"Failed to enter Partial-IO %pe, trying to do an emergency restart\n",
3736 			ERR_PTR(ret));
3737 		emergency_restart();
3738 	}
3739 
3740 	mdelay(5000);
3741 	emergency_restart();
3742 
3743 	return NOTIFY_DONE;
3744 }
3745 
3746 static int tisci_reboot_handler(struct sys_off_data *data)
3747 {
3748 	struct ti_sci_info *info = data->cb_data;
3749 	const struct ti_sci_handle *handle = &info->handle;
3750 
3751 	ti_sci_cmd_core_reboot(handle);
3752 
3753 	/* Whether the call fails or passes, we should not be here in the first place */
3754 	return NOTIFY_BAD;
3755 }
3756 
3757 static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
3758 {
3759 	/*
3760 	 * Map and validate the target Linux suspend state to TISCI LPM.
3761 	 * Default is to let Device Manager select the low power mode.
3762 	 */
3763 	switch (pm_suspend_target_state) {
3764 	case PM_SUSPEND_MEM:
3765 		if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3766 			/*
3767 			 * For the DM_MANAGED mode the context is reserved for
3768 			 * internal use and can be 0
3769 			 */
3770 			return ti_sci_cmd_prepare_sleep(&info->handle,
3771 							TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED,
3772 							0, 0, 0);
3773 		} else {
3774 			/* DM Managed is not supported by the firmware. */
3775 			dev_err(info->dev, "Suspend to memory is not supported by the firmware\n");
3776 			return -EOPNOTSUPP;
3777 		}
3778 		break;
3779 	default:
3780 		/*
3781 		 * Do not fail if we don't have action to take for a
3782 		 * specific suspend mode.
3783 		 */
3784 		return 0;
3785 	}
3786 }
3787 
3788 static int ti_sci_suspend(struct device *dev)
3789 {
3790 	struct ti_sci_info *info = dev_get_drvdata(dev);
3791 	struct device *cpu_dev, *cpu_dev_max = NULL;
3792 	s32 val, cpu_lat = 0;
3793 	u16 cpu_lat_ms;
3794 	int i, ret;
3795 
3796 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
3797 		for_each_possible_cpu(i) {
3798 			cpu_dev = get_cpu_device(i);
3799 			val = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_RESUME_LATENCY);
3800 			if (val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) {
3801 				cpu_lat = max(cpu_lat, val);
3802 				cpu_dev_max = cpu_dev;
3803 			}
3804 		}
3805 		if (cpu_dev_max) {
3806 			/*
3807 			 * PM QoS latency unit is usecs, device manager uses msecs.
3808 			 * Convert to msecs and round down for device manager.
3809 			 */
3810 			cpu_lat_ms = cpu_lat / USEC_PER_MSEC;
3811 			dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__,
3812 				cpu_lat_ms);
3813 			ret = ti_sci_cmd_set_latency_constraint(&info->handle,
3814 								cpu_lat_ms,
3815 								TISCI_MSG_CONSTRAINT_SET);
3816 			if (ret)
3817 				return ret;
3818 		}
3819 	}
3820 
3821 	ret = ti_sci_prepare_system_suspend(info);
3822 	if (ret)
3823 		return ret;
3824 
3825 	return 0;
3826 }
3827 
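/*
 * Example: an illustrative sketch of how a client feeds the CPU latency
 * constraint consumed above; a 2000 us resume-latency QoS request is
 * rounded down to 2 ms before being sent to the device manager.
 *
 *	struct dev_pm_qos_request req;
 *
 *	ret = dev_pm_qos_add_request(cpu_dev, &req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 2000);
 */
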
3828 static int ti_sci_suspend_noirq(struct device *dev)
3829 {
3830 	struct ti_sci_info *info = dev_get_drvdata(dev);
3831 	int ret = 0;
3832 
3833 	if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
3834 		ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
3835 		if (ret)
3836 			return ret;
3837 	}
3838 
3839 	return 0;
3840 }
3841 
3842 static int ti_sci_resume_noirq(struct device *dev)
3843 {
3844 	struct ti_sci_info *info = dev_get_drvdata(dev);
3845 	int ret = 0;
3846 	u32 source;
3847 	u64 time;
3848 	u8 pin;
3849 	u8 mode;
3850 
3851 	if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
3852 		ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
3853 		if (ret)
3854 			return ret;
3855 	}
3856 
3857 	ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
3858 	/* Do not fail to resume on error as the wake reason is not critical */
3859 	if (!ret)
3860 		dev_info(dev, "ti_sci: wakeup source:0x%x, pin:0x%x, mode:0x%x\n",
3861 			 source, pin, mode);
3862 
3863 	return 0;
3864 }
3865 
3866 static void ti_sci_pm_complete(struct device *dev)
3867 {
3868 	struct ti_sci_info *info = dev_get_drvdata(dev);
3869 
3870 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT) {
3871 		if (ti_sci_cmd_lpm_abort(dev))
3872 			dev_err(dev, "LPM clear selection failed.\n");
3873 	}
3874 }
3875 
3876 static const struct dev_pm_ops ti_sci_pm_ops = {
3877 	.suspend = pm_sleep_ptr(ti_sci_suspend),
3878 	.suspend_noirq = pm_sleep_ptr(ti_sci_suspend_noirq),
3879 	.resume_noirq = pm_sleep_ptr(ti_sci_resume_noirq),
3880 	.complete = pm_sleep_ptr(ti_sci_pm_complete),
3881 };
3882 
3883 /* Description for K2G */
3884 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3885 	.default_host_id = 2,
3886 	/* Conservative duration */
3887 	.max_rx_timeout_ms = 1000,
3888 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3889 	.max_msgs = 20,
3890 	.max_msg_size = 64,
3891 };
3892 
3893 /* Description for AM654 */
3894 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3895 	.default_host_id = 12,
3896 	/* Conservative duration */
3897 	.max_rx_timeout_ms = 10000,
3898 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3899 	.max_msgs = 20,
3900 	.max_msg_size = 60,
3901 };
3902 
3903 static const struct of_device_id ti_sci_of_match[] = {
3904 	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
3905 	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
3906 	{ /* Sentinel */ },
3907 };
3908 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
3909 
3910 static int ti_sci_probe(struct platform_device *pdev)
3911 {
3912 	struct device *dev = &pdev->dev;
3913 	const struct ti_sci_desc *desc;
3914 	struct ti_sci_xfer *xfer;
3915 	struct ti_sci_info *info = NULL;
3916 	struct ti_sci_xfers_info *minfo;
3917 	struct mbox_client *cl;
3918 	int ret = -EINVAL;
3919 	int i;
3920 	u32 h_id;
3921 
3922 	desc = device_get_match_data(dev);
3923 
3924 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3925 	if (!info)
3926 		return -ENOMEM;
3927 
3928 	info->dev = dev;
3929 	info->desc = desc;
3930 	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
3931 	/* if the property is not present in DT, use a default from desc */
3932 	if (ret < 0) {
3933 		info->host_id = info->desc->default_host_id;
3934 	} else {
3935 		if (!h_id) {
3936 			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
3937 			info->host_id = info->desc->default_host_id;
3938 		} else {
3939 			info->host_id = h_id;
3940 		}
3941 	}
3942 
3943 	INIT_LIST_HEAD(&info->node);
3944 	minfo = &info->minfo;
3945 
3946 	/*
3947 	 * Pre-allocate messages
3948 	 * NEVER allocate more than what we can indicate in hdr.seq
3949 	 * if we have a data description bug, force a fix.
3950 	 */
3951 	if (WARN_ON(desc->max_msgs >=
3952 		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
3953 		return -EINVAL;
3954 
3955 	minfo->xfer_block = devm_kcalloc(dev,
3956 					 desc->max_msgs,
3957 					 sizeof(*minfo->xfer_block),
3958 					 GFP_KERNEL);
3959 	if (!minfo->xfer_block)
3960 		return -ENOMEM;
3961 
3962 	minfo->xfer_alloc_table = devm_bitmap_zalloc(dev,
3963 						     desc->max_msgs,
3964 						     GFP_KERNEL);
3965 	if (!minfo->xfer_alloc_table)
3966 		return -ENOMEM;
3967 
3968 	/* Pre-initialize the buffer pointer to pre-allocated buffers */
3969 	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
3970 		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
3971 					      GFP_KERNEL);
3972 		if (!xfer->xfer_buf)
3973 			return -ENOMEM;
3974 
3975 		xfer->tx_message.buf = xfer->xfer_buf;
3976 		init_completion(&xfer->done);
3977 	}
3978 
3979 	ret = ti_sci_debugfs_create(pdev, info);
3980 	if (ret)
3981 		dev_warn(dev, "Failed to create debug file\n");
3982 
3983 	platform_set_drvdata(pdev, info);
3984 
3985 	cl = &info->cl;
3986 	cl->dev = dev;
3987 	cl->tx_block = false;
3988 	cl->rx_callback = ti_sci_rx_callback;
3989 	cl->knows_txdone = true;
3990 
3991 	spin_lock_init(&minfo->xfer_lock);
3992 	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
3993 
3994 	info->chan_rx = mbox_request_channel_byname(cl, "rx");
3995 	if (IS_ERR(info->chan_rx)) {
3996 		ret = PTR_ERR(info->chan_rx);
3997 		goto out;
3998 	}
3999 
4000 	info->chan_tx = mbox_request_channel_byname(cl, "tx");
4001 	if (IS_ERR(info->chan_tx)) {
4002 		ret = PTR_ERR(info->chan_tx);
4003 		goto out;
4004 	}
4005 	ret = ti_sci_cmd_get_revision(info);
4006 	if (ret) {
4007 		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
4008 		goto out;
4009 	}
4010 
4011 	ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
4012 	dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s%s\n",
4013 		info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
4014 		info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
4015 		info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
4016 		info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "",
4017 		info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION ? " IO-Isolation" : ""
4018 	);
4019 
4020 	ti_sci_setup_ops(info);
4021 
4022 	ret = devm_register_restart_handler(dev, tisci_reboot_handler, info);
4023 	if (ret) {
4024 		dev_err(dev, "reboot registration fail(%d)\n", ret);
4025 		goto out;
4026 	}
4027 
4028 	if (info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO) {
4029 		ret = devm_register_sys_off_handler(dev,
4030 						    SYS_OFF_MODE_POWER_OFF,
4031 						    SYS_OFF_PRIO_FIRMWARE,
4032 						    ti_sci_sys_off_handler,
4033 						    info);
4034 		if (ret) {
4035 			dev_err(dev, "Failed to register sys_off_handler %pe\n",
4036 				ERR_PTR(ret));
4037 			goto out;
4038 		}
4039 	}
4040 
4041 	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
4042 		 info->handle.version.abi_major, info->handle.version.abi_minor,
4043 		 info->handle.version.firmware_revision,
4044 		 info->handle.version.firmware_description);
4045 
4046 	mutex_lock(&ti_sci_list_mutex);
4047 	list_add_tail(&info->node, &ti_sci_list);
4048 	mutex_unlock(&ti_sci_list_mutex);
4049 
4050 	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
4051 	if (ret) {
4052 		dev_err(dev, "platform_populate failed %pe\n", ERR_PTR(ret));
4053 		goto out;
4054 	}
4055 	return 0;
4056 
4057 out:
4058 	if (!IS_ERR(info->chan_tx))
4059 		mbox_free_channel(info->chan_tx);
4060 	if (!IS_ERR(info->chan_rx))
4061 		mbox_free_channel(info->chan_rx);
4062 	debugfs_remove(info->d);
4063 	return ret;
4064 }
4065 
4066 static struct platform_driver ti_sci_driver = {
4067 	.probe = ti_sci_probe,
4068 	.driver = {
4069 		   .name = "ti-sci",
4070 		   .of_match_table = ti_sci_of_match,
4071 		   .suppress_bind_attrs = true,
4072 		   .pm = &ti_sci_pm_ops,
4073 	},
4074 };
4075 module_platform_driver(ti_sci_driver);
4076 
4077 MODULE_LICENSE("GPL v2");
4078 MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
4079 MODULE_AUTHOR("Nishanth Menon");
4080 MODULE_ALIAS("platform:ti-sci");
4081