// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2020 Intel Corporation
//
// Author: Cezary Rojewski <cezary.rojewski@intel.com>
//

#include <linux/irqreturn.h>
#include "core.h"
#include "messages.h"
#include "registers.h"
#include "trace.h"

#define CATPT_IPC_TIMEOUT_MS	300

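/*
 * Initialize IPC state shared by all requests: completions for immediate
 * (DONE) and delayed (BUSY) replies and the locks guarding transmission.
 * The link stays not-ready until firmware reports FW_READY and
 * catpt_ipc_arm() is called.
 */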
void catpt_ipc_init(struct catpt_ipc *ipc, struct device *dev)
{
	ipc->dev = dev;
	ipc->ready = false;
	ipc->default_timeout = CATPT_IPC_TIMEOUT_MS;
	init_completion(&ipc->done_completion);
	init_completion(&ipc->busy_completion);
	spin_lock_init(&ipc->lock);
	mutex_init(&ipc->mutex);
}

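/*
 * Arm the link once firmware reports FW_READY: allocate the rx buffer
 * sized to the outbox, cache the mailbox configuration and mark the
 * connection ready for request/reply traffic.
 */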
static int catpt_ipc_arm(struct catpt_ipc *ipc, struct catpt_fw_ready *config)
{
	/*
	 * Both tx and rx are put into and received from outbox. Inbox is
	 * only used for notifications where payload size is known upfront,
	 * thus no separate buffer is allocated for it.
	 */
	ipc->rx.data = devm_kzalloc(ipc->dev, config->outbox_size, GFP_KERNEL);
	if (!ipc->rx.data)
		return -ENOMEM;

	memcpy(&ipc->config, config, sizeof(*config));
	ipc->ready = true;

	return 0;
}

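/* Reset rx state and both completions before a new request goes out. */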
static void catpt_ipc_msg_init(struct catpt_ipc *ipc,
			       struct catpt_ipc_msg *reply)
{
	lockdep_assert_held(&ipc->lock);

	ipc->rx.header = 0;
	ipc->rx.size = reply ? reply->size : 0;
	reinit_completion(&ipc->done_completion);
	reinit_completion(&ipc->busy_completion);
}

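/*
 * Copy the request payload into the outbox and ring the doorbell by
 * writing the header with the BUSY bit set to the IPCC register.
 */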
static void catpt_dsp_send_tx(struct catpt_dev *cdev,
			      const struct catpt_ipc_msg *tx)
{
	u32 header = tx->header | CATPT_IPCC_BUSY;

	trace_catpt_ipc_request(header);
	trace_catpt_ipc_payload(tx->data, tx->size);

	memcpy_toio(catpt_outbox_addr(cdev), tx->data, tx->size);
	catpt_writel_shim(cdev, IPCC, header);
}

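/*
 * Wait for the immediate reply (DONE) and, if firmware answered with
 * PENDING status, for the delayed reply (BUSY) as well. Returns 0 on
 * success or -ETIMEDOUT if either wait expires.
 */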
static int catpt_wait_msg_completion(struct catpt_dev *cdev, int timeout)
{
	struct catpt_ipc *ipc = &cdev->ipc;
	int ret;

	ret = wait_for_completion_timeout(&ipc->done_completion,
					  msecs_to_jiffies(timeout));
	if (!ret)
		return -ETIMEDOUT;
	if (ipc->rx.rsp.status != CATPT_REPLY_PENDING)
		return 0;

	/* wait for delayed reply */
	ret = wait_for_completion_timeout(&ipc->busy_completion,
					  msecs_to_jiffies(timeout));
	return ret ? 0 : -ETIMEDOUT;
}

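/*
 * Perform a single request/reply transaction: validate sizes against the
 * outbox, transmit under ipc->lock and wait for the firmware response.
 * A timeout is treated as severed communication and clears ipc->ready;
 * recovery is left as a TODO.
 */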
static int catpt_dsp_do_send_msg(struct catpt_dev *cdev,
				 struct catpt_ipc_msg request,
				 struct catpt_ipc_msg *reply, int timeout)
{
	struct catpt_ipc *ipc = &cdev->ipc;
	unsigned long flags;
	int ret;

	if (!ipc->ready)
		return -EPERM;
	if (request.size > ipc->config.outbox_size ||
	    (reply && reply->size > ipc->config.outbox_size))
		return -EINVAL;

	spin_lock_irqsave(&ipc->lock, flags);
	catpt_ipc_msg_init(ipc, reply);
	catpt_dsp_send_tx(cdev, &request);
	spin_unlock_irqrestore(&ipc->lock, flags);

	ret = catpt_wait_msg_completion(cdev, timeout);
	if (ret) {
		dev_crit(cdev->dev, "communication severed: %d, rebooting dsp..\n",
			 ret);
		ipc->ready = false;
		/* TODO: attempt recovery */
		return ret;
	}

	ret = ipc->rx.rsp.status;
	if (reply) {
		reply->header = ipc->rx.header;

		if (!ret && reply->data)
			memcpy(reply->data, ipc->rx.data, reply->size);
	}

	return ret;
}

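/* Serialized entry point: only one transaction is in flight at a time. */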
int catpt_dsp_send_msg_timeout(struct catpt_dev *cdev,
			       struct catpt_ipc_msg request,
			       struct catpt_ipc_msg *reply, int timeout)
{
	struct catpt_ipc *ipc = &cdev->ipc;
	int ret;

	mutex_lock(&ipc->mutex);
	ret = catpt_dsp_do_send_msg(cdev, request, reply, timeout);
	mutex_unlock(&ipc->mutex);

	return ret;
}

int catpt_dsp_send_msg(struct catpt_dev *cdev, struct catpt_ipc_msg request,
		       struct catpt_ipc_msg *reply)
{
	return catpt_dsp_send_msg_timeout(cdev, request, reply,
					  cdev->ipc.default_timeout);
}

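/*
 * Example usage (illustrative sketch, not taken from this driver): a caller
 * fills a struct catpt_ipc_msg, points ->data at its payload and sends it
 * synchronously. The field names mirror the ones used throughout this file;
 * the payload type and header encoding below are hypothetical placeholders,
 * the real encodings live in messages.h.
 *
 *	struct catpt_ipc_msg request = {0};
 *	struct example_payload payload;		// hypothetical layout
 *	int ret;
 *
 *	request.header = ...;			// encoded per messages.h
 *	request.size = sizeof(payload);
 *	request.data = &payload;
 *
 *	ret = catpt_dsp_send_msg(cdev, request, NULL);	// no reply payload
 *	if (ret)
 *		...;	// negative errno or non-zero firmware status
 */

/*
 * Handle a stream notification delivered by firmware through the inbox:
 * position updates are forwarded to the stream, glitches are logged.
 */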
static void
catpt_dsp_notify_stream(struct catpt_dev *cdev, union catpt_notify_msg msg)
{
	struct catpt_stream_runtime *stream;
	struct catpt_notify_position pos;
	struct catpt_notify_glitch glitch;

	stream = catpt_stream_find(cdev, msg.stream_hw_id);
	if (!stream) {
		dev_warn(cdev->dev, "notify %d for non-existent stream %d\n",
			 msg.notify_reason, msg.stream_hw_id);
		return;
	}

	switch (msg.notify_reason) {
	case CATPT_NOTIFY_POSITION_CHANGED:
		memcpy_fromio(&pos, catpt_inbox_addr(cdev), sizeof(pos));
		trace_catpt_ipc_payload((u8 *)&pos, sizeof(pos));

		catpt_stream_update_position(cdev, stream, &pos);
		break;

	case CATPT_NOTIFY_GLITCH_OCCURRED:
		memcpy_fromio(&glitch, catpt_inbox_addr(cdev), sizeof(glitch));
		trace_catpt_ipc_payload((u8 *)&glitch, sizeof(glitch));

		dev_warn(cdev->dev, "glitch %d at pos: 0x%08llx, wp: 0x%08x\n",
			 glitch.type, glitch.presentation_pos,
			 glitch.write_pos);
		break;

	default:
		dev_warn(cdev->dev, "unknown notification: %d received\n",
			 msg.notify_reason);
		break;
	}
}

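/*
 * Store the reply header and, on success, copy the reply payload from
 * the outbox into the preallocated rx buffer.
 */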
static void catpt_dsp_copy_rx(struct catpt_dev *cdev, u32 header)
{
	struct catpt_ipc *ipc = &cdev->ipc;

	ipc->rx.header = header;
	if (ipc->rx.rsp.status != CATPT_REPLY_SUCCESS)
		return;

	memcpy_fromio(ipc->rx.data, catpt_outbox_addr(cdev), ipc->rx.size);
	trace_catpt_ipc_payload(ipc->rx.data, ipc->rx.size);
}

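/*
 * Dispatch a message received from the DSP: FW_READY arms the link,
 * a core dump request marks it not-ready, stream notifications are routed
 * to catpt_dsp_notify_stream() and remaining stream messages complete a
 * delayed reply to an earlier PENDING request.
 */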
static void catpt_dsp_process_response(struct catpt_dev *cdev, u32 header)
{
	union catpt_notify_msg msg = CATPT_MSG(header);
	struct catpt_ipc *ipc = &cdev->ipc;

	if (msg.fw_ready) {
		struct catpt_fw_ready config;
		/* to fit the 32-bit header, the original address is shifted right by 3 */
		u32 off = msg.mailbox_address << 3;

		memcpy_fromio(&config, cdev->lpe_ba + off, sizeof(config));
		trace_catpt_ipc_payload((u8 *)&config, sizeof(config));

		catpt_ipc_arm(ipc, &config);
		complete(&cdev->fw_ready);
		return;
	}

	switch (msg.global_msg_type) {
	case CATPT_GLB_REQUEST_CORE_DUMP:
		dev_err(cdev->dev, "ADSP device coredump received\n");
		ipc->ready = false;
		catpt_coredump(cdev);
		/* TODO: attempt recovery */
		break;

	case CATPT_GLB_STREAM_MESSAGE:
		switch (msg.stream_msg_type) {
		case CATPT_STRM_NOTIFICATION:
			catpt_dsp_notify_stream(cdev, msg);
			break;
		default:
			catpt_dsp_copy_rx(cdev, header);
			/* signal completion of delayed reply */
			complete(&ipc->busy_completion);
			break;
		}
		break;

	default:
		dev_warn(cdev->dev, "unknown response: %d received\n",
			 msg.global_msg_type);
		break;
	}
}

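/*
 * Threaded half of the IRQ handler: process the delayed reply or
 * notification carried by IPCD, acknowledge it with the DONE bit and
 * re-enable the DSP BUSY interrupt masked by the hard handler.
 */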
irqreturn_t catpt_dsp_irq_thread(int irq, void *dev_id)
{
	struct catpt_dev *cdev = dev_id;
	u32 ipcd;

	ipcd = catpt_readl_shim(cdev, IPCD);
	trace_catpt_ipc_notify(ipcd);

	/* ensure there is a delayed reply or notification to process */
	if (!(ipcd & CATPT_IPCD_BUSY))
		return IRQ_NONE;

	catpt_dsp_process_response(cdev, ipcd);

	/* tell the DSP that processing is completed */
	catpt_updatel_shim(cdev, IPCD, CATPT_IPCD_BUSY | CATPT_IPCD_DONE,
			   CATPT_IPCD_DONE);
	/* unmask dsp BUSY interrupt */
	catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB, 0);

	return IRQ_HANDLED;
}

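/*
 * Hard IRQ handler: an immediate reply (IPCC DONE) is consumed in place,
 * while a delayed reply or notification (IPCD BUSY) only gets its
 * interrupt masked and is deferred to catpt_dsp_irq_thread().
 */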
irqreturn_t catpt_dsp_irq_handler(int irq, void *dev_id)
{
	struct catpt_dev *cdev = dev_id;
	irqreturn_t ret = IRQ_NONE;
	u32 isc, ipcc;

	isc = catpt_readl_shim(cdev, ISC);
	trace_catpt_irq(isc);

	/* immediate reply */
	if (isc & CATPT_ISC_IPCCD) {
		/* mask host DONE interrupt */
		catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCCD, CATPT_IMC_IPCCD);

		ipcc = catpt_readl_shim(cdev, IPCC);
		trace_catpt_ipc_reply(ipcc);
		catpt_dsp_copy_rx(cdev, ipcc);
		complete(&cdev->ipc.done_completion);

		/* tell the DSP that processing is completed */
		catpt_updatel_shim(cdev, IPCC, CATPT_IPCC_DONE, 0);
		/* unmask host DONE interrupt */
		catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCCD, 0);
		ret = IRQ_HANDLED;
	}

	/* delayed reply or notification */
	if (isc & CATPT_ISC_IPCDB) {
		/* mask dsp BUSY interrupt */
		catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB, CATPT_IMC_IPCDB);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}