// SPDX-License-Identifier: GPL-2.0-only
/*
 *  sst_pvt.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14	Intel Corp
 *  Authors:	Vinod Koul <vinod.koul@intel.com>
 *		Harsha Priya <priya.harsha@intel.com>
 *		Dharageswari R <dharageswari.r@intel.com>
 *		KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <sound/asound.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"

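/*
 * sst_shim_write / sst_shim_read - 32-bit accessors for the SST shim
 * registers, addressed as a byte offset from the mapped shim base.
 */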
int sst_shim_write(void __iomem *addr, int offset, int value)
{
	writel(value, addr + offset);
	return 0;
}

u32 sst_shim_read(void __iomem *addr, int offset)
{
	return readl(addr + offset);
}

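/*
 * 64-bit register accessors, implemented with memcpy_fromio()/memcpy_toio()
 * rather than a single 64-bit MMIO access.
 */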
u64 sst_reg_read64(void __iomem *addr, int offset)
{
	u64 val = 0;

	memcpy_fromio(&val, addr + offset, sizeof(val));

	return val;
}

int sst_shim_write64(void __iomem *addr, int offset, u64 value)
{
	memcpy_toio(addr + offset, &value, sizeof(value));
	return 0;
}

u64 sst_shim_read64(void __iomem *addr, int offset)
{
	u64 val = 0;

	memcpy_fromio(&val, addr + offset, sizeof(val));
	return val;
}

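/*
 * sst_set_fw_state_locked - update the firmware state under sst_lock
 *
 * @sst_drv_ctx: Driver context
 * @sst_state: new firmware state to record
 */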
void sst_set_fw_state_locked(
		struct intel_sst_drv *sst_drv_ctx, int sst_state)
{
	mutex_lock(&sst_drv_ctx->sst_lock);
	sst_drv_ctx->sst_state = sst_state;
	mutex_unlock(&sst_drv_ctx->sst_lock);
}

/*
 * sst_wait_timeout - wait on event for timeout
 *
 * @sst_drv_ctx: Driver context
 * @block: Driver block to wait on
 *
 * This function waits with a timeout value (and is not interruptible) on a
 * given block event
 */
int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
{
	int retval = 0;

	/*
	 * NOTE:
	 * Observed that FW processes the alloc msg and replies even
	 * before the alloc thread has finished execution
	 */
	dev_dbg(sst_drv_ctx->dev,
		"waiting for condition %x ipc %d drv_id %d\n",
		block->condition, block->msg_id, block->drv_id);
	if (wait_event_timeout(sst_drv_ctx->wait_queue,
				block->condition,
				msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
		/* event wake */
		dev_dbg(sst_drv_ctx->dev, "Event wake %x\n",
				block->condition);
		dev_dbg(sst_drv_ctx->dev, "message ret: %d\n",
				block->ret_code);
		retval = -block->ret_code;
	} else {
		block->on = false;
		dev_err(sst_drv_ctx->dev,
			"Wait timed-out condition:%#x, msg_id:%#x fw_state %#x\n",
			block->condition, block->msg_id, sst_drv_ctx->sst_state);
		sst_drv_ctx->sst_state = SST_RESET;

		retval = -EBUSY;
	}
	return retval;
}

/*
 * sst_create_ipc_msg - create an IPC message
 *
 * @arg: ipc message
 * @large: large or short message
 *
 * this function allocates structures to send a large or short
 * message to the firmware
 */
int sst_create_ipc_msg(struct ipc_post **arg, bool large)
{
	struct ipc_post *msg;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	if (large) {
		msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
		if (!msg->mailbox_data) {
			kfree(msg);
			return -ENOMEM;
		}
	} else {
		msg->mailbox_data = NULL;
	}
	msg->is_large = large;
	*arg = msg;
	return 0;
}

/*
 * sst_create_block_and_ipc_msg - Creates IPC message and sst block
 * @arg: passed to sst_create_ipc_msg()
 * @large: large or short message
 * @sst_drv_ctx: sst driver context
 * @block: return block allocated
 * @msg_id: IPC message id
 * @drv_id: stream id or private id
 */
int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
		u32 msg_id, u32 drv_id)
{
	int retval;

	retval = sst_create_ipc_msg(arg, large);
	if (retval)
		return retval;
	*block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
	if (*block == NULL) {
		/* also release the mailbox allocated for a large message */
		kfree((*arg)->mailbox_data);
		kfree(*arg);
		*arg = NULL;
		return -ENOMEM;
	}
	return 0;
}

/*
 * sst_clean_stream - clean the stream context
 *
 * @stream: stream structure
 *
 * this function resets the stream context and
 * should be called when the stream is freed
 */
void sst_clean_stream(struct stream_info *stream)
{
	stream->status = STREAM_UN_INIT;
	stream->prev = STREAM_UN_INIT;
	mutex_lock(&stream->lock);
	stream->cumm_bytes = 0;
	mutex_unlock(&stream->lock);
}

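/*
 * sst_prepare_and_post_msg - construct and send an IPC message to the DSP
 *
 * @sst: driver context
 * @task_id: DSP task the message is addressed to
 * @ipc_msg: IPC message id placed in the MRFLD header
 * @cmd_id: command id placed in the DSP header (when @fill_dsp is true)
 * @pipe_id: pipe id placed in the DSP header
 * @mbox_data_len: length of the mailbox payload
 * @mbox_data: mailbox payload copied after the DSP header
 * @data: if non-NULL, receives a kmemdup()'ed copy of the reply payload
 * @large: large or short message
 * @fill_dsp: whether to prepend an ipc_dsp_hdr to the mailbox data
 * @sync: post the message synchronously instead of via the dispatch list
 * @response: wait (with timeout) for the firmware reply
 *
 * Allocates a private id for the transaction, builds the message (and an
 * sst_block when a response is expected), posts it and optionally copies
 * the reply back to the caller.
 */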
int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
		int task_id, int ipc_msg, int cmd_id, int pipe_id,
		size_t mbox_data_len, const void *mbox_data, void **data,
		bool large, bool fill_dsp, bool sync, bool response)
{
	struct sst_block *block = NULL;
	struct ipc_post *msg = NULL;
	struct ipc_dsp_hdr dsp_hdr;
	int ret = 0, pvt_id;

	pvt_id = sst_assign_pvt_id(sst);
	if (pvt_id < 0)
		return pvt_id;

	if (response)
		ret = sst_create_block_and_ipc_msg(
				&msg, large, sst, &block, ipc_msg, pvt_id);
	else
		ret = sst_create_ipc_msg(&msg, large);

	if (ret < 0) {
		test_and_clear_bit(pvt_id, &sst->pvt_id);
		return -ENOMEM;
	}

	dev_dbg(sst->dev, "pvt_id = %d, pipe id = %d, task = %d ipc_msg: %d\n",
		 pvt_id, pipe_id, task_id, ipc_msg);
	sst_fill_header_mrfld(&msg->mrfld_header, ipc_msg,
					task_id, large, pvt_id);
	msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr) + mbox_data_len;
	msg->mrfld_header.p.header_high.part.res_rqd = !sync;
	dev_dbg(sst->dev, "header:%x\n",
			msg->mrfld_header.p.header_high.full);
	dev_dbg(sst->dev, "response rqd: %x",
			msg->mrfld_header.p.header_high.part.res_rqd);
	dev_dbg(sst->dev, "msg->mrfld_header.p.header_low_payload:%d",
			msg->mrfld_header.p.header_low_payload);
	if (fill_dsp) {
		sst_fill_header_dsp(&dsp_hdr, cmd_id, pipe_id, mbox_data_len);
		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
		if (mbox_data_len) {
			memcpy(msg->mailbox_data + sizeof(dsp_hdr),
					mbox_data, mbox_data_len);
		}
	}

	if (sync)
		sst->ops->post_message(sst, msg, true);
	else
		sst_add_to_dispatch_list_and_post(sst, msg);

	if (response) {
		ret = sst_wait_timeout(sst, block);
		if (ret < 0)
			goto out;

		if (data && block->data) {
			*data = kmemdup(block->data, block->size, GFP_KERNEL);
			if (!*data) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}
out:
	if (response)
		sst_free_block(sst, block);
	test_and_clear_bit(pvt_id, &sst->pvt_id);
	return ret;
}

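/*
 * sst_pm_runtime_put - drop a runtime PM reference on the SST device
 *
 * @sst_drv: driver context
 *
 * Marks the device as recently busy and releases the reference with
 * autosuspend so the device can power down after the idle delay.
 */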
int sst_pm_runtime_put(struct intel_sst_drv *sst_drv)
{
	int ret;

	pm_runtime_mark_last_busy(sst_drv->dev);
	ret = pm_runtime_put_autosuspend(sst_drv->dev);
	if (ret < 0)
		return ret;
	return 0;
}

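/*
 * sst_fill_header_mrfld - fill the Merrifield IPC header
 *
 * @header: header to fill
 * @msg: IPC message id
 * @task_id: DSP task id
 * @large: large or short message
 * @drv_id: stream id or private id
 *
 * Clears the header, then marks it busy with a response required.
 */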
void sst_fill_header_mrfld(union ipc_header_mrfld *header,
				int msg, int task_id, int large, int drv_id)
{
	header->full = 0;
	header->p.header_high.part.msg_id = msg;
	header->p.header_high.part.task_id = task_id;
	header->p.header_high.part.large = large;
	header->p.header_high.part.drv_id = drv_id;
	header->p.header_high.part.done = 0;
	header->p.header_high.part.busy = 1;
	header->p.header_high.part.res_rqd = 1;
}

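/*
 * sst_fill_header_dsp - fill the DSP header placed in the mailbox
 *
 * @dsp: DSP header to fill
 * @msg: command id
 * @pipe_id: pipe id
 * @len: length of the payload that follows the header
 */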
void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
					int pipe_id, int len)
{
	dsp->cmd_id = msg;
	dsp->mod_index_id = 0xff;
	dsp->pipe_id = pipe_id;
	dsp->length = len;
	dsp->mod_id = 0;
}

#define SST_MAX_BLOCKS 15
/*
 * sst_assign_pvt_id - assign a pvt id for stream
 *
 * @drv: driver context
 *
 * this function assigns a private id for calls that don't have a stream
 * context yet; it takes the block lock, uses one bit per id, and
 * assigns the first free bit it finds
 */
int sst_assign_pvt_id(struct intel_sst_drv *drv)
{
	int local;

	spin_lock(&drv->block_lock);
	/* find first zero index from lsb */
	local = ffz(drv->pvt_id);
	dev_dbg(drv->dev, "pvt_id assigned --> %d\n", local);
	if (local >= SST_MAX_BLOCKS) {
		spin_unlock(&drv->block_lock);
		dev_err(drv->dev, "PVT _ID error: no free id blocks ");
		return -EINVAL;
	}
	/* toggle the index */
	change_bit(local, &drv->pvt_id);
	spin_unlock(&drv->block_lock);
	return local;
}

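/*
 * sst_validate_strid - check that a stream id is within the valid range
 *
 * @sst_drv_ctx: driver context
 * @str_id: stream id to validate
 *
 * Returns 0 for a valid id, -EINVAL otherwise.
 */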
int sst_validate_strid(
		struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	if (str_id <= 0 || str_id > sst_drv_ctx->info.max_streams) {
		dev_err(sst_drv_ctx->dev,
			"SST ERR: invalid stream id : %d, max %d\n",
			str_id, sst_drv_ctx->info.max_streams);
		return -EINVAL;
	}

	return 0;
}

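/*
 * get_stream_info - return the stream context for a stream id
 *
 * @sst_drv_ctx: driver context
 * @str_id: stream id
 *
 * Returns NULL if the stream id is invalid.
 */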
struct stream_info *get_stream_info(
		struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	if (sst_validate_strid(sst_drv_ctx, str_id))
		return NULL;
	return &sst_drv_ctx->streams[str_id];
}

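/*
 * get_stream_id_mrfld - look up the stream id that owns a pipe
 *
 * @sst_drv_ctx: driver context
 * @pipe_id: pipe id to search for
 *
 * Returns the matching stream id, or -1 if no stream uses the pipe.
 */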
int get_stream_id_mrfld(struct intel_sst_drv *sst_drv_ctx,
		u32 pipe_id)
{
	int i;

	for (i = 1; i <= sst_drv_ctx->info.max_streams; i++)
		if (pipe_id == sst_drv_ctx->streams[i].pipe_id)
			return i;

	dev_dbg(sst_drv_ctx->dev, "no such pipe_id(%u)", pipe_id);
	return -1;
}

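/*
 * relocate_imr_addr_mrfld - rebase an IMR address on the firmware's
 * virtual base
 *
 * @base_addr: IMR address to relocate
 *
 * Keeps the offset within the 512MB-aligned region and adds it to
 * MRFLD_FW_VIRTUAL_BASE.
 */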
u32 relocate_imr_addr_mrfld(u32 base_addr)
{
	/* Get the difference from 512MB aligned base addr */
	/* relocate the base */
	base_addr = MRFLD_FW_VIRTUAL_BASE + (base_addr % (512 * 1024 * 1024));
	return base_addr;
}
EXPORT_SYMBOL_GPL(relocate_imr_addr_mrfld);

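/*
 * sst_add_to_dispatch_list_and_post - queue a message and kick the IPC path
 *
 * @sst: driver context
 * @msg: message to append to the dispatch list
 *
 * Adds the message to the IPC dispatch list under the IPC spinlock and
 * then invokes the platform post_message op.
 */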
void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
						struct ipc_post *msg)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
	list_add_tail(&msg->node, &sst->ipc_dispatch_list);
	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
	sst->ops->post_message(sst, NULL, false);
}