xref: /linux/drivers/media/pci/intel/ipu6/ipu6-fw-com.c (revision 6fd600d742744dc7ef7fc65ca26daa2b1163158a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013--2024 Intel Corporation
4  */
5 
6 #include <linux/device.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/io.h>
9 #include <linux/math.h>
10 #include <linux/overflow.h>
11 #include <linux/slab.h>
12 #include <linux/types.h>
13 
14 #include "ipu6-bus.h"
15 #include "ipu6-fw-com.h"
16 
/*
 * FWCOM layer is a shared resource between FW and driver. It consists
 * of token queues for both send and receive directions. A queue is simply
 * an array of structures with read and write indexes to the queue.
 * There are 1...n queues in both directions. Queues are located in
 * system RAM and are mapped to the ISP MMU so that both CPU and ISP can
 * see the same buffer. Indexes are located in ISP DMEM so that FW code
 * can poll those with very low latency and cost. CPU access to indexes is
 * more costly but that happens only at message sending time and
 * interrupt triggered message handling. CPU doesn't need to poll indexes.
 * wr_reg / rd_reg are offsets to those dmem locations. They are not
 * the indexes themselves.
 */
30 
/* Shared structure between driver and FW - do not modify */
struct ipu6_fw_sys_queue {
	u64 host_address;	/* token buffer address as seen by the CPU */
	u32 vied_address;	/* same buffer as mapped through the ISP MMU */
	u32 size;		/* slot count; one slot is kept empty to tell full from empty */
	u32 token_size;		/* size of a single token in bytes */
	u32 wr_reg;	/* reg number in subsystem's regmem */
	u32 rd_reg;	/* reg number of the read index in regmem */
	u32 _align;	/* pad so the packed struct stays 8-byte sized */
} __packed;
41 
/*
 * Allocation cursor used while carving per-queue token buffers and regmem
 * index registers out of the single shared DMA area; advanced by
 * ipu6_sys_queue_init() for each queue.
 */
struct ipu6_fw_sys_queue_res {
	u64 host_address;	/* next free CPU-side address in the DMA buffer */
	u32 vied_address;	/* next free ISP-side address of the same buffer */
	u32 reg;		/* next free index register number in regmem */
} __packed;
47 
/* Syscom state values shared with FW - do not modify the magic numbers */
enum syscom_state {
	/* Program load or explicit host setting should init to this */
	SYSCOM_STATE_UNINIT = 0x57a7e000,
	/* SP Syscom sets this when it is ready for use */
	SYSCOM_STATE_READY = 0x57a7e001,
	/* SP Syscom sets this when no more syscom accesses will happen */
	SYSCOM_STATE_INACTIVE = 0x57a7e002,
};
56 
/* Syscom command values shared with FW - do not modify the magic numbers */
enum syscom_cmd {
	/* Program load or explicit host setting should init to this */
	SYSCOM_COMMAND_UNINIT = 0x57a7f000,
	/* Host Syscom requests syscom to become inactive */
	SYSCOM_COMMAND_INACTIVE = 0x57a7f001,
};
63 
/* firmware config: data that is sent from the host to the SP via DDR */
/* Cell copies data into a context */

struct ipu6_fw_syscom_config {
	u32 firmware_address;

	u32 num_input_queues;	/* host -> SP queue count */
	u32 num_output_queues;	/* SP -> host queue count */

	/* ISP pointers to an array of ipu6_fw_sys_queue structures */
	u32 input_queue;
	u32 output_queue;

	/* ISYS / PSYS private data */
	u32 specific_addr;	/* ISP address of the FW-specific blob */
	u32 specific_size;	/* size of the FW-specific blob in bytes */
};
81 
/* Driver-private state for one syscom instance (returned as opaque ptr) */
struct ipu6_fw_com_context {
	struct ipu6_bus_device *adev;
	void __iomem *dmem_addr;	/* regmem base inside subsystem DMEM */
	int (*cell_ready)(struct ipu6_bus_device *adev);	/* SP state poll */
	void (*cell_start)(struct ipu6_bus_device *adev);	/* SP start hook */

	/* one contiguous DMA allocation holding config, queues and tokens */
	void *dma_buffer;
	dma_addr_t dma_addr;
	unsigned int dma_size;
	unsigned long attrs;

	struct ipu6_fw_sys_queue *input_queue;	/* array of host to SP queues */
	struct ipu6_fw_sys_queue *output_queue;	/* array of SP to host */

	u32 config_vied_addr;	/* ISP-side address of the syscom config */

	unsigned int buttress_boot_offset;	/* boot-param register offset */
	void __iomem *base_addr;	/* subsystem MMIO base */
};
101 
/* byte offsets of the write/read index registers within a queue's regmem */
#define FW_COM_WR_REG 0
#define FW_COM_RD_REG 4

/* offset of the regmem block within the subsystem DMEM */
#define REGMEM_OFFSET 0
/* magic written to TUNIT_CFG_DWR_REG to disable the tunit trace */
#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a

enum regmem_id {
	/* pass pkg_dir address to SPC in non-secure mode */
	PKG_DIR_ADDR_REG = 0,
	/* Tunit CFG blob for secure - provided by host.*/
	TUNIT_CFG_DWR_REG = 1,
	/* syscom commands - modified by the host */
	SYSCOM_COMMAND_REG = 2,
	/* Store interrupt status - updated by SP */
	SYSCOM_IRQ_REG = 3,
	/* first syscom queue pointer register */
	SYSCOM_QPR_BASE_REG = 4
};

/* base of the FW boot parameter registers in the buttress MMIO space */
#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000
/* address of 32-bit boot parameter (id) at (offset) from MMIO (base) */
#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id)			\
	((base) + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4)

enum buttress_syscom_id {
	/* pass syscom configuration to SPC */
	SYSCOM_CONFIG_ID		= 0,
	/* syscom state - modified by SP */
	SYSCOM_STATE_ID			= 1,
	/* syscom vtl0 addr mask */
	SYSCOM_VTL0_ADDR_MASK_ID	= 2,
	SYSCOM_ID_MAX
};
134 
ipu6_sys_queue_init(struct ipu6_fw_sys_queue * q,unsigned int size,unsigned int token_size,struct ipu6_fw_sys_queue_res * res)135 static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size,
136 				unsigned int token_size,
137 				struct ipu6_fw_sys_queue_res *res)
138 {
139 	unsigned int buf_size = (size + 1) * token_size;
140 
141 	q->size = size + 1;
142 	q->token_size = token_size;
143 
144 	/* acquire the shared buffer space */
145 	q->host_address = res->host_address;
146 	res->host_address += buf_size;
147 	q->vied_address = res->vied_address;
148 	res->vied_address += buf_size;
149 
150 	/* acquire the shared read and writer pointers */
151 	q->wr_reg = res->reg;
152 	res->reg++;
153 	q->rd_reg = res->reg;
154 	res->reg++;
155 }
156 
/**
 * ipu6_fw_com_prepare() - allocate and lay out the shared syscom area
 * @cfg: queue counts, token sizes, FW-specific blob and cell callbacks
 * @adev: IPU6 auxiliary device that owns the DMA memory
 * @base: ioremapped MMIO base of the subsystem (DMEM and buttress regs)
 *
 * Allocates one contiguous DMA buffer holding, in order: the syscom
 * config, the input/output queue descriptor arrays, the FW-specific blob
 * and finally the token buffers themselves. All descriptors are filled in
 * so that both CPU and ISP addresses point into this single allocation.
 *
 * Return: opaque context pointer for the other ipu6_fw_com_* calls, or
 * NULL on invalid @cfg or allocation failure.
 */
void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
			  struct ipu6_bus_device *adev, void __iomem *base)
{
	size_t conf_size, inq_size, outq_size, specific_size;
	struct ipu6_fw_syscom_config *config_host_addr;
	unsigned int sizeinput = 0, sizeoutput = 0;
	struct ipu6_fw_sys_queue_res res;
	struct ipu6_fw_com_context *ctx;
	struct device *dev = &adev->auxdev.dev;
	size_t sizeall, offset;
	unsigned long attrs = 0;
	void *specific_host_addr;
	unsigned int i;

	if (!cfg || !cfg->cell_start || !cfg->cell_ready)
		return NULL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET;
	ctx->adev = adev;
	ctx->cell_start = cfg->cell_start;
	ctx->cell_ready = cfg->cell_ready;
	ctx->buttress_boot_offset = cfg->buttress_boot_offset;
	ctx->base_addr  = base;

	/*
	 * Allocate DMA mapped memory. Allocate one big chunk.
	 */
	/* Base cfg for FW */
	conf_size = roundup(sizeof(struct ipu6_fw_syscom_config), 8);
	/* Descriptions of the queues */
	inq_size = size_mul(cfg->num_input_queues,
			    sizeof(struct ipu6_fw_sys_queue));
	outq_size = size_mul(cfg->num_output_queues,
			     sizeof(struct ipu6_fw_sys_queue));
	/* FW specific information structure */
	specific_size = roundup(cfg->specific_size, 8);

	sizeall = conf_size + inq_size + outq_size + specific_size;

	/* + 1: each queue keeps one slot empty to tell full from empty */
	for (i = 0; i < cfg->num_input_queues; i++)
		sizeinput += size_mul(cfg->input[i].queue_size + 1,
				      cfg->input[i].token_size);

	for (i = 0; i < cfg->num_output_queues; i++)
		sizeoutput += size_mul(cfg->output[i].queue_size + 1,
				       cfg->output[i].token_size);

	sizeall += sizeinput + sizeoutput;

	ctx->dma_buffer = dma_alloc_attrs(dev, sizeall, &ctx->dma_addr,
					  GFP_KERNEL, attrs);
	ctx->attrs = attrs;
	if (!ctx->dma_buffer) {
		dev_err(dev, "failed to allocate dma memory\n");
		kfree(ctx);
		return NULL;
	}

	ctx->dma_size = sizeall;

	/* the syscom config sits at the start of the allocation */
	config_host_addr = ctx->dma_buffer;
	ctx->config_vied_addr = ctx->dma_addr;

	offset = conf_size;
	ctx->input_queue = ctx->dma_buffer + offset;
	config_host_addr->input_queue = ctx->dma_addr + offset;
	config_host_addr->num_input_queues = cfg->num_input_queues;

	offset += inq_size;
	ctx->output_queue = ctx->dma_buffer + offset;
	config_host_addr->output_queue = ctx->dma_addr + offset;
	config_host_addr->num_output_queues = cfg->num_output_queues;

	/* copy firmware specific data */
	offset += outq_size;
	specific_host_addr = ctx->dma_buffer + offset;
	config_host_addr->specific_addr = ctx->dma_addr + offset;
	config_host_addr->specific_size = cfg->specific_size;
	if (cfg->specific_addr && cfg->specific_size)
		memcpy(specific_host_addr, cfg->specific_addr,
		       cfg->specific_size);

	/* initialize input queues */
	offset += specific_size;
	res.reg = SYSCOM_QPR_BASE_REG;
	res.host_address = (u64)(ctx->dma_buffer + offset);
	res.vied_address = ctx->dma_addr + offset;
	for (i = 0; i < cfg->num_input_queues; i++)
		ipu6_sys_queue_init(ctx->input_queue + i,
				    cfg->input[i].queue_size,
				    cfg->input[i].token_size, &res);

	/* initialize output queues */
	offset += sizeinput;
	res.host_address = (u64)(ctx->dma_buffer + offset);
	res.vied_address = ctx->dma_addr + offset;
	for (i = 0; i < cfg->num_output_queues; i++) {
		ipu6_sys_queue_init(ctx->output_queue + i,
				    cfg->output[i].queue_size,
				    cfg->output[i].token_size, &res);
	}

	return ctx;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6);
265 
/**
 * ipu6_fw_com_open() - hand the prepared syscom area to the SP and start it
 * @ctx: context returned by ipu6_fw_com_prepare()
 *
 * Seeds the command/state registers to UNINIT, publishes the config
 * address via the buttress boot parameters and starts the cell.
 *
 * Return: 0 on success, -EIO if the SP cell is not in a valid state.
 */
int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx)
{
	/* write magic pattern to disable the tunit trace */
	writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4);
	/* Check if SP is in valid state */
	if (!ctx->cell_ready(ctx->adev))
		return -EIO;

	/* store syscom uninitialized command */
	writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4);

	/* store syscom uninitialized state */
	writel(SYSCOM_STATE_UNINIT,
	       BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
					  ctx->buttress_boot_offset,
					  SYSCOM_STATE_ID));

	/* store firmware configuration address */
	writel(ctx->config_vied_addr,
	       BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
					  ctx->buttress_boot_offset,
					  SYSCOM_CONFIG_ID));
	ctx->cell_start(ctx->adev);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, INTEL_IPU6);
293 
/**
 * ipu6_fw_com_close() - request the SP to stop syscom accesses
 * @ctx: context returned by ipu6_fw_com_prepare()
 *
 * Return: 0 if the close request was posted, -EBUSY if the SP has not
 * reached the READY state yet.
 */
int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx)
{
	int state;

	state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
						 ctx->buttress_boot_offset,
						 SYSCOM_STATE_ID));
	if (state != SYSCOM_STATE_READY)
		return -EBUSY;

	/* set close request flag */
	writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr +
	       SYSCOM_COMMAND_REG * 4);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, INTEL_IPU6);
311 
/**
 * ipu6_fw_com_release() - free the syscom DMA area and the context
 * @ctx: context returned by ipu6_fw_com_prepare()
 * @force: when non-zero, release even if the cell is not ready
 *
 * Return: 0 on success, -EBUSY if @force is zero and the cell is busy.
 */
int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force)
{
	/* check if release is forced, and verify cell state if it is not */
	if (!force && !ctx->cell_ready(ctx->adev))
		return -EBUSY;

	dma_free_attrs(&ctx->adev->auxdev.dev, ctx->dma_size,
		       ctx->dma_buffer, ctx->dma_addr, ctx->attrs);
	kfree(ctx);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, INTEL_IPU6);
324 
/**
 * ipu6_fw_com_ready() - check whether the SP syscom reached READY state
 * @ctx: context returned by ipu6_fw_com_prepare()
 *
 * Return: true once the SP has set SYSCOM_STATE_READY in the buttress
 * boot parameter register.
 */
bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx)
{
	int state;

	state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
						 ctx->buttress_boot_offset,
						 SYSCOM_STATE_ID));

	return state == SYSCOM_STATE_READY;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, INTEL_IPU6);
336 
ipu6_send_get_token(struct ipu6_fw_com_context * ctx,int q_nbr)337 void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr)
338 {
339 	struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
340 	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
341 	unsigned int wr, rd;
342 	unsigned int packets;
343 	unsigned int index;
344 
345 	wr = readl(q_dmem + FW_COM_WR_REG);
346 	rd = readl(q_dmem + FW_COM_RD_REG);
347 
348 	if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
349 		return NULL;
350 
351 	if (wr < rd)
352 		packets = rd - wr - 1;
353 	else
354 		packets = q->size - (wr - rd + 1);
355 
356 	if (!packets)
357 		return NULL;
358 
359 	index = readl(q_dmem + FW_COM_WR_REG);
360 
361 	return (void *)(q->host_address + index * q->token_size);
362 }
363 EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, INTEL_IPU6);
364 
ipu6_send_put_token(struct ipu6_fw_com_context * ctx,int q_nbr)365 void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr)
366 {
367 	struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
368 	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
369 	unsigned int wr = readl(q_dmem + FW_COM_WR_REG) + 1;
370 
371 	if (wr >= q->size)
372 		wr = 0;
373 
374 	writel(wr, q_dmem + FW_COM_WR_REG);
375 }
376 EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, INTEL_IPU6);
377 
/**
 * ipu6_recv_get_token() - peek the next token in an SP-to-host queue
 * @ctx: context returned by ipu6_fw_com_prepare()
 * @q_nbr: output queue number
 *
 * Return: CPU pointer to the oldest unread token, or NULL if the queue is
 * empty or the dmem indexes are corrupt.
 */
void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr)
{
	struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
	unsigned int wr, rd;
	unsigned int packets;

	wr = readl(q_dmem + FW_COM_WR_REG);
	rd = readl(q_dmem + FW_COM_RD_REG);

	/* Catch indexes in dmem that are out of the queue bounds */
	if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
		return NULL;

	/* unwrap the write index so the pending count is a plain difference */
	if (wr < rd)
		wr += q->size;

	packets = wr - rd;
	if (!packets)
		return NULL;

	return (void *)(q->host_address + rd * q->token_size);
}
EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, INTEL_IPU6);
401 
ipu6_recv_put_token(struct ipu6_fw_com_context * ctx,int q_nbr)402 void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr)
403 {
404 	struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
405 	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
406 	unsigned int rd = readl(q_dmem + FW_COM_RD_REG) + 1;
407 
408 	if (rd >= q->size)
409 		rd = 0;
410 
411 	writel(rd, q_dmem + FW_COM_RD_REG);
412 }
413 EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, INTEL_IPU6);
414