// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/math.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "ipu6-bus.h"
#include "ipu6-dma.h"
#include "ipu6-fw-com.h"

/*
 * The FWCOM layer is a shared resource between FW and driver. It consists
 * of token queues for both the send and receive directions. A queue is
 * simply an array of structures with read and write indexes into the
 * queue. There are 1...n queues in each direction. Queues are located in
 * system RAM and are mapped to the ISP MMU so that both the CPU and the
 * ISP see the same buffer. The indexes are located in ISP DMEM so that FW
 * code can poll them with very low latency and cost. CPU access to the
 * indexes is more costly, but it happens only at message sending time and
 * during interrupt-triggered message handling; the CPU does not need to
 * poll the indexes. wr_reg / rd_reg are offsets to those DMEM locations,
 * not the indexes themselves.
 */

/* Shared structure between driver and FW - do not modify */
struct ipu6_fw_sys_queue {
	u64 host_address;
	u32 vied_address;
	u32 size;
	u32 token_size;
	u32 wr_reg;	/* reg number in subsystem's regmem */
	u32 rd_reg;
	u32 _align;
} __packed;

struct ipu6_fw_sys_queue_res {
	u64 host_address;
	u32 vied_address;
	u32 reg;
} __packed;

enum syscom_state {
	/* Program load or explicit host setting should init to this */
	SYSCOM_STATE_UNINIT = 0x57a7e000,
	/* SP Syscom sets this when it is ready for use */
	SYSCOM_STATE_READY = 0x57a7e001,
	/* SP Syscom sets this when no more syscom accesses will happen */
	SYSCOM_STATE_INACTIVE = 0x57a7e002,
};

enum syscom_cmd {
	/* Program load or explicit host setting should init to this */
	SYSCOM_COMMAND_UNINIT = 0x57a7f000,
	/* Host Syscom requests syscom to become inactive */
	SYSCOM_COMMAND_INACTIVE = 0x57a7f001,
};

/* firmware config: data that is sent from the host to SP via DDR */
/* Cell copies data into a context */

struct ipu6_fw_syscom_config {
	u32 firmware_address;

	u32 num_input_queues;
	u32 num_output_queues;

	/* ISP pointers to an array of ipu6_fw_sys_queue structures */
	u32 input_queue;
	u32 output_queue;

	/* ISYS / PSYS private data */
	u32 specific_addr;
	u32 specific_size;
};

struct ipu6_fw_com_context {
	struct ipu6_bus_device *adev;
	void __iomem *dmem_addr;
	int (*cell_ready)(struct ipu6_bus_device *adev);
	void (*cell_start)(struct ipu6_bus_device *adev);

	void *dma_buffer;
	dma_addr_t dma_addr;
	unsigned int dma_size;

	struct ipu6_fw_sys_queue *input_queue;	/* array of host to SP queues */
	struct ipu6_fw_sys_queue *output_queue;	/* array of SP to host queues */

	u32 config_vied_addr;

	unsigned int buttress_boot_offset;
	void __iomem *base_addr;
};

#define FW_COM_WR_REG 0
#define FW_COM_RD_REG 4

#define REGMEM_OFFSET 0
#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a

enum regmem_id {
	/* pass pkg_dir address to SPC in non-secure mode */
	PKG_DIR_ADDR_REG = 0,
	/* Tunit CFG blob for secure - provided by host. */
	TUNIT_CFG_DWR_REG = 1,
	/* syscom commands - modified by the host */
	SYSCOM_COMMAND_REG = 2,
	/* Store interrupt status - updated by SP */
	SYSCOM_IRQ_REG = 3,
	/* first syscom queue pointer register */
	SYSCOM_QPR_BASE_REG = 4
};

#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000
#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id)			\
	((base) + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4)

enum buttress_syscom_id {
	/* pass syscom configuration to SPC */
	SYSCOM_CONFIG_ID = 0,
	/* syscom state - modified by SP */
	SYSCOM_STATE_ID = 1,
	/* syscom vtl0 addr mask */
	SYSCOM_VTL0_ADDR_MASK_ID = 2,
	SYSCOM_ID_MAX
};

static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size,
				unsigned int token_size,
				struct ipu6_fw_sys_queue_res *res)
{
	unsigned int buf_size = (size + 1) * token_size;

	q->size = size + 1;
	q->token_size = token_size;

	/* acquire the shared buffer space */
	q->host_address = res->host_address;
	res->host_address += buf_size;
	q->vied_address = res->vied_address;
	res->vied_address += buf_size;

	/* acquire the shared read and write pointers */
	q->wr_reg = res->reg;
	res->reg++;
	q->rd_reg = res->reg;
	res->reg++;
}

void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg,
			  struct ipu6_bus_device *adev, void __iomem *base)
{
	size_t conf_size, inq_size, outq_size, specific_size;
	struct ipu6_fw_syscom_config *config_host_addr;
	unsigned int sizeinput = 0, sizeoutput = 0;
	struct ipu6_fw_sys_queue_res res;
	struct ipu6_fw_com_context *ctx;
	struct device *dev = &adev->auxdev.dev;
	size_t sizeall, offset;
	void *specific_host_addr;
	unsigned int i;

	if (!cfg || !cfg->cell_start || !cfg->cell_ready)
		return NULL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET;
	ctx->adev = adev;
	ctx->cell_start = cfg->cell_start;
	ctx->cell_ready = cfg->cell_ready;
	ctx->buttress_boot_offset = cfg->buttress_boot_offset;
	ctx->base_addr = base;

	/*
	 * Allocate DMA mapped memory. Allocate one big chunk.
	 */
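	/*
	 * Layout of the single allocation, in the order laid out by the
	 * code below:
	 *   1. struct ipu6_fw_syscom_config (rounded up to 8 bytes)
	 *   2. input queue descriptors (num_input_queues entries)
	 *   3. output queue descriptors (num_output_queues entries)
	 *   4. FW specific configuration blob (rounded up to 8 bytes)
	 *   5. token buffers of the input queues
	 *   6. token buffers of the output queues
	 */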
	/* Base cfg for FW */
	conf_size = roundup(sizeof(struct ipu6_fw_syscom_config), 8);
	/* Descriptions of the queues */
	inq_size = size_mul(cfg->num_input_queues,
			    sizeof(struct ipu6_fw_sys_queue));
	outq_size = size_mul(cfg->num_output_queues,
			     sizeof(struct ipu6_fw_sys_queue));
	/* FW specific information structure */
	specific_size = roundup(cfg->specific_size, 8);

	sizeall = conf_size + inq_size + outq_size + specific_size;

	for (i = 0; i < cfg->num_input_queues; i++)
		sizeinput += size_mul(cfg->input[i].queue_size + 1,
				      cfg->input[i].token_size);

	for (i = 0; i < cfg->num_output_queues; i++)
		sizeoutput += size_mul(cfg->output[i].queue_size + 1,
				       cfg->output[i].token_size);

	sizeall += sizeinput + sizeoutput;

	ctx->dma_buffer = ipu6_dma_alloc(adev, sizeall, &ctx->dma_addr,
					 GFP_KERNEL, 0);
	if (!ctx->dma_buffer) {
		dev_err(dev, "failed to allocate dma memory\n");
		kfree(ctx);
		return NULL;
	}

	ctx->dma_size = sizeall;

	config_host_addr = ctx->dma_buffer;
	ctx->config_vied_addr = ctx->dma_addr;

	offset = conf_size;
	ctx->input_queue = ctx->dma_buffer + offset;
	config_host_addr->input_queue = ctx->dma_addr + offset;
	config_host_addr->num_input_queues = cfg->num_input_queues;

	offset += inq_size;
	ctx->output_queue = ctx->dma_buffer + offset;
	config_host_addr->output_queue = ctx->dma_addr + offset;
	config_host_addr->num_output_queues = cfg->num_output_queues;

	/* copy firmware specific data */
	offset += outq_size;
	specific_host_addr = ctx->dma_buffer + offset;
	config_host_addr->specific_addr = ctx->dma_addr + offset;
	config_host_addr->specific_size = cfg->specific_size;
	if (cfg->specific_addr && cfg->specific_size)
		memcpy(specific_host_addr, cfg->specific_addr,
		       cfg->specific_size);

	ipu6_dma_sync_single(adev, ctx->config_vied_addr, sizeall);

	/* initialize input queues */
	offset += specific_size;
	res.reg = SYSCOM_QPR_BASE_REG;
	res.host_address = (uintptr_t)(ctx->dma_buffer + offset);
	res.vied_address = ctx->dma_addr + offset;
	for (i = 0; i < cfg->num_input_queues; i++)
		ipu6_sys_queue_init(ctx->input_queue + i,
				    cfg->input[i].queue_size,
				    cfg->input[i].token_size, &res);

	/* initialize output queues */
	offset += sizeinput;
	res.host_address = (uintptr_t)(ctx->dma_buffer + offset);
	res.vied_address = ctx->dma_addr + offset;
	for (i = 0; i < cfg->num_output_queues; i++) {
		ipu6_sys_queue_init(ctx->output_queue + i,
				    cfg->output[i].queue_size,
				    cfg->output[i].token_size, &res);
	}

	return ctx;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_prepare, INTEL_IPU6);
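/*
 * Boot handshake with the SP: reset the syscom command and state to
 * UNINIT, pass the firmware configuration address via the buttress boot
 * parameter registers and start the cell. The SP firmware sets the state
 * to SYSCOM_STATE_READY when it is ready for use, which is what
 * ipu6_fw_com_ready() checks for.
 */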
int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx)
{
	/* write magic pattern to disable the tunit trace */
	writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4);
	/* Check if SP is in valid state */
	if (!ctx->cell_ready(ctx->adev))
		return -EIO;

	/* store syscom uninitialized command */
	writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4);

	/* store syscom uninitialized state */
	writel(SYSCOM_STATE_UNINIT,
	       BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
					  ctx->buttress_boot_offset,
					  SYSCOM_STATE_ID));

	/* store firmware configuration address */
	writel(ctx->config_vied_addr,
	       BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
					  ctx->buttress_boot_offset,
					  SYSCOM_CONFIG_ID));
	ctx->cell_start(ctx->adev);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_open, INTEL_IPU6);

int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx)
{
	int state;

	state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
						 ctx->buttress_boot_offset,
						 SYSCOM_STATE_ID));
	if (state != SYSCOM_STATE_READY)
		return -EBUSY;

	/* set close request flag */
	writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr +
	       SYSCOM_COMMAND_REG * 4);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_close, INTEL_IPU6);

int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force)
{
	/* check if release is forced, and verify cell state if it is not */
	if (!force && !ctx->cell_ready(ctx->adev))
		return -EBUSY;

	ipu6_dma_free(ctx->adev, ctx->dma_size,
		      ctx->dma_buffer, ctx->dma_addr, 0);
	kfree(ctx);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_release, INTEL_IPU6);

bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx)
{
	int state;

	state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
						 ctx->buttress_boot_offset,
						 SYSCOM_STATE_ID));

	return state == SYSCOM_STATE_READY;
}
EXPORT_SYMBOL_NS_GPL(ipu6_fw_com_ready, INTEL_IPU6);

void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr)
{
	struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
	unsigned int wr, rd;
	unsigned int packets;
	unsigned int index;

	wr = readl(q_dmem + FW_COM_WR_REG);
	rd = readl(q_dmem + FW_COM_RD_REG);

	if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
		return NULL;

	if (wr < rd)
		packets = rd - wr - 1;
	else
		packets = q->size - (wr - rd + 1);

	if (!packets)
		return NULL;

	index = readl(q_dmem + FW_COM_WR_REG);

	return (void *)((uintptr_t)q->host_address + index * q->token_size);
}
EXPORT_SYMBOL_NS_GPL(ipu6_send_get_token, INTEL_IPU6);

void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr)
{
	struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
	unsigned int wr = readl(q_dmem + FW_COM_WR_REG) + 1;

	if (wr >= q->size)
		wr = 0;

	writel(wr, q_dmem + FW_COM_WR_REG);
}
EXPORT_SYMBOL_NS_GPL(ipu6_send_put_token, INTEL_IPU6);

void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr)
{
	struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
	unsigned int wr, rd;
	unsigned int packets;

	wr = readl(q_dmem + FW_COM_WR_REG);
	rd = readl(q_dmem + FW_COM_RD_REG);

	if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
		return NULL;

	if (wr < rd)
		wr += q->size;

	packets = wr - rd;
	if (!packets)
		return NULL;

	return (void *)((uintptr_t)q->host_address + rd * q->token_size);
}
EXPORT_SYMBOL_NS_GPL(ipu6_recv_get_token, INTEL_IPU6);

void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr)
{
	struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
	void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
	unsigned int rd = readl(q_dmem + FW_COM_RD_REG) + 1;

	if (rd >= q->size)
		rd = 0;

	writel(rd, q_dmem + FW_COM_RD_REG);
}
EXPORT_SYMBOL_NS_GPL(ipu6_recv_put_token, INTEL_IPU6);
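/*
 * Usage sketch (illustrative comment only, not part of the driver): a
 * client such as the ISYS driver would typically bring the link up and
 * send a single token roughly as follows. "cfg", "adev", "base", the
 * token layout "struct my_token", MY_CMD and the queue number 0 are
 * hypothetical placeholders.
 *
 *	struct ipu6_fw_com_context *fwcom;
 *	struct my_token *token;
 *
 *	fwcom = ipu6_fw_com_prepare(&cfg, adev, base);
 *	if (!fwcom)
 *		return -ENOMEM;
 *
 *	if (ipu6_fw_com_open(fwcom)) {
 *		ipu6_fw_com_release(fwcom, 1);
 *		return -EIO;
 *	}
 *
 *	// wait until the SP has reached SYSCOM_STATE_READY
 *	while (!ipu6_fw_com_ready(fwcom))
 *		usleep_range(100, 500);
 *
 *	// reserve a slot in input queue 0, fill it and publish it
 *	token = ipu6_send_get_token(fwcom, 0);
 *	if (token) {
 *		token->cmd = MY_CMD;
 *		ipu6_send_put_token(fwcom, 0);
 *	}
 */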