// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Wave5 series multi-standard codec IP - platform driver
 *
 * Copyright (C) 2021-2023 CHIPS&MEDIA INC
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include "wave5-vpu.h"
#include "wave5-regdefine.h"
#include "wave5-vpuconfig.h"
#include "wave5.h"

#define VPU_PLATFORM_DEVICE_NAME "vdec"
#define VPU_CLK_NAME "vcodec"

#define WAVE5_IS_ENC BIT(0)
#define WAVE5_IS_DEC BIT(1)

struct wave5_match_data {
	int flags;
	const char *fw_name;
	u32 sram_size;
};

static int vpu_poll_interval = 5;
module_param(vpu_poll_interval, int, 0644);

int wave5_vpu_wait_interrupt(struct vpu_instance *inst, unsigned int timeout)
{
	int ret;

	ret = wait_for_completion_timeout(&inst->irq_done,
					  msecs_to_jiffies(timeout));
	if (!ret)
		return -ETIMEDOUT;

	reinit_completion(&inst->irq_done);

	return 0;
}

static void wave5_vpu_handle_irq(void *dev_id)
{
	u32 seq_done;
	u32 cmd_done;
	u32 irq_reason;
	struct vpu_instance *inst;
	struct vpu_device *dev = dev_id;

	irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
	wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason);
	wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);

	list_for_each_entry(inst, &dev->instances, list) {
		seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
		cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);

		if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) ||
		    irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) {
			if (dev->product_code == WAVE515_CODE &&
			    (cmd_done & BIT(inst->id))) {
				cmd_done &= ~BIT(inst->id);
				wave5_vdi_write_register(dev, W5_RET_QUEUE_CMD_DONE_INST,
							 cmd_done);
				complete(&inst->irq_done);
			} else if (seq_done & BIT(inst->id)) {
				seq_done &= ~BIT(inst->id);
				wave5_vdi_write_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO,
							 seq_done);
				complete(&inst->irq_done);
			}
		}

		if (irq_reason & BIT(INT_WAVE5_DEC_PIC) ||
		    irq_reason & BIT(INT_WAVE5_ENC_PIC)) {
			if (cmd_done & BIT(inst->id)) {
				cmd_done &= ~BIT(inst->id);
				wave5_vdi_write_register(dev, W5_RET_QUEUE_CMD_DONE_INST,
							 cmd_done);
				inst->ops->finish_process(inst);
			}
		}

		wave5_vpu_clear_interrupt(inst, irq_reason);
	}
}

static irqreturn_t wave5_vpu_irq_thread(int irq, void *dev_id)
{
	struct vpu_device *dev = dev_id;

	if (wave5_vdi_read_register(dev, W5_VPU_VPU_INT_STS))
		wave5_vpu_handle_irq(dev);

	return IRQ_HANDLED;
}

static void wave5_vpu_irq_work_fn(struct kthread_work *work)
{
	struct vpu_device *dev = container_of(work, struct vpu_device, work);

	if (wave5_vdi_read_register(dev, W5_VPU_VPU_INT_STS))
		wave5_vpu_handle_irq(dev);
}

static enum hrtimer_restart wave5_vpu_timer_callback(struct hrtimer *timer)
{
	struct vpu_device *dev =
		container_of(timer, struct vpu_device, hrtimer);

	kthread_queue_work(dev->worker, &dev->work);
	hrtimer_forward_now(timer, ns_to_ktime(vpu_poll_interval * NSEC_PER_MSEC));

	return HRTIMER_RESTART;
}
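/*
 * Fetch the firmware image via request_firmware(), transfer the bitcode to
 * the VPU and read back the product ID and firmware revision. The firmware
 * blob is released as soon as the bitcode has been handed over.
 */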
static int wave5_vpu_load_firmware(struct device *dev, const char *fw_name,
				   u32 *revision)
{
	const struct firmware *fw;
	int ret;
	unsigned int product_id;

	ret = request_firmware(&fw, fw_name, dev);
	if (ret) {
		dev_err(dev, "request_firmware, fail: %d\n", ret);
		return ret;
	}

	ret = wave5_vpu_init_with_bitcode(dev, (u8 *)fw->data, fw->size);
	if (ret) {
		dev_err(dev, "vpu_init_with_bitcode, fail: %d\n", ret);
		release_firmware(fw);
		return ret;
	}
	release_firmware(fw);

	ret = wave5_vpu_get_version_info(dev, revision, &product_id);
	if (ret) {
		dev_err(dev, "vpu_get_version_info fail: %d\n", ret);
		return ret;
	}

	dev_dbg(dev, "%s: enum product_id: %08x, fw revision: %u\n",
		__func__, product_id, *revision);

	return 0;
}

static __maybe_unused int wave5_pm_suspend(struct device *dev)
{
	struct vpu_device *vpu = dev_get_drvdata(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	if (vpu->irq < 0)
		hrtimer_cancel(&vpu->hrtimer);

	wave5_vpu_sleep_wake(dev, true, NULL, 0);
	clk_bulk_disable_unprepare(vpu->num_clks, vpu->clks);

	return 0;
}

static __maybe_unused int wave5_pm_resume(struct device *dev)
{
	struct vpu_device *vpu = dev_get_drvdata(dev);
	int ret = 0;

	wave5_vpu_sleep_wake(dev, false, NULL, 0);
	ret = clk_bulk_prepare_enable(vpu->num_clks, vpu->clks);
	if (ret) {
		dev_err(dev, "Enabling clocks, fail: %d\n", ret);
		return ret;
	}

	if (vpu->irq < 0 && !hrtimer_active(&vpu->hrtimer))
		hrtimer_start(&vpu->hrtimer, ns_to_ktime(vpu->vpu_poll_interval * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL_PINNED);

	return ret;
}

static const struct dev_pm_ops wave5_pm_ops = {
	SET_RUNTIME_PM_OPS(wave5_pm_suspend, wave5_pm_resume, NULL)
};
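/*
 * Probe sequence: acquire the register space, resets, clocks and SRAM pool,
 * initialize the VDI layer, set up interrupt delivery (a hardware IRQ, or the
 * hrtimer/kthread polling fallback when no IRQ is provided), register the
 * V4L2 decoder/encoder devices, load the firmware, and finally enable runtime
 * PM with the VPU left in sleep state.
 */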
static int wave5_vpu_probe(struct platform_device *pdev)
{
	int ret;
	struct vpu_device *dev;
	const struct wave5_match_data *match_data;
	u32 fw_revision;

	match_data = device_get_match_data(&pdev->dev);
	if (!match_data) {
		dev_err(&pdev->dev, "missing device match data\n");
		return -EINVAL;
	}

	/* physical addresses limited to 32 bits */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->vdb_register = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->vdb_register))
		return PTR_ERR(dev->vdb_register);
	ida_init(&dev->inst_ida);

	mutex_init(&dev->dev_lock);
	mutex_init(&dev->hw_lock);
	dev_set_drvdata(&pdev->dev, dev);
	dev->dev = &pdev->dev;

	dev->resets = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
	if (IS_ERR(dev->resets)) {
		return dev_err_probe(&pdev->dev, PTR_ERR(dev->resets),
				     "Failed to get reset control\n");
	}

	ret = reset_control_deassert(dev->resets);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "Failed to deassert resets\n");

	ret = devm_clk_bulk_get_all(&pdev->dev, &dev->clks);

	/* continue without clock, assume externally managed */
	if (ret < 0) {
		dev_warn(&pdev->dev, "Getting clocks, fail: %d\n", ret);
		ret = 0;
	}
	dev->num_clks = ret;

	ret = clk_bulk_prepare_enable(dev->num_clks, dev->clks);
	if (ret) {
		dev_err(&pdev->dev, "Enabling clocks, fail: %d\n", ret);
		goto err_reset_assert;
	}

	dev->sram_pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
	if (!dev->sram_pool)
		dev_warn(&pdev->dev, "sram node not found\n");

	dev->sram_size = match_data->sram_size;

	dev->product_code = wave5_vdi_read_register(dev, VPU_PRODUCT_CODE_REGISTER);
	ret = wave5_vdi_init(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "wave5_vdi_init, fail: %d\n", ret);
		goto err_clk_dis;
	}
	dev->product = wave5_vpu_get_product_id(dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
		hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
		dev->hrtimer.function = &wave5_vpu_timer_callback;
		dev->worker = kthread_create_worker(0, "vpu_irq_thread");
		if (IS_ERR(dev->worker)) {
			dev_err(&pdev->dev, "failed to create vpu irq worker\n");
			ret = PTR_ERR(dev->worker);
			goto err_vdi_release;
		}
		dev->vpu_poll_interval = vpu_poll_interval;
		kthread_init_work(&dev->work, wave5_vpu_irq_work_fn);
	} else {
		ret = devm_request_threaded_irq(&pdev->dev, dev->irq, NULL,
						wave5_vpu_irq_thread, IRQF_ONESHOT, "vpu_irq", dev);
		if (ret) {
			dev_err(&pdev->dev, "Register interrupt handler, fail: %d\n", ret);
			goto err_enc_unreg;
		}
	}

	INIT_LIST_HEAD(&dev->instances);
	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "v4l2_device_register, fail: %d\n", ret);
		goto err_vdi_release;
	}

	if (match_data->flags & WAVE5_IS_DEC) {
		ret = wave5_vpu_dec_register_device(dev);
		if (ret) {
			dev_err(&pdev->dev, "wave5_vpu_dec_register_device, fail: %d\n", ret);
			goto err_v4l2_unregister;
		}
	}
	if (match_data->flags & WAVE5_IS_ENC) {
		ret = wave5_vpu_enc_register_device(dev);
		if (ret) {
			dev_err(&pdev->dev, "wave5_vpu_enc_register_device, fail: %d\n", ret);
			goto err_dec_unreg;
		}
	}

	ret = wave5_vpu_load_firmware(&pdev->dev, match_data->fw_name, &fw_revision);
	if (ret) {
		dev_err(&pdev->dev, "wave5_vpu_load_firmware, fail: %d\n", ret);
		goto err_enc_unreg;
	}

	dev_info(&pdev->dev, "Added wave5 driver with caps: %s %s\n",
		 (match_data->flags & WAVE5_IS_ENC) ? "'ENCODE'" : "",
		 (match_data->flags & WAVE5_IS_DEC) ? "'DECODE'" : "");
	dev_info(&pdev->dev, "Product Code: 0x%x\n", dev->product_code);
	dev_info(&pdev->dev, "Firmware Revision: %u\n", fw_revision);

	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	wave5_vpu_sleep_wake(&pdev->dev, true, NULL, 0);

	return 0;

err_enc_unreg:
	if (match_data->flags & WAVE5_IS_ENC)
		wave5_vpu_enc_unregister_device(dev);
err_dec_unreg:
	if (match_data->flags & WAVE5_IS_DEC)
		wave5_vpu_dec_unregister_device(dev);
err_v4l2_unregister:
	v4l2_device_unregister(&dev->v4l2_dev);
err_vdi_release:
	wave5_vdi_release(&pdev->dev);
err_clk_dis:
	clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
err_reset_assert:
	reset_control_assert(dev->resets);

	return ret;
}
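/*
 * Teardown: stop the polling worker and hrtimer when the interrupt fallback
 * was used, drop the runtime PM usage count, assert the resets and gate the
 * clocks, then unregister the encoder/decoder video devices and the V4L2
 * device before releasing the VDI resources.
 */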
"'DECODE'" : ""); 322 dev_info(&pdev->dev, "Product Code: 0x%x\n", dev->product_code); 323 dev_info(&pdev->dev, "Firmware Revision: %u\n", fw_revision); 324 325 pm_runtime_set_autosuspend_delay(&pdev->dev, 100); 326 pm_runtime_use_autosuspend(&pdev->dev); 327 pm_runtime_enable(&pdev->dev); 328 wave5_vpu_sleep_wake(&pdev->dev, true, NULL, 0); 329 330 return 0; 331 332 err_enc_unreg: 333 if (match_data->flags & WAVE5_IS_ENC) 334 wave5_vpu_enc_unregister_device(dev); 335 err_dec_unreg: 336 if (match_data->flags & WAVE5_IS_DEC) 337 wave5_vpu_dec_unregister_device(dev); 338 err_v4l2_unregister: 339 v4l2_device_unregister(&dev->v4l2_dev); 340 err_vdi_release: 341 wave5_vdi_release(&pdev->dev); 342 err_clk_dis: 343 clk_bulk_disable_unprepare(dev->num_clks, dev->clks); 344 err_reset_assert: 345 reset_control_assert(dev->resets); 346 347 return ret; 348 } 349 350 static void wave5_vpu_remove(struct platform_device *pdev) 351 { 352 struct vpu_device *dev = dev_get_drvdata(&pdev->dev); 353 354 if (dev->irq < 0) { 355 kthread_destroy_worker(dev->worker); 356 hrtimer_cancel(&dev->hrtimer); 357 } 358 359 pm_runtime_put_sync(&pdev->dev); 360 pm_runtime_disable(&pdev->dev); 361 362 mutex_destroy(&dev->dev_lock); 363 mutex_destroy(&dev->hw_lock); 364 reset_control_assert(dev->resets); 365 clk_bulk_disable_unprepare(dev->num_clks, dev->clks); 366 wave5_vpu_enc_unregister_device(dev); 367 wave5_vpu_dec_unregister_device(dev); 368 v4l2_device_unregister(&dev->v4l2_dev); 369 wave5_vdi_release(&pdev->dev); 370 ida_destroy(&dev->inst_ida); 371 } 372 373 static const struct wave5_match_data ti_wave521c_data = { 374 .flags = WAVE5_IS_ENC | WAVE5_IS_DEC, 375 .fw_name = "cnm/wave521c_k3_codec_fw.bin", 376 .sram_size = (64 * 1024), 377 }; 378 379 static const struct of_device_id wave5_dt_ids[] = { 380 { .compatible = "ti,j721s2-wave521c", .data = &ti_wave521c_data }, 381 { /* sentinel */ } 382 }; 383 MODULE_DEVICE_TABLE(of, wave5_dt_ids); 384 385 static struct platform_driver wave5_vpu_driver = { 386 .driver = { 387 .name = VPU_PLATFORM_DEVICE_NAME, 388 .of_match_table = of_match_ptr(wave5_dt_ids), 389 .pm = &wave5_pm_ops, 390 }, 391 .probe = wave5_vpu_probe, 392 .remove = wave5_vpu_remove, 393 }; 394 395 module_platform_driver(wave5_vpu_driver); 396 MODULE_DESCRIPTION("chips&media VPU V4L2 driver"); 397 MODULE_LICENSE("Dual BSD/GPL"); 398