Lines Matching +full:out +full:- +full:of +full:- +full:reset
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
6 * Suman Anna <s-anna@ti.com>
9 #include <linux/dma-mapping.h>
15 #include <linux/of.h>
19 #include <linux/omap-mailbox.h>
23 #include <linux/reset.h>
34 /* R5 TI-SCI Processor Configuration Flags */
48 /* R5 TI-SCI Processor Control Flags */
51 /* R5 TI-SCI Processor Status Flags */
64 * Single-CPU mode : AM64x SoCs only
65 * Single-Core mode : AM62x, AM62A SoCs
75 * struct k3_r5_soc_data - match data to handle SoC variations
77 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
78 * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
80 * @core_data: pointer to R5-core-specific device data
91 * struct k3_r5_cluster - K3 R5F Cluster structure
93 * @mode: Mode to configure the Cluster - Split or LockStep
94 * @cores: list of R5 cores within the cluster
96 * @soc_data: SoC-specific feature data for a R5FSS
107 * struct k3_r5_core - K3 R5 core structure
112 * @sram: on-chip SRAM memory regions data
113 * @num_sram: number of on-chip SRAM memory regions
117 * @released_from_reset: flag to signal when core is out of reset
136 ret = reset_control_assert(kproc->reset); in k3_r5_split_reset()
138 dev_err(kproc->dev, "local-reset assert failed, ret = %d\n", in k3_r5_split_reset()
143 ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, in k3_r5_split_reset()
144 kproc->ti_sci_id); in k3_r5_split_reset()
146 dev_err(kproc->dev, "module-reset assert failed, ret = %d\n", in k3_r5_split_reset()
148 if (reset_control_deassert(kproc->reset)) in k3_r5_split_reset()
149 dev_warn(kproc->dev, "local-reset deassert back failed\n"); in k3_r5_split_reset()
159 ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, in k3_r5_split_release()
160 kproc->ti_sci_id); in k3_r5_split_release()
162 dev_err(kproc->dev, "module-reset deassert failed, ret = %d\n", in k3_r5_split_release()
167 ret = reset_control_deassert(kproc->reset); in k3_r5_split_release()
169 dev_err(kproc->dev, "local-reset deassert failed, ret = %d\n", in k3_r5_split_release()
171 if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, in k3_r5_split_release()
172 kproc->ti_sci_id)) in k3_r5_split_release()
173 dev_warn(kproc->dev, "module-reset assert back failed\n"); in k3_r5_split_release()
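The split-mode helpers above pair the TI-SCI module reset (get_device/put_device) with the per-core local reset control, and undo the first step whenever the second one fails. Below is a minimal standalone sketch of that two-step-with-rollback pattern; the helper functions are hypothetical stand-ins, not the driver's TI-SCI or reset-control calls.

#include <stdio.h>

/* Hypothetical stand-ins for the TI-SCI module reset and the local
 * reset-control operations used by the driver. */
static int module_reset_release(void) { return 0; } /* ti_sci get_device */
static int module_reset_assert(void)  { return 0; } /* ti_sci put_device  */
static int local_reset_deassert(void) { return 0; } /* reset_control_deassert */
static int local_reset_assert(void)   { return 0; } /* reset_control_assert   */

/* Release: module reset first, then local reset; roll back on failure. */
static int split_release(void)
{
	int ret = module_reset_release();

	if (ret)
		return ret;

	ret = local_reset_deassert();
	if (ret && module_reset_assert())
		fprintf(stderr, "module-reset assert back failed\n");

	return ret;
}

/* Reset: local reset first, then module reset; roll back on failure. */
static int split_reset(void)
{
	int ret = local_reset_assert();

	if (ret)
		return ret;

	ret = module_reset_assert();
	if (ret && local_reset_deassert())
		fprintf(stderr, "local-reset deassert back failed\n");

	return ret;
}

int main(void)
{
	return split_release() || split_reset();
}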
185 /* assert local reset on all applicable cores */ in k3_r5_lockstep_reset()
186 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
187 ret = reset_control_assert(core->kproc->reset); in k3_r5_lockstep_reset()
189 dev_err(core->dev, "local-reset assert failed, ret = %d\n", in k3_r5_lockstep_reset()
197 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
198 kproc = core->kproc; in k3_r5_lockstep_reset()
199 ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, in k3_r5_lockstep_reset()
200 kproc->ti_sci_id); in k3_r5_lockstep_reset()
202 dev_err(core->dev, "module-reset assert failed, ret = %d\n", in k3_r5_lockstep_reset()
211 list_for_each_entry_continue_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
212 kproc = core->kproc; in k3_r5_lockstep_reset()
213 if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, in k3_r5_lockstep_reset()
214 kproc->ti_sci_id)) in k3_r5_lockstep_reset()
215 dev_warn(core->dev, "module-reset assert back failed\n"); in k3_r5_lockstep_reset()
217 core = list_last_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_lockstep_reset()
219 list_for_each_entry_from_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_reset()
220 if (reset_control_deassert(core->kproc->reset)) in k3_r5_lockstep_reset()
221 dev_warn(core->dev, "local-reset deassert back failed\n"); in k3_r5_lockstep_reset()
234 list_for_each_entry_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
235 kproc = core->kproc; in k3_r5_lockstep_release()
236 ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, in k3_r5_lockstep_release()
237 kproc->ti_sci_id); in k3_r5_lockstep_release()
239 dev_err(core->dev, "module-reset deassert failed, ret = %d\n", in k3_r5_lockstep_release()
246 /* deassert local reset on all applicable cores */ in k3_r5_lockstep_release()
247 list_for_each_entry_reverse(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
248 ret = reset_control_deassert(core->kproc->reset); in k3_r5_lockstep_release()
250 dev_err(core->dev, "local-reset deassert failed, ret = %d\n", in k3_r5_lockstep_release()
259 list_for_each_entry_continue(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
260 if (reset_control_assert(core->kproc->reset)) in k3_r5_lockstep_release()
261 dev_warn(core->dev, "local-reset assert back failed\n"); in k3_r5_lockstep_release()
263 core = list_first_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_lockstep_release()
265 list_for_each_entry_from(core, &cluster->cores, elem) { in k3_r5_lockstep_release()
266 kproc = core->kproc; in k3_r5_lockstep_release()
267 if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, in k3_r5_lockstep_release()
268 kproc->ti_sci_id)) in k3_r5_lockstep_release()
269 dev_warn(core->dev, "module-reset assert back failed\n"); in k3_r5_lockstep_release()
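LockStep mode applies the same two steps phase by phase across the whole cluster instead of per core: every module reset is released (Core1 down to Core0) before any local reset is touched, and a failure unwinds the already-completed steps in the opposite order. A simplified standalone sketch of that ordering for a two-core cluster, again with made-up per-core helpers and an unwind that only re-asserts what actually completed:

#include <stdio.h>

#define NUM_CORES 2

/* Hypothetical per-core stand-ins for the driver's reset operations. */
static int local_reset_assert(int core)   { (void)core; return 0; }
static int local_reset_deassert(int core) { (void)core; return 0; }
static int module_reset_assert(int core)  { (void)core; return 0; }
static int module_reset_release(int core) { (void)core; return 0; }

/*
 * Release the cluster for LockStep mode: deassert the module reset on every
 * core (core1 down to core0) before touching any local reset, then deassert
 * the local resets in the same order. On failure, undo the completed steps
 * in the opposite order.
 */
static int lockstep_release(void)
{
	int i;

	for (i = NUM_CORES - 1; i >= 0; i--)
		if (module_reset_release(i))
			goto unroll_module_reset;

	for (i = NUM_CORES - 1; i >= 0; i--)
		if (local_reset_deassert(i))
			goto unroll_local_reset;

	return 0;

unroll_local_reset:
	for (i++; i < NUM_CORES; i++)
		if (local_reset_assert(i))
			fprintf(stderr, "local-reset assert back failed\n");
	i = -1;			/* every module reset was released; undo all */
unroll_module_reset:
	for (i++; i < NUM_CORES; i++)
		if (module_reset_assert(i))
			fprintf(stderr, "module-reset assert back failed\n");
	return -1;
}

int main(void)
{
	return lockstep_release();
}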
277 return ti_sci_proc_set_control(kproc->tsp, in k3_r5_core_halt()
283 return ti_sci_proc_set_control(kproc->tsp, in k3_r5_core_run()
288 * The R5F cores have controls for both a reset and a halt/run. The code
289 * execution from DDR requires the initial boot-strapping code to be run
295 * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to
299 * cluster in this mode. The function uses the same reset logic as LockStep
300 * mode for this (though the behavior is agnostic of the reset release order).
305 struct k3_rproc *kproc = rproc->priv; in k3_r5_rproc_prepare()
306 struct k3_r5_core *core = kproc->priv, *core0, *core1; in k3_r5_rproc_prepare()
307 struct k3_r5_cluster *cluster = core->cluster; in k3_r5_rproc_prepare()
308 struct device *dev = kproc->dev; in k3_r5_rproc_prepare()
317 * power up before proceeding to core1, with a timeout of 2 seconds. This in k3_r5_rproc_prepare()
324 core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_rproc_prepare()
325 core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_rproc_prepare()
326 if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1 && in k3_r5_rproc_prepare()
327 !core0->released_from_reset) { in k3_r5_rproc_prepare()
328 ret = wait_event_interruptible_timeout(cluster->core_transition, in k3_r5_rproc_prepare()
329 core0->released_from_reset, in k3_r5_rproc_prepare()
333 return -EPERM; in k3_r5_rproc_prepare()
337 ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl, &stat); in k3_r5_rproc_prepare()
342 /* Re-use LockStep-mode reset logic for Single-CPU mode */ in k3_r5_rproc_prepare()
343 ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP || in k3_r5_rproc_prepare()
344 cluster->mode == CLUSTER_MODE_SINGLECPU) ? in k3_r5_rproc_prepare()
356 core->released_from_reset = true; in k3_r5_rproc_prepare()
358 wake_up_interruptible(&cluster->core_transition); in k3_r5_rproc_prepare()
361 * Newer IP revisions like on J7200 SoCs support h/w auto-initialization in k3_r5_rproc_prepare()
362 * of TCMs, so there is no need to perform the s/w memzero. This bit is in k3_r5_rproc_prepare()
364 * auto-init, but account for it in case it is disabled in k3_r5_rproc_prepare()
366 if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) { in k3_r5_rproc_prepare()
372 * Zero out both TCMs unconditionally (access from v8 Arm core is not in k3_r5_rproc_prepare()
376 dev_dbg(dev, "zeroing out ATCM memory\n"); in k3_r5_rproc_prepare()
377 memset_io(kproc->mem[0].cpu_addr, 0x00, kproc->mem[0].size); in k3_r5_rproc_prepare()
379 dev_dbg(dev, "zeroing out BTCM memory\n"); in k3_r5_rproc_prepare()
380 memset_io(kproc->mem[1].cpu_addr, 0x00, kproc->mem[1].size); in k3_r5_rproc_prepare()
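The tail of the prepare path above zeroes the TCMs so ECC is effective, unless the SoC revision auto-initializes them in hardware and that auto-init has not been disabled (the mem_init_dis check above). A small standalone sketch of that decision, with plain memset() standing in for memset_io() and the config flag reduced to a boolean:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/*
 * Zero the TCMs so ECC is effective, unless the SoC revision auto-initializes
 * them in hardware and that feature has not been disabled in the processor
 * boot configuration (modeled here as a plain boolean).
 */
static void tcm_ecc_init(void *atcm, size_t atcm_size,
			 void *btcm, size_t btcm_size,
			 bool soc_tcm_ecc_autoinit, bool mem_init_disabled)
{
	if (soc_tcm_ecc_autoinit && !mem_init_disabled)
		return;		/* hardware already zeroed the TCMs */

	memset(atcm, 0, atcm_size);	/* memset_io() in the driver */
	memset(btcm, 0, btcm_size);
}

int main(void)
{
	static char atcm[32 * 1024], btcm[32 * 1024];

	tcm_ecc_init(atcm, sizeof(atcm), btcm, sizeof(btcm), false, false);
	return 0;
}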
387 * operations to that of the .prepare() ops. The function is used to assert the
389 * or Split mode). This completes the second portion of powering down the R5F
394 * The Single-CPU mode on applicable SoCs (eg: AM64x) combines the TCMs from
396 * both cores, but with only Core0 unhalted. This function re-uses the same
397 * reset assert logic as LockStep mode for this mode (though the behavior is
398 * agnostic of the reset assert order). This callback is invoked only in
403 struct k3_rproc *kproc = rproc->priv; in k3_r5_rproc_unprepare()
404 struct k3_r5_core *core = kproc->priv, *core0, *core1; in k3_r5_rproc_unprepare()
405 struct k3_r5_cluster *cluster = core->cluster; in k3_r5_rproc_unprepare()
406 struct device *dev = kproc->dev; in k3_r5_rproc_unprepare()
410 * Ensure power-down of cores is sequential in split mode. Core1 must in k3_r5_rproc_unprepare()
416 core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_rproc_unprepare()
417 core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_rproc_unprepare()
418 if (cluster->mode == CLUSTER_MODE_SPLIT && core == core0 && in k3_r5_rproc_unprepare()
419 core1->released_from_reset) { in k3_r5_rproc_unprepare()
420 ret = wait_event_interruptible_timeout(cluster->core_transition, in k3_r5_rproc_unprepare()
421 !core1->released_from_reset, in k3_r5_rproc_unprepare()
425 return -EPERM; in k3_r5_rproc_unprepare()
429 /* Re-use LockStep-mode reset logic for Single-CPU mode */ in k3_r5_rproc_unprepare()
430 ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP || in k3_r5_rproc_unprepare()
431 cluster->mode == CLUSTER_MODE_SINGLECPU) ? in k3_r5_rproc_unprepare()
440 core->released_from_reset = false; in k3_r5_rproc_unprepare()
442 wake_up_interruptible(&cluster->core_transition); in k3_r5_rproc_unprepare()
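The waitqueue logic in the prepare and unprepare paths enforces the split-mode power-sequencing rule spelled out in the comments: Core1 may come out of reset only after Core0, and Core0 may go back into reset only after Core1. A trivial sketch of those two ordering predicates (the driver additionally bounds the wait with a 2 second wait_event_interruptible_timeout()):

#include <stdbool.h>

/* Core1 powers up only once Core0 has been released from reset. */
static bool core1_may_power_up(bool core0_released_from_reset)
{
	return core0_released_from_reset;
}

/* Core0 powers down only once Core1 has gone back into reset. */
static bool core0_may_power_down(bool core1_released_from_reset)
{
	return !core1_released_from_reset;
}

int main(void)
{
	/* core0 up first, core1 down first */
	return !(core1_may_power_up(true) && core0_may_power_down(false));
}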
454 * unhalt both the cores to start the execution - Core1 needs to be unhalted
455 * first followed by Core0. The Split-mode requires Core0 to be maintained in k3_r5_rproc_start()
459 * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
461 * flow as Split-mode for this. This callback is invoked only in remoteproc
466 struct k3_rproc *kproc = rproc->priv; in k3_r5_rproc_start()
467 struct k3_r5_core *core = kproc->priv; in k3_r5_rproc_start()
468 struct k3_r5_cluster *cluster = core->cluster; in k3_r5_rproc_start()
469 struct device *dev = kproc->dev; in k3_r5_rproc_start()
473 boot_addr = rproc->bootaddr; in k3_r5_rproc_start()
478 ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0); in k3_r5_rproc_start()
483 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { in k3_r5_rproc_start()
484 list_for_each_entry_reverse(core, &cluster->cores, elem) { in k3_r5_rproc_start()
485 ret = k3_r5_core_run(core->kproc); in k3_r5_rproc_start()
490 ret = k3_r5_core_run(core->kproc); in k3_r5_rproc_start()
498 list_for_each_entry_continue(core, &cluster->cores, elem) { in k3_r5_rproc_start()
499 if (k3_r5_core_halt(core->kproc)) in k3_r5_rproc_start()
500 dev_warn(core->dev, "core halt back failed\n"); in k3_r5_rproc_start()
510 * of cores on which the operations are performed is also, in general, the reverse of that in k3_r5_rproc_stop()
511 * of the start function. The LockStep mode requires each operation to be
512 * performed first on Core0 followed by Core1. The Split-mode requires that
516 * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
518 * flow as Split-mode for this.
522 * deasserting the reset the subsequent time. Asserting the reset can in k3_r5_rproc_stop()
523 * be done here, but it is preferred to do so in the .unprepare() ops - this in k3_r5_rproc_stop()
531 struct k3_rproc *kproc = rproc->priv; in k3_r5_rproc_stop()
532 struct k3_r5_core *core = kproc->priv; in k3_r5_rproc_stop()
533 struct k3_r5_cluster *cluster = core->cluster; in k3_r5_rproc_stop()
537 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { in k3_r5_rproc_stop()
538 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_rproc_stop()
539 ret = k3_r5_core_halt(core->kproc); in k3_r5_rproc_stop()
546 ret = k3_r5_core_halt(core->kproc); in k3_r5_rproc_stop()
548 goto out; in k3_r5_rproc_stop()
554 list_for_each_entry_from_reverse(core, &cluster->cores, elem) { in k3_r5_rproc_stop()
555 if (k3_r5_core_run(core->kproc)) in k3_r5_rproc_stop()
556 dev_warn(core->dev, "core run back failed\n"); in k3_r5_rproc_stop()
558 out: in k3_r5_rproc_stop()
572 struct k3_rproc *kproc = rproc->priv; in k3_r5_rproc_da_to_va()
573 struct k3_r5_core *core = kproc->priv; in k3_r5_rproc_da_to_va()
582 /* handle any SRAM regions using SoC-view addresses */ in k3_r5_rproc_da_to_va()
583 for (i = 0; i < core->num_sram; i++) { in k3_r5_rproc_da_to_va()
584 dev_addr = core->sram[i].dev_addr; in k3_r5_rproc_da_to_va()
585 size = core->sram[i].size; in k3_r5_rproc_da_to_va()
588 offset = da - dev_addr; in k3_r5_rproc_da_to_va()
589 va = core->sram[i].cpu_addr + offset; in k3_r5_rproc_da_to_va()
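The loop above translates a device address by finding the SRAM region that contains the requested window and adding the same offset to the region's CPU-side mapping. A self-contained illustration of that translation, assuming the usual containment check and using made-up region values:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct sram_region {
	uint64_t dev_addr;	/* SoC-view device address of the region */
	size_t size;		/* region size in bytes */
	uint8_t *cpu_addr;	/* CPU-side mapping of the region */
};

/* Return the CPU address for a [da, da + len) window, or NULL if no
 * region fully contains it. */
static void *da_to_va(const struct sram_region *regions, int num,
		      uint64_t da, size_t len)
{
	for (int i = 0; i < num; i++) {
		uint64_t dev_addr = regions[i].dev_addr;
		size_t size = regions[i].size;

		if (da >= dev_addr && da + len <= dev_addr + size)
			return regions[i].cpu_addr + (da - dev_addr);
	}
	return NULL;
}

int main(void)
{
	static uint8_t backing[0x40000];	/* pretend ioremap'ed SRAM */
	const struct sram_region sram[] = {
		{ .dev_addr = 0x70000000, .size = sizeof(backing),
		  .cpu_addr = backing },
	};

	/* 0x70010000 lies 0x10000 bytes into the example region */
	printf("offset = 0x%tx\n",
	       (uint8_t *)da_to_va(sram, 1, 0x70010000, 0x100) - backing);
	return 0;
}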
610 * Each R5FSS has a cluster-level setting for configuring the processor
611 * subsystem either in a safety/fault-tolerant LockStep mode or a performance
612 * oriented Split mode on most SoCs. A few SoCs support a non-safety mode in k3_r5_rproc_configure()
614 * called Single-CPU mode. Each R5F core has a number of settings to either
615 * enable/disable each of the TCMs, control which TCM appears at the R5F core's
620 * This function is used to pre-configure these settings for each R5F core, and
626 * once (in LockStep or Single-CPU modes) or twice (in Split mode). Support in k3_r5_rproc_configure()
627 * for LockStep-mode is dictated by an eFUSE register bit, and the config
630 * supports a Single-CPU mode. All cluster level settings like Cluster mode and
638 * This is overcome by switching to Split-mode initially and then programming
644 struct k3_r5_core *temp, *core0, *core = kproc->priv; in k3_r5_rproc_configure()
645 struct k3_r5_cluster *cluster = core->cluster; in k3_r5_rproc_configure()
646 struct device *dev = kproc->dev; in k3_r5_rproc_configure()
654 core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_rproc_configure()
655 if (cluster->mode == CLUSTER_MODE_LOCKSTEP || in k3_r5_rproc_configure()
656 cluster->mode == CLUSTER_MODE_SINGLECPU || in k3_r5_rproc_configure()
657 cluster->mode == CLUSTER_MODE_SINGLECORE) { in k3_r5_rproc_configure()
660 core = kproc->priv; in k3_r5_rproc_configure()
663 ret = ti_sci_proc_get_status(core->kproc->tsp, &boot_vec, &cfg, &ctrl, in k3_r5_rproc_configure()
675 if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) { in k3_r5_rproc_configure()
676 dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n"); in k3_r5_rproc_configure()
677 cluster->mode = CLUSTER_MODE_SINGLECPU; in k3_r5_rproc_configure()
681 if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) { in k3_r5_rproc_configure()
682 dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n"); in k3_r5_rproc_configure()
683 cluster->mode = CLUSTER_MODE_SPLIT; in k3_r5_rproc_configure()
691 * Single-CPU configuration bit can only be configured in k3_r5_rproc_configure()
696 if (cluster->mode == CLUSTER_MODE_SINGLECPU || in k3_r5_rproc_configure()
697 cluster->mode == CLUSTER_MODE_SINGLECORE) { in k3_r5_rproc_configure()
701 * LockStep configuration bit is Read-only on Split-mode in k3_r5_rproc_configure()
711 if (core->atcm_enable) in k3_r5_rproc_configure()
716 if (core->btcm_enable) in k3_r5_rproc_configure()
721 if (core->loczrama) in k3_r5_rproc_configure()
726 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { in k3_r5_rproc_configure()
732 list_for_each_entry(temp, &cluster->cores, elem) { in k3_r5_rproc_configure()
733 ret = k3_r5_core_halt(temp->kproc); in k3_r5_rproc_configure()
735 goto out; in k3_r5_rproc_configure()
741 ret = ti_sci_proc_set_config(temp->kproc->tsp, boot_vec, in k3_r5_rproc_configure()
744 goto out; in k3_r5_rproc_configure()
749 ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec, in k3_r5_rproc_configure()
752 ret = k3_r5_core_halt(core->kproc); in k3_r5_rproc_configure()
754 goto out; in k3_r5_rproc_configure()
756 ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec, in k3_r5_rproc_configure()
760 out: in k3_r5_rproc_configure()
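The configure path above builds a pair of set/clear configuration masks from the per-core atcm_enable, btcm_enable and loczrama flags plus the cluster mode, and applies them through ti_sci_proc_set_config() while the cores are halted. A simplified standalone sketch of how such masks can be assembled; the bit positions here are illustrative placeholders, not the driver's TI-SCI processor-config flag values:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative bit positions only; the driver uses its TI-SCI R5
 * processor-config flag definitions instead. */
#define CFG_ATCM_EN	(1u << 0)
#define CFG_BTCM_EN	(1u << 1)
#define CFG_TCM_RSTBASE	(1u << 2)	/* ATCM at address 0 (loczrama) */
#define CFG_LOCKSTEP	(1u << 3)

struct core_cfg {
	bool atcm_enable;
	bool btcm_enable;
	bool loczrama;
	bool lockstep;
};

/* Every knob is either explicitly set or explicitly cleared so stale
 * configuration cannot leak through. */
static void build_cfg_masks(const struct core_cfg *c,
			    uint32_t *set_cfg, uint32_t *clr_cfg)
{
	*set_cfg = 0;
	*clr_cfg = 0;

	if (c->atcm_enable)
		*set_cfg |= CFG_ATCM_EN;
	else
		*clr_cfg |= CFG_ATCM_EN;

	if (c->btcm_enable)
		*set_cfg |= CFG_BTCM_EN;
	else
		*clr_cfg |= CFG_BTCM_EN;

	if (c->loczrama)
		*set_cfg |= CFG_TCM_RSTBASE;
	else
		*clr_cfg |= CFG_TCM_RSTBASE;

	if (c->lockstep)
		*set_cfg |= CFG_LOCKSTEP;
	else
		*clr_cfg |= CFG_LOCKSTEP;
}

int main(void)
{
	const struct core_cfg cfg = { .btcm_enable = true, .loczrama = true };
	uint32_t set, clr;

	build_cfg_masks(&cfg, &set, &clr);
	return !(set & CFG_BTCM_EN) || (set & CFG_ATCM_EN);
}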
765 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
767 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
768 * LockStep-mode. The newer revisions of the R5FSS IP maximize these TCMs by in k3_r5_adjust_tcm_sizes()
770 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs, Single-CPU mode on
779 struct k3_r5_core *core0, *core = kproc->priv; in k3_r5_adjust_tcm_sizes()
780 struct k3_r5_cluster *cluster = core->cluster; in k3_r5_adjust_tcm_sizes()
781 struct device *cdev = core->dev; in k3_r5_adjust_tcm_sizes()
783 if (cluster->mode == CLUSTER_MODE_LOCKSTEP || in k3_r5_adjust_tcm_sizes()
784 cluster->mode == CLUSTER_MODE_SINGLECPU || in k3_r5_adjust_tcm_sizes()
785 cluster->mode == CLUSTER_MODE_SINGLECORE || in k3_r5_adjust_tcm_sizes()
786 !cluster->soc_data->tcm_is_double) in k3_r5_adjust_tcm_sizes()
789 core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_adjust_tcm_sizes()
791 WARN_ON(kproc->mem[0].size != SZ_64K); in k3_r5_adjust_tcm_sizes()
792 WARN_ON(kproc->mem[1].size != SZ_64K); in k3_r5_adjust_tcm_sizes()
794 kproc->mem[0].size /= 2; in k3_r5_adjust_tcm_sizes()
795 kproc->mem[1].size /= 2; in k3_r5_adjust_tcm_sizes()
798 kproc->mem[0].size, kproc->mem[1].size); in k3_r5_adjust_tcm_sizes()
803 * This function checks and configures an R5F core for IPC-only or remoteproc in k3_r5_rproc_configure_mode()
804 * mode. The driver is configured to be in IPC-only mode for an R5F core when in k3_r5_rproc_configure_mode()
805 * the core has been loaded and started by a bootloader. The IPC-only mode is
806 * detected by querying the System Firmware for reset, power on and halt status
808 * are validated and errored out.
810 * In IPC-only mode, the driver state flags for ATCM, BTCM and LOCZRAMA settings
817 struct k3_r5_core *core0, *core = kproc->priv; in k3_r5_rproc_configure_mode()
818 struct k3_r5_cluster *cluster = core->cluster; in k3_r5_rproc_configure_mode()
819 struct device *cdev = core->dev; in k3_r5_rproc_configure_mode()
824 enum cluster_mode mode = cluster->mode; in k3_r5_rproc_configure_mode()
828 core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_rproc_configure_mode()
830 ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id, in k3_r5_rproc_configure_mode()
842 reset_ctrl_status = reset_control_status(kproc->reset); in k3_r5_rproc_configure_mode()
844 dev_err(cdev, "failed to get initial local reset status, ret = %d\n", in k3_r5_rproc_configure_mode()
850 * Skip the waiting mechanism for sequential power-on of cores if the in k3_r5_rproc_configure_mode()
853 core->released_from_reset = c_state; in k3_r5_rproc_configure_mode()
855 ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl, in k3_r5_rproc_configure_mode()
876 * IPC-only mode detection requires both local and module resets to in k3_r5_rproc_configure_mode()
877 * be deasserted and R5F core to be unhalted. Local reset status is in k3_r5_rproc_configure_mode()
878 * irrelevant if module reset is asserted (POR value has local reset in k3_r5_rproc_configure_mode()
882 dev_info(cdev, "configured R5F for IPC-only mode\n"); in k3_r5_rproc_configure_mode()
883 kproc->rproc->state = RPROC_DETACHED; in k3_r5_rproc_configure_mode()
885 /* override rproc ops with only required IPC-only mode ops */ in k3_r5_rproc_configure_mode()
886 kproc->rproc->ops->prepare = NULL; in k3_r5_rproc_configure_mode()
887 kproc->rproc->ops->unprepare = NULL; in k3_r5_rproc_configure_mode()
888 kproc->rproc->ops->start = NULL; in k3_r5_rproc_configure_mode()
889 kproc->rproc->ops->stop = NULL; in k3_r5_rproc_configure_mode()
890 kproc->rproc->ops->attach = k3_rproc_attach; in k3_r5_rproc_configure_mode()
891 kproc->rproc->ops->detach = k3_rproc_detach; in k3_r5_rproc_configure_mode()
892 kproc->rproc->ops->get_loaded_rsc_table = in k3_r5_rproc_configure_mode()
902 ret = -EINVAL; in k3_r5_rproc_configure_mode()
905 /* fixup TCMs, cluster & core flags to actual values in IPC-only mode */ in k3_r5_rproc_configure_mode()
908 cluster->mode = mode; in k3_r5_rproc_configure_mode()
909 core->atcm_enable = atcm_enable; in k3_r5_rproc_configure_mode()
910 core->btcm_enable = btcm_enable; in k3_r5_rproc_configure_mode()
911 core->loczrama = loczrama; in k3_r5_rproc_configure_mode()
912 kproc->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR; in k3_r5_rproc_configure_mode()
913 kproc->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0; in k3_r5_rproc_configure_mode()
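The detection above only switches to IPC-only (attach) mode when the module reset is released, the local reset is deasserted and the core is unhalted; any other combination leaves the driver in remoteproc mode. A tiny boolean sketch of that decision:

#include <stdbool.h>
#include <stdio.h>

/*
 * IPC-only (attach) mode requires the module reset to be released, the local
 * reset to be deasserted and the core to be unhalted; anything else means the
 * bootloader did not fully start the core, so remoteproc mode is used.
 */
static bool core_booted_by_bootloader(bool module_powered_on,
				      bool local_reset_asserted,
				      bool core_halted)
{
	return module_powered_on && !local_reset_asserted && !core_halted;
}

int main(void)
{
	printf("%s\n", core_booted_by_bootloader(true, false, false) ?
	       "IPC-only mode" : "remoteproc mode");
	return 0;
}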
922 const struct k3_rproc_dev_data *data = kproc->data; in k3_r5_core_of_get_internal_memories()
923 struct device *dev = &pdev->dev; in k3_r5_core_of_get_internal_memories()
924 struct k3_r5_core *core = kproc->priv; in k3_r5_core_of_get_internal_memories()
928 num_mems = data->num_mems; in k3_r5_core_of_get_internal_memories()
929 kproc->mem = devm_kcalloc(kproc->dev, num_mems, sizeof(*kproc->mem), in k3_r5_core_of_get_internal_memories()
931 if (!kproc->mem) in k3_r5_core_of_get_internal_memories()
932 return -ENOMEM; in k3_r5_core_of_get_internal_memories()
948 if (!strcmp(data->mems[i].name, "atcm")) { in k3_r5_core_of_get_internal_memories()
949 kproc->mem[i].dev_addr = core->loczrama ? in k3_r5_core_of_get_internal_memories()
952 kproc->mem[i].dev_addr = core->loczrama ? in k3_r5_core_of_get_internal_memories()
956 dev_dbg(dev, "Updating bus addr %pa of memory %5s\n", in k3_r5_core_of_get_internal_memories()
957 &kproc->mem[i].bus_addr, data->mems[i].name); in k3_r5_core_of_get_internal_memories()
966 struct device_node *np = pdev->dev.of_node; in k3_r5_core_of_get_sram_memories()
967 struct device *dev = &pdev->dev; in k3_r5_core_of_get_sram_memories()
975 dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n", in k3_r5_core_of_get_sram_memories()
980 core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL); in k3_r5_core_of_get_sram_memories()
981 if (!core->sram) in k3_r5_core_of_get_sram_memories()
982 return -ENOMEM; in k3_r5_core_of_get_sram_memories()
987 return -EINVAL; in k3_r5_core_of_get_sram_memories()
991 return -EINVAL; in k3_r5_core_of_get_sram_memories()
997 return -EINVAL; in k3_r5_core_of_get_sram_memories()
999 core->sram[i].bus_addr = res.start; in k3_r5_core_of_get_sram_memories()
1000 core->sram[i].dev_addr = res.start; in k3_r5_core_of_get_sram_memories()
1001 core->sram[i].size = resource_size(&res); in k3_r5_core_of_get_sram_memories()
1002 core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start, in k3_r5_core_of_get_sram_memories()
1004 if (!core->sram[i].cpu_addr) { in k3_r5_core_of_get_sram_memories()
1007 return -ENOMEM; in k3_r5_core_of_get_sram_memories()
1011 i, &core->sram[i].bus_addr, in k3_r5_core_of_get_sram_memories()
1012 core->sram[i].size, core->sram[i].cpu_addr, in k3_r5_core_of_get_sram_memories()
1013 core->sram[i].dev_addr); in k3_r5_core_of_get_sram_memories()
1015 core->num_sram = num_sram; in k3_r5_core_of_get_sram_memories()
1023 struct device *dev = &pdev->dev; in k3_r5_cluster_rproc_init()
1032 core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_cluster_rproc_init()
1033 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_cluster_rproc_init()
1034 cdev = core->dev; in k3_r5_cluster_rproc_init()
1038 dev_err(dev, "failed to parse firmware-name property, ret = %d\n", in k3_r5_cluster_rproc_init()
1040 goto out; in k3_r5_cluster_rproc_init()
1046 ret = -ENOMEM; in k3_r5_cluster_rproc_init()
1047 goto out; in k3_r5_cluster_rproc_init()
1051 rproc->has_iommu = false; in k3_r5_cluster_rproc_init()
1053 rproc->recovery_disabled = true; in k3_r5_cluster_rproc_init()
1055 kproc = rproc->priv; in k3_r5_cluster_rproc_init()
1056 kproc->priv = core; in k3_r5_cluster_rproc_init()
1057 kproc->dev = cdev; in k3_r5_cluster_rproc_init()
1058 kproc->rproc = rproc; in k3_r5_cluster_rproc_init()
1059 kproc->data = cluster->soc_data->core_data; in k3_r5_cluster_rproc_init()
1060 core->kproc = kproc; in k3_r5_cluster_rproc_init()
1062 kproc->ti_sci = devm_ti_sci_get_by_phandle(cdev, "ti,sci"); in k3_r5_cluster_rproc_init()
1063 if (IS_ERR(kproc->ti_sci)) { in k3_r5_cluster_rproc_init()
1064 ret = dev_err_probe(cdev, PTR_ERR(kproc->ti_sci), in k3_r5_cluster_rproc_init()
1065 "failed to get ti-sci handle\n"); in k3_r5_cluster_rproc_init()
1066 kproc->ti_sci = NULL; in k3_r5_cluster_rproc_init()
1067 goto out; in k3_r5_cluster_rproc_init()
1070 ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id); in k3_r5_cluster_rproc_init()
1072 dev_err(cdev, "missing 'ti,sci-dev-id' property\n"); in k3_r5_cluster_rproc_init()
1073 goto out; in k3_r5_cluster_rproc_init()
1076 kproc->reset = devm_reset_control_get_exclusive(cdev, NULL); in k3_r5_cluster_rproc_init()
1077 if (IS_ERR_OR_NULL(kproc->reset)) { in k3_r5_cluster_rproc_init()
1078 ret = PTR_ERR_OR_ZERO(kproc->reset); in k3_r5_cluster_rproc_init()
1080 ret = -ENODEV; in k3_r5_cluster_rproc_init()
1081 dev_err_probe(cdev, ret, "failed to get reset handle\n"); in k3_r5_cluster_rproc_init()
1082 goto out; in k3_r5_cluster_rproc_init()
1085 kproc->tsp = ti_sci_proc_of_get_tsp(cdev, kproc->ti_sci); in k3_r5_cluster_rproc_init()
1086 if (IS_ERR(kproc->tsp)) { in k3_r5_cluster_rproc_init()
1087 ret = dev_err_probe(cdev, PTR_ERR(kproc->tsp), in k3_r5_cluster_rproc_init()
1088 "failed to construct ti-sci proc control\n"); in k3_r5_cluster_rproc_init()
1089 goto out; in k3_r5_cluster_rproc_init()
1096 goto out; in k3_r5_cluster_rproc_init()
1099 ret = ti_sci_proc_request(kproc->tsp); in k3_r5_cluster_rproc_init()
1102 goto out; in k3_r5_cluster_rproc_init()
1105 ret = devm_add_action_or_reset(cdev, k3_release_tsp, kproc->tsp); in k3_r5_cluster_rproc_init()
1107 goto out; in k3_r5_cluster_rproc_init()
1110 list_for_each_entry(core, &cluster->cores, elem) { in k3_r5_cluster_rproc_init()
1111 cdev = core->dev; in k3_r5_cluster_rproc_init()
1112 kproc = core->kproc; in k3_r5_cluster_rproc_init()
1113 rproc = kproc->rproc; in k3_r5_cluster_rproc_init()
1121 goto out; in k3_r5_cluster_rproc_init()
1129 goto out; in k3_r5_cluster_rproc_init()
1139 goto out; in k3_r5_cluster_rproc_init()
1145 goto out; in k3_r5_cluster_rproc_init()
1148 /* create only one rproc in lockstep, single-cpu or in k3_r5_cluster_rproc_init()
1151 if (cluster->mode == CLUSTER_MODE_LOCKSTEP || in k3_r5_cluster_rproc_init()
1152 cluster->mode == CLUSTER_MODE_SINGLECPU || in k3_r5_cluster_rproc_init()
1153 cluster->mode == CLUSTER_MODE_SINGLECORE) in k3_r5_cluster_rproc_init()
1160 if (rproc->state == RPROC_ATTACHED) { in k3_r5_cluster_rproc_init()
1163 dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", in k3_r5_cluster_rproc_init()
1169 out: in k3_r5_cluster_rproc_init()
1170 /* undo core0 upon any failures on core1 in split-mode */ in k3_r5_cluster_rproc_init()
1171 if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) { in k3_r5_cluster_rproc_init()
1173 kproc = core->kproc; in k3_r5_cluster_rproc_init()
1174 rproc = kproc->rproc; in k3_r5_cluster_rproc_init()
1189 * lockstep and single-cpu modes have only one rproc associated in k3_r5_cluster_rproc_exit()
1190 * with the first core, whereas split-mode has two rprocs associated with in k3_r5_cluster_rproc_exit()
1193 core = (cluster->mode == CLUSTER_MODE_LOCKSTEP || in k3_r5_cluster_rproc_exit()
1194 cluster->mode == CLUSTER_MODE_SINGLECPU) ? in k3_r5_cluster_rproc_exit()
1195 list_first_entry(&cluster->cores, struct k3_r5_core, elem) : in k3_r5_cluster_rproc_exit()
1196 list_last_entry(&cluster->cores, struct k3_r5_core, elem); in k3_r5_cluster_rproc_exit()
1198 list_for_each_entry_from_reverse(core, &cluster->cores, elem) { in k3_r5_cluster_rproc_exit()
1199 kproc = core->kproc; in k3_r5_cluster_rproc_exit()
1200 rproc = kproc->rproc; in k3_r5_cluster_rproc_exit()
1202 if (rproc->state == RPROC_ATTACHED) { in k3_r5_cluster_rproc_exit()
1205 dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", ret); in k3_r5_cluster_rproc_exit()
1210 mbox_free_channel(kproc->mbox); in k3_r5_cluster_rproc_exit()
1216 struct device *dev = &pdev->dev; in k3_r5_core_of_init()
1222 return -ENOMEM; in k3_r5_core_of_init()
1226 ret = -ENOMEM; in k3_r5_core_of_init()
1230 core->dev = dev; in k3_r5_core_of_init()
1232 * Use SoC Power-on-Reset values as default if no DT properties are in k3_r5_core_of_init()
1235 core->atcm_enable = 0; in k3_r5_core_of_init()
1236 core->btcm_enable = 1; in k3_r5_core_of_init()
1237 core->loczrama = 1; in k3_r5_core_of_init()
1239 ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable); in k3_r5_core_of_init()
1240 if (ret < 0 && ret != -EINVAL) { in k3_r5_core_of_init()
1241 dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n", in k3_r5_core_of_init()
1246 ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable); in k3_r5_core_of_init()
1247 if (ret < 0 && ret != -EINVAL) { in k3_r5_core_of_init()
1248 dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n", in k3_r5_core_of_init()
1253 ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama); in k3_r5_core_of_init()
1254 if (ret < 0 && ret != -EINVAL) { in k3_r5_core_of_init()
1281 struct device *dev = &pdev->dev; in k3_r5_core_of_exit()
1293 list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) { in k3_r5_cluster_of_exit()
1294 list_del(&core->elem); in k3_r5_cluster_of_exit()
1295 cpdev = to_platform_device(core->dev); in k3_r5_cluster_of_exit()
1303 struct device *dev = &pdev->dev; in k3_r5_cluster_of_init()
1312 ret = -ENODEV; in k3_r5_cluster_of_init()
1321 put_device(&cpdev->dev); in k3_r5_cluster_of_init()
1326 core->cluster = cluster; in k3_r5_cluster_of_init()
1327 put_device(&cpdev->dev); in k3_r5_cluster_of_init()
1328 list_add_tail(&core->elem, &cluster->cores); in k3_r5_cluster_of_init()
1340 struct device *dev = &pdev->dev; in k3_r5_probe()
1347 data = of_device_get_match_data(&pdev->dev); in k3_r5_probe()
1349 dev_err(dev, "SoC-specific data is not defined\n"); in k3_r5_probe()
1350 return -ENODEV; in k3_r5_probe()
1355 return -ENOMEM; in k3_r5_probe()
1357 cluster->dev = dev; in k3_r5_probe()
1358 cluster->soc_data = data; in k3_r5_probe()
1359 INIT_LIST_HEAD(&cluster->cores); in k3_r5_probe()
1360 init_waitqueue_head(&cluster->core_transition); in k3_r5_probe()
1362 ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode); in k3_r5_probe()
1363 if (ret < 0 && ret != -EINVAL) in k3_r5_probe()
1364 return dev_err_probe(dev, ret, "invalid format for ti,cluster-mode\n"); in k3_r5_probe()
1366 if (ret == -EINVAL) { in k3_r5_probe()
1370 * default to most common efuse configurations - in k3_r5_probe()
1371 * Split-mode on AM64x in k3_r5_probe()
1373 * LockStep-mode on all others in k3_r5_probe()
1375 if (!data->is_single_core) in k3_r5_probe()
1376 cluster->mode = data->single_cpu_mode ? in k3_r5_probe()
1379 cluster->mode = CLUSTER_MODE_SINGLECORE; in k3_r5_probe()
1382 if ((cluster->mode == CLUSTER_MODE_SINGLECPU && !data->single_cpu_mode) || in k3_r5_probe()
1383 (cluster->mode == CLUSTER_MODE_SINGLECORE && !data->is_single_core)) in k3_r5_probe()
1384 return dev_err_probe(dev, -EINVAL, in k3_r5_probe()
1386 cluster->mode); in k3_r5_probe()
1389 if (num_cores != 2 && !data->is_single_core) in k3_r5_probe()
1390 return dev_err_probe(dev, -ENODEV, in k3_r5_probe()
1394 if (num_cores != 1 && data->is_single_core) in k3_r5_probe()
1395 return dev_err_probe(dev, -ENODEV, in k3_r5_probe()
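The probe fragments above default the cluster mode when the optional "ti,cluster-mode" property is absent and reject modes the SoC cannot support. A standalone sketch of that defaulting and validation, with an illustrative enum mirroring the modes discussed earlier:

#include <stdbool.h>

/* Cluster modes as discussed above (values are illustrative). */
enum cluster_mode {
	CLUSTER_MODE_SPLIT,
	CLUSTER_MODE_LOCKSTEP,
	CLUSTER_MODE_SINGLECPU,
	CLUSTER_MODE_SINGLECORE,
};

/*
 * Default used when "ti,cluster-mode" is absent: single-core IP (e.g. AM62x)
 * has only one choice, Single-CPU capable IP (e.g. AM64x) defaults to Split,
 * and everything else defaults to LockStep.
 */
static enum cluster_mode default_cluster_mode(bool is_single_core,
					      bool single_cpu_capable)
{
	if (is_single_core)
		return CLUSTER_MODE_SINGLECORE;

	return single_cpu_capable ? CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
}

/* Reject modes the SoC match data says the IP cannot support. */
static bool cluster_mode_is_supported(enum cluster_mode mode,
				      bool is_single_core,
				      bool single_cpu_capable)
{
	if (mode == CLUSTER_MODE_SINGLECPU && !single_cpu_capable)
		return false;
	if (mode == CLUSTER_MODE_SINGLECORE && !is_single_core)
		return false;
	return true;
}

int main(void)
{
	enum cluster_mode mode = default_cluster_mode(false, true);

	return cluster_mode_is_supported(mode, false, true) ? 0 : 1;
}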
1469 { .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
1470 { .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
1471 { .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, },
1472 { .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
1473 { .compatible = "ti,am62-r5fss", .data = &am62_soc_data, },
1474 { .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, },
1477 MODULE_DEVICE_TABLE(of, k3_r5_of_match);
1491 MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");