| /linux/drivers/cpuidle/ |
| dt_idle_genpd.c |
    26   struct genpd_power_state *states, int state_count) in pd_parse_state_nodes() argument
    32   ret = parse_state(to_of_node(states[i].fwnode), &state); in pd_parse_state_nodes()
    42   states[i].data = state_buf; in pd_parse_state_nodes()
    50   kfree(states[i].data); in pd_parse_state_nodes()
    56   struct genpd_power_state **states, in pd_parse_states() argument
    62   ret = of_genpd_parse_idle_states(np, states, state_count); in pd_parse_states()
    67   ret = pd_parse_state_nodes(parse_state, *states, *state_count); in pd_parse_states()
    69   kfree(*states); in pd_parse_states()
    74   static void pd_free_states(struct genpd_power_state *states, in pd_free_states() argument
    80   kfree(states[i].data); in pd_free_states()
    [all …]
|
| cpuidle-riscv-sbi.c |
    35   u32 *states; member
    81   u32 *states = __this_cpu_read(sbi_cpuidle_data.states); in sbi_cpuidle_enter_state() local
    82   u32 state = states[idx]; in sbi_cpuidle_enter_state()
    96   u32 *states = data->states; in __sbi_enter_domain_idle_state() local
    116  state = states[idx]; in __sbi_enter_domain_idle_state()
    225  drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE; in sbi_dt_cpu_init_topology()
    226  drv->states[state_count - 1].enter = sbi_enter_domain_idle_state; in sbi_dt_cpu_init_topology()
    227  drv->states[state_count - 1].enter_s2idle = in sbi_dt_cpu_init_topology()
    241  u32 *states; in sbi_cpuidle_dt_init_states() local
    248  states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL); in sbi_cpuidle_dt_init_states()
    [all …]
|
| cpuidle-mvebu-v7.c |
    36   if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE) in mvebu_v7_enter_idle()
    53   .states[0] = ARM_CPUIDLE_WFI_STATE,
    54   .states[1] = {
    63   .states[2] = {
    77   .states[0] = ARM_CPUIDLE_WFI_STATE,
    78   .states[1] = {
    92   .states[0] = ARM_CPUIDLE_WFI_STATE,
    93   .states[1] = {
|
| cpuidle-psci.c |
    69   u32 *states = data->psci_states; in __psci_enter_domain_idle_state() local
    72   u32 state = states[idx]; in __psci_enter_domain_idle_state()
    269  drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state; in psci_dt_cpu_init_topology()
    271  drv->states[state_count - 1].enter = psci_enter_domain_idle_state; in psci_dt_cpu_init_topology()
    385  drv->states[0].enter = psci_enter_idle_state; in psci_idle_init_cpu()
    386  drv->states[0].exit_latency = 1; in psci_idle_init_cpu()
    387  drv->states[0].target_residency = 1; in psci_idle_init_cpu()
    388  drv->states[0].power_usage = UINT_MAX; in psci_idle_init_cpu()
    389  strscpy(drv->states[0].name, "WFI"); in psci_idle_init_cpu()
    390  strscpy(drv->states[0].desc, "ARM WFI"); in psci_idle_init_cpu()
|
| Kconfig.arm |
    12   It provides a generic idle driver whose idle states are configured
    25   managing idle states through the PSCI firmware interface.
    28   - If the idle states are described with the non-hierarchical layout,
    29   all idle states are still available.
    31   - If the idle states are described with the hierarchical layout,
    32   only the idle states defined per CPU are available, but not the ones
    33   being shared among a group of CPUs (aka cluster idle states).
    44   idle states.
    56   define different C-states for little and big cores through the
|
| /linux/drivers/regulator/ |
| gpio-regulator.c |
    39   struct gpio_regulator_state *states; member
    51   if (data->states[ptr].gpios == data->state) in gpio_regulator_get_value()
    52   return data->states[ptr].value; in gpio_regulator_get_value()
    65   if (data->states[ptr].value < best_val && in gpio_regulator_set_voltage()
    66   data->states[ptr].value >= min_uV && in gpio_regulator_set_voltage()
    67   data->states[ptr].value <= max_uV) { in gpio_regulator_set_voltage()
    68   target = data->states[ptr].gpios; in gpio_regulator_set_voltage()
    69   best_val = data->states[ptr].value; in gpio_regulator_set_voltage()
    94   return data->states[selector].value; in gpio_regulator_list_voltage()
    104  if (data->states[ptr].value > best_val && in gpio_regulator_set_current_limit()
    [all …]
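The gpio-regulator hits above revolve around a table of {GPIO bitmask, value} states and a loop that picks the lowest value inside the requested range. A minimal standalone sketch of that selection; the struct and names are illustrative, not the driver's real layout:

```c
#include <limits.h>
#include <stdio.h>

struct gpio_state {
	unsigned int gpios;	/* GPIO level bitmask selecting this state */
	int value;		/* output in microvolts for this state */
};

/* Return the index of the lowest-voltage state inside [min_uV, max_uV], or -1. */
static int pick_state(const struct gpio_state *states, int nr_states,
		      int min_uV, int max_uV)
{
	int ptr, best = -1, best_val = INT_MAX;

	for (ptr = 0; ptr < nr_states; ptr++) {
		if (states[ptr].value < best_val &&
		    states[ptr].value >= min_uV &&
		    states[ptr].value <= max_uV) {
			best = ptr;
			best_val = states[ptr].value;
		}
	}
	return best;
}

int main(void)
{
	static const struct gpio_state table[] = {
		{ 0x0, 1200000 }, { 0x1, 1800000 }, { 0x3, 3300000 },
	};
	int idx = pick_state(table, 3, 1500000, 3000000);

	printf("selected state %d\n", idx);	/* prints 1 (the 1.8 V entry) */
	return 0;
}
```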
|
| /linux/drivers/cpuidle/governors/ |
| ladder.c |
    40   struct ladder_device_state states[CPUIDLE_STATE_MAX]; member
    56   ldev->states[old_idx].stats.promotion_count = 0; in ladder_do_selection()
    57   ldev->states[old_idx].stats.demotion_count = 0; in ladder_do_selection()
    73   int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0; in ladder_select_state()
    83   last_state = &ldev->states[last_idx]; in ladder_select_state()
    85   last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns; in ladder_select_state()
    91   drv->states[last_idx + 1].exit_latency_ns <= latency_req) { in ladder_select_state()
    103  drv->states[last_idx].exit_latency_ns > latency_req)) { in ladder_select_state()
    107  if (drv->states[i].exit_latency_ns <= latency_req) in ladder_select_state()
    137  int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0; in ladder_enable_device()
    [all …]
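The ladder hits above are the promotion/demotion bookkeeping: long idle residencies push the CPU one state deeper, early wakeups pull it back. A self-contained model of that heuristic, with made-up thresholds and a simplified rung structure (the real governor also honours the PM QoS latency limit):

```c
#include <stdio.h>

/* Simplified model of one ladder rung; fields and thresholds are illustrative. */
struct rung {
	long long residency_ns;	/* minimum residency to stay at this depth */
	int promotion_count;
	int demotion_count;
};

#define PROMOTION_THRESHOLD 4
#define DEMOTION_THRESHOLD  1

/* Return the next state index given how long the CPU actually stayed idle. */
static int ladder_step(struct rung *rungs, int nr, int cur, long long measured_ns)
{
	if (cur + 1 < nr && measured_ns > rungs[cur].residency_ns) {
		/* Idle periods keep exceeding this rung: climb one step deeper. */
		if (++rungs[cur].promotion_count >= PROMOTION_THRESHOLD) {
			rungs[cur].promotion_count = 0;
			return cur + 1;
		}
	} else if (cur > 0 && measured_ns < rungs[cur].residency_ns) {
		/* Woken too early: back off one step. */
		if (++rungs[cur].demotion_count >= DEMOTION_THRESHOLD) {
			rungs[cur].demotion_count = 0;
			return cur - 1;
		}
	}
	return cur;
}

int main(void)
{
	struct rung rungs[3] = { { 0 }, { 100000 }, { 1000000 } };
	int state = 0, i;

	for (i = 0; i < 5; i++)
		state = ladder_step(rungs, 3, state, 500000);
	printf("settled on state %d\n", state);	/* promoted to 1 after 4 long idles */
	return 0;
}
```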
|
| menu.c |
    275  ((data->next_timer_ns < drv->states[1].target_residency_ns || in menu_select()
    276  latency_req < drv->states[1].exit_latency_ns) && in menu_select()
    283  *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING); in menu_select()
    303  struct cpuidle_state *s = &drv->states[i]; in menu_select()
    325  if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && in menu_select()
    344  predicted_ns = drv->states[idx].target_residency_ns; in menu_select()
    354  if (drv->states[idx].target_residency_ns < TICK_NSEC && in menu_select()
    368  if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || in menu_select()
    372  if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) { in menu_select()
    384  if (drv->states[i].target_residency_ns <= delta_tick) in menu_select()
    [all …]
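The menu comparisons above boil down to choosing the deepest state whose target residency fits the predicted idle time and whose exit latency respects the latency constraint. A standalone sketch of just that decision, with an illustrative state table (the real governor layers tick and polling handling on top of it):

```c
#include <stdio.h>

/* Illustrative state table; not the kernel's struct cpuidle_state. */
struct idle_state {
	const char *name;
	long long target_residency_ns;	/* minimum idle time to be worth entering */
	long long exit_latency_ns;	/* worst-case wakeup cost */
};

/* Deepest state whose residency fits the prediction and latency fits the limit. */
static int menu_pick(const struct idle_state *s, int nr,
		     long long predicted_ns, long long latency_req_ns)
{
	int i, idx = 0;

	for (i = 1; i < nr; i++) {
		if (s[i].target_residency_ns > predicted_ns)
			break;
		if (s[i].exit_latency_ns > latency_req_ns)
			break;
		idx = i;
	}
	return idx;
}

int main(void)
{
	static const struct idle_state states[] = {
		{ "WFI", 1000,    500 },
		{ "C2",  50000,   20000 },
		{ "C3",  2000000, 800000 },
	};
	int idx = menu_pick(states, 3, 600000, 100000);

	printf("chose %s\n", states[idx].name);	/* C2: C3 needs 2 ms residency */
	return 0;
}
```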
|
| /linux/tools/verification/rvgen/rvgen/ |
| automata.py |
    26   self.states, self.initial_state, self.final_states = self.__get_state_variables()
    81   states = []
    97   states.append(state)
    108  states = sorted(set(states))
    109  states.remove(initial_state)
    112  states.insert(0, initial_state)
    117  return states, initial_state, final_states
    146  states = self.states
    155  for state in states:
    184  for j, _ in enumerate(self.states):
|
| dot2c.py |
    41   for state in self.states:
    89   if self.states.__len__() > 255:
    92   if self.states.__len__() > 65535:
    95   if self.states.__len__() > 1000000:
    96   raise Exception("Too many states: %d" % self.states.__len__())
    134  return self.__get_string_vector_per_line_content(self.states)
    152  max_state_name = max(self.states, key = len).__len__()
    156  nr_states = self.states.__len__()
    204  for state in self.states:
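The size checks above (255 / 65535 / 1000000) decide how wide the state variable in the generated C model has to be. Roughly the same choice expressed in C, purely as an illustration of what a generator could emit:

```c
#include <stdio.h>

/* Mirror of the generator's width checks: narrowest C type for nr_states states. */
static const char *state_type_for(unsigned long nr_states)
{
	if (nr_states > 1000000)
		return NULL;		/* the generator refuses models this big */
	if (nr_states > 65535)
		return "unsigned int";
	if (nr_states > 255)
		return "unsigned short";
	return "unsigned char";
}

int main(void)
{
	printf("%s\n", state_type_for(12));	/* unsigned char */
	printf("%s\n", state_type_for(300));	/* unsigned short */
	return 0;
}
```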
|
| /linux/Documentation/admin-guide/blockdev/drbd/ |
| figures.rst |
    20   .. kernel-figure:: conn-states-8.dot
    21   :alt: conn-states-8.dot
    24   .. kernel-figure:: disk-states-8.dot
    25   :alt: disk-states-8.dot
    28   .. kernel-figure:: peer-states-8.dot
    29   :alt: peer-states-8.dot
|
| /linux/include/rv/ |
| ltl_monitor.h |
    36   memset(&mon->states, 0, sizeof(mon->states)); in ltl_task_init()
    111  char states[32], next[32]; in ltl_trace_event() local
    117  snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states); in ltl_trace_event()
    127  CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next); in ltl_trace_event()
    138  if (test_bit(i, mon->states)) in ltl_validate()
    144  memcpy(mon->states, next_states, sizeof(next_states)); in ltl_validate()
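The ltl_monitor hits above keep the set of possible Büchi-automaton states as a per-task bitmap: clear it at init, flag a violation when no state survives an event, otherwise copy the successor set in. A compact standalone model of that idea, using a fixed 32-bit mask instead of the kernel bitmap API:

```c
#include <stdint.h>
#include <stdio.h>

#define NR_BA_STATES 8

static uint32_t states;	/* bit i set => automaton state i is still possible */

/* Advance the monitor; return -1 if the property was violated. */
static int validate(uint32_t next_states)
{
	int i;

	for (i = 0; i < NR_BA_STATES; i++)
		if (next_states & (1u << i))
			break;
	if (i == NR_BA_STATES)
		return -1;	/* no possible state left: violation */

	states = next_states;	/* replace the whole set after the event */
	return 0;
}

int main(void)
{
	states = 0x3;			/* start in {s0, s1} */
	printf("%d\n", validate(0x2));	/* 0: still in {s1} */
	printf("%d\n", validate(0x0));	/* -1: property violated */
	return 0;
}
```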
|
| /linux/Documentation/admin-guide/pm/ |
| intel_idle.rst |
    28   processor's functional blocks into low-power states. That instruction takes two
    63   .. _intel-idle-enumeration-of-states:
    71   as C-states (in the ACPI terminology) or idle states. The list of meaningful
    72   ``MWAIT`` hint values and idle states (i.e. low-power configurations of the
    76   In order to create a list of available idle states required by the ``CPUIdle``
    77   subsystem (see :ref:`idle-states-representation` in
    79   ``intel_idle`` can use two sources of information: static tables of idle states
    90   states, ``intel_idle`` first looks for a ``_CST`` object under one of the ACPI
    93   ``CPUIdle`` subsystem expects that the list of idle states supplied by the
    97   state description and such that all of the idle states included in its return
    [all …]
|
| strategies.rst |
    15   One of them is based on using global low-power states of the whole system in
    17   significantly reduced, referred to as :doc:`sleep states <sleep-states>`. The
    18   kernel puts the system into one of these states when requested by user space
    21   user space code can run. Because sleep states are global and the whole system
    26   <working-state>`, is based on adjusting the power states of individual hardware
    30   a metastate covering a range of different power states of the system in which
    32   ``inactive`` (idle). If they are active, they have to be in power states
    34   are inactive, ideally, they should be in low-power states in which they may not
    43   for the same system in a sleep state. However, transitions from sleep states
    47   sleep states than when they are runtime idle most of the time.
|
| /linux/arch/x86/kernel/acpi/ |
| cstate.c |
    116  } states[ACPI_PROCESSOR_MAX_POWER]; member
    184  percpu_entry->states[cx->index].eax = 0; in acpi_processor_ffh_cstate_probe()
    185  percpu_entry->states[cx->index].ecx = 0; in acpi_processor_ffh_cstate_probe()
    193  percpu_entry->states[cx->index].eax = cx->address; in acpi_processor_ffh_cstate_probe()
    194  percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; in acpi_processor_ffh_cstate_probe()
    215  mwait_play_dead(percpu_entry->states[cx->index].eax); in acpi_processor_ffh_play_dead()
    225  mwait_idle_with_hints(percpu_entry->states[cx->index].eax, in acpi_processor_ffh_cstate_enter()
    226  percpu_entry->states[cx->index].ecx); in acpi_processor_ffh_cstate_enter()
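The cstate.c hits above cache one pair of MWAIT arguments (EAX hint, ECX extensions) per C-state index so the idle and play-dead paths can replay them without going back to ACPI. A standalone sketch of that bookkeeping; the constants and names are illustrative:

```c
#include <stdio.h>

#define MAX_CSTATES		8
#define ECX_INTERRUPT_BREAK	0x1	/* wake on interrupt even when masked */

/* One cached MWAIT argument pair per C-state index. */
struct cstate_entry {
	unsigned int eax;	/* MWAIT hint encoding the target C-state */
	unsigned int ecx;	/* MWAIT extensions */
};

static struct cstate_entry cstates[MAX_CSTATES];

/* Record the firmware-provided hint for one C-state at probe time. */
static void probe_cstate(int index, unsigned int hint)
{
	cstates[index].eax = hint;
	cstates[index].ecx = ECX_INTERRUPT_BREAK;
}

int main(void)
{
	probe_cstate(2, 0x20);	/* e.g. a C2-class hint reported by firmware */
	printf("C-state 2: eax=0x%x ecx=0x%x\n", cstates[2].eax, cstates[2].ecx);
	return 0;
}
```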
|
| /linux/Documentation/ABI/testing/ |
| sysfs-bus-surface_aggregator-tabletsw |
    8    Currently returned posture states are:
    29   New states may be introduced with new hardware. Users therefore
    30   must not rely on this list of states being exhaustive and
    31   gracefully handle unknown states.
    39   returned posture states are:
    55   New states may be introduced with new hardware. Users therefore
    56   must not rely on this list of states being exhaustive and
    57   gracefully handle unknown states.
|
| /linux/Documentation/devicetree/bindings/powerpc/opal/ |
| power-mgt.txt |
    5    idle states. The description of these idle states is exposed via the
    14   - flags: indicating some aspects of this idle states such as the
    16   idle states and so on. The flag bits are as follows:
    27   The following properties provide details about the idle states. These
    32   If idle-states are defined, then the properties
    38   Array of strings containing the names of the idle states.
    42   flags associated with the aforementioned idle-states. The
    62   exit-latencies (in ns) for the idle states in
    67   target-residency (in ns) for the idle states in
    75   PSSCR for each of the idle states in ibm,cpu-idle-state-names.
    [all …]
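The binding describes parallel arrays, one entry per idle state. A hedged kernel-style sketch of how a consumer might read two of them; the property names are taken from this binding, error handling is trimmed, and the fixed array size is an assumption:

```c
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/types.h>

#define MAX_IDLE_STATES 16

/* Read the per-state name and exit-latency arrays; entry i of each array
 * describes the same idle state. */
static int parse_opal_idle_states(struct device_node *np, int nr_states)
{
	const char *names[MAX_IDLE_STATES];
	u32 latencies[MAX_IDLE_STATES];
	int rc;

	if (nr_states > MAX_IDLE_STATES)
		return -EINVAL;

	rc = of_property_read_string_array(np, "ibm,cpu-idle-state-names",
					   names, nr_states);
	if (rc < 0)
		return rc;

	return of_property_read_u32_array(np,
			"ibm,cpu-idle-state-latencies-ns",
			latencies, nr_states);
}
```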
|
| /linux/drivers/md/dm-vdo/ |
| encodings.c |
    1296  void vdo_destroy_component_states(struct vdo_component_states *states) in vdo_destroy_component_states() argument
    1298  if (states == NULL) in vdo_destroy_component_states()
    1301  vdo_uninitialize_layout(&states->layout); in vdo_destroy_component_states()
    1316  struct vdo_component_states *states) in decode_components() argument
    1320  decode_vdo_component(buffer, offset, &states->vdo); in decode_components()
    1323  states->vdo.config.physical_blocks, &states->layout); in decode_components()
    1328  &states->recovery_journal); in decode_components()
    1332  result = decode_slab_depot_state_2_0(buffer, offset, &states->slab_depot); in decode_components()
    1336  result = decode_block_map_state_2_0(buffer, offset, &states->block_map); in decode_components()
    1354  struct vdo_component_states *states) in vdo_decode_component_states() argument
    [all …]
|
| /linux/arch/arm64/boot/dts/freescale/ |
| fsl-ls2088a.dtsi |
    28   cpu-idle-states = <&CPU_PW20>;
    38   cpu-idle-states = <&CPU_PW20>;
    48   cpu-idle-states = <&CPU_PW20>;
    58   cpu-idle-states = <&CPU_PW20>;
    69   cpu-idle-states = <&CPU_PW20>;
    78   cpu-idle-states = <&CPU_PW20>;
    88   cpu-idle-states = <&CPU_PW20>;
    98   cpu-idle-states = <&CPU_PW20>;
|
| fsl-ls2080a.dtsi |
    28   cpu-idle-states = <&CPU_PW20>;
    38   cpu-idle-states = <&CPU_PW20>;
    48   cpu-idle-states = <&CPU_PW20>;
    58   cpu-idle-states = <&CPU_PW20>;
    68   cpu-idle-states = <&CPU_PW20>;
    78   cpu-idle-states = <&CPU_PW20>;
    89   cpu-idle-states = <&CPU_PW20>;
    98   cpu-idle-states = <&CPU_PW20>;
|
| /linux/arch/arm64/boot/dts/sprd/ |
| ums9620.dtsi |
    53   cpu-idle-states = <&LIT_CORE_PD>;
    61   cpu-idle-states = <&LIT_CORE_PD>;
    69   cpu-idle-states = <&LIT_CORE_PD>;
    77   cpu-idle-states = <&LIT_CORE_PD>;
    85   cpu-idle-states = <&BIG_CORE_PD>;
    93   cpu-idle-states = <&BIG_CORE_PD>;
    101  cpu-idle-states = <&BIG_CORE_PD>;
    109  cpu-idle-states = <&BIG_CORE_PD>;
    113  idle-states {
|
| /linux/kernel/trace/rv/ |
| rv_trace.h |
    135  TP_PROTO(struct task_struct *task, char *states, char *atoms, char *next),
    137  TP_ARGS(task, states, atoms, next),
    142  __string(states, states)
    150  __assign_str(states);
    156  __get_str(states), __get_str(atoms), __get_str(next))
|
| /linux/drivers/firmware/arm_scmi/ |
| powercap.c |
    119  (lower_32_bits((p)->states[(id)].thresholds))
    121  (upper_32_bits((p)->states[(id)].thresholds))
    129  struct scmi_powercap_state *states; member
    438  pi->states[domain_id].last_pcap = power_cap; in __scmi_powercap_cap_set()
    458  !pi->states[domain_id].enabled) { in scmi_powercap_cap_set()
    459  pi->states[domain_id].last_pcap = power_cap; in scmi_powercap_cap_set()
    618  pi->states[domain_id].thresholds = in scmi_powercap_measurements_threshold_set()
    623  if (pi->states[domain_id].meas_notif_enabled) in scmi_powercap_measurements_threshold_set()
    641  if (enable == pi->states[domain_id].enabled) in scmi_powercap_cap_enable_set()
    646  if (!pi->states[domain_id].last_pcap) in scmi_powercap_cap_enable_set()
    [all …]
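Lines 119/121 above unpack a 64-bit per-domain thresholds word into its two 32-bit halves. A standalone illustration of that packing; which half is the low and which the high threshold is assumed from the macro names:

```c
#include <stdint.h>
#include <stdio.h>

static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	/* Pack two 32-bit threshold values into one 64-bit field. */
	uint64_t thresholds = ((uint64_t)3000u << 32) | 1000u;

	printf("low threshold:  %u\n", lower_32(thresholds));	/* 1000 */
	printf("high threshold: %u\n", upper_32(thresholds));	/* 3000 */
	return 0;
}
```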
|
| /linux/arch/sh/kernel/cpu/shmobile/ |
| cpuidle.c |
    54   .states = {
    89   cpuidle_driver.states[1].flags = CPUIDLE_FLAG_NONE; in sh_mobile_setup_cpuidle()
    92   cpuidle_driver.states[2].flags = CPUIDLE_FLAG_NONE; in sh_mobile_setup_cpuidle()
|
| /linux/Documentation/devicetree/bindings/pinctrl/ |
| pinctrl-bindings.txt |
    17   states. The number and names of those states is defined by the client device's
    22   configuration used by those states.
    31   they require certain specific named states for dynamic pin configuration.
    41   Each client device's own binding determines the set of states that must be
    61   the binding for that IP block requires certain pin states to
    70   pinctrl-names: The list of names to assign states. List entry 0 defines the
    76   /* For a client device requiring named states */
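The binding text describes how a client device names its pin states via pinctrl-names and pinctrl-N. A hedged kernel-style sketch of the consumer side, using the standard "default" and "sleep" state names; it is not tied to any particular driver:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl-state.h>

/* Look each named state up once, then switch between them at runtime. */
static int client_pinctrl_setup(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *def, *sleep;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* "default" and "sleep" must appear in this device's pinctrl-names. */
	def = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	sleep = pinctrl_lookup_state(p, PINCTRL_STATE_SLEEP);
	if (IS_ERR(def) || IS_ERR(sleep))
		return -ENODEV;

	return pinctrl_select_state(p, def);	/* apply the active configuration */
}
```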
|