ext.c (62d3726d4cd66f3e48dfe0f0401e0d74e58c2170) ext.c (431844b65f4c1b988ccd886f2ed29c138f7bb262)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */

--- 861 unchanged lines hidden (view full) ---

870
/* Pending exit reason; SCX_EXIT_DONE appears to mean no exit in flight — NOTE(review): confirm against scx_ops_exit path. */
static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
/* Detail record for the most recent scheduler exit; presumably allocated on enable — verify. */
static struct scx_exit_info *scx_exit_info;

/* Count of tasks rejected by the BPF scheduler (exported via sysfs below). */
static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
/* Bumped on CPU hotplug events; read by scx_attr_hotplug_seq_show(). */
static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
876
877/*
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
4 *
5 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
6 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
7 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
8 */

--- 861 unchanged lines hidden (view full) ---

870
/* Pending exit reason; SCX_EXIT_DONE appears to mean no exit in flight — NOTE(review): confirm against scx_ops_exit path. */
static atomic_t scx_exit_kind = ATOMIC_INIT(SCX_EXIT_DONE);
/* Detail record for the most recent scheduler exit; presumably allocated on enable — verify. */
static struct scx_exit_info *scx_exit_info;

/* Count of tasks rejected by the BPF scheduler (exported via sysfs below). */
static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
/* Bumped on CPU hotplug events; read by scx_attr_hotplug_seq_show(). */
static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);

/*
 * A monotonically increasing sequence number that is incremented every time a
 * scheduler is enabled. This can be used to check if any custom sched_ext
 * scheduler has ever been used in the system.
 */
static atomic_long_t scx_enable_seq = ATOMIC_LONG_INIT(0);
883
884/*
/*
 * The maximum amount of time in jiffies that a task may be runnable without
 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
 * scx_ops_error().
 */
static unsigned long scx_watchdog_timeout;
884/*
885 * The last time the delayed work was run. This delayed work relies on

--- 3263 unchanged lines hidden (view full) ---

4149
4150static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4151 struct kobj_attribute *ka, char *buf)
4152{
4153 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4154}
4155SCX_ATTR(hotplug_seq);
4156
/*
 * The maximum amount of time in jiffies that a task may be runnable without
 * being scheduled on a CPU. If this timeout is exceeded, it will trigger
 * scx_ops_error().
 */
static unsigned long scx_watchdog_timeout;
891/*
892 * The last time the delayed work was run. This delayed work relies on

--- 3263 unchanged lines hidden (view full) ---

4156
4157static ssize_t scx_attr_hotplug_seq_show(struct kobject *kobj,
4158 struct kobj_attribute *ka, char *buf)
4159{
4160 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_hotplug_seq));
4161}
4162SCX_ATTR(hotplug_seq);
4163
4164static ssize_t scx_attr_enable_seq_show(struct kobject *kobj,
4165 struct kobj_attribute *ka, char *buf)
4166{
4167 return sysfs_emit(buf, "%ld\n", atomic_long_read(&scx_enable_seq));
4168}
4169SCX_ATTR(enable_seq);
4170
4157static struct attribute *scx_global_attrs[] = {
4158 &scx_attr_state.attr,
4159 &scx_attr_switch_all.attr,
4160 &scx_attr_nr_rejected.attr,
4161 &scx_attr_hotplug_seq.attr,
4171static struct attribute *scx_global_attrs[] = {
4172 &scx_attr_state.attr,
4173 &scx_attr_switch_all.attr,
4174 &scx_attr_nr_rejected.attr,
4175 &scx_attr_hotplug_seq.attr,
4176 &scx_attr_enable_seq.attr,
4162 NULL,
4163};
4164
/* Groups the global attributes for registration on the root kobject. */
static const struct attribute_group scx_global_attr_group = {
	.attrs = scx_global_attrs,
};
4168
4169static void scx_kobj_release(struct kobject *kobj)

--- 1002 unchanged lines hidden (view full) ---

5172 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5173 static_branch_enable(&__scx_switched_all);
5174
5175 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5176 scx_ops.name, scx_switched_all() ? "" : " (partial)");
5177 kobject_uevent(scx_root_kobj, KOBJ_ADD);
5178 mutex_unlock(&scx_ops_enable_mutex);
5179
4177 NULL,
4178};
4179
/* Groups the global attributes for registration on the root kobject. */
static const struct attribute_group scx_global_attr_group = {
	.attrs = scx_global_attrs,
};
4183
4184static void scx_kobj_release(struct kobject *kobj)

--- 1002 unchanged lines hidden (view full) ---

5187 if (!(ops->flags & SCX_OPS_SWITCH_PARTIAL))
5188 static_branch_enable(&__scx_switched_all);
5189
5190 pr_info("sched_ext: BPF scheduler \"%s\" enabled%s\n",
5191 scx_ops.name, scx_switched_all() ? "" : " (partial)");
5192 kobject_uevent(scx_root_kobj, KOBJ_ADD);
5193 mutex_unlock(&scx_ops_enable_mutex);
5194
5195 atomic_long_inc(&scx_enable_seq);
5196
5180 return 0;
5181
5182err_del:
5183 kobject_del(scx_root_kobj);
5184err:
5185 kobject_put(scx_root_kobj);
5186 scx_root_kobj = NULL;
5187 if (scx_exit_info) {

--- 1987 unchanged lines hidden ---
5197 return 0;
5198
5199err_del:
5200 kobject_del(scx_root_kobj);
5201err:
5202 kobject_put(scx_root_kobj);
5203 scx_root_kobj = NULL;
5204 if (scx_exit_info) {

--- 1987 unchanged lines hidden ---