// SPDX-License-Identifier: GPL-2.0
/*
 * A scheduler that validates ops.dequeue() is called correctly:
 * - Tasks dispatched to terminal DSQs (local, global) bypass the BPF
 *   scheduler entirely: no ops.dequeue() should be called
 * - Tasks dispatched to user DSQs from ops.enqueue() enter BPF custody:
 *   ops.dequeue() must be called when they leave custody
 * - Every ops.enqueue() dispatch to non-terminal DSQs is followed by
 *   exactly one ops.dequeue() (validate 1:1 pairing and state machine)
 *
 * Copyright (c) 2026 NVIDIA Corporation.
 */

#include <scx/common.bpf.h>

#define SHARED_DSQ 0

/*
 * BPF internal queue.
 *
 * Tasks are stored here and consumed from ops.dispatch(), validating that
 * tasks on BPF internal structures still get ops.dequeue() when they
 * leave.
 */
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 32768);
	__type(value, s32);
} global_queue SEC(".maps");

char _license[] SEC("license") = "GPL";

UEI_DEFINE(uei);

/*
 * Counters to track the lifecycle of tasks:
 * - enqueue_cnt: Number of times ops.enqueue() was called
 * - dequeue_cnt: Number of times ops.dequeue() was called (any type)
 * - dispatch_dequeue_cnt: Number of regular dispatch dequeues (no flag)
 * - change_dequeue_cnt: Number of property change dequeues
 * - bpf_queue_full: Number of times the BPF internal queue was full
 */
u64 enqueue_cnt, dequeue_cnt, dispatch_dequeue_cnt, change_dequeue_cnt, bpf_queue_full;
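
/*
 * Derived invariants (a sketch of what the userspace harness could
 * assert; the exact checks are not part of this file):
 * - Outside the error paths, every ops.dequeue() that finds task storage
 *   bumps exactly one of dispatch_dequeue_cnt or change_dequeue_cnt, so
 *   dequeue_cnt >= dispatch_dequeue_cnt + change_dequeue_cnt.
 * - In the terminal-DSQ scenarios (0, 1, 3, 4), any ops.dequeue() call
 *   is reported as an error below.
 */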

/*
 * Test scenarios:
 * 0) Dispatch to local DSQ from ops.select_cpu() (terminal DSQ, bypasses BPF
 *    scheduler, no dequeue callbacks)
 * 1) Dispatch to global DSQ from ops.select_cpu() (terminal DSQ, bypasses BPF
 *    scheduler, no dequeue callbacks)
 * 2) Dispatch to shared user DSQ from ops.select_cpu() (enters BPF scheduler,
 *    dequeue callbacks expected)
 * 3) Dispatch to local DSQ from ops.enqueue() (terminal DSQ, bypasses BPF
 *    scheduler, no dequeue callbacks)
 * 4) Dispatch to global DSQ from ops.enqueue() (terminal DSQ, bypasses BPF
 *    scheduler, no dequeue callbacks)
 * 5) Dispatch to shared user DSQ from ops.enqueue() (enters BPF scheduler,
 *    dequeue callbacks expected)
 * 6) BPF internal queue from ops.enqueue(): store task PIDs in ops.enqueue(),
 *    consume in ops.dispatch() and dispatch to local DSQ (validates dequeue
 *    for tasks stored in internal BPF data structures)
 */
u32 test_scenario;
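
/*
 * test_scenario is set by userspace before the struct_ops link is
 * attached. With a libbpf skeleton this would look roughly like the
 * following (illustrative snippet; the harness names are assumptions):
 *
 *	skel->bss->test_scenario = 5;
 *	link = bpf_map__attach_struct_ops(skel->maps.dequeue_ops);
 */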

/*
 * Per-task state to track lifecycle and validate workflow semantics.
 * State transitions:
 *   NONE -> ENQUEUED (on enqueue)
 *   NONE -> DISPATCHED (on direct dispatch to terminal DSQ)
 *   ENQUEUED -> DISPATCHED (on dispatch dequeue)
 *   DISPATCHED -> NONE (on property change dequeue or re-enqueue)
 *   ENQUEUED -> NONE (on property change dequeue before dispatch)
 */
enum task_state {
	TASK_NONE = 0,
	TASK_ENQUEUED,
	TASK_DISPATCHED,
};

struct task_ctx {
	enum task_state state;	/* Current state in the workflow */
	u64 enqueue_seq;	/* Sequence number for debugging */
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct task_ctx);
} task_ctx_stor SEC(".maps");

static struct task_ctx *try_lookup_task_ctx(struct task_struct *p)
{
	return bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
}

s32 BPF_STRUCT_OPS(dequeue_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	struct task_ctx *tctx;

	tctx = try_lookup_task_ctx(p);
	if (!tctx)
		return prev_cpu;

	switch (test_scenario) {
	case 0:
		/*
		 * Direct dispatch to the local DSQ.
		 *
		 * Task bypasses BPF scheduler entirely: no enqueue
		 * tracking, no ops.dequeue() callbacks.
		 */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
		tctx->state = TASK_DISPATCHED;
		break;
	case 1:
		/*
		 * Direct dispatch to the global DSQ.
		 *
		 * Task bypasses BPF scheduler entirely: no enqueue
		 * tracking, no ops.dequeue() callbacks.
		 */
		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
		tctx->state = TASK_DISPATCHED;
		break;
	case 2:
		/*
		 * Dispatch to a shared user DSQ.
		 *
		 * Task enters BPF scheduler management: track
		 * enqueue/dequeue lifecycle and validate state
		 * transitions.
		 */
		if (tctx->state == TASK_ENQUEUED)
			scx_bpf_error("%d (%s): enqueue while in ENQUEUED state seq=%llu",
				      p->pid, p->comm, tctx->enqueue_seq);

		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, 0);

		__sync_fetch_and_add(&enqueue_cnt, 1);

		tctx->state = TASK_ENQUEUED;
		tctx->enqueue_seq++;
		break;
	}

	return prev_cpu;
}
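
/*
 * Note: inserting a task into a DSQ from ops.select_cpu() is a direct
 * dispatch, so the core skips ops.enqueue() for that wakeup. That is
 * why scenarios 0-2 do their state tracking above rather than in
 * dequeue_enqueue() below.
 */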

void BPF_STRUCT_OPS(dequeue_enqueue, struct task_struct *p, u64 enq_flags)
{
	struct task_ctx *tctx;
	s32 pid = p->pid;

	tctx = try_lookup_task_ctx(p);
	if (!tctx)
		return;

	switch (test_scenario) {
	case 3:
		/*
		 * Direct dispatch to the local DSQ.
		 *
		 * Task bypasses BPF scheduler entirely: no enqueue
		 * tracking, no ops.dequeue() callbacks.
		 */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
		tctx->state = TASK_DISPATCHED;
		break;
	case 4:
		/*
		 * Direct dispatch to the global DSQ.
		 *
		 * Task bypasses BPF scheduler entirely: no enqueue
		 * tracking, no ops.dequeue() callbacks.
		 */
		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
		tctx->state = TASK_DISPATCHED;
		break;
	case 5:
		/*
		 * Dispatch to shared user DSQ.
		 *
		 * Task enters BPF scheduler management: track
		 * enqueue/dequeue lifecycle and validate state
		 * transitions.
		 */
		if (tctx->state == TASK_ENQUEUED)
			scx_bpf_error("%d (%s): enqueue while in ENQUEUED state seq=%llu",
				      p->pid, p->comm, tctx->enqueue_seq);

		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);

		__sync_fetch_and_add(&enqueue_cnt, 1);

		tctx->state = TASK_ENQUEUED;
		tctx->enqueue_seq++;
		break;
	case 6:
		/*
		 * Store task in BPF internal queue.
		 *
		 * Task enters BPF scheduler management: track
		 * enqueue/dequeue lifecycle and validate state
		 * transitions.
		 */
		if (tctx->state == TASK_ENQUEUED)
			scx_bpf_error("%d (%s): enqueue while in ENQUEUED state seq=%llu",
				      p->pid, p->comm, tctx->enqueue_seq);

		if (bpf_map_push_elem(&global_queue, &pid, 0)) {
			/* Queue full: fall back to the global DSQ */
			scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
			__sync_fetch_and_add(&bpf_queue_full, 1);

			tctx->state = TASK_DISPATCHED;
		} else {
			__sync_fetch_and_add(&enqueue_cnt, 1);

			tctx->state = TASK_ENQUEUED;
			tctx->enqueue_seq++;
		}
		break;
	default:
		/* For all other scenarios, dispatch to the global DSQ */
		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
		tctx->state = TASK_DISPATCHED;
		break;
	}

	/* Kick the task's CPU if it is idle so the queued task gets noticed */
	scx_bpf_kick_cpu(scx_bpf_task_cpu(p), SCX_KICK_IDLE);
}
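
/*
 * ops.dequeue() below distinguishes two ways a task leaves BPF custody:
 * a regular dispatch dequeue (no flag set, the task moves on to a
 * terminal DSQ to run) and a property change dequeue
 * (SCX_DEQ_SCHED_CHANGE, the workflow is interrupted and the task drops
 * back out of scheduler control).
 */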

void BPF_STRUCT_OPS(dequeue_dequeue, struct task_struct *p, u64 deq_flags)
{
	struct task_ctx *tctx;

	__sync_fetch_and_add(&dequeue_cnt, 1);

	tctx = try_lookup_task_ctx(p);
	if (!tctx)
		return;

	/*
	 * For scenarios 0, 1, 3, and 4 (terminal DSQs: local and global),
	 * ops.dequeue() should never be called because tasks bypass the
	 * BPF scheduler entirely. If we get here, it's a kernel bug.
	 */
	if (test_scenario == 0 || test_scenario == 3) {
		scx_bpf_error("%d (%s): dequeue called for local DSQ scenario",
			      p->pid, p->comm);
		return;
	}

	if (test_scenario == 1 || test_scenario == 4) {
		scx_bpf_error("%d (%s): dequeue called for global DSQ scenario",
			      p->pid, p->comm);
		return;
	}

	if (deq_flags & SCX_DEQ_SCHED_CHANGE) {
		/*
		 * Property change interrupting the workflow. Valid from
		 * both ENQUEUED and DISPATCHED states. Transitions task
		 * back to NONE state.
		 */
		__sync_fetch_and_add(&change_dequeue_cnt, 1);

		/* Validate state transition */
		if (tctx->state != TASK_ENQUEUED && tctx->state != TASK_DISPATCHED)
			scx_bpf_error("%d (%s): invalid property change dequeue state=%d seq=%llu",
				      p->pid, p->comm, tctx->state, tctx->enqueue_seq);

		/*
		 * Transition back to NONE: task outside scheduler control.
		 *
		 * Scenario 6: dispatch() checks tctx->state after popping a
		 * PID; if the task is in state NONE, it was dequeued by
		 * property change and must not be dispatched (this
		 * prevents "target CPU not allowed").
		 */
		tctx->state = TASK_NONE;
	} else {
		/*
		 * Regular dispatch dequeue: kernel is moving the task from
		 * BPF custody to a terminal DSQ. Normally we come from
		 * ENQUEUED state. We can also see TASK_NONE if the task
		 * was dequeued by property change (SCX_DEQ_SCHED_CHANGE)
		 * while it was already on a DSQ (dispatched but not yet
		 * consumed); in that case we just leave the state as NONE.
		 */
		__sync_fetch_and_add(&dispatch_dequeue_cnt, 1);

		/*
		 * Must be ENQUEUED (normal path) or NONE (already dequeued
		 * by property change while on a DSQ).
		 */
		if (tctx->state != TASK_ENQUEUED && tctx->state != TASK_NONE)
			scx_bpf_error("%d (%s): dispatch dequeue from state %d seq=%llu",
				      p->pid, p->comm, tctx->state, tctx->enqueue_seq);

		if (tctx->state == TASK_ENQUEUED)
			tctx->state = TASK_DISPATCHED;

		/* NONE: leave as-is, task was already property-change dequeued */
	}
}
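
/*
 * Scenario 6 consumes PIDs from the internal queue in ops.dispatch().
 * bpf_task_from_pid() returns an acquired task reference (or NULL if
 * the task has already exited), so every exit path below must pair it
 * with bpf_task_release().
 */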

void BPF_STRUCT_OPS(dequeue_dispatch, s32 cpu, struct task_struct *prev)
{
	if (test_scenario == 6) {
		struct task_ctx *tctx;
		struct task_struct *p;
		s32 pid;

		if (bpf_map_pop_elem(&global_queue, &pid))
			return;

		p = bpf_task_from_pid(pid);
		if (!p)
			return;

		/*
		 * If the task was dequeued by property change
		 * (ops.dequeue() set tctx->state = TASK_NONE), skip
		 * dispatch.
		 */
		tctx = try_lookup_task_ctx(p);
		if (!tctx || tctx->state == TASK_NONE) {
			bpf_task_release(p);
			return;
		}

		/*
		 * Dispatch to this CPU's local DSQ if allowed, otherwise
		 * fall back to the global DSQ.
		 */
		if (bpf_cpumask_test_cpu(cpu, p->cpus_ptr))
			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0);
		else
			scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);

		bpf_task_release(p);
	} else {
		scx_bpf_dsq_move_to_local(SHARED_DSQ, 0);
	}
}

s32 BPF_STRUCT_OPS(dequeue_init_task, struct task_struct *p,
		   struct scx_init_task_args *args)
{
	struct task_ctx *tctx;

	tctx = bpf_task_storage_get(&task_ctx_stor, p, 0,
				    BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!tctx)
		return -ENOMEM;

	return 0;
}

s32 BPF_STRUCT_OPS_SLEEPABLE(dequeue_init)
{
	s32 ret;

	/* Create the shared user DSQ (-1: no NUMA node restriction) */
	ret = scx_bpf_create_dsq(SHARED_DSQ, -1);
	if (ret)
		return ret;

	return 0;
}

void BPF_STRUCT_OPS(dequeue_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SEC(".struct_ops.link")
struct sched_ext_ops dequeue_ops = {
	.select_cpu = (void *)dequeue_select_cpu,
	.enqueue = (void *)dequeue_enqueue,
	.dequeue = (void *)dequeue_dequeue,
	.dispatch = (void *)dequeue_dispatch,
	.init_task = (void *)dequeue_init_task,
	.init = (void *)dequeue_init,
	.exit = (void *)dequeue_exit,
	/*
	 * SCX_OPS_ENQ_LAST: pass the last runnable task on a CPU to
	 * ops.enqueue() instead of keeping it running by default.
	 */
	.flags = SCX_OPS_ENQ_LAST,
	.name = "dequeue_test",
};
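
/*
 * The companion userspace harness (a separate .c file in this selftest
 * suite) is expected to exercise each scenario in turn: load the
 * skeleton, set test_scenario, attach dequeue_ops, generate scheduling
 * activity, then check the counters, e.g. that dequeue_cnt stays zero
 * for scenarios 0/1/3/4 and that enqueues and dequeues pair 1:1 for
 * scenarios 2/5/6.
 */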