/linux/drivers/gpu/drm/i915/gvt/

  sched_policy.c
      83: vgpu_data = vgpu->sched_data;   in vgpu_update_timeslice()
      93: static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)   in gvt_balance_timeslice() argument
     107: list_for_each(pos, &sched_data->lru_runq_head) {   in gvt_balance_timeslice()
     112: list_for_each(pos, &sched_data->lru_runq_head) {   in gvt_balance_timeslice()
     121: list_for_each(pos, &sched_data->lru_runq_head) {   in gvt_balance_timeslice()
     162: vgpu_data = scheduler->next_vgpu->sched_data;   in try_to_schedule_next_vgpu()
     176: static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)   in find_busy_vgpu() argument
     180: struct list_head *head = &sched_data->lru_runq_head;   in find_busy_vgpu()
     211: static void tbs_sched_func(struct gvt_sched_data *sched_data)   in tbs_sched_func() argument
     213: struct intel_gvt *gvt = sched_data->gvt;   in tbs_sched_func()
     [all …]

  scheduler.h
      58: void *sched_data;   member

  gvt.h
     196: void *sched_data;   member
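The GVT hits above all follow one pattern: the struct declarations in gvt.h and scheduler.h expose an opaque void *sched_data slot, the time-based policy in sched_policy.c allocates its private per-vGPU and per-device state behind it, and each later callback (gvt_balance_timeslice(), find_busy_vgpu(), tbs_sched_func()) simply casts the pointer back to its own type. Below is a minimal userspace sketch of that opaque-slot pattern; the toy_* names are hypothetical stand-ins, not the real i915/GVT API.

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_vgpu {
        int id;
        void *sched_data;            /* opaque slot owned by the policy */
    };

    struct toy_sched_data {
        long timeslice_left;         /* private bookkeeping the core never sees */
    };

    static int toy_sched_init(struct toy_vgpu *vgpu, long slice)
    {
        struct toy_sched_data *sd = malloc(sizeof(*sd));

        if (!sd)
            return -1;
        sd->timeslice_left = slice;
        vgpu->sched_data = sd;       /* publish the private state */
        return 0;
    }

    static void toy_sched_tick(struct toy_vgpu *vgpu, long used)
    {
        struct toy_sched_data *sd = vgpu->sched_data;   /* cast back */

        sd->timeslice_left -= used;
        if (sd->timeslice_left <= 0) {
            printf("vgpu%d: timeslice expired, rebalancing\n", vgpu->id);
            sd->timeslice_left = 1000;   /* refill with a fixed toy budget */
        }
    }

    static void toy_sched_fini(struct toy_vgpu *vgpu)
    {
        free(vgpu->sched_data);
        vgpu->sched_data = NULL;
    }

    int main(void)
    {
        struct toy_vgpu v = { .id = 1 };

        if (toy_sched_init(&v, 500))
            return 1;
        toy_sched_tick(&v, 300);
        toy_sched_tick(&v, 300);     /* crosses zero, triggers the refill path */
        toy_sched_fini(&v);
        return 0;
    }
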
/linux/net/netfilter/ipvs/

  ip_vs_rr.c
      28: svc->sched_data = &svc->destinations;   in ip_vs_rr_init_svc()
      38: p = (struct list_head *) svc->sched_data;   in ip_vs_rr_del_dest()
      43: svc->sched_data = p->next->prev;   in ip_vs_rr_del_dest()
      63: p = (struct list_head *) svc->sched_data;   in ip_vs_rr_schedule()
      89: svc->sched_data = &dest->n_list;   in ip_vs_rr_schedule()

  ip_vs_dh.c
     161: svc->sched_data = s;   in ip_vs_dh_init_svc()
     175: struct ip_vs_dh_state *s = svc->sched_data;   in ip_vs_dh_done_svc()
     190: struct ip_vs_dh_state *s = svc->sched_data;   in ip_vs_dh_dest_changed()
     221: s = (struct ip_vs_dh_state *) svc->sched_data;   in ip_vs_dh_schedule()

  ip_vs_wrr.c
     121: svc->sched_data = mark;   in ip_vs_wrr_init_svc()
     129: struct ip_vs_wrr_mark *mark = svc->sched_data;   in ip_vs_wrr_done_svc()
     141: struct ip_vs_wrr_mark *mark = svc->sched_data;   in ip_vs_wrr_dest_changed()
     164: struct ip_vs_wrr_mark *mark = svc->sched_data;   in ip_vs_wrr_schedule()

  ip_vs_sh.c
     237: svc->sched_data = s;   in ip_vs_sh_init_svc()
     251: struct ip_vs_sh_state *s = svc->sched_data;   in ip_vs_sh_done_svc()
     266: struct ip_vs_sh_state *s = svc->sched_data;   in ip_vs_sh_dest_changed()
     324: s = (struct ip_vs_sh_state *) svc->sched_data;   in ip_vs_sh_schedule()

  ip_vs_lblc.c
     230: struct ip_vs_lblc_table *tbl = svc->sched_data;   in ip_vs_lblc_flush()
     257: struct ip_vs_lblc_table *tbl = svc->sched_data;   in ip_vs_lblc_full_check()
     354: svc->sched_data = tbl;   in ip_vs_lblc_init_svc()
     383: struct ip_vs_lblc_table *tbl = svc->sched_data;   in ip_vs_lblc_done_svc()
     483: struct ip_vs_lblc_table *tbl = svc->sched_data;   in ip_vs_lblc_schedule()

  ip_vs_mh.c
     415: svc->sched_data = s;   in ip_vs_mh_init_svc()
     421: struct ip_vs_mh_state *s = svc->sched_data;   in ip_vs_mh_done_svc()
     434: struct ip_vs_mh_state *s = svc->sched_data;   in ip_vs_mh_dest_changed()
     489: s = (struct ip_vs_mh_state *)svc->sched_data;   in ip_vs_mh_schedule()

  ip_vs_lblcr.c
     396: struct ip_vs_lblcr_table *tbl = svc->sched_data;   in ip_vs_lblcr_flush()
     422: struct ip_vs_lblcr_table *tbl = svc->sched_data;   in ip_vs_lblcr_full_check()
     517: svc->sched_data = tbl;   in ip_vs_lblcr_init_svc()
     546: struct ip_vs_lblcr_table *tbl = svc->sched_data;   in ip_vs_lblcr_done_svc()
     647: struct ip_vs_lblcr_table *tbl = svc->sched_data;   in ip_vs_lblcr_schedule()
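The IPVS hits all implement the same contract: each scheduler module keeps whatever state it needs behind svc->sched_data — a cursor into the destination list for rr, hash state for dh/sh/mh, a struct ip_vs_wrr_mark for wrr, lookup tables for lblc/lblcr — installing it in its init_svc hook and casting it back in done_svc, dest_changed and schedule. Below is a userspace sketch of the round-robin case, modeling ip_vs_rr's "cursor stored in sched_data" idea over a plain array; toy_svc, toy_dest and the function names are hypothetical, not the kernel's struct ip_vs_service API.

    #include <stdio.h>

    struct toy_dest {
        const char *addr;
    };

    struct toy_svc {
        struct toy_dest *dests;
        int ndests;
        void *sched_data;                /* scheduler-private data: here, a cursor */
    };

    /* init_svc: start the cursor at the first destination */
    static void toy_rr_init_svc(struct toy_svc *svc)
    {
        svc->sched_data = &svc->dests[0];
    }

    /* schedule: hand out the destination under the cursor, then advance it */
    static struct toy_dest *toy_rr_schedule(struct toy_svc *svc)
    {
        struct toy_dest *cur = svc->sched_data;
        int next = (int)(cur - svc->dests + 1) % svc->ndests;

        svc->sched_data = &svc->dests[next];
        return cur;
    }

    int main(void)
    {
        struct toy_dest d[] = { { "10.0.0.1" }, { "10.0.0.2" }, { "10.0.0.3" } };
        struct toy_svc svc = { .dests = d, .ndests = 3 };

        toy_rr_init_svc(&svc);
        for (int i = 0; i < 5; i++)
            printf("picked %s\n", toy_rr_schedule(&svc)->addr);   /* .1 .2 .3 .1 .2 */
        return 0;
    }
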
/linux/block/

  bfq-wf2q.c
     156: group_sd = next_in_service->sched_data;   in bfq_update_parent_budget()
     158: bfqg = container_of(group_sd, struct bfq_group, sched_data);   in bfq_update_parent_budget()
     223: struct bfq_sched_data *sd = entity->sched_data;   in bfq_inc_active_entities()
     224: struct bfq_group *bfqg = container_of(sd, struct bfq_group, sched_data);   in bfq_inc_active_entities()
     232: struct bfq_sched_data *sd = entity->sched_data;   in bfq_dec_active_entities()
     233: struct bfq_group *bfqg = container_of(sd, struct bfq_group, sched_data);   in bfq_dec_active_entities()
     646: entity == entity->sched_data->in_service_entity);   in bfq_put_idle_entity()
     676: struct bfq_sched_data *sched_data = entity->sched_data;   in bfq_entity_service_tree() local
     679: return sched_data->service_tree + idx;   in bfq_entity_service_tree()
     988: struct bfq_sched_data *sd = entity->sched_data;   in __bfq_requeue_entity()
     [all …]

  bfq-cgroup.c
     433: entity->sched_data = &bfqg->sched_data;   in bfq_init_entity()
     538: entity->my_sched_data = &bfqg->sched_data;   in bfq_pd_init()
     573: entity->sched_data = &parent->sched_data;   in bfq_group_set_parent()
     684: entity->sched_data = &bfqg->sched_data;   in bfq_bfqq_move()
     713: if (sync_bfqq->entity.sched_data != &bfqg->sched_data)   in bfq_sync_bfqq_move()
     724: if (bfqq->entity.sched_data != &bfqg->sched_data)   in bfq_sync_bfqq_move()
     763: async_bfqq->entity.sched_data != &bfqg->sched_data) {   in __bfq_bic_change_cgroup()
     857: if (bfqg->sched_data.in_service_entity)   in bfq_reparent_active_queues()
     859: bfqg->sched_data.in_service_entity,   in bfq_reparent_active_queues()
     890: st = bfqg->sched_data.service_tree + i;   in bfq_pd_offline()
     [all …]

  kyber-iosched.c
     507: hctx->sched_data = khd;   in kyber_init_hctx()
     521: struct kyber_hctx_data *khd = hctx->sched_data;   in kyber_exit_hctx()
     527: kfree(hctx->sched_data);   in kyber_exit_hctx()
     572: struct kyber_hctx_data *khd = hctx->sched_data;   in kyber_bio_merge()
     594: struct kyber_hctx_data *khd = hctx->sched_data;   in kyber_insert_requests()
     806: struct kyber_hctx_data *khd = hctx->sched_data;   in kyber_dispatch_request()
     851: struct kyber_hctx_data *khd = hctx->sched_data;   in kyber_has_work()
     914: struct kyber_hctx_data *khd = hctx->sched_data; \
     924: struct kyber_hctx_data *khd = hctx->sched_data; \
     933: struct kyber_hctx_data *khd = hctx->sched_data; \
     [all …]

  bfq-iosched.h
     202: struct bfq_sched_data *sched_data;   member
    1010: struct bfq_sched_data sched_data;   member
    1030: struct bfq_sched_data sched_data;   member

  blk-mq-sched.c
     535: if (e->type->ops.exit_hctx && hctx->sched_data) {   in blk_mq_exit_sched()
     537: hctx->sched_data = NULL;   in blk_mq_exit_sched()

  bfq-iosched.c
     593: struct bfq_sched_data *sched_data;   in bfqq_request_over_limit() local
     615: sched_data = entity->sched_data;   in bfqq_request_over_limit()
     648: sched_data->service_tree[i].wsum;   in bfqq_request_over_limit()
    7210: root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;   in bfq_init_root_group()
    7211: root_group->sched_data.bfq_class_idle_last_service = jiffies;   in bfq_init_root_group()
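The block-layer hits split into two flavors of the same idea. Kyber (and the generic teardown in blk-mq-sched.c) treats hctx->sched_data as an opaque per-hardware-queue slot: kyber_init_hctx() allocates a struct kyber_hctx_data and stores it there, the dispatch-side hooks cast it back, and kyber_exit_hctx()/blk_mq_exit_sched() free and clear it. BFQ instead points entity->sched_data at a struct bfq_sched_data embedded inside the owning bfq_group, so the group can be recovered from that pointer with container_of(). Below is a small sketch of the second, container_of() half; the toy_* names are hypothetical and only the idiom mirrors the BFQ code.

    #include <stdio.h>
    #include <stddef.h>

    /* simplified container_of(): recover the struct that embeds `member' */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct toy_sched_data {
        int in_service;                     /* stand-in for in_service_entity etc. */
    };

    struct toy_group {
        const char *name;
        struct toy_sched_data sched_data;   /* embedded, as in struct bfq_group */
    };

    struct toy_entity {
        struct toy_sched_data *sched_data;  /* points into the parent group */
    };

    int main(void)
    {
        struct toy_group grp = { .name = "root" };
        struct toy_entity ent = { .sched_data = &grp.sched_data };

        /* recover the owning group from the entity, bfq_update_parent_budget()-style */
        struct toy_group *parent =
            container_of(ent.sched_data, struct toy_group, sched_data);

        printf("entity belongs to group \"%s\"\n", parent->name);
        return 0;
    }
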
/linux/include/linux/

  blk-mq.h
     355: void *sched_data;   member
/linux/include/net/

  ip_vs.h
     697: void *sched_data;   /* scheduler application data */   member
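Both header hits show why the member is declared as a bare void *: the core structures (struct blk_mq_hw_ctx and struct ip_vs_service) are shared by every pluggable scheduler, so the core can only offer an opaque slot next to an ops table, and each scheduler supplies the concrete type behind it. A hypothetical toy_* sketch of that slot-plus-ops pairing, not the blk-mq or ip_vs API:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_svc;                          /* shared core object */

    struct toy_sched_ops {
        const char *name;
        int  (*init)(struct toy_svc *svc);   /* allocate and install sched_data */
        void (*done)(struct toy_svc *svc);   /* tear sched_data down again */
    };

    struct toy_svc {
        const struct toy_sched_ops *ops;
        void *sched_data;                    /* scheduler application data */
    };

    /* one concrete scheduler whose private state is just a request counter */
    static int counter_init(struct toy_svc *svc)
    {
        int *count = calloc(1, sizeof(*count));

        if (!count)
            return -1;
        svc->sched_data = count;
        return 0;
    }

    static void counter_done(struct toy_svc *svc)
    {
        printf("%s handled %d request(s)\n", svc->ops->name,
               *(int *)svc->sched_data);
        free(svc->sched_data);
        svc->sched_data = NULL;
    }

    static const struct toy_sched_ops counter_ops = {
        .name = "counter",
        .init = counter_init,
        .done = counter_done,
    };

    int main(void)
    {
        struct toy_svc svc = { .ops = &counter_ops };

        if (svc.ops->init(&svc))
            return 1;
        (*(int *)svc.sched_data)++;          /* pretend one request was scheduled */
        svc.ops->done(&svc);
        return 0;
    }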