/linux/block/

blk-mq.c
     97  if (rq->rq_flags & RQF_IO_STAT &&          in blk_mq_check_in_driver()
    424  data->rq_flags |= RQF_PM;                  in blk_mq_rq_ctx_init()
    425  rq->rq_flags = data->rq_flags;             in blk_mq_rq_ctx_init()
    427  if (data->rq_flags & RQF_SCHED_TAGS) {     in blk_mq_rq_ctx_init()
    450  if (rq->rq_flags & RQF_USE_SCHED) {        in blk_mq_rq_ctx_init()
    492  if (!(data->rq_flags & RQF_SCHED_TAGS))    in __blk_mq_alloc_requests_batch()
    515  data->rq_flags |= RQF_SCHED_TAGS;          in blk_mq_limit_depth()
    526  data->rq_flags |= RQF_USE_SCHED;           in blk_mq_limit_depth()
    557  data->rq_flags |= RQF_RESV;                in __blk_mq_alloc_requests()
    590  if (!(data->rq_flags & RQF_SCHED_TAGS))    in __blk_mq_alloc_requests()
    [all …]
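In the block layer, every hit above follows one convention: rq_flags is a req_flags_t bitmask embedded in struct request (and staged in struct blk_mq_alloc_data during allocation), tested and updated with plain bitwise operators because only the current owner of a request touches it. A minimal sketch of that pattern, using the real RQF_IO_STAT and RQF_QUIET flags from include/linux/blk-mq.h; the completion helper itself is hypothetical:

```c
#include <linux/blk-mq.h>
#include <linux/printk.h>

/* Hypothetical completion path showing plain (non-atomic) rq_flags tests. */
static void example_complete(struct request *rq, blk_status_t status)
{
	/* RQF_IO_STAT was decided at allocation time; honor it on completion */
	if (rq->rq_flags & RQF_IO_STAT)
		pr_debug("accounted request completed\n");

	/* RQF_QUIET asks the lower layers not to log failures */
	if (status != BLK_STS_OK && !(rq->rq_flags & RQF_QUIET))
		pr_err("request failed\n");
}
```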
|
blk-pm.h
     21  if (rq->q->dev && !(rq->rq_flags & RQF_PM))    in blk_pm_mark_last_busy()

blk-mq.h
    166  req_flags_t rq_flags;                          (member)
    234  if (data->rq_flags & RQF_SCHED_TAGS)           in blk_mq_tags_from_data()

blk-timeout.c
    140  req->rq_flags &= ~RQF_TIMED_OUT;               in blk_add_timer()

elevator.c
    168  rq->rq_flags &= ~RQF_HASHED;                   in __elv_rqhash_del()
    184  rq->rq_flags |= RQF_HASHED;                    in elv_rqhash_add()
|
/linux/kernel/sched/

sched.h
   1811  struct rq_flags {                                                      (struct)
   1866  static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)    in rq_pin_lock()
   1875  static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)  in rq_unpin_lock()
   1884  static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)  in rq_repin_lock()
   1895  extern struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires_ret;
   1898  extern struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
   1902  __task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)  in __task_rq_unlock()
   1910  task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)  in task_rq_unlock()
   1920  struct rq *rq; struct rq_flags rf)
   1927  struct rq *rq; struct rq_flags rf)
   [all …]
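In kernel/sched/ the same identifier names something unrelated: struct rq_flags is not a bitmask but a small carrier for saved IRQ state and a lockdep pin cookie that must travel with every runqueue lock/unlock pair (the bare `struct rq *rq; struct rq_flags rf)` hits at 1920 and 1927 appear to be fragments of the lock-guard definitions). A sketch of the canonical pairing, assuming the task_rq_lock()/task_rq_unlock() helpers matched above; the surrounding function is hypothetical and would live inside kernel/sched/:

```c
/* Assumes kernel/sched/sched.h context; example_inspect_task() is hypothetical. */
static void example_inspect_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	/* Locks p's runqueue, disables IRQs, and pins the lock via rf. */
	rq = task_rq_lock(p, &rf);

	/* ... examine or update p's scheduling state under the rq lock ... */

	/* Unpins and unlocks, restoring the IRQ state saved in rf. */
	task_rq_unlock(rq, p, &rf);
}
```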
|
stop_task.c
     19  balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  in balance_stop()
     35  static struct task_struct *pick_task_stop(struct rq *rq, struct rq_flags *rf)  in pick_task_stop()

core.c
    717  struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf)  in ___task_rq_lock()
    740  struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)  in _task_rq_lock()
    888  struct rq_flags rf;  in hrtick()
    914  struct rq_flags rf;  in __hrtick_start()
   1851  struct rq_flags rf;  in uclamp_update_active()
   2248  struct rq_flags rf;  in wait_task_inactive()
   2448  static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,  in move_queued_task()
   2495  static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,  in __migrate_task()
   2520  struct rq_flags rf;  in migration_cpu_stop()
   2861  static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,  in affine_move_task()
   [all …]

idle.c
    453  balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  in balance_idle()
    489  struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)  in pick_task_idle()

deadline.c
   1130  static void __push_dl_task(struct rq *rq, struct rq_flags *rf)  in __push_dl_task()
   1156  struct rq_flags *rf = &scope.rf;  in dl_server_timer()
   1226  struct rq_flags rf;  in dl_task_timer()
   1966  struct rq_flags rf;  in inactive_task_timer()
   2457  struct rq_flags rf;  in migrate_task_rq_dl()
   2508  static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)  in balance_dl()
   2602  static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)  in __pick_task_dl()
   2629  static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)  in pick_task_dl()
   3187  struct rq_flags rf;  in dl_add_task_root_domain()

core_sched.c
     59  struct rq_flags rf;  in sched_core_update_cookie()

stats.h
    209  struct rq_flags rf;  in psi_ttwu_dequeue()

psi.c
   1058  struct rq_flags rf;  in psi_memstall_enter()
   1089  struct rq_flags rf;  in psi_memstall_leave()
   1158  struct rq_flags rf;  in cgroup_move_task()

ext.c
    514  struct rq_flags rf;
   2457  do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)  in do_pick_task_scx()
   2520  static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)  in pick_task_scx()
   2532  ext_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)  in ext_server_pick_task()
   2729  struct rq_flags rf;  in check_rq_for_timeouts()
   2902  struct rq_flags rf;  in scx_init_task()
   3046  struct rq_flags rf;  in scx_post_fork()
   3066  struct rq_flags rf;  in scx_cancel_fork()
   3125  struct rq_flags rf;  in sched_ext_dead()
   4673  struct rq_flags rf;  in scx_dump_state()
   [all …]
|
/linux/include/linux/sunrpc/

svc.h
    219  unsigned long rq_flags;   /* flags field */
    257  /* bits for rq_flags */
    319  set_bit(RQ_VICTIM, &rqstp->rq_flags);   in svc_thread_should_stop()
    321  return test_bit(RQ_VICTIM, &rqstp->rq_flags);
    216  unsigned long rq_flags;   /* flags field */   (member)
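sunrpc uses a third convention: rq_flags in struct svc_rqst is an unsigned long treated as an atomic bitmap, indexed by the RQ_* bit numbers ("bits for rq_flags" above) and accessed with the set_bit()/test_bit() family because threads other than the owner can modify it concurrently. A hypothetical illustration using the real RQ_SECURE and RQ_DROPME bits:

```c
#include <linux/sunrpc/svc.h>

/* Hypothetical helper showing atomic bit ops on rqstp->rq_flags. */
static bool example_check_request(struct svc_rqst *rqstp, bool from_secure_port)
{
	if (from_secure_port)
		set_bit(RQ_SECURE, &rqstp->rq_flags);
	else
		clear_bit(RQ_SECURE, &rqstp->rq_flags);

	/* test_and_clear_bit() consumes a one-shot flag atomically */
	return test_and_clear_bit(RQ_DROPME, &rqstp->rq_flags);
}
```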
|
/linux/drivers/scsi/

scsi_lib.c
    118  if (rq->rq_flags & RQF_DONTPREP) {   in scsi_mq_requeue_cmd()
    119  rq->rq_flags &= ~RQF_DONTPREP;   in scsi_mq_requeue_cmd()
    327  req->rq_flags |= RQF_QUIET;   in scsi_execute_cmd()
    747  if (!(rq->rq_flags & RQF_MIXED_MERGE))   in scsi_rq_err_bytes()
    931  if (!(req->rq_flags & RQF_QUIET)) {   in scsi_io_completion_action()
   1022  else if (req->rq_flags & RQF_QUIET)   in scsi_io_completion_nz_result()
   1269  if (rq->rq_flags & RQF_DONTPREP) {   in scsi_cleanup_rq()
   1271  rq->rq_flags &= ~RQF_DONTPREP;   in scsi_cleanup_rq()
   1350  if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))   in scsi_device_state_check()
   1358  if (req && !(req->rq_flags & RQF_PM))   in scsi_device_state_check()
   [all …]
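The scsi hits trace the RQF_DONTPREP lifecycle: the flag is set once command preparation has been done, and must be cleared when a request is requeued or cleaned up so that preparation runs again from scratch, while RQF_QUIET suppresses error logging for expected failures. A minimal sketch of the requeue half, assuming blk_mq_requeue_request(); the teardown comment stands in for driver-specific unprep work:

```c
#include <linux/blk-mq.h>

/* Hypothetical requeue path: undo one-time prep so it reruns on reissue. */
static void example_requeue(struct request *rq)
{
	if (rq->rq_flags & RQF_DONTPREP) {
		rq->rq_flags &= ~RQF_DONTPREP;
		/* driver-specific teardown of the prepared command goes here */
	}
	blk_mq_requeue_request(rq, true);	/* true: kick the requeue list now */
}
```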
|
/linux/drivers/nvme/host/

ioctl.c
    102  struct nvme_command *cmd, blk_opf_t rq_flags,   in nvme_alloc_user_request()   (argument)
    107  req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);   in nvme_alloc_user_request()
    458  blk_opf_t rq_flags = 0;   in nvme_uring_cmd_io()   (local)
    509  rq_flags |= REQ_NOWAIT;   in nvme_uring_cmd_io()
    513  rq_flags |= REQ_POLLED;   in nvme_uring_cmd_io()
    515  req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);   in nvme_uring_cmd_io()
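In the nvme ioctl path the name is reused yet again, this time for a plain local: rq_flags is a blk_opf_t accumulating REQ_* operation flags that are OR'd into the opcode passed to blk_mq_alloc_request(). A hypothetical sketch of that flow; REQ_NOWAIT, REQ_POLLED, REQ_OP_DRV_IN, and blk_mq_alloc_request() are the real kernel interfaces:

```c
#include <linux/blk-mq.h>

/* Hypothetical allocator mirroring nvme_uring_cmd_io()'s flag handling. */
static struct request *example_alloc(struct request_queue *q, bool nowait, bool poll)
{
	blk_opf_t rq_flags = 0;

	if (nowait)
		rq_flags |= REQ_NOWAIT;		/* fail fast instead of sleeping */
	if (poll)
		rq_flags |= REQ_POLLED;		/* complete by polling, not IRQ */

	return blk_mq_alloc_request(q, REQ_OP_DRV_IN | rq_flags, 0);
}
```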
|
/linux/drivers/mmc/core/

queue.c
    243  req->rq_flags |= RQF_QUIET;   in mmc_mq_queue_rq()
    291  if (!(req->rq_flags & RQF_DONTPREP)) {   in mmc_mq_queue_rq()
    293  req->rq_flags |= RQF_DONTPREP;   in mmc_mq_queue_rq()
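The mmc hits are the other half of the RQF_DONTPREP idiom sketched under scsi_lib.c above: a prepare-once guard inside ->queue_rq(), so expensive setup survives requeues until something clears the flag. A hypothetical fragment:

```c
#include <linux/blk-mq.h>

/* Hypothetical ->queue_rq() fragment: prepare a request exactly once. */
static void example_queue_rq(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		/* one-time, possibly expensive preparation goes here */
		req->rq_flags |= RQF_DONTPREP;
	}
	/* issue the (now prepared) request to the hardware */
}
```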
|
/linux/drivers/net/ethernet/fungible/funcore/

fun_queue.c
    459  funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ;   in fun_alloc_queue()
    491  rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0,   in fun_create_rq()
|
/linux/net/sunrpc/

svc_xprt.c
    419  if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {   in svc_xprt_reserve_slot()
    423  set_bit(RQ_DATA, &rqstp->rq_flags);   in svc_xprt_reserve_slot()
    431  if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {   in svc_xprt_release_slot()
   1222  if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))   in svc_defer()
   1253  set_bit(RQ_DROPME, &rqstp->rq_flags);   in svc_deferred_recv()

svcsock.c
    395  set_bit(RQ_SECURE, &rqstp->rq_flags);   in svc_sock_secure_port()
    397  clear_bit(RQ_SECURE, &rqstp->rq_flags);   in svc_sock_secure_port()
   1184  set_bit(RQ_LOCAL, &rqstp->rq_flags);   in svc_tcp_recvfrom()
   1186  clear_bit(RQ_LOCAL, &rqstp->rq_flags);   in svc_tcp_recvfrom()
|
/linux/drivers/scsi/device_handler/

scsi_dh_hp_sw.c
    191  req->rq_flags |= RQF_QUIET;   in hp_sw_prep_fn()
|
/linux/fs/nfsd/

nfscache.c
    523  if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)   in nfsd_cache_lookup()
    619  rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);   in nfsd_cache_update()

nfs4idmap.c
    669  WARN_ON_ONCE(test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags));   in nfsd_map_name_to_uid()
    706  WARN_ON_ONCE(test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags));   in nfsd_map_name_to_gid()

nfssvc.c
    914  set_bit(RQ_VICTIM, &rqstp->rq_flags);   in nfsd()
   1013  if (test_bit(RQ_DROPME, &rqstp->rq_flags))   in nfsd_dispatch()
|