Lines matching refs:WRITE in the block-layer throttling code (blk-throttle); sketches of the recurring READ/WRITE patterns follow the listing.

263 INIT_LIST_HEAD(&sq->queued[WRITE]); in throtl_service_queue_init()
286 for (rw = READ; rw <= WRITE; rw++) { in throtl_pd_alloc()
293 tg->bps[WRITE] = U64_MAX; in throtl_pd_alloc()
295 tg->iops[WRITE] = UINT_MAX; in throtl_pd_alloc()
343 for (rw = READ; rw <= WRITE; rw++) { in tg_update_has_rules()
759 __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]); in tg_update_carryover()
763 bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]); in tg_update_carryover()
976 bio = throtl_peek_queued(&sq->queued[WRITE]); in tg_update_disptime()
1065 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && in throtl_dispatch_tg()
1068 tg_dispatch_one_bio(tg, WRITE); in throtl_dispatch_tg()
1099 if (sq_queued(sq, READ) || sq_queued(sq, WRITE)) in throtl_select_dispatch()
1154 unsigned int __maybe_unused bio_cnt_w = sq_queued(sq, WRITE); in throtl_pending_timer_fn()
1219 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_dispatch_work_fn()
1276 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1277 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1308 throtl_start_new_slice(tg, WRITE, false); in tg_conf_updated()
1457 .private = offsetof(struct throtl_grp, bps[WRITE]),
1469 .private = offsetof(struct throtl_grp, iops[WRITE]),
1511 tg->bps[WRITE] == bps_dft && in tg_prfill_limit()
1513 tg->iops[WRITE] == iops_dft) in tg_prfill_limit()
1522 if (tg->bps[WRITE] == U64_MAX) in tg_prfill_limit()
1525 seq_printf(sf, " wbps=%llu", tg->bps[WRITE]); in tg_prfill_limit()
1532 if (tg->iops[WRITE] == UINT_MAX) in tg_prfill_limit()
1535 seq_printf(sf, " wiops=%u", tg->iops[WRITE]); in tg_prfill_limit()
1577 v[1] = tg->bps[WRITE]; in tg_set_limit()
1579 v[3] = tg->iops[WRITE]; in tg_set_limit()
1617 tg->bps[WRITE] = v[1]; in tg_set_limit()
1619 tg->iops[WRITE] = v[3]; in tg_set_limit()
1819 sq_queued(sq, READ), sq_queued(sq, WRITE)); in __blk_throtl_bio()
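
The throtl_service_queue_init(), throtl_pd_alloc() and tg_update_has_rules() matches above all lean on the same convention: READ is 0 and WRITE is 1, so per-direction state lives in two-element arrays, "for (rw = READ; rw <= WRITE; rw++)" walks both directions, and U64_MAX / UINT_MAX stand for "no limit". Below is a minimal userspace sketch of that convention; the demo_* struct and helper names are hypothetical, not the kernel's.

/*
 * Minimal userspace sketch (hypothetical demo_* names, not kernel code)
 * of the READ/WRITE indexing convention: per-direction limits in
 * two-element arrays, with UINT64_MAX / UINT_MAX meaning "unlimited",
 * mirroring the defaults set in throtl_pd_alloc().
 */
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define READ  0
#define WRITE 1

struct demo_tg {
        uint64_t bps[2];        /* bytes/s limit, per direction */
        unsigned int iops[2];   /* I/Os per second limit, per direction */
        bool has_rules[2];      /* is this direction throttled at all? */
};

static void demo_tg_init(struct demo_tg *tg)
{
        int rw;

        for (rw = READ; rw <= WRITE; rw++) {
                tg->bps[rw] = UINT64_MAX;       /* "max": unlimited */
                tg->iops[rw] = UINT_MAX;
                tg->has_rules[rw] = false;
        }
}

static void demo_tg_update_has_rules(struct demo_tg *tg)
{
        int rw;

        /* a direction has rules iff either of its limits is finite */
        for (rw = READ; rw <= WRITE; rw++)
                tg->has_rules[rw] = tg->bps[rw] != UINT64_MAX ||
                                    tg->iops[rw] != UINT_MAX;
}

int main(void)
{
        struct demo_tg tg;

        demo_tg_init(&tg);
        tg.bps[WRITE] = 10ULL << 20;            /* cap writes at 10 MiB/s */
        demo_tg_update_has_rules(&tg);
        printf("read throttled: %d, write throttled: %d\n",
               tg.has_rules[READ], tg.has_rules[WRITE]);
        return 0;
}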
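
throtl_dispatch_tg(), matched above through its WRITE-queue loop, drains a bounded batch of queued READ bios and then a bounded batch of queued WRITE bios each round, so neither direction can starve the other. The sketch below shows only that shape: DEMO_QUANTUM and the read-biased split are assumed values for illustration, and the demo_* names are hypothetical.

/*
 * Userspace sketch of the per-round dispatch shape: reads first, then
 * writes, each capped. The quantum and 3/4 read bias are assumptions,
 * not values taken from blk-throttle.c.
 */
#include <stdio.h>

#define READ  0
#define WRITE 1
#define DEMO_QUANTUM 8          /* assumed: bios dispatched per round */

struct demo_queue {
        int nr_queued;          /* bios waiting in this direction */
};

/* pretend to push one bio downstream; returns 0 when the queue is empty */
static int demo_dispatch_one(struct demo_queue *q)
{
        if (q->nr_queued == 0)
                return 0;
        q->nr_queued--;
        return 1;
}

/* one dispatch round over queued[READ] and queued[WRITE] */
static int demo_dispatch_round(struct demo_queue queued[2])
{
        int max_reads = DEMO_QUANTUM * 3 / 4;   /* bias the batch toward reads */
        int max_writes = DEMO_QUANTUM - max_reads;
        int nr_reads = 0, nr_writes = 0;

        while (nr_reads < max_reads && demo_dispatch_one(&queued[READ]))
                nr_reads++;
        while (nr_writes < max_writes && demo_dispatch_one(&queued[WRITE]))
                nr_writes++;

        return nr_reads + nr_writes;
}

int main(void)
{
        struct demo_queue queued[2] = { { .nr_queued = 10 }, { .nr_queued = 10 } };

        printf("dispatched %d bios this round\n", demo_dispatch_round(queued));
        return 0;
}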
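
The two ".private = offsetof(struct throtl_grp, ...)" matches above store a field offset as the control file's private value, so one generic handler can serve both the READ and the WRITE variant of a limit file. A small sketch of that pattern, with hypothetical demo_* names rather than the kernel's cftype machinery:

/*
 * Sketch of the offsetof()-based pattern: the per-file offset selects
 * which field inside the struct a shared accessor operates on.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_tg {
        uint64_t bps[2];        /* [0] = READ, [1] = WRITE */
        unsigned int iops[2];
};

/* generic 64-bit getter: 'off' selects the field, e.g. bps[WRITE] */
static uint64_t demo_read_u64(struct demo_tg *tg, size_t off)
{
        return *(uint64_t *)((char *)tg + off);
}

int main(void)
{
        struct demo_tg tg = { .bps = { 100, 200 } };

        /* the same accessor serves both directions via different offsets */
        printf("rbps=%llu wbps=%llu\n",
               (unsigned long long)demo_read_u64(&tg, offsetof(struct demo_tg, bps[0])),
               (unsigned long long)demo_read_u64(&tg, offsetof(struct demo_tg, bps[1])));
        return 0;
}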
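
tg_prfill_limit() prints a WRITE limit still at its U64_MAX / UINT_MAX default as the keyword "max" rather than a number, and tg_set_limit() accepts the same rbps/wbps/riops/wiops tokens back, collected into v[0]..v[3] in the matches above. The formatter below is a hedged sketch of that output convention only; it is not the kernel's seq_file code, and the device identifier the real file prints before the tokens is omitted.

/*
 * Sketch of the "max means unlimited" formatting: print the four limit
 * tokens in the rbps/wbps/riops/wiops order used by the limit file.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static void demo_print_limits(uint64_t rbps, uint64_t wbps,
                              unsigned int riops, unsigned int wiops)
{
        if (rbps == UINT64_MAX)
                printf(" rbps=max");
        else
                printf(" rbps=%llu", (unsigned long long)rbps);

        if (wbps == UINT64_MAX)
                printf(" wbps=max");
        else
                printf(" wbps=%llu", (unsigned long long)wbps);

        if (riops == UINT_MAX)
                printf(" riops=max");
        else
                printf(" riops=%u", riops);

        if (wiops == UINT_MAX)
                printf(" wiops=max");
        else
                printf(" wiops=%u", wiops);

        printf("\n");
}

int main(void)
{
        /* write-side caps only, read side left unlimited */
        demo_print_limits(UINT64_MAX, 8ULL << 20, UINT_MAX, 1200);
        return 0;
}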