Searched refs:backoffs (Results 1 – 4 of 4) sorted by relevance
88 int backoffs; in jffs2_rtime_decompress() local
94 backoffs = positions[value]; in jffs2_rtime_decompress()
101 if (backoffs + repeat >= outpos) { in jffs2_rtime_decompress()
103 cpage_out[outpos++] = cpage_out[backoffs++]; in jffs2_rtime_decompress()
107 memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat); in jffs2_rtime_decompress()
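From the matches above, the rtime decompressor treats `backoffs` as an offset into the output already produced: when the referenced range overlaps the current write position it copies byte-by-byte so freshly written bytes can be reused, otherwise it falls back to memcpy. A minimal userspace sketch of that copy step follows; the function name `copy_backref` is illustrative and not taken from the kernel source.

```c
#include <stddef.h>
#include <string.h>

/*
 * Sketch of the back-reference copy visible in jffs2_rtime_decompress():
 * "backoffs" points into output that has already been emitted, "repeat"
 * is the number of bytes to replay from there.
 */
static void copy_backref(unsigned char *out, size_t *outpos,
			 size_t backoffs, size_t repeat)
{
	if (backoffs + repeat >= *outpos) {
		/* source overlaps destination: copy one byte at a time
		 * so later bytes can reuse ones written in this loop */
		while (repeat--)
			out[(*outpos)++] = out[backoffs++];
	} else {
		/* disjoint ranges: a bulk copy is safe */
		memcpy(&out[*outpos], &out[backoffs], repeat);
		*outpos += repeat;
	}
}
```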
365 struct rb_root backoffs; member
1726 spg->backoffs = RB_ROOT; in alloc_spg_mapping()
1733 WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs)); in free_spg_mapping()
1979 while (!RB_EMPTY_ROOT(&spg->backoffs)) { in DEFINE_RB_FUNCS()
1981 rb_entry(rb_first(&spg->backoffs), in DEFINE_RB_FUNCS()
1984 erase_backoff(&spg->backoffs, backoff); in DEFINE_RB_FUNCS()
2029 backoff = lookup_containing_backoff(&spg->backoffs, &hoid); in should_plug_request()
4431 insert_backoff(&spg->backoffs, backoff); in handle_backoff_block()
4487 erase_backoff(&spg->backoffs, backoff); in handle_backoff_unblock()
4491 if (RB_EMPTY_ROOT(&spg->backoffs)) { in handle_backoff_unblock()
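These matches show the Ceph OSD client keeping per-placement-group backoffs in an rb_root (`spg->backoffs`): entries are inserted when a block message arrives, looked up when deciding whether to plug a request, and the whole tree is drained by repeatedly taking the leftmost node. A sketch of that teardown pattern using the stock kernel rbtree API is below; `struct example_backoff` and its layout are assumptions for illustration, only the rb_root usage mirrors the results.

```c
#include <linux/rbtree.h>
#include <linux/slab.h>

/* Illustrative entry type: one node per blocked object range. */
struct example_backoff {
	struct rb_node rb_node;		/* linked into the backoffs tree */
	/* ... blocked range, backoff id, etc. ... */
};

/* Drain an rb_root of backoffs, leftmost-first, freeing each entry. */
static void drain_backoffs(struct rb_root *backoffs)
{
	while (!RB_EMPTY_ROOT(backoffs)) {
		struct example_backoff *backoff =
		    rb_entry(rb_first(backoffs),
			     struct example_backoff, rb_node);

		rb_erase(&backoff->rb_node, backoffs);
		kfree(backoff);
	}
}
```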
48 However, the Wound-Wait algorithm is typically stated to generate fewer backoffs
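The matched documentation line contrasts Wound-Wait with Wait-Die. As a hedged sketch of the two classic rules (stamps are monotonically assigned, older means smaller; names and types here are illustrative only): in Wait-Die the younger party backs off when it is the requester, whereas in Wound-Wait the back-off is pushed onto the younger holder and happens only when an older transaction actually contends, which is the usual argument for fewer backoffs.

```c
/* Who yields when a requester hits a lock held by someone else. */
enum outcome { REQUESTER_WAITS, REQUESTER_BACKS_OFF, HOLDER_BACKS_OFF };

/* Wait-Die: an older requester waits; a younger requester backs off. */
static enum outcome wait_die(unsigned long req_stamp, unsigned long holder_stamp)
{
	return req_stamp < holder_stamp ? REQUESTER_WAITS : REQUESTER_BACKS_OFF;
}

/* Wound-Wait: a younger requester waits; an older requester "wounds"
 * the younger holder, which then has to back off. */
static enum outcome wound_wait(unsigned long req_stamp, unsigned long holder_stamp)
{
	return req_stamp < holder_stamp ? HOLDER_BACKS_OFF : REQUESTER_WAITS;
}
```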