Searched refs:backoffs (Results 1 – 4 of 4) sorted by relevance
88 int backoffs; in jffs2_rtime_decompress() local
94 backoffs = positions[value]; in jffs2_rtime_decompress()
98 if (backoffs + repeat >= outpos) { in jffs2_rtime_decompress()
100 cpage_out[outpos++] = cpage_out[backoffs++]; in jffs2_rtime_decompress()
104 memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat); in jffs2_rtime_decompress()
1742 spg->backoffs = RB_ROOT; in alloc_spg_mapping()
1749 WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs)); in free_spg_mapping()
1995 while (!RB_EMPTY_ROOT(&spg->backoffs)) { in DEFINE_RB_FUNCS()
1997 rb_entry(rb_first(&spg->backoffs), in DEFINE_RB_FUNCS()
2000 erase_backoff(&spg->backoffs, backoff); in DEFINE_RB_FUNCS()
2045 backoff = lookup_containing_backoff(&spg->backoffs, &hoid); in should_plug_request()
4444 insert_backoff(&spg->backoffs, backoff); in handle_backoff_block()
4500 erase_backoff(&spg->backoffs, backoff); in handle_backoff_unblock()
4504 if (RB_EMPTY_ROOT(&spg->backoffs)) { in handle_backoff_unblock()
365 struct rb_root backoffs; member
48 However, the Wound-Wait algorithm is typically stated to generate fewer backoffs