Lines Matching full:epoch

1166 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
1168 * @epoch: Epoch object.
1169 * @ev: Epoch event.
1172 struct drbd_epoch *epoch,
1183 epoch_size = atomic_read(&epoch->epoch_size);
1187 atomic_dec(&epoch->active);
1190 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1198 atomic_read(&epoch->active) == 0 &&
1199 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
1202 drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
1208 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
1209 dec_unacked(epoch->connection);
1212 if (connection->current_epoch != epoch) {
1213 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1214 list_del(&epoch->list);
1217 kfree(epoch);
1222 epoch->flags = 0;
1223 atomic_set(&epoch->epoch_size, 0);
1224 /* atomic_set(&epoch->active, 0); is already zero */
1233 epoch = next_epoch;
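
Taken together, these matches from drbd_may_finish_epoch() show the bookkeeping that decides when an epoch is done: each event decrements or tests the active count, a received barrier number sets DE_HAVE_BARRIER_NUMBER, and once nothing is active the epoch is acknowledged via drbd_send_b_ack() and either freed or recycled as the new current epoch. Below is a minimal sketch of that state, reconstructed only from the fields and symbols visible above; the struct layout and the enum values are assumptions, not the authoritative DRBD definitions.

#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/bitops.h>

/* Epoch events as passed to drbd_may_finish_epoch() in the lines above.
 * The numeric values are illustrative; only the fact that EV_CLEANUP is
 * OR-able with the other events is implied by the call sites at 2055
 * and 2531. */
enum epoch_event_sketch {
	EV_PUT            = 0,
	EV_GOT_BARRIER_NR = 1,
	EV_CLEANUP        = 0x10,	/* flag, combined with the events above */
};

/* Bit set once the barrier packet for this epoch has arrived (line 1190). */
#define DE_HAVE_BARRIER_NUMBER	0

struct drbd_epoch_sketch {
	struct list_head list;		/* epochs are chained per connection  */
	unsigned int barrier_nr;	/* barrier number, once it is known   */
	atomic_t epoch_size;		/* writes that belong to this epoch   */
	atomic_t active;		/* writes not yet completed           */
	unsigned long flags;		/* DE_HAVE_BARRIER_NUMBER, ...        */
};

/* Condition under which an epoch may be finished, mirroring lines
 * 1198-1199: nothing still active, and either the barrier number has
 * arrived or we are tearing the connection down. */
static bool epoch_may_finish(struct drbd_epoch_sketch *e, unsigned int ev)
{
	return atomic_read(&e->active) == 0 &&
	       (test_bit(DE_HAVE_BARRIER_NUMBER, &e->flags) || (ev & EV_CLEANUP));
}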
1574 struct drbd_epoch *epoch;
1595 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1596 if (epoch)
1599 drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
1608 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1609 if (epoch)
1620 epoch->flags = 0;
1621 atomic_set(&epoch->epoch_size, 0);
1622 atomic_set(&epoch->active, 0);
1626 list_add(&epoch->list, &connection->current_epoch->list);
1627 connection->current_epoch = epoch;
1631 kfree(epoch);
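
The matches around lines 1574-1631 come from the barrier handling path: a new epoch is allocated with GFP_NOIO, and if that fails the code warns that it is "slowing down", waits, and retries; a successfully allocated epoch is zero-initialized and linked in as connection->current_epoch, while an unneeded one is simply kfree()d. A minimal sketch of that allocate-and-install pattern follows, reusing the struct from the sketch above and simplifying the retry and fallback logic; drbd_connection_sketch and start_new_epoch() are illustrative names, not DRBD's.

#include <linux/slab.h>
#include <linux/printk.h>

struct drbd_connection_sketch {
	struct drbd_epoch_sketch *current_epoch;	/* epoch that new writes join */
};

static int start_new_epoch(struct drbd_connection_sketch *connection)
{
	struct drbd_epoch_sketch *epoch;

	/* GFP_NOIO: we are on the receive path, so the allocation must not
	 * recurse into I/O.  The real code warns via drbd_warn() and slows
	 * down before retrying (lines 1595-1609). */
	epoch = kmalloc(sizeof(*epoch), GFP_NOIO);
	if (!epoch) {
		pr_warn("Allocation of an epoch failed, slowing down\n");
		/* ... let outstanding epochs drain, then retry once ... */
		epoch = kmalloc(sizeof(*epoch), GFP_NOIO);
		if (!epoch)
			return -ENOMEM;	/* caller keeps using the current epoch */
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	/* Link behind the current epoch and make it current (lines 1626-1627). */
	list_add(&epoch->list, &connection->current_epoch->list);
	connection->current_epoch = epoch;
	return 0;
}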
2055 drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
2442 peer_req->epoch = connection->current_epoch;
2443 atomic_inc(&peer_req->epoch->epoch_size);
2444 atomic_inc(&peer_req->epoch->active);
2531 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
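
The remaining matches show both ends of an individual write's life inside an epoch: when a data packet arrives (around line 2442) the peer request joins connection->current_epoch and bumps both counters, and when the write completes or is cancelled (lines 2055 and 2531) it hands an EV_PUT event, optionally combined with EV_CLEANUP, back to drbd_may_finish_epoch(). A minimal sketch of that pairing, built on the types from the sketches above; peer_req_sketch and the helper names are illustrative only.

struct peer_req_sketch {
	struct drbd_epoch_sketch *epoch;	/* epoch this write belongs to */
	/* ... sector, size, data pages, flags ... */
};

/* Receive side: a freshly received write joins the current epoch
 * (lines 2442-2444). */
static void peer_req_join_epoch(struct drbd_connection_sketch *connection,
				struct peer_req_sketch *peer_req)
{
	peer_req->epoch = connection->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
}

/* Completion side: drop the request's reference on the epoch.  When the
 * write is being thrown away, EV_CLEANUP is combined with EV_PUT, as at
 * lines 2055 and 2531; once nothing is active and the barrier number is
 * known, the epoch can be acknowledged and finished (see the first sketch). */
static void peer_req_put_epoch(struct peer_req_sketch *peer_req, bool cancel)
{
	unsigned int ev = EV_PUT | (cancel ? EV_CLEANUP : 0);
	struct drbd_epoch_sketch *epoch = peer_req->epoch;

	atomic_dec(&epoch->active);
	if (epoch_may_finish(epoch, ev)) {
		/* real code: drbd_send_b_ack() + dec_unacked(), then free
		 * or recycle the epoch (lines 1202-1224) */
	}
}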