Lines Matching refs:clone

21 	struct request *orig, *clone;
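
The orig/clone pair at line 21 lives in the per-request private data (struct dm_rq_target_io) that ties an original block-layer request to the clone dispatched to the underlying device. A minimal sketch of the fields the matches below rely on; the real definition carries additional accounting and work-item members, and the exact layout here is an assumption:

struct dm_rq_target_io {
	struct mapped_device *md;	/* owning device-mapper device */
	struct dm_target *ti;		/* target that mapped this request */
	struct request *orig, *clone;	/* original request and its clone */
	blk_status_t error;		/* first error recorded on a clone bio */
	union map_info info;		/* per-target mapping context */
	unsigned int completed;		/* bytes completed on the original (assumed) */
};
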
78 static void end_clone_bio(struct bio *clone)
81 container_of(clone, struct dm_rq_clone_bio_info, clone);
84 blk_status_t error = clone->bi_status;
85 bool is_last = !clone->bi_next;
87 bio_put(clone);
92 * Once error occurred, just let clone->end_io() handle
115 * the original request before the clone, and break the ordering.
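
Lines 78-115 belong to end_clone_bio(), the completion handler attached to each bio of the clone. A condensed sketch of how those fragments fit together; the error-path accounting is simplified relative to the real function, and info->orig / tio->completed are assumed field names:

static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/* an earlier clone bio already failed; clone->end_io() finishes up */
		return;

	if (error) {
		/* record the error; the target decides what to do at request completion */
		tio->error = error;
		return;
	}

	tio->completed += nr_bytes;

	/*
	 * Update the original request only on the last clone bio.  Using
	 * blk_mq_end_request() here could complete the original request
	 * before the clone and break the ordering.
	 */
	if (is_last)
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
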
153 * Complete the clone and the original request.
154 * Must be called without clone's queue lock held,
157 static void dm_end_request(struct request *clone, blk_status_t error)
159 struct dm_rq_target_io *tio = clone->end_io_data;
163 blk_rq_unprep_clone(clone);
164 tio->ti->type->release_clone_rq(clone, NULL);
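
dm_end_request() (lines 153-164) is the terminal path: the clone is unprepped and handed back to the target before the original request is completed. A sketch under that reading; per-device statistics and reference accounting present in the real function are omitted:

static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	/* undo blk_rq_prep_clone() and give the clone back to the target */
	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	/* only then complete the original request toward the upper layers */
	blk_mq_end_request(rq, error);
}
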
195 if (tio->clone) {
196 blk_rq_unprep_clone(tio->clone);
197 tio->ti->type->release_clone_rq(tio->clone, NULL);
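
Lines 195-197 are the matching teardown on the requeue path: if a clone was already prepared, it is unprepped and released before the original request goes back onto the queue. A sketch, assuming the surrounding helper is the requeue routine and that it delegates the requeue itself to blk_mq_requeue_request(); the delay handling and accounting are simplified and the function name is taken on trust from context:

static void dm_requeue_original_request(struct dm_rq_target_io *tio,
					bool delay_requeue)
{
	struct request *rq = tio->orig;

	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	blk_mq_requeue_request(rq, false);
	blk_mq_delay_kick_requeue_list(rq->q, delay_requeue ? 100 /* ms */ : 0);
}
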
204 static void dm_done(struct request *clone, blk_status_t error, bool mapped)
207 struct dm_rq_target_io *tio = clone->end_io_data;
214 r = rq_end_io(tio->ti, clone, error, &tio->info);
218 if (req_op(clone) == REQ_OP_DISCARD &&
219 !clone->q->limits.max_discard_sectors)
221 else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
222 !clone->q->limits.max_write_zeroes_sectors)
229 dm_end_request(clone, error);
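
dm_done() (lines 204-229) gives the target's rq_end_io hook the first say, turns "lower device rejected this op" errors into feature disabling, and then either finishes, keeps, or requeues the request. A condensed sketch; the DM_ENDIO_* switch in the real function has more cases, and the disable_* helpers are assumed names:

static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti ? tio->ti->type->rq_end_io : NULL;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

	if (unlikely(error == BLK_STS_TARGET)) {
		/* the lower device does not support the op: stop issuing it */
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		dm_end_request(clone, error);		/* complete clone + original */
		break;
	case DM_ENDIO_INCOMPLETE:
		break;					/* the target keeps the I/O */
	case DM_ENDIO_REQUEUE:
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		dm_requeue_original_request(tio, true);
		break;
	}
}
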
255 struct request *clone = tio->clone;
257 if (!clone) {
269 dm_done(clone, tio->error, mapped);
273 * Complete the clone and the original request with the error status
286 * Complete the not-mapped clone and the original request with the error status
297 static enum rq_end_io_ret end_clone_request(struct request *clone,
300 struct dm_rq_target_io *tio = clone->end_io_data;
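
Lines 255-300 are the completion hand-off. end_clone_request() runs when the clone finishes on the lower device; it completes nothing directly but routes the status to the original request's completion handler, which later calls dm_done() with the saved error. A sketch of that round trip; dm_complete_request() and the softirq-side handler name are assumptions based on the listed fragments:

/* stash the status on the tio and complete the *original* request */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	tio->error = error;
	blk_mq_complete_request(rq);	/* runs the queue's ->complete handler */
}

/* clone->end_io: defer everything to the original request's completion */
static enum rq_end_io_ret end_clone_request(struct request *clone,
					    blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
	return RQ_END_IO_NONE;
}

/* ->complete handler: finish the original, or let the target decide via dm_done() */
static void dm_softirq_done(struct request *rq)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct request *clone = tio->clone;
	bool mapped = !(rq->rq_flags & RQF_FAILED);

	if (!clone) {
		/* the request was never mapped to a clone */
		blk_mq_end_request(rq, tio->error);
		return;
	}

	dm_done(clone, tio->error, mapped);
}
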
311 container_of(bio, struct dm_rq_clone_bio_info, clone);
320 static int setup_clone(struct request *clone, struct request *rq,
325 r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
330 clone->end_io = end_clone_request;
331 clone->end_io_data = tio;
333 tio->clone = clone;
343 tio->clone = NULL;
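
setup_clone() (lines 320-343) is where a freshly allocated clone gets its bios cloned from the original via blk_rq_prep_clone() and its completion hooked up, with line 311's bio constructor wiring each clone bio back to the tio. A sketch that is almost fully determined by the matched lines; only the info->orig / info->tio assignments are assumed:

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;	/* per-bio completion, see lines 78-115 */

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;	/* request-level completion, line 297 */
	clone->end_io_data = tio;
	tio->clone = clone;

	return 0;
}
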
368 struct request *clone = NULL;
371 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
377 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
379 ti->type->release_clone_rq(clone, &tio->info);
384 trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
386 ret = blk_insert_cloned_request(clone);
392 blk_rq_unprep_clone(clone);
393 blk_mq_cleanup_rq(clone);
394 tio->ti->type->release_clone_rq(clone, &tio->info);
395 tio->clone = NULL;
398 /* must complete clone in terms of original request */
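
Lines 368-398 come from the mapping path: the target's clone_and_map_rq() produces the clone, setup_clone() prepares it, and blk_insert_cloned_request() dispatches it to the lower queue; the error legs at lines 392-395 unwind the clone so the original request can be requeued or completed. A condensed sketch of the DM_MAPIO_REMAPPED branch under that reading; the other DM_MAPIO_* cases and the tracing details are abbreviated, and map_request() / dm_complete_request() are assumed names:

static int map_request(struct dm_rq_target_io *tio)
{
	struct dm_target *ti = tio->ti;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;
	int r;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	if (r != DM_MAPIO_REMAPPED)
		return r;	/* submitted / requeue / kill handled elsewhere */

	if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
		/* -ENOMEM: hand the clone back and retry the original later */
		ti->type->release_clone_rq(clone, &tio->info);
		return DM_MAPIO_REQUEUE;
	}

	trace_block_rq_remap(clone, disk_devt(dm_disk(tio->md)), blk_rq_pos(rq));
	ret = blk_insert_cloned_request(clone);
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		/* lower queue is busy: unwind the clone, requeue the original */
		blk_rq_unprep_clone(clone);
		blk_mq_cleanup_rq(clone);
		ti->type->release_clone_rq(clone, &tio->info);
		tio->clone = NULL;
		return DM_MAPIO_REQUEUE;
	default:
		/* must complete clone in terms of original request */
		dm_complete_request(rq, ret);
	}

	return DM_MAPIO_REMAPPED;
}
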