xref: /linux/drivers/target/target_core_xcopy.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3  * Filename: target_core_xcopy.c
4  *
5  * This file contains support for SPC-4 Extended-Copy offload with generic
6  * TCM backends.
7  *
8  * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
9  *
10  * Author:
11  * Nicholas A. Bellinger <nab@daterainc.com>
12  *
13  ******************************************************************************/
14 
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/list.h>
18 #include <linux/rculist.h>
19 #include <linux/configfs.h>
20 #include <linux/ratelimit.h>
21 #include <scsi/scsi_proto.h>
22 #include <asm/unaligned.h>
23 
24 #include <target/target_core_base.h>
25 #include <target/target_core_backend.h>
26 #include <target/target_core_fabric.h>
27 
28 #include "target_core_internal.h"
29 #include "target_core_pr.h"
30 #include "target_core_ua.h"
31 #include "target_core_xcopy.h"
32 
33 static struct workqueue_struct *xcopy_wq;
34 
35 static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
36 
37 /**
38  * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
39  *
40  * @se_dev: device being considered for match
41  * @dev_wwn: XCOPY requested NAA dev_wwn
42  * Return: 1 on match, 0 on no-match
43  */
44 static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
45 					      const unsigned char *dev_wwn)
46 {
47 	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
48 	int rc;
49 
50 	if (!se_dev->dev_attrib.emulate_3pc) {
51 		pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
52 		return 0;
53 	}
54 
55 	memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
56 	spc_gen_naa_6h_vendor_specific(se_dev, &tmp_dev_wwn[0]);
57 
58 	rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
59 	if (rc != 0) {
60 		pr_debug("XCOPY: skip non-matching: %*ph\n",
61 			 XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
62 		return 0;
63 	}
64 	pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
65 
66 	return 1;
67 }
68 
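/*
 * Walk the nexus' RCU-protected LUN entry list and use the iterator above
 * to find a local se_device whose NAA vendor-specific designator matches
 * @dev_wwn. On success, a reference is taken on the matching LUN's
 * lun_ref (dropped later via xcopy_pt_undepend_remotedev()) and the
 * device and ref are returned through @_found_dev / @_found_lun_ref;
 * otherwise -EINVAL is returned.
 */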
69 static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
70 					const unsigned char *dev_wwn,
71 					struct se_device **_found_dev,
72 					struct percpu_ref **_found_lun_ref)
73 {
74 	struct se_dev_entry *deve;
75 	struct se_node_acl *nacl;
76 	struct se_lun *this_lun = NULL;
77 	struct se_device *found_dev = NULL;
78 
79 	/* cmd with NULL sess indicates no associated $FABRIC_MOD */
80 	if (!sess)
81 		goto err_out;
82 
83 	pr_debug("XCOPY 0xe4: searching for: %*ph\n",
84 		 XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
85 
86 	nacl = sess->se_node_acl;
87 	rcu_read_lock();
88 	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
89 		struct se_device *this_dev;
90 		int rc;
91 
92 		this_lun = deve->se_lun;
93 		this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
94 
95 		rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
96 		if (rc) {
97 			if (percpu_ref_tryget_live(&this_lun->lun_ref))
98 				found_dev = this_dev;
99 			break;
100 		}
101 	}
102 	rcu_read_unlock();
103 	if (found_dev == NULL)
104 		goto err_out;
105 
106 	pr_debug("lun_ref held for se_dev: %p se_dev->dev_group: %p\n",
107 		 found_dev, &found_dev->dev_group);
108 	*_found_dev = found_dev;
109 	*_found_lun_ref = &this_lun->lun_ref;
110 	return 0;
111 err_out:
112 	pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
113 	return -EINVAL;
114 }
115 
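/*
 * Byte layout of the identification descriptor CSCD (type 0xe4) as
 * consumed below; each entry is XCOPY_TARGET_DESC_LEN bytes long:
 *
 *   byte 0      descriptor type code (0xe4)
 *   bytes 2-3   RELATIVE INITIATOR PORT IDENTIFIER
 *   byte 4      code set in bits 3:0 (only 0x1 = binary is accepted)
 *   byte 5      association in bits 5:4 (only LUN) and designator type
 *               in bits 3:0 (only 0x3 = NAA)
 *   byte 7      designator length (must be XCOPY_NAA_IEEE_REGEX_LEN)
 *   bytes 8-23  NAA IEEE Registered Extended designator
 */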
116 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
117 				unsigned char *p, unsigned short cscd_index)
118 {
119 	unsigned char *desc = p;
120 	unsigned short ript;
121 	u8 desig_len;
122 	/*
123 	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
124 	 */
125 	ript = get_unaligned_be16(&desc[2]);
126 	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
127 	/*
128 	 * Check for supported code set, association, and designator type
129 	 */
130 	if ((desc[4] & 0x0f) != 0x1) {
131 		pr_err("XCOPY 0xe4: code set of non-binary type not supported\n");
132 		return -EINVAL;
133 	}
134 	if ((desc[5] & 0x30) != 0x00) {
135 		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
136 		return -EINVAL;
137 	}
138 	if ((desc[5] & 0x0f) != 0x3) {
139 		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
140 				(desc[5] & 0x0f));
141 		return -EINVAL;
142 	}
143 	/*
144 	 * Check for the 16 byte designator length used by the NAA IEEE
145 	 * Registered Extended designator
146 	 */
147 	desig_len = desc[7];
148 	if (desig_len != XCOPY_NAA_IEEE_REGEX_LEN) {
149 		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
150 		return -EINVAL;
151 	}
152 	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
153 	/*
154 	 * Check that the NAA field indicates IEEE Registered Extended (0x6)
155 	 */
156 	if ((desc[8] & 0xf0) != 0x60) {
157 		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
158 					(desc[8] & 0xf0));
159 		return -EINVAL;
160 	}
161 
162 	if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
163 		pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
164 			 "dest\n", cscd_index);
165 		return 0;
166 	}
167 
168 	if (cscd_index == xop->stdi) {
169 		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
170 		/*
171 		 * Determine if the source designator matches the local device
172 		 */
173 		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
174 				XCOPY_NAA_IEEE_REGEX_LEN)) {
175 			xop->op_origin = XCOL_SOURCE_RECV_OP;
176 			xop->src_dev = se_cmd->se_dev;
177 			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
178 					" received xop\n", xop->src_dev);
179 		}
180 	}
181 
182 	if (cscd_index == xop->dtdi) {
183 		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
184 		/*
185 		 * Determine if the destination designator matches the local
186 		 * device. If @cscd_index corresponds to both source (stdi) and
187 		 * destination (dtdi), or dtdi comes after stdi, then
188 		 * XCOL_DEST_RECV_OP wins.
189 		 */
190 		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
191 				XCOPY_NAA_IEEE_REGEX_LEN)) {
192 			xop->op_origin = XCOL_DEST_RECV_OP;
193 			xop->dst_dev = se_cmd->se_dev;
194 			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
195 				" received xop\n", xop->dst_dev);
196 		}
197 	}
198 
199 	return 0;
200 }
201 
202 static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
203 				struct xcopy_op *xop, unsigned char *p,
204 				unsigned short tdll, sense_reason_t *sense_ret)
205 {
206 	struct se_device *local_dev = se_cmd->se_dev;
207 	unsigned char *desc = p;
208 	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
209 	unsigned short cscd_index = 0;
210 	unsigned short start = 0;
211 
212 	*sense_ret = TCM_INVALID_PARAMETER_LIST;
213 
214 	if (offset != 0) {
215 		pr_err("XCOPY target descriptor list length is not a"
216 			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
217 		*sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
218 		return -EINVAL;
219 	}
220 	if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
221 		pr_err("XCOPY supports a maximum of two src/dest target"
222 			" descriptors, tdll: %hu too large\n", tdll);
223 		/* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
224 		*sense_ret = TCM_TOO_MANY_TARGET_DESCS;
225 		return -EINVAL;
226 	}
227 	/*
228 	 * Generate an IEEE Registered Extended designator based upon the
229 	 * se_device the XCOPY was received upon..
230 	 */
231 	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
232 	spc_gen_naa_6h_vendor_specific(local_dev, &xop->local_dev_wwn[0]);
233 
234 	while (start < tdll) {
235 		/*
236 		 * Check for the 0xe4 identification descriptor CSCD type, and
237 		 * compare the current index with the CSCD descriptor IDs in
238 		 * the segment descriptor. Use VPD 0x83 NAA WWN matching.
239 		 */
240 		switch (desc[0]) {
241 		case 0xe4:
242 			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
243 							&desc[0], cscd_index);
244 			if (rc != 0)
245 				goto out;
246 			start += XCOPY_TARGET_DESC_LEN;
247 			desc += XCOPY_TARGET_DESC_LEN;
248 			cscd_index++;
249 			break;
250 		default:
251 			pr_err("XCOPY unsupported descriptor type code:"
252 					" 0x%02x\n", desc[0]);
253 			*sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
254 			goto out;
255 		}
256 	}
257 
258 	switch (xop->op_origin) {
259 	case XCOL_SOURCE_RECV_OP:
260 		rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
261 						xop->dst_tid_wwn,
262 						&xop->dst_dev,
263 						&xop->remote_lun_ref);
264 		break;
265 	case XCOL_DEST_RECV_OP:
266 		rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
267 						xop->src_tid_wwn,
268 						&xop->src_dev,
269 						&xop->remote_lun_ref);
270 		break;
271 	default:
272 		pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
273 			"stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
274 		rc = -EINVAL;
275 		break;
276 	}
277 	/*
278 	 * If a matching IEEE NAA 0x83 descriptor for the requested device
279 	 * is not located on this node, return COPY_ABORTED with ASC/ASCQ
280 	 * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
281 	 * initiator to fall back to normal copy method.
282 	 */
283 	if (rc < 0) {
284 		*sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
285 		goto out;
286 	}
287 
288 	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
289 		 xop->src_dev, &xop->src_tid_wwn[0]);
290 	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
291 		 xop->dst_dev, &xop->dst_tid_wwn[0]);
292 
293 	return cscd_index;
294 
295 out:
296 	return -EINVAL;
297 }
298 
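/*
 * Byte layout of the block -> block segment descriptor (type 0x02) as
 * consumed below; each entry is XCOPY_SEGMENT_DESC_LEN bytes long:
 *
 *   bytes 2-3    descriptor length (must be 0x18)
 *   bytes 4-5    source CSCD descriptor ID (stdi)
 *   bytes 6-7    destination CSCD descriptor ID (dtdi)
 *   bytes 10-11  number of blocks to copy (nolb)
 *   bytes 12-19  source logical block address
 *   bytes 20-27  destination logical block address
 */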
299 static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p)
300 {
301 	unsigned char *desc = p;
302 	int dc = (desc[1] & 0x02);
303 	unsigned short desc_len;
304 
305 	desc_len = get_unaligned_be16(&desc[2]);
306 	if (desc_len != 0x18) {
307 		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
308 				" %hu\n", desc_len);
309 		return -EINVAL;
310 	}
311 
312 	xop->stdi = get_unaligned_be16(&desc[4]);
313 	xop->dtdi = get_unaligned_be16(&desc[6]);
314 
315 	if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
316 	    xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
317 		pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
318 			XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
319 		return -EINVAL;
320 	}
321 
322 	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
323 		desc_len, xop->stdi, xop->dtdi, dc);
324 
325 	xop->nolb = get_unaligned_be16(&desc[10]);
326 	xop->src_lba = get_unaligned_be64(&desc[12]);
327 	xop->dst_lba = get_unaligned_be64(&desc[20]);
328 	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
329 		xop->nolb, (unsigned long long)xop->src_lba,
330 		(unsigned long long)xop->dst_lba);
331 
332 	return 0;
333 }
334 
335 static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop,
336 				unsigned char *p, unsigned int sdll,
337 				sense_reason_t *sense_ret)
338 {
339 	unsigned char *desc = p;
340 	unsigned int start = 0;
341 	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
342 
343 	*sense_ret = TCM_INVALID_PARAMETER_LIST;
344 
345 	if (offset != 0) {
346 		pr_err("XCOPY segment descriptor list length is not a"
347 			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
348 		*sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
349 		return -EINVAL;
350 	}
351 	if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
352 		pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
353 			" large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
354 		/* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
355 		*sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
356 		return -EINVAL;
357 	}
358 
359 	while (start < sdll) {
360 		/*
361 		 * Check segment descriptor type code for block -> block
362 		 */
363 		switch (desc[0]) {
364 		case 0x02:
365 			rc = target_xcopy_parse_segdesc_02(xop, desc);
366 			if (rc < 0)
367 				goto out;
368 
369 			ret++;
370 			start += XCOPY_SEGMENT_DESC_LEN;
371 			desc += XCOPY_SEGMENT_DESC_LEN;
372 			break;
373 		default:
374 			pr_err("XCOPY unsupported segment descriptor"
375 				" type: 0x%02x\n", desc[0]);
376 			*sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
377 			goto out;
378 		}
379 	}
380 
381 	return ret;
382 
383 out:
384 	return -EINVAL;
385 }
386 
387 /*
388  * Start xcopy_pt ops
389  */
390 
391 struct xcopy_pt_cmd {
392 	struct se_cmd se_cmd;
393 	struct completion xpt_passthrough_sem;
394 	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
395 };
396 
397 struct se_portal_group xcopy_pt_tpg;
398 static struct se_session xcopy_pt_sess;
399 static struct se_node_acl xcopy_pt_nacl;
400 
401 static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
402 {
403 	return 0;
404 }
405 
406 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
407 {
408 	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
409 		pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
410 	else
411 		pr_debug("putting src lun_ref for %p\n", xop->src_dev);
412 
413 	percpu_ref_put(xop->remote_lun_ref);
414 }
415 
416 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
417 {
418 	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
419 				struct xcopy_pt_cmd, se_cmd);
420 
421 	/* xpt_cmd is on the stack, nothing to free here */
422 	pr_debug("xpt_cmd done: %p\n", xpt_cmd);
423 }
424 
425 static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
426 {
427 	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
428 				struct xcopy_pt_cmd, se_cmd);
429 
430 	complete(&xpt_cmd->xpt_passthrough_sem);
431 	return 0;
432 }
433 
434 static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
435 {
436 	return 0;
437 }
438 
439 static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
440 {
441 	return 0;
442 }
443 
444 static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
445 {
446 	return 0;
447 }
448 
449 static const struct target_core_fabric_ops xcopy_pt_tfo = {
450 	.fabric_name		= "xcopy-pt",
451 	.get_cmd_state		= xcopy_pt_get_cmd_state,
452 	.release_cmd		= xcopy_pt_release_cmd,
453 	.check_stop_free	= xcopy_pt_check_stop_free,
454 	.write_pending		= xcopy_pt_write_pending,
455 	.queue_data_in		= xcopy_pt_queue_data_in,
456 	.queue_status		= xcopy_pt_queue_status,
457 };
458 
459 /*
460  * End xcopy_pt ops
461  */
462 
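/*
 * The xcopy_pt ops above back a minimal internal nexus (xcopy_pt_tpg,
 * xcopy_pt_nacl and xcopy_pt_sess, wired up in target_xcopy_setup_pt()
 * below) that is used to submit the internal READ_16/WRITE_16
 * pass-through commands. Completion is signalled via ->check_stop_free(),
 * which completes xpt_passthrough_sem, the completion that
 * target_xcopy_issue_pt_cmd() waits on.
 */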
463 int target_xcopy_setup_pt(void)
464 {
465 	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
466 	if (!xcopy_wq) {
467 		pr_err("Unable to allocate xcopy_wq\n");
468 		return -ENOMEM;
469 	}
470 
471 	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
472 	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
473 	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
474 
475 	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
476 
477 	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
478 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
479 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
480 	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
481 	transport_init_session(&xcopy_pt_sess);
482 
483 	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
484 	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
485 
486 	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
487 	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
488 
489 	return 0;
490 }
491 
492 void target_xcopy_release_pt(void)
493 {
494 	if (xcopy_wq)
495 		destroy_workqueue(xcopy_wq);
496 }
497 
498 /*
499  * target_xcopy_setup_pt_cmd - set up a pass-through command
500  * @xpt_cmd:	 Data structure to initialize.
501  * @xop:	 Describes the XCOPY operation received from an initiator.
502  * @se_dev:	 Backend device to associate with @xpt_cmd if
503  *		 @remote_port == true.
504  * @cdb:	 SCSI CDB to be copied into @xpt_cmd.
505  * @remote_port: If false, use the LUN through which the XCOPY command has
506  *		 been received. If true, use @se_dev->xcopy_lun.
507  *
508  * Set up a SCSI command (READ or WRITE) that will be used to execute an
509  * XCOPY command. Returns 0 on success or a negative error code on failure.
510  */
511 static int target_xcopy_setup_pt_cmd(
512 	struct xcopy_pt_cmd *xpt_cmd,
513 	struct xcopy_op *xop,
514 	struct se_device *se_dev,
515 	unsigned char *cdb,
516 	bool remote_port)
517 {
518 	struct se_cmd *cmd = &xpt_cmd->se_cmd;
519 
520 	/*
521 	 * Setup LUN+port to honor reservations based upon xop->op_origin for
522 	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
523 	 */
524 	if (remote_port) {
525 		cmd->se_lun = &se_dev->xcopy_lun;
526 		cmd->se_dev = se_dev;
527 	} else {
528 		cmd->se_lun = xop->xop_se_cmd->se_lun;
529 		cmd->se_dev = xop->xop_se_cmd->se_dev;
530 	}
531 	cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
532 
533 	if (target_cmd_init_cdb(cmd, cdb, GFP_KERNEL))
534 		return -EINVAL;
535 
536 	cmd->tag = 0;
537 	if (target_cmd_parse_cdb(cmd))
538 		return -EINVAL;
539 
540 	if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg,
541 					xop->xop_data_nents, NULL, 0))
542 		return -EINVAL;
543 
544 	pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
545 		 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
546 
547 	return 0;
548 }
549 
550 static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
551 {
552 	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
553 	sense_reason_t sense_rc;
554 
555 	sense_rc = transport_generic_new_cmd(se_cmd);
556 	if (sense_rc)
557 		return -EINVAL;
558 
559 	if (se_cmd->data_direction == DMA_TO_DEVICE)
560 		target_execute_cmd(se_cmd);
561 
562 	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
563 
564 	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
565 			se_cmd->scsi_status);
566 
567 	return (se_cmd->scsi_status) ? -EINVAL : 0;
568 }
569 
570 static int target_xcopy_read_source(
571 	struct se_cmd *ec_cmd,
572 	struct xcopy_op *xop,
573 	struct se_device *src_dev,
574 	sector_t src_lba,
575 	u32 src_bytes)
576 {
577 	struct xcopy_pt_cmd xpt_cmd;
578 	struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
579 	u32 transfer_length_block = src_bytes / src_dev->dev_attrib.block_size;
580 	int rc;
581 	unsigned char cdb[16];
582 	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
583 
584 	memset(&xpt_cmd, 0, sizeof(xpt_cmd));
585 	init_completion(&xpt_cmd.xpt_passthrough_sem);
586 
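	/*
	 * Build a READ(16) CDB by hand: opcode in byte 0, 64-bit LBA in
	 * bytes 2..9 and a 32-bit transfer length (in logical blocks) in
	 * bytes 10..13. target_xcopy_write_destination() below builds the
	 * matching WRITE(16) the same way.
	 */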
587 	memset(&cdb[0], 0, 16);
588 	cdb[0] = READ_16;
589 	put_unaligned_be64(src_lba, &cdb[2]);
590 	put_unaligned_be32(transfer_length_block, &cdb[10]);
591 	pr_debug("XCOPY: Built READ_16: LBA: %llu Blocks: %u Length: %u\n",
592 		(unsigned long long)src_lba, transfer_length_block, src_bytes);
593 
594 	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes,
595 			  DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
596 			  NULL);
597 	rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
598 				remote_port);
599 	if (rc < 0) {
600 		ec_cmd->scsi_status = se_cmd->scsi_status;
601 		goto out;
602 	}
603 
604 	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
605 		" memory\n", xop->xop_data_sg, xop->xop_data_nents);
606 
607 	rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
608 	if (rc < 0)
609 		ec_cmd->scsi_status = se_cmd->scsi_status;
610 out:
611 	transport_generic_free_cmd(se_cmd, 0);
612 	return rc;
613 }
614 
615 static int target_xcopy_write_destination(
616 	struct se_cmd *ec_cmd,
617 	struct xcopy_op *xop,
618 	struct se_device *dst_dev,
619 	sector_t dst_lba,
620 	u32 dst_bytes)
621 {
622 	struct xcopy_pt_cmd xpt_cmd;
623 	struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
624 	u32 transfer_length_block = dst_bytes / dst_dev->dev_attrib.block_size;
625 	int rc;
626 	unsigned char cdb[16];
627 	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
628 
629 	memset(&xpt_cmd, 0, sizeof(xpt_cmd));
630 	init_completion(&xpt_cmd.xpt_passthrough_sem);
631 
632 	memset(&cdb[0], 0, 16);
633 	cdb[0] = WRITE_16;
634 	put_unaligned_be64(dst_lba, &cdb[2]);
635 	put_unaligned_be32(transfer_length_block, &cdb[10]);
636 	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Blocks: %u Length: %u\n",
637 		(unsigned long long)dst_lba, transfer_length_block, dst_bytes);
638 
639 	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes,
640 			  DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
641 			  NULL);
642 	rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
643 				remote_port);
644 	if (rc < 0) {
645 		ec_cmd->scsi_status = se_cmd->scsi_status;
646 		goto out;
647 	}
648 
649 	rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
650 	if (rc < 0)
651 		ec_cmd->scsi_status = se_cmd->scsi_status;
652 out:
653 	transport_generic_free_cmd(se_cmd, 0);
654 	return rc;
655 }
656 
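/*
 * Worker that executes a single EXTENDED COPY request: parse the
 * parameter list, then copy in chunks of at most max_bytes by
 * (re)allocating xop->xop_data_sg to the chunk size, reading a chunk from
 * the source device and writing the same scatterlist out to the
 * destination device. ec_cmd is completed with GOOD status on success, or
 * with CHECK CONDITION status and the appropriate sense on failure.
 */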
657 static void target_xcopy_do_work(struct work_struct *work)
658 {
659 	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
660 	struct se_cmd *ec_cmd = xop->xop_se_cmd;
661 	struct se_device *src_dev, *dst_dev;
662 	sector_t src_lba, dst_lba, end_lba;
663 	unsigned long long max_bytes, max_bytes_src, max_bytes_dst, max_blocks;
664 	int rc = 0;
665 	unsigned short nolb;
666 	unsigned int copied_bytes = 0;
667 	sense_reason_t sense_rc;
668 
669 	sense_rc = target_parse_xcopy_cmd(xop);
670 	if (sense_rc != TCM_NO_SENSE)
671 		goto err_free;
672 
673 	if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) {
674 		sense_rc = TCM_INVALID_PARAMETER_LIST;
675 		goto err_free;
676 	}
677 
678 	src_dev = xop->src_dev;
679 	dst_dev = xop->dst_dev;
680 	src_lba = xop->src_lba;
681 	dst_lba = xop->dst_lba;
682 	nolb = xop->nolb;
683 	end_lba = src_lba + nolb;
684 	/*
685 	 * Break up XCOPY I/O into hw_max_sectors * hw_block_size sized
686 	 * I/O based on the smallest max_bytes between src_dev + dst_dev
687 	 */
688 	max_bytes_src = (unsigned long long) src_dev->dev_attrib.hw_max_sectors *
689 			src_dev->dev_attrib.hw_block_size;
690 	max_bytes_dst = (unsigned long long) dst_dev->dev_attrib.hw_max_sectors *
691 			dst_dev->dev_attrib.hw_block_size;
692 
693 	max_bytes = min_t(u64, max_bytes_src, max_bytes_dst);
694 	max_bytes = min_t(u64, max_bytes, XCOPY_MAX_BYTES);
695 
696 	/*
697 	 * Using shift instead of the division because otherwise GCC
698 	 * generates __udivdi3 that is missing on i386
699 	 */
700 	max_blocks = max_bytes >> ilog2(src_dev->dev_attrib.block_size);
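	/*
	 * Illustrative numbers only (not taken from any particular device):
	 * with hw_max_sectors = 1024 and hw_block_size = 512 on both sides,
	 * max_bytes_src = max_bytes_dst = 1024 * 512 = 524288 bytes, capped
	 * by XCOPY_MAX_BYTES; with a 512 byte logical block size that yields
	 * max_blocks = max_bytes >> 9 blocks per loop iteration below.
	 */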
701 
702 	pr_debug("%s: nolb: %u, max_blocks: %llu end_lba: %llu\n", __func__,
703 			nolb, max_blocks, (unsigned long long)end_lba);
704 	pr_debug("%s: Starting src_lba: %llu, dst_lba: %llu\n", __func__,
705 			(unsigned long long)src_lba, (unsigned long long)dst_lba);
706 
707 	while (nolb) {
708 		u32 cur_bytes = min_t(u64, max_bytes, nolb * src_dev->dev_attrib.block_size);
709 		unsigned short cur_nolb = cur_bytes / src_dev->dev_attrib.block_size;
710 
711 		if (cur_bytes != xop->xop_data_bytes) {
712 			/*
713 			 * (Re)allocate a buffer large enough to hold the XCOPY
714 			 * I/O size, which can be reused each read / write loop.
715 			 */
716 			target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
717 			rc = target_alloc_sgl(&xop->xop_data_sg,
718 					      &xop->xop_data_nents,
719 					      cur_bytes,
720 					      false, false);
721 			if (rc < 0)
722 				goto out;
723 			xop->xop_data_bytes = cur_bytes;
724 		}
725 
726 		pr_debug("%s: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n",
727 				__func__, src_dev, (unsigned long long)src_lba, cur_nolb);
728 
729 		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_bytes);
730 		if (rc < 0)
731 			goto out;
732 
733 		src_lba += cur_bytes / src_dev->dev_attrib.block_size;
734 		pr_debug("%s: Incremented READ src_lba to %llu\n", __func__,
735 				(unsigned long long)src_lba);
736 
737 		pr_debug("%s: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %u\n",
738 				__func__, dst_dev, (unsigned long long)dst_lba, cur_nolb);
739 
740 		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
741 						dst_lba, cur_bytes);
742 		if (rc < 0)
743 			goto out;
744 
745 		dst_lba += cur_bytes / dst_dev->dev_attrib.block_size;
746 		pr_debug("%s: Incremented WRITE dst_lba to %llu\n", __func__,
747 				(unsigned long long)dst_lba);
748 
749 		copied_bytes += cur_bytes;
750 		nolb -= cur_bytes / src_dev->dev_attrib.block_size;
751 	}
752 
753 	xcopy_pt_undepend_remotedev(xop);
754 	target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
755 	kfree(xop);
756 
757 	pr_debug("%s: Final src_lba: %llu, dst_lba: %llu\n", __func__,
758 		(unsigned long long)src_lba, (unsigned long long)dst_lba);
759 	pr_debug("%s: Blocks copied: %u, Bytes Copied: %u\n", __func__,
760 		copied_bytes / dst_dev->dev_attrib.block_size, copied_bytes);
761 
762 	pr_debug("%s: Setting X-COPY GOOD status -> sending response\n", __func__);
763 	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
764 	return;
765 
766 out:
767 	/*
768 	 * The XCOPY command was aborted after some data was transferred.
769 	 * Terminate command with CHECK CONDITION status, with the sense key
770 	 * set to COPY ABORTED.
771 	 */
772 	sense_rc = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
773 	xcopy_pt_undepend_remotedev(xop);
774 	target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
775 
776 err_free:
777 	kfree(xop);
778 	pr_warn_ratelimited("%s: rc: %d, sense: %u, XCOPY operation failed\n",
779 			   __func__, rc, sense_rc);
780 	target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc);
781 }
782 
783 /*
784  * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing
785  * fails.
786  */
787 static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
788 {
789 	struct se_cmd *se_cmd = xop->xop_se_cmd;
790 	unsigned char *p = NULL, *seg_desc;
791 	unsigned int list_id, list_id_usage, sdll, inline_dl;
792 	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
793 	int rc;
794 	unsigned short tdll;
795 
796 	p = transport_kmap_data_sg(se_cmd);
797 	if (!p) {
798 		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
799 		return TCM_OUT_OF_RESOURCES;
800 	}
801 
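	/*
	 * EXTENDED COPY (LID1) parameter list header, as consumed below:
	 *
	 *   byte 0       LIST IDENTIFIER
	 *   byte 1       LIST ID USAGE in bits 4:3
	 *   bytes 2-3    TARGET DESCRIPTOR LIST LENGTH (tdll)
	 *   bytes 8-11   SEGMENT DESCRIPTOR LIST LENGTH (sdll)
	 *   bytes 12-15  INLINE DATA LENGTH (must be zero here)
	 *   byte 16...   tdll bytes of CSCD descriptors followed by sdll
	 *                bytes of segment descriptors
	 *
	 * e.g. a minimal copy request carries two CSCD descriptors
	 * (tdll = 2 * XCOPY_TARGET_DESC_LEN) and one block -> block segment
	 * descriptor (sdll = XCOPY_SEGMENT_DESC_LEN).
	 */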
802 	list_id = p[0];
803 	list_id_usage = (p[1] & 0x18) >> 3;
804 
805 	/*
806 	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
807 	 */
808 	tdll = get_unaligned_be16(&p[2]);
809 	sdll = get_unaligned_be32(&p[8]);
810 	if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
811 		pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
812 		       tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
813 		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
814 		goto out;
815 	}
816 
817 	inline_dl = get_unaligned_be32(&p[12]);
818 	if (inline_dl != 0) {
819 		pr_err("XCOPY with non-zero inline data length\n");
820 		goto out;
821 	}
822 
823 	if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
824 		pr_err("XCOPY parameter truncation: data length %u too small "
825 			"for tdll: %hu sdll: %u inline_dl: %u\n",
826 			se_cmd->data_length, tdll, sdll, inline_dl);
827 		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
828 		goto out;
829 	}
830 
831 	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
832 		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
833 		tdll, sdll, inline_dl);
834 
835 	/*
836 	 * Skip over the target descriptors until the segment descriptors
837 	 * have been parsed - CSCD IDs are needed to determine src and dest.
838 	 */
839 	seg_desc = &p[16] + tdll;
840 
841 	rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret);
842 	if (rc <= 0)
843 		goto out;
844 
845 	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
846 				rc * XCOPY_SEGMENT_DESC_LEN);
847 
848 	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
849 	if (rc <= 0)
850 		goto out;
851 
852 	if (xop->src_dev->dev_attrib.block_size !=
853 	    xop->dst_dev->dev_attrib.block_size) {
854 		pr_err("XCOPY: Non-matching src_dev block_size: %u and dst_dev"
855 		       " block_size: %u currently unsupported\n",
856 			xop->src_dev->dev_attrib.block_size,
857 			xop->dst_dev->dev_attrib.block_size);
858 		xcopy_pt_undepend_remotedev(xop);
859 		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
860 		goto out;
861 	}
862 
863 	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
864 				rc * XCOPY_TARGET_DESC_LEN);
865 	transport_kunmap_data_sg(se_cmd);
866 	return TCM_NO_SENSE;
867 
868 out:
869 	if (p)
870 		transport_kunmap_data_sg(se_cmd);
871 	return ret;
872 }
873 
874 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
875 {
876 	struct se_device *dev = se_cmd->se_dev;
877 	struct xcopy_op *xop;
878 	unsigned int sa;
879 
880 	if (!dev->dev_attrib.emulate_3pc) {
881 		pr_err("EXTENDED_COPY operation explicitly disabled\n");
882 		return TCM_UNSUPPORTED_SCSI_OPCODE;
883 	}
884 
885 	sa = se_cmd->t_task_cdb[1] & 0x1f;
886 	if (sa != 0x00) {
887 		pr_err("EXTENDED_COPY(LID4) not supported\n");
888 		return TCM_UNSUPPORTED_SCSI_OPCODE;
889 	}
890 
891 	if (se_cmd->data_length == 0) {
892 		target_complete_cmd(se_cmd, SAM_STAT_GOOD);
893 		return TCM_NO_SENSE;
894 	}
895 	if (se_cmd->data_length < XCOPY_HDR_LEN) {
896 		pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
897 				se_cmd->data_length, XCOPY_HDR_LEN);
898 		return TCM_PARAMETER_LIST_LENGTH_ERROR;
899 	}
900 
901 	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
902 	if (!xop)
903 		goto err;
904 	xop->xop_se_cmd = se_cmd;
905 	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
906 	if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
907 		goto free;
908 	return TCM_NO_SENSE;
909 
910 free:
911 	kfree(xop);
912 
913 err:
914 	return TCM_OUT_OF_RESOURCES;
915 }
916 
917 static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
918 {
919 	unsigned char *p;
920 
921 	p = transport_kmap_data_sg(se_cmd);
922 	if (!p) {
923 		pr_err("transport_kmap_data_sg failed in"
924 		       " target_rcr_operating_parameters\n");
925 		return TCM_OUT_OF_RESOURCES;
926 	}
927 
928 	if (se_cmd->data_length < 54) {
929 		pr_err("Receive Copy Results Op Parameters length"
930 		       " too small: %u\n", se_cmd->data_length);
931 		transport_kunmap_data_sg(se_cmd);
932 		return TCM_INVALID_CDB_FIELD;
933 	}
934 	/*
935 	 * Set SNLID=1 (Supports no List ID)
936 	 */
937 	p[4] = 0x1;
938 	/*
939 	 * MAXIMUM TARGET DESCRIPTOR COUNT
940 	 */
941 	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
942 	/*
943 	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
944 	 */
945 	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
946 	/*
947 	 * MAXIMUM DESCRIPTOR LIST LENGTH
948 	 */
949 	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
950 	/*
951 	 * MAXIMUM SEGMENT LENGTH
952 	 */
953 	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
954 	/*
955 	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
956 	 */
957 	put_unaligned_be32(0x0, &p[20]);
958 	/*
959 	 * HELD DATA LIMIT
960 	 */
961 	put_unaligned_be32(0x0, &p[24]);
962 	/*
963 	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
964 	 */
965 	put_unaligned_be32(0x0, &p[28]);
966 	/*
967 	 * TOTAL CONCURRENT COPIES
968 	 */
969 	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
970 	/*
971 	 * MAXIMUM CONCURRENT COPIES
972 	 */
973 	p[36] = RCR_OP_MAX_CONCURR_COPIES;
974 	/*
975 	 * DATA SEGMENT GRANULARITY (log 2)
976 	 */
977 	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
978 	/*
979 	 * INLINE DATA GRANULARITY (log 2)
980 	 */
981 	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
982 	/*
983 	 * HELD DATA GRANULARITY
984 	 * HELD DATA GRANULARITY (log 2)
985 	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
986 	/*
987 	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
988 	 */
989 	p[43] = 0x2;
990 	/*
991 	 * List of implemented descriptor type codes (ordered)
992 	 */
993 	p[44] = 0x02; /* Copy Block to Block device */
994 	p[45] = 0xe4; /* Identification descriptor target descriptor */
995 
996 	/*
997 	 * AVAILABLE DATA (n-3), with n = 45 the offset of the last byte set above
998 	 */
999 	put_unaligned_be32(42, &p[0]);
1000 
1001 	transport_kunmap_data_sg(se_cmd);
1002 	target_complete_cmd(se_cmd, SAM_STAT_GOOD);
1003 
1004 	return TCM_NO_SENSE;
1005 }
1006 
1007 sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
1008 {
1009 	unsigned char *cdb = &se_cmd->t_task_cdb[0];
1010 	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
1011 	struct se_device *dev = se_cmd->se_dev;
1012 	sense_reason_t rc = TCM_NO_SENSE;
1013 
1014 	if (!dev->dev_attrib.emulate_3pc) {
1015 		pr_debug("Third-party copy operations explicitly disabled\n");
1016 		return TCM_UNSUPPORTED_SCSI_OPCODE;
1017 	}
1018 
1019 	pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
1020 		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
1021 
1022 	if (list_id != 0) {
1023 		pr_err("Receive Copy Results with non-zero list identifier"
1024 		       " not supported\n");
1025 		return TCM_INVALID_CDB_FIELD;
1026 	}
1027 
1028 	switch (sa) {
1029 	case RCR_SA_OPERATING_PARAMETERS:
1030 		rc = target_rcr_operating_parameters(se_cmd);
1031 		break;
1032 	case RCR_SA_COPY_STATUS:
1033 	case RCR_SA_RECEIVE_DATA:
1034 	case RCR_SA_FAILED_SEGMENT_DETAILS:
1035 	default:
1036 		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
1037 		return TCM_INVALID_CDB_FIELD;
1038 	}
1039 
1040 	return rc;
1041 }
1042