xref: /illumos-gate/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_scsi.c (revision 8fd04b8338ed5093ec2d1e668fa620b7de44c177)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/scsi/generic/mode.h>
34 #include <sys/disp.h>
35 #include <sys/byteorder.h>
36 #include <sys/atomic.h>
37 #include <sys/sdt.h>
38 #include <sys/dkio.h>
39 
40 #include <stmf.h>
41 #include <lpif.h>
42 #include <portif.h>
43 #include <stmf_ioctl.h>
44 #include <stmf_sbd.h>
45 #include <stmf_sbd_ioctl.h>
46 #include <sbd_impl.h>
47 
/*
 * SCSI2_CONFLICT_FREE_CMDS(cdb) evaluates to non-zero when the command
 * encoded in the raw CDB bytes is one that may be executed even while a
 * SCSI-2 reservation is held by another initiator, per the SPC/SBC
 * tables cited in the inline comments below.
 */
#define	SCSI2_CONFLICT_FREE_CMDS(cdb)	( \
	/* ----------------------- */                                      \
	/* Refer Both		   */                                      \
	/* SPC-2 (rev 20) Table 10 */                                      \
	/* SPC-3 (rev 23) Table 31 */                                      \
	/* ----------------------- */                                      \
	((cdb[0]) == SCMD_INQUIRY)					|| \
	((cdb[0]) == SCMD_LOG_SENSE_G1)					|| \
	((cdb[0]) == SCMD_RELEASE)					|| \
	((cdb[0]) == SCMD_RELEASE_G1)					|| \
	((cdb[0]) == SCMD_REPORT_LUNS)					|| \
	((cdb[0]) == SCMD_REQUEST_SENSE)				|| \
	/* PREVENT ALLOW MEDIUM REMOVAL with prevent == 0 */               \
	((((cdb[0]) == SCMD_DOORLOCK) && (((cdb[4]) & 0x3) == 0)))	|| \
	/* SERVICE ACTION IN with READ MEDIA SERIAL NUMBER (0x01) */       \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G5) && (                          \
	    ((cdb[1]) & 0x1F) == 0x01))					|| \
	/* MAINTENANCE IN with service actions REPORT ALIASES (0x0Bh) */   \
	/* REPORT DEVICE IDENTIFIER (0x05)  REPORT PRIORITY (0x0Eh) */     \
	/* REPORT TARGET PORT GROUPS (0x0A) REPORT TIMESTAMP (0x0F) */     \
	(((cdb[0]) == SCMD_MAINTENANCE_IN) && (                            \
	    (((cdb[1]) & 0x1F) == 0x0B) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x05) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0E) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0A) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0F)))				|| \
	/* ----------------------- */                                      \
	/* SBC-3 (rev 17) Table 3  */                                      \
	/* ----------------------- */                                      \
	/* READ CAPACITY(10) */                                            \
	((cdb[0]) == SCMD_READ_CAPACITY)				|| \
	/* READ CAPACITY(16) */                                            \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G4) && (                          \
	    ((cdb[1]) & 0x1F) == 0x10))					|| \
	/* START STOP UNIT with START bit 0 and POWER CONDITION 0  */      \
	(((cdb[0]) == SCMD_START_STOP) && (                                \
	    (((cdb[4]) & 0xF0) == 0) && (((cdb[4]) & 0x01) == 0))))
/* End of SCSI2_CONFLICT_FREE_CMDS */
86 
87 stmf_status_t sbd_lu_reset_state(stmf_lu_t *lu);
88 static void sbd_handle_sync_cache(struct scsi_task *task,
89     struct stmf_data_buf *initial_dbuf);
90 void sbd_handle_read_xfer_completion(struct scsi_task *task,
91     sbd_cmd_t *scmd, struct stmf_data_buf *dbuf);
92 void sbd_handle_short_write_xfer_completion(scsi_task_t *task,
93     stmf_data_buf_t *dbuf);
94 void sbd_handle_short_write_transfers(scsi_task_t *task,
95     stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size);
96 static void sbd_handle_sync_cache(struct scsi_task *task,
97     struct stmf_data_buf *initial_dbuf);
98 void sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf,
99     uint32_t buflen);
100 void sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf);
101 
102 extern void sbd_pgr_initialize_it(scsi_task_t *);
103 extern int sbd_pgr_reservation_conflict(scsi_task_t *);
104 extern void sbd_pgr_reset(sbd_lu_t *);
105 extern void sbd_pgr_remove_it_handle(sbd_lu_t *, sbd_it_data_t *);
106 extern void sbd_handle_pgr_in_cmd(scsi_task_t *, stmf_data_buf_t *);
107 extern void sbd_handle_pgr_out_cmd(scsi_task_t *, stmf_data_buf_t *);
108 extern void sbd_handle_pgr_out_data(scsi_task_t *, stmf_data_buf_t *);
/*
 * IMPORTANT NOTE:
 * =================
 * The whole world here is based on the assumption that everything within
 * a scsi task executes in a single threaded manner, even the aborts.
 * Don't ever change that. There won't be any performance gain, but there
 * will be tons of race conditions.
 */
117 
/*
 * Fill 'dbuf' with the next chunk of READ data from the backing store
 * and queue it to the port; then, buffer budget permitting, allocate
 * another dbuf and recurse so that multiple transfers stay in flight.
 * scmd tracks the bytes still to send (scmd->len), the next relative
 * offset (scmd->current_ro) and the number of dbufs in use
 * (scmd->nbufs).  Runs single threaded per task (see note above).
 */
void
sbd_do_read_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
					struct stmf_data_buf *dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint64_t laddr;
	uint32_t len, buflen, iolen;
	int ndx;
	int bufs_to_take;

	/* Lets try not to hog all the buffers the port has. */
	bufs_to_take = ((task->task_max_nbufs > 2) &&
	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
	    task->task_max_nbufs;

	/* Transfer size is bounded by both the remaining data and dbuf. */
	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
	laddr = scmd->addr + scmd->current_ro;

	/* Copy backing-store data into each scatter/gather segment. */
	for (buflen = 0, ndx = 0; (buflen < len) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(len - buflen, dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sbd_data_read(sl, task, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			/* Do not need to do xfer anymore, just complete it */
			dbuf->db_data_size = 0;
			dbuf->db_xfer_status = STMF_SUCCESS;
			sbd_handle_read_xfer_completion(task, scmd, dbuf);
			return;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	dbuf->db_relative_offset = scmd->current_ro;
	dbuf->db_data_size = buflen;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	scmd->len -= buflen;
	scmd->current_ro += buflen;
	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/*
			 * A bad port implementation can keep on failing the
			 * request but keep on sending us a false
			 * minsize.
			 */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/*
			 * Could not grow the window; the transfers already
			 * queued will drive the command to completion.
			 */
			return;
		}
		scmd->nbufs++;
		sbd_do_read_xfer(task, scmd, dbuf);
	}
}
181 
/*
 * Called when the port finishes (or fails) one READ data transfer.
 * On transfer failure the task is aborted.  Otherwise the bytes are
 * accounted; when no data remains (or a backing-store read already
 * failed) the command is finished once all outstanding buffers drain,
 * else the pipeline is kept going via sbd_do_read_xfer().
 */
void
sbd_handle_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
				struct stmf_data_buf *dbuf)
{
	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE) {
		/* allocate new dbuf */
		uint32_t maxsize, minsize, old_minsize;
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Replacement failed; abort once nothing is queued. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_read_xfer(task, scmd, dbuf);
}
228 
/*
 * Entry point for READ(6)/(10)/(12)/(16).  Decodes LBA and transfer
 * length from the CDB, range-checks against the LU size, and either
 * satisfies the whole request in a single transfer (fast path: one
 * sufficiently large single-segment dbuf) or sets up an sbd_cmd_t and
 * starts the multi-buffer pipeline in sbd_do_read_xfer().
 */
void
sbd_handle_read(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0];
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;
	int fast_path;

	/* Decode LBA/length per CDB form. */
	if (op == SCMD_READ) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* READ(6): a transfer length of 0 means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_READ_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_READ_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_READ_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert block units to byte units. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/* Fast path only when CDB and expected lengths agree exactly. */
	if (len != task->task_expected_xfer_length) {
		fast_path = 0;
		len = (len > task->task_expected_xfer_length) ?
		    task->task_expected_xfer_length : len;
	} else {
		fast_path = 1;
	}

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
	}
	dbuf = initial_dbuf;

	/*
	 * Fast path: the whole read fits in one contiguous segment, so
	 * issue a single transfer and let the port send status too.
	 */
	if ((dbuf->db_buf_size >= len) && fast_path &&
	    (dbuf->db_sglist_length == 1)) {
		if (sbd_data_read(sl, task, laddr, (uint64_t)len,
		    dbuf->db_sglist[0].seg_addr) == STMF_SUCCESS) {
			dbuf->db_relative_offset = 0;
			dbuf->db_data_size = len;
			dbuf->db_flags = DB_SEND_STATUS_GOOD |
			    DB_DIRECTION_TO_RPORT;
			(void) stmf_xfer_data(task, dbuf, STMF_IOF_LU_DONE);
		} else {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		}
		return;
	}

	/* Slow path: set up per-command state and run the pipeline. */
	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_READ;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	sbd_do_read_xfer(task, scmd, dbuf);
}
338 
/*
 * Request the next chunk of WRITE data from the port.  'dbuf' may be an
 * existing buffer to reuse; it is freed and replaced when it is marked
 * DB_DONT_REUSE or when the caller passed dbuf_reusable == 0.  Like the
 * read side, this keeps up to bufs_to_take transfers outstanding by
 * tail-recursing with a NULL dbuf.
 */
void
sbd_do_write_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	uint32_t len;
	int bufs_to_take;

	if (scmd->len == 0) {
		goto DO_WRITE_XFER_DONE;
	}

	/* Lets try not to hog all the buffers the port has. */
	bufs_to_take = ((task->task_max_nbufs > 2) &&
	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
	    task->task_max_nbufs;

	if ((dbuf != NULL) &&
	    ((dbuf->db_flags & DB_DONT_REUSE) || (dbuf_reusable == 0))) {
		/* free current dbuf and allocate a new one */
		stmf_free_dbuf(task, dbuf);
		dbuf = NULL;
	}
	if (scmd->nbufs >= bufs_to_take) {
		goto DO_WRITE_XFER_DONE;
	}
	if (dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (scmd->len > (128*1024)) ? 128*1024 :
		    scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/*
			 * Only abort when nothing is in flight; otherwise
			 * the outstanding transfers keep the command going.
			 */
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}

	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size :
	    scmd->len;

	dbuf->db_relative_offset = scmd->current_ro;
	dbuf->db_data_size = len;
	dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	scmd->nbufs++; /* outstanding port xfers and bufs used */
	scmd->len -= len;
	scmd->current_ro += len;

	if ((scmd->len != 0) && (scmd->nbufs < bufs_to_take)) {
		sbd_do_write_xfer(task, scmd, NULL, 0);
	}
	return;

DO_WRITE_XFER_DONE:
	if (dbuf != NULL) {
		stmf_free_dbuf(task, dbuf);
	}
}
405 
/*
 * Called when the port has finished moving WRITE data into 'dbuf'.
 * Writes the received data to the backing store, kicks off the next
 * port transfer in parallel, and completes the command (GOOD or
 * WRITE ERROR) once all data has arrived and all buffers have drained.
 */
void
sbd_handle_write_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint64_t laddr;
	uint32_t buflen, iolen;
	int ndx;

	if (scmd->nbufs > 0) {
		/*
		 * Decrement the count to indicate the port xfer
		 * into the dbuf has completed even though the buf is
		 * still in use here in the LU provider.
		 */
		scmd->nbufs--;
	}

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	/* A previous backing-store write failed; just drain the bufs. */
	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		goto WRITE_XFER_DONE;
	}

	if (scmd->len != 0) {
		/*
		 * Initiate the next port xfer to occur in parallel
		 * with writing this buf.
		 */
		sbd_do_write_xfer(task, scmd, NULL, 0);
	}

	laddr = scmd->addr + dbuf->db_relative_offset;

	/* Write each scatter/gather segment to the backing store. */
	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(dbuf->db_data_size - buflen,
		    dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sbd_data_write(sl, task, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			break;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	task->task_nbytes_transferred += buflen;
WRITE_XFER_DONE:
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_WRITE_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	sbd_do_write_xfer(task, scmd, dbuf, dbuf_reusable);
}
474 
/*
 * Entry point for WRITE(6)/(10)/(12)/(16).  Rejects writes to a
 * write-protected LU, decodes LBA/length from the CDB, range-checks
 * them, then sets up an sbd_cmd_t and starts pulling data from the
 * port.  Data delivered with the command itself (initial burst) is
 * consumed immediately via the completion path.
 */
void
sbd_handle_write(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0], do_immediate_data = 0;
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;

	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_PROTECTED);
		return;
	}
	/* Decode LBA/length per CDB form. */
	if (op == SCMD_WRITE) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* WRITE(6): a transfer length of 0 means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_WRITE_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_WRITE_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_WRITE_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert block units to byte units. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	len = (len > task->task_expected_xfer_length) ?
	    task->task_expected_xfer_length : len;

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* Handle unsolicited (initial burst) data sent with the command. */
	if ((initial_dbuf != NULL) && (task->task_flags & TF_INITIAL_BURST)) {
		if (initial_dbuf->db_data_size > len) {
			if (initial_dbuf->db_data_size >
			    task->task_expected_xfer_length) {
				/* protocol error */
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_INVALID_ARG, NULL);
				return;
			}
			initial_dbuf->db_data_size = len;
		}
		do_immediate_data = 1;
	}
	dbuf = initial_dbuf;

	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_WRITE;
	scmd->nbufs = 0;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	if (do_immediate_data) {
		/*
		 * Account for data passed in this write command
		 */
		(void) stmf_xfer_data(task, dbuf, STMF_IOF_STATS_ONLY);
		scmd->len -= dbuf->db_data_size;
		scmd->current_ro += dbuf->db_data_size;
		dbuf->db_xfer_status = STMF_SUCCESS;
		sbd_handle_write_xfer_completion(task, scmd, dbuf, 0);
	} else {
		sbd_do_write_xfer(task, scmd, dbuf, 0);
	}
}
575 
576 /*
577  * Utility routine to handle small non performance data transfers to the
578  * initiators. dbuf is an initial data buf (if any), 'p' points to a data
579  * buffer which is source of data for transfer, cdb_xfer_size is the
580  * transfer size based on CDB, cmd_xfer_size is the actual amount of data
581  * which this command would transfer (the size of data pointed to by 'p').
582  */
void
sbd_handle_short_read_transfers(scsi_task_t *task, stmf_data_buf_t *dbuf,
    uint8_t *p, uint32_t cdb_xfer_size, uint32_t cmd_xfer_size)
{
	uint32_t bufsize, ndx;
	sbd_cmd_t *scmd;

	/* Never send more than the CDB allocation length permits. */
	cmd_xfer_size = min(cmd_xfer_size, cdb_xfer_size);

	task->task_cmd_xfer_length = cmd_xfer_size;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = cmd_xfer_size;
	} else {
		cmd_xfer_size = min(cmd_xfer_size,
		    task->task_expected_xfer_length);
	}

	if (cmd_xfer_size == 0) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}
	if (dbuf == NULL) {
		uint32_t minsize = cmd_xfer_size;

		dbuf = stmf_alloc_dbuf(task, cmd_xfer_size, &minsize, 0);
	}
	if (dbuf == NULL) {
		stmf_scsilib_send_status(task, STATUS_QFULL, 0);
		return;
	}

	/*
	 * Copy the source data into the dbuf's scatter/gather segments.
	 * NOTE(review): the loop bounds on bufsize only and assumes the
	 * dbuf's sglist covers cmd_xfer_size bytes -- TODO confirm the
	 * allocator/caller always guarantees that.
	 */
	for (bufsize = 0, ndx = 0; bufsize < cmd_xfer_size; ndx++) {
		uint8_t *d;
		uint32_t s;

		d = dbuf->db_sglist[ndx].seg_addr;
		s = min((cmd_xfer_size - bufsize),
		    dbuf->db_sglist[ndx].seg_length);
		bcopy(p+bufsize, d, s);
		bufsize += s;
	}
	dbuf->db_relative_offset = 0;
	dbuf->db_data_size = cmd_xfer_size;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;

	if (task->task_lu_private == NULL) {
		task->task_lu_private =
		    kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
	}
	scmd = (sbd_cmd_t *)task->task_lu_private;

	scmd->cmd_type = SBD_CMD_SMALL_READ;
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	(void) stmf_xfer_data(task, dbuf, 0);
}
639 
640 void
641 sbd_handle_short_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
642 				struct stmf_data_buf *dbuf)
643 {
644 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
645 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
646 		    dbuf->db_xfer_status, NULL);
647 		return;
648 	}
649 	task->task_nbytes_transferred = dbuf->db_data_size;
650 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
651 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
652 }
653 
/*
 * Utility routine for small non-performance-path writes from the
 * initiator (MODE SELECT, PERSISTENT RESERVE OUT parameter lists).
 * Sets up an sbd_cmd_t of type SBD_CMD_SMALL_WRITE and either issues a
 * port transfer for the data or, if the data already arrived in dbuf,
 * goes straight to the completion handler.
 */
void
sbd_handle_short_write_transfers(scsi_task_t *task,
    stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size)
{
	sbd_cmd_t *scmd;

	task->task_cmd_xfer_length = cdb_xfer_size;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = cdb_xfer_size;
	} else {
		cdb_xfer_size = min(cdb_xfer_size,
		    task->task_expected_xfer_length);
	}

	if (cdb_xfer_size == 0) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}
	if (task->task_lu_private == NULL) {
		task->task_lu_private = kmem_zalloc(sizeof (sbd_cmd_t),
		    KM_SLEEP);
	} else {
		bzero(task->task_lu_private, sizeof (sbd_cmd_t));
	}
	scmd = (sbd_cmd_t *)task->task_lu_private;
	scmd->cmd_type = SBD_CMD_SMALL_WRITE;
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->len = cdb_xfer_size;
	if (dbuf == NULL) {
		/* No data yet: allocate a buffer and pull it from the port. */
		uint32_t minsize = cdb_xfer_size;

		dbuf = stmf_alloc_dbuf(task, cdb_xfer_size, &minsize, 0);
		if (dbuf == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_data_size = cdb_xfer_size;
		dbuf->db_relative_offset = 0;
		dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);
	} else {
		/* Data came with the command (initial burst). */
		if (dbuf->db_data_size < cdb_xfer_size) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ABORTED, NULL);
			return;
		}
		dbuf->db_data_size = cdb_xfer_size;
		sbd_handle_short_write_xfer_completion(task, dbuf);
	}
}
706 
/*
 * Completion handler for SBD_CMD_SMALL_WRITE transfers.  Dispatches the
 * received parameter data by opcode: MODE SELECT and PERSISTENT RESERVE
 * OUT are handled locally, or proxied when the LU is in standby.
 */
void
sbd_handle_short_write_xfer_completion(scsi_task_t *task,
    stmf_data_buf_t *dbuf)
{
	sbd_cmd_t *scmd;
	stmf_status_t st_ret;
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;

	/*
	 * For now lets assume we will get only one sglist element
	 * for short writes. If that ever changes, we should allocate
	 * a local buffer and copy all the sg elements to one linear space.
	 */
	if ((dbuf->db_xfer_status != STMF_SUCCESS) ||
	    (dbuf->db_sglist_length > 1)) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	task->task_nbytes_transferred = dbuf->db_data_size;
	scmd = (sbd_cmd_t *)task->task_lu_private;
	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;

	/* Lets find out who to call */
	switch (task->task_cdb[0]) {
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		if (sl->sl_access_state == SBD_LU_STANDBY) {
			/* Standby LU: forward the command to the peer. */
			st_ret = stmf_proxy_scsi_cmd(task, dbuf);
			if (st_ret != STMF_SUCCESS) {
				stmf_scsilib_send_status(task, STATUS_CHECK,
				    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
			}
		} else {
			sbd_handle_mode_select_xfer(task,
			    dbuf->db_sglist[0].seg_addr, dbuf->db_data_size);
		}
		break;
	case SCMD_PERSISTENT_RESERVE_OUT:
		if (sl->sl_access_state == SBD_LU_STANDBY) {
			/* Standby LU: forward the command to the peer. */
			st_ret = stmf_proxy_scsi_cmd(task, dbuf);
			if (st_ret != STMF_SUCCESS) {
				stmf_scsilib_send_status(task, STATUS_CHECK,
				    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
			}
		} else {
			sbd_handle_pgr_out_data(task, dbuf);
		}
		break;
	default:
		/* This should never happen */
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ABORTED, NULL);
	}
}
763 
764 void
765 sbd_handle_read_capacity(struct scsi_task *task,
766     struct stmf_data_buf *initial_dbuf)
767 {
768 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
769 	uint32_t cdb_len;
770 	uint8_t p[32];
771 	uint64_t s;
772 	uint16_t blksize;
773 
774 	s = sl->sl_lu_size >> sl->sl_data_blocksize_shift;
775 	s--;
776 	blksize = ((uint16_t)1) << sl->sl_data_blocksize_shift;
777 
778 	switch (task->task_cdb[0]) {
779 	case SCMD_READ_CAPACITY:
780 		if (s & 0xffffffff00000000ull) {
781 			p[0] = p[1] = p[2] = p[3] = 0xFF;
782 		} else {
783 			p[0] = (s >> 24) & 0xff;
784 			p[1] = (s >> 16) & 0xff;
785 			p[2] = (s >> 8) & 0xff;
786 			p[3] = s & 0xff;
787 		}
788 		p[4] = 0; p[5] = 0;
789 		p[6] = (blksize >> 8) & 0xff;
790 		p[7] = blksize & 0xff;
791 		sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
792 		break;
793 
794 	case SCMD_SVC_ACTION_IN_G4:
795 		cdb_len = READ_SCSI32(&task->task_cdb[10], uint32_t);
796 		bzero(p, 32);
797 		p[0] = (s >> 56) & 0xff;
798 		p[1] = (s >> 48) & 0xff;
799 		p[2] = (s >> 40) & 0xff;
800 		p[3] = (s >> 32) & 0xff;
801 		p[4] = (s >> 24) & 0xff;
802 		p[5] = (s >> 16) & 0xff;
803 		p[6] = (s >> 8) & 0xff;
804 		p[7] = s & 0xff;
805 		p[10] = (blksize >> 8) & 0xff;
806 		p[11] = blksize & 0xff;
807 		sbd_handle_short_read_transfers(task, initial_dbuf, p,
808 		    cdb_len, 32);
809 		break;
810 	}
811 }
812 
/*
 * Synthesize a disk geometry for an LU of 's' bytes with the given
 * block size: small LUs (< 4 GiB) get 32 sectors / 8 heads, larger
 * ones 254 / 254, and the cylinder count is whatever capacity then
 * requires.
 */
void
sbd_calc_geometry(uint64_t s, uint16_t blksize, uint8_t *nsectors,
    uint8_t *nheads, uint32_t *ncyl)
{
	const uint64_t four_gib = 4ull << 30;

	if (s < four_gib) {
		*nsectors = 32;
		*nheads = 8;
	} else {
		*nsectors = 254;
		*nheads = 254;
	}
	*ncyl = s / ((uint64_t)blksize * (uint64_t)(*nsectors) *
	    (uint64_t)(*nheads));
}
827 
/*
 * Handle MODE SENSE(6)/(10).  'buf' is a caller-provided scratch area
 * (assumed zeroed and large enough) into which the mode parameter
 * header and the requested page(s) are assembled: format (0x03),
 * geometry (0x04), caching (0x08) and control (0x0A), or all of them
 * for MODEPAGE_ALLPAGES.  No block descriptors are returned.  'ctrl'
 * is the PC field: 0 current, 1 changeable, 2/3 default/saved.
 */
void
sbd_handle_mode_sense(struct scsi_task *task,
    struct stmf_data_buf *initial_dbuf, uint8_t *buf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint32_t cmd_size, n;
	uint8_t *cdb;
	uint32_t ncyl;
	uint8_t nsectors, nheads;
	uint8_t page, ctrl, header_size, pc_valid;
	uint16_t nbytes;
	uint8_t *p;
	uint64_t s = sl->sl_lu_size;
	uint32_t dev_spec_param_offset;

	p = buf;	/* buf is assumed to be zeroed out and large enough */
	n = 0;
	cdb = &task->task_cdb[0];
	page = cdb[2] & 0x3F;
	ctrl = (cdb[2] >> 6) & 3;
	cmd_size = (cdb[0] == SCMD_MODE_SENSE) ? cdb[4] :
	    READ_SCSI16(&cdb[7], uint32_t);

	/* Header layout differs between the 6- and 10-byte forms. */
	if (cdb[0] == SCMD_MODE_SENSE) {
		header_size = 4;
		dev_spec_param_offset = 2;
	} else {
		header_size = 8;
		dev_spec_param_offset = 3;
	}

	/* Now validate the command */
	if ((cdb[2] == 0) || (page == MODEPAGE_ALLPAGES) || (page == 0x08) ||
	    (page == 0x0A) || (page == 0x03) || (page == 0x04)) {
		pc_valid = 1;
	} else {
		pc_valid = 0;
	}
	if ((cmd_size < header_size) || (pc_valid == 0)) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/* We will update the length in the mode header at the end */

	/* Block dev device specific param in mode param header has wp bit */
	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		p[n + dev_spec_param_offset] = BIT_7;
	}
	n += header_size;
	/* We are not going to return any block descriptor */

	nbytes = ((uint16_t)1) << sl->sl_data_blocksize_shift;
	sbd_calc_geometry(s, nbytes, &nsectors, &nheads, &ncyl);

	/* Format device page (0x03): sectors/track and bytes/sector. */
	if ((page == 0x03) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x03;
		p[n+1] = 0x16;
		if (ctrl != 1) {
			p[n + 11] = nsectors;
			p[n + 12] = nbytes >> 8;
			p[n + 13] = nbytes & 0xff;
			p[n + 20] = 0x80;
		}
		n += 24;
	}
	/* Rigid disk geometry page (0x04): cylinders and heads. */
	if ((page == 0x04) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x04;
		p[n + 1] = 0x16;
		if (ctrl != 1) {
			p[n + 2] = ncyl >> 16;
			p[n + 3] = ncyl >> 8;
			p[n + 4] = ncyl & 0xff;
			p[n + 5] = nheads;
			p[n + 20] = 0x15;
			p[n + 21] = 0x18;
		}
		n += 24;
	}
	/* Caching page (0x08): report WCE per the PC field requested. */
	if ((page == MODEPAGE_CACHING) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_caching *mode_caching_page;

		mode_caching_page = (struct mode_caching *)&p[n];

		mode_caching_page->mode_page.code = MODEPAGE_CACHING;
		mode_caching_page->mode_page.ps = 1; /* A saveable page */
		mode_caching_page->mode_page.length = 0x12;

		switch (ctrl) {
		case (0):
			/* Current */
			if ((sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		case (1):
			/* Changeable */
			if ((sl->sl_flags &
			    SL_WRITEBACK_CACHE_SET_UNSUPPORTED) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		default:
			/* Default/saved values */
			if ((sl->sl_flags &
			    SL_SAVED_WRITE_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;
		}
		n += (sizeof (struct mode_page) +
		    mode_caching_page->mode_page.length);
	}
	/* Control mode page (0x0A). */
	if ((page == MODEPAGE_CTRL_MODE) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_control_scsi3 *mode_control_page;

		mode_control_page = (struct mode_control_scsi3 *)&p[n];

		mode_control_page->mode_page.code = MODEPAGE_CTRL_MODE;
		mode_control_page->mode_page.length =
		    PAGELENGTH_MODE_CONTROL_SCSI3;
		if (ctrl != 1) {
			/* If not looking for changeable values, report this. */
			mode_control_page->que_mod = CTRL_QMOD_UNRESTRICT;
		}
		n += (sizeof (struct mode_page) +
		    mode_control_page->mode_page.length);
	}

	if (cdb[0] == SCMD_MODE_SENSE) {
		if (n > 255) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/*
		 * Mode parameter header length doesn't include the number
		 * of bytes in the length field, so adjust the count.
		 * Byte count minus header length field size.
		 */
		buf[0] = (n - 1) & 0xff;
	} else {
		/* Byte count minus header length field size. */
		buf[1] = (n - 2) & 0xff;
		buf[0] = ((n - 2) >> 8) & 0xff;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, buf,
	    cmd_size, n);
}
980 
981 void
982 sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf)
983 {
984 	uint32_t cmd_xfer_len;
985 
986 	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
987 		cmd_xfer_len = (uint32_t)task->task_cdb[4];
988 	} else {
989 		cmd_xfer_len = READ_SCSI16(&task->task_cdb[7], uint32_t);
990 	}
991 
992 	if ((task->task_cdb[1] & 0xFE) != 0x10) {
993 		stmf_scsilib_send_status(task, STATUS_CHECK,
994 		    STMF_SAA_INVALID_FIELD_IN_CDB);
995 		return;
996 	}
997 
998 	if (cmd_xfer_len == 0) {
999 		/* zero byte mode selects are allowed */
1000 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1001 		return;
1002 	}
1003 
1004 	sbd_handle_short_write_transfers(task, dbuf, cmd_xfer_len);
1005 }
1006 
/*
 * Data-in phase of MODE SELECT: parse the mode parameter list in buf
 * (buflen bytes) and apply the change.  Only the caching mode page
 * (0x08) is accepted, and within it only the WCE bit may differ from
 * zero; any other page, bit, or length mismatch is rejected.
 */
void
sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf, uint32_t buflen)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	int hdr_len, bd_len;	/* mode parameter header/block descr. sizes */
	sbd_status_t sret;
	int i;

	/* 6-byte CDB carries a 4 byte parameter header, 10-byte an 8 byte. */
	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
		hdr_len = 4;
	} else {
		hdr_len = 8;
	}

	if (buflen < hdr_len)
		goto mode_sel_param_len_err;

	/* Block descriptor length: byte 3 (6-byte CDB) or bytes 6-7 (10). */
	bd_len = hdr_len == 4 ? buf[3] : READ_SCSI16(&buf[6], int);

	/* Need at least a 2-byte page header past header + descriptors. */
	if (buflen < (hdr_len + bd_len + 2))
		goto mode_sel_param_len_err;

	/* Advance to the first (and only accepted) mode page. */
	buf += hdr_len + bd_len;
	buflen -= hdr_len + bd_len;

	/* Only the caching page (0x08); remaining length must match p[1]. */
	if ((buf[0] != 8) || (buflen != ((uint32_t)buf[1] + 2))) {
		goto mode_sel_param_len_err;
	}

	/* In byte 2 only WCE (BIT_2) may be set. */
	if (buf[2] & 0xFB) {
		goto mode_sel_param_field_err;
	}

	/* All remaining bytes of the page must be zero. */
	for (i = 3; i < (buf[1] + 2); i++) {
		if (buf[i]) {
			goto mode_sel_param_field_err;
		}
	}

	sret = SBD_SUCCESS;

	/* All good. Lets handle the write cache change, if any */
	if (buf[2] & BIT_2) {
		sret = sbd_wcd_set(0, sl);	/* WCE set: enable cache */
	} else {
		sret = sbd_wcd_set(1, sl);	/* WCE clear: disable cache */
	}

	if (sret != SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
		return;
	}

	/* set on the device passed, now set the flags */
	mutex_enter(&sl->sl_lock);
	if (buf[2] & BIT_2) {
		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
	} else {
		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
	}

	/* Every other I_T nexus gets a MODE PARAMETERS CHANGED UA. */
	for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
		if (it == task->task_lu_itl_handle)
			continue;
		it->sbd_it_ua_conditions |= SBD_UA_MODE_PARAMETERS_CHANGED;
	}

	/* SP bit of the CDB: persist the new setting via the saved flags. */
	if (task->task_cdb[1] & 1) {
		if (buf[2] & BIT_2) {
			sl->sl_flags &= ~SL_SAVED_WRITE_CACHE_DISABLE;
		} else {
			sl->sl_flags |= SL_SAVED_WRITE_CACHE_DISABLE;
		}
		mutex_exit(&sl->sl_lock);
		sret = sbd_write_lu_info(sl);
	} else {
		mutex_exit(&sl->sl_lock);
	}
	if (sret == SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
	}
	return;

mode_sel_param_len_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_PARAM_LIST_LENGTH_ERROR);
	return;
mode_sel_param_field_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_INVALID_FIELD_IN_PARAM_LIST);
}
1103 
1104 /*
1105  * This function parse through a string, passed to it as a pointer to a string,
1106  * by adjusting the pointer to the first non-space character and returns
1107  * the count/length of the first bunch of non-space characters. Multiple
1108  * Management URLs are stored as a space delimited string in sl_mgmt_url
1109  * field of sbd_lu_t. This function is used to retrieve one url at a time.
1110  *
1111  * i/p : pointer to pointer to a url string
1112  * o/p : Adjust the pointer to the url to the first non white character
1113  *       and returns the length of the URL
1114  */
1115 uint16_t
1116 sbd_parse_mgmt_url(char **url_addr) {
1117 	uint16_t url_length = 0;
1118 	char *url;
1119 	url = *url_addr;
1120 
1121 	while (*url != '\0') {
1122 		if (*url == ' ' || *url == '\t' || *url == '\n') {
1123 			(*url_addr)++;
1124 			url = *url_addr;
1125 		} else {
1126 			break;
1127 		}
1128 	}
1129 
1130 	while (*url != '\0') {
1131 		if (*url == ' ' || *url == '\t' ||
1132 		    *url == '\n' || *url == '\0') {
1133 			break;
1134 		}
1135 		url++;
1136 		url_length++;
1137 	}
1138 	return (url_length);
1139 }
1140 
/*
 * Handle the INQUIRY command: standard inquiry data plus VPD pages
 * 0x00 (supported pages), 0x80 (unit serial number), 0x83 (device
 * identification), 0x85 (management network addresses, only when a
 * management URL is configured) and 0x86 (extended inquiry data).
 */
void
sbd_handle_inquiry(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	uint8_t *p;
	uint8_t byte0;
	uint8_t page_length;
	uint16_t bsize = 512;
	uint16_t cmd_size;
	uint32_t xfer_size = 4;
	uint32_t mgmt_url_size = 0;
	char *mgmt_url = NULL;


	byte0 = DTYPE_DIRECT;
	/*
	 * Basic protocol checks.
	 */

	/* Page code without the EVPD bit, or non-zero control byte: error. */
	if ((((cdbp[1] & 1) == 0) && cdbp[2]) || cdbp[5]) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/*
	 * Zero byte allocation length is not an error.  Just
	 * return success.
	 */

	cmd_size = (((uint16_t)cdbp[3]) << 8) | cdbp[4];

	if (cmd_size == 0) {
		task->task_cmd_xfer_length = 0;
		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length = 0;
		}
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/*
	 * Standard inquiry
	 */

	if ((cdbp[1] & 1) == 0) {
		int	i;
		struct scsi_inquiry *inq;

		p = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP);
		inq = (struct scsi_inquiry *)p;

		/* ADDITIONAL LENGTH field; full response is 5 bytes more. */
		page_length = 69;
		xfer_size = page_length + 5;

		inq->inq_dtype = DTYPE_DIRECT;
		inq->inq_ansi = 5;	/* SPC-3 */
		inq->inq_hisup = 1;
		inq->inq_rdf = 2;	/* Response data format for SPC-3 */
		inq->inq_len = page_length;

		inq->inq_tpgs = TPGS_FAILOVER_IMPLICIT;
		inq->inq_cmdque = 1;

		/* Per-LU identity strings, else the module-wide defaults. */
		if (sl->sl_flags & SL_VID_VALID) {
			bcopy(sl->sl_vendor_id, inq->inq_vid, 8);
		} else {
			bcopy(sbd_vendor_id, inq->inq_vid, 8);
		}

		if (sl->sl_flags & SL_PID_VALID) {
			bcopy(sl->sl_product_id, inq->inq_pid, 16);
		} else {
			bcopy(sbd_product_id, inq->inq_pid, 16);
		}

		if (sl->sl_flags & SL_REV_VALID) {
			bcopy(sl->sl_revision, inq->inq_revision, 4);
		} else {
			bcopy(sbd_revision, inq->inq_revision, 4);
		}

		/* Adding Version Descriptors */
		i = 0;
		/* SAM-3 no version */
		inq->inq_vd[i].inq_vd_msb = 0x00;
		inq->inq_vd[i].inq_vd_lsb = 0x60;
		i++;

		/* transport */
		switch (task->task_lport->lport_id->protocol_id) {
		case PROTOCOL_FIBRE_CHANNEL:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x00;
			i++;
			break;

		case PROTOCOL_PARALLEL_SCSI:
		case PROTOCOL_SSA:
		case PROTOCOL_IEEE_1394:
			/* Currently no claims of conformance */
			break;

		case PROTOCOL_SRP:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x40;
			i++;
			break;

		case PROTOCOL_iSCSI:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x60;
			i++;
			break;

		case PROTOCOL_SAS:
		case PROTOCOL_ADT:
		case PROTOCOL_ATAPI:
		default:
			/* Currently no claims of conformance */
			break;
		}

		/* SPC-3 no version */
		inq->inq_vd[i].inq_vd_msb = 0x03;
		inq->inq_vd[i].inq_vd_lsb = 0x00;
		i++;

		/* SBC-2 no version */
		inq->inq_vd[i].inq_vd_msb = 0x03;
		inq->inq_vd[i].inq_vd_lsb = 0x20;

		sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
		    min(cmd_size, xfer_size));
		kmem_free(p, bsize);

		return;
	}

	/* Held across VPD handling so the mgmt url can't change under us. */
	rw_enter(&sbd_global_prop_lock, RW_READER);
	if (sl->sl_mgmt_url) {
		mgmt_url_size = strlen(sl->sl_mgmt_url);
		mgmt_url = sl->sl_mgmt_url;
	} else if (sbd_mgmt_url) {
		mgmt_url_size = strlen(sbd_mgmt_url);
		mgmt_url = sbd_mgmt_url;
	}

	/*
	 * EVPD handling
	 */

	/* Default 512 bytes may not be enough, increase bsize if necessary */
	if (cdbp[2] == 0x83 || cdbp[2] == 0x85) {
		if (bsize <  cmd_size)
			bsize = cmd_size;
	}
	p = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP);

	switch (cdbp[2]) {
	case 0x00:
		/*
		 * Supported-pages page.  p[4] is left 0x00 by kmem_zalloc,
		 * which is page 0x00 itself, the first in ascending order.
		 */
		page_length = 4 + (mgmt_url_size ? 1 : 0);

		p[0] = byte0;
		p[3] = page_length;
		/* Supported VPD pages in ascending order */
		{
			uint8_t i = 5;

			p[i++] = 0x80;
			p[i++] = 0x83;
			if (mgmt_url_size != 0)
				p[i++] = 0x85;
			p[i++] = 0x86;
		}
		xfer_size = page_length + 4;
		break;

	case 0x80:
		/* Unit serial number page. */
		if (sl->sl_serial_no_size) {
			page_length = sl->sl_serial_no_size;
			bcopy(sl->sl_serial_no, p + 4, sl->sl_serial_no_size);
		} else {
			/* if no serial num is specified set 4 spaces */
			page_length = 4;
			bcopy("    ", p + 4, 4);
		}
		p[0] = byte0;
		p[1] = 0x80;
		p[3] = page_length;
		xfer_size = page_length + 4;
		break;

	case 0x83:
		/* Device identification page, built by the stmf framework. */
		xfer_size = stmf_scsilib_prepare_vpd_page83(task, p,
		    bsize, byte0, STMF_VPD_LU_ID|STMF_VPD_TARGET_ID|
		    STMF_VPD_TP_GROUP|STMF_VPD_RELATIVE_TP_ID);
		break;

	case 0x85:
		/* Management network addresses page. */
		if (mgmt_url_size == 0) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			goto err_done;
		}
		{
			uint16_t idx, newidx, sz, url_size;
			char *url;

			p[0] = byte0;
			p[1] = 0x85;

			idx = 4;
			url = mgmt_url;
			url_size = sbd_parse_mgmt_url(&url);
			/* Creating Network Service Descriptors */
			while (url_size != 0) {
				/* Null terminated and 4 Byte aligned */
				sz = url_size + 1;
				sz += (sz % 4) ? 4 - (sz % 4) : 0;
				newidx = idx + sz + 4;

				/* Descriptors that don't fit are skipped. */
				if (newidx < bsize) {
					/*
					 * SPC-3r23 : Table 320  (Sec 7.6.5)
					 * (Network service descriptor format
					 *
					 * Note: Hard coding service type as
					 * "Storage Configuration Service".
					 */
					p[idx] = 1;
					SCSI_WRITE16(p + idx + 2, sz);
					bcopy(url, p + idx + 4, url_size);
					xfer_size = newidx + 4;
				}
				idx = newidx;

				/* skip to next mgmt url if any */
				url += url_size;
				url_size = sbd_parse_mgmt_url(&url);
			}

			/* Total descriptor length */
			SCSI_WRITE16(p + 2, idx - 4);
			break;
		}

	case 0x86:
		/* Extended INQUIRY data page. */
		page_length = 0x3c;

		p[0] = byte0;
		p[1] = 0x86;		/* Page 86 response */
		p[3] = page_length;

		/*
		 * Bits 0, 1, and 2 will need to be updated
		 * to reflect the queue tag handling if/when
		 * that is implemented.  For now, we're going
		 * to claim support only for Simple TA.
		 */
		p[5] = 1;
		xfer_size = page_length + 4;
		break;

	default:
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		goto err_done;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
	    min(cmd_size, xfer_size));
err_done:
	kmem_free(p, bsize);
	rw_exit(&sbd_global_prop_lock);
}
1419 
1420 stmf_status_t
1421 sbd_task_alloc(struct scsi_task *task)
1422 {
1423 	if ((task->task_lu_private =
1424 	    kmem_alloc(sizeof (sbd_cmd_t), KM_NOSLEEP)) != NULL) {
1425 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1426 		scmd->flags = 0;
1427 		return (STMF_SUCCESS);
1428 	}
1429 	return (STMF_ALLOC_FAILURE);
1430 }
1431 
1432 void
1433 sbd_remove_it_handle(sbd_lu_t *sl, sbd_it_data_t *it)
1434 {
1435 	sbd_it_data_t **ppit;
1436 
1437 	sbd_pgr_remove_it_handle(sl, it);
1438 	mutex_enter(&sl->sl_lock);
1439 	for (ppit = &sl->sl_it_list; *ppit != NULL;
1440 	    ppit = &((*ppit)->sbd_it_next)) {
1441 		if ((*ppit) == it) {
1442 			*ppit = it->sbd_it_next;
1443 			break;
1444 		}
1445 	}
1446 	mutex_exit(&sl->sl_lock);
1447 
1448 	DTRACE_PROBE2(itl__nexus__end, stmf_lu_t *, sl->sl_lu,
1449 	    sbd_it_data_t *, it);
1450 
1451 	kmem_free(it, sizeof (*it));
1452 }
1453 
/*
 * Clear a SCSI-2 reservation on the LU.  When 'it' is NULL the holder
 * is looked up and the reservation cleared unconditionally (LU reset
 * path); otherwise the reservation is cleared only if the given I_T
 * nexus is the one holding it.
 */
void
sbd_check_and_clear_scsi2_reservation(sbd_lu_t *sl, sbd_it_data_t *it)
{
	mutex_enter(&sl->sl_lock);
	if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) == 0) {
		/* If we dont have any reservations, just get out. */
		mutex_exit(&sl->sl_lock);
		return;
	}

	if (it == NULL) {
		/* Find the I_T nexus which is holding the reservation. */
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			if (it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) {
				ASSERT(it->sbd_it_session_id ==
				    sl->sl_rs_owner_session_id);
				break;
			}
		}
		ASSERT(it != NULL);
	} else {
		/*
		 * We were passed an I_T nexus. If this nexus does not hold
		 * the reservation, do nothing. This is why this function is
		 * called "check_and_clear".
		 */
		if ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0) {
			mutex_exit(&sl->sl_lock);
			return;
		}
	}
	/* Drop the reservation on both the nexus and the LU. */
	it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
	sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
	mutex_exit(&sl->sl_lock);
}
1489 
1490 
1491 
/*
 * Main LU entry point for a new task.  Establishes per-I_T-nexus state
 * on first contact, then walks the checks in order: task management,
 * access-state transition, POR unit attention, reservation conflicts,
 * remaining unit attentions (SAM3R14 5.3.2 order), standby proxying,
 * and finally dispatches the CDB to the individual command handlers.
 */
void
sbd_new_task(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	uint8_t cdb0, cdb1;
	stmf_status_t st_ret;

	/* First command on this nexus: allocate and register its state. */
	if ((it = task->task_lu_itl_handle) == NULL) {
		mutex_enter(&sl->sl_lock);
		/* A nexus for this session already exists: transient, retry. */
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			if (it->sbd_it_session_id ==
			    task->task_session->ss_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_BUSY, 0);
				return;
			}
		}
		it = (sbd_it_data_t *)kmem_zalloc(sizeof (*it), KM_NOSLEEP);
		if (it == NULL) {
			mutex_exit(&sl->sl_lock);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		it->sbd_it_session_id = task->task_session->ss_session_id;
		bcopy(task->task_lun_no, it->sbd_it_lun, 8);
		it->sbd_it_next = sl->sl_it_list;
		sl->sl_it_list = it;
		mutex_exit(&sl->sl_lock);

		DTRACE_PROBE1(itl__nexus__start, scsi_task *, task);

		sbd_pgr_initialize_it(task);
		if (stmf_register_itl_handle(task->task_lu, task->task_lun_no,
		    task->task_session, it->sbd_it_session_id, it)
		    != STMF_SUCCESS) {
			sbd_remove_it_handle(sl, it);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		task->task_lu_itl_handle = it;
		if (sl->sl_access_state != SBD_LU_STANDBY) {
			it->sbd_it_ua_conditions = SBD_UA_POR;
		}
	} else if (it->sbd_it_flags & SBD_IT_PGR_CHECK_FLAG) {
		/* PGR state changed since the last command; re-initialize. */
		sbd_pgr_initialize_it(task);
		mutex_enter(&sl->sl_lock);
		it->sbd_it_flags &= ~SBD_IT_PGR_CHECK_FLAG;
		mutex_exit(&sl->sl_lock);
	}

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	/*
	 * if we're transitioning between access
	 * states, return NOT READY
	 */
	if (sl->sl_access_state == SBD_LU_TRANSITION_TO_STANDBY ||
	    sl->sl_access_state == SBD_LU_TRANSITION_TO_ACTIVE) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
		return;
	}

	/* Checking ua conditions as per SAM3R14 5.3.2 specified order */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_POR) {
			it->sbd_it_ua_conditions &= ~SBD_UA_POR;
			saa = STMF_SAA_POR;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	/* Reservation conflict checks */
	if (sl->sl_access_state != SBD_LU_STANDBY) {
		if (SBD_PGR_RSVD(sl->sl_pgr)) {
			if (sbd_pgr_reservation_conflict(task)) {
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		} else if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) &&
		    ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0)) {
			/* SCSI-2 reservation held by a different nexus. */
			if (!(SCSI2_CONFLICT_FREE_CMDS(task->task_cdb))) {
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		}
	}

	/* Rest of the ua conndition checks */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_CAPACITY_CHANGED) {
			it->sbd_it_ua_conditions &= ~SBD_UA_CAPACITY_CHANGED;
			/* READ CAPACITY itself doesn't get this UA. */
			if ((task->task_cdb[0] == SCMD_READ_CAPACITY) ||
			    ((task->task_cdb[0] == SCMD_SVC_ACTION_IN_G4) &&
			    (task->task_cdb[1] ==
			    SSVC_ACTION_READ_CAPACITY_G4))) {
				saa = 0;
			} else {
				saa = STMF_SAA_CAPACITY_DATA_HAS_CHANGED;
			}
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_MODE_PARAMETERS_CHANGED) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_MODE_PARAMETERS_CHANGED;
			saa = STMF_SAA_MODE_PARAMETERS_CHANGED;
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_ASYMMETRIC_ACCESS_CHANGED) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_ASYMMETRIC_ACCESS_CHANGED;
			saa = STMF_SAA_ASYMMETRIC_ACCESS_CHANGED;
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_ACCESS_STATE_TRANSITION) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_ACCESS_STATE_TRANSITION;
			saa = STMF_SAA_LU_NO_ACCESS_TRANSITION;
		} else {
			it->sbd_it_ua_conditions = 0;
			saa = 0;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	cdb0 = task->task_cdb[0];
	cdb1 = task->task_cdb[1];

	if (sl->sl_access_state == SBD_LU_STANDBY) {
		/* Only this set of commands is serviced while in standby. */
		if (cdb0 != SCMD_INQUIRY &&
		    cdb0 != SCMD_MODE_SENSE &&
		    cdb0 != SCMD_MODE_SENSE_G1 &&
		    cdb0 != SCMD_MODE_SELECT &&
		    cdb0 != SCMD_MODE_SELECT_G1 &&
		    cdb0 != SCMD_RESERVE &&
		    cdb0 != SCMD_RELEASE &&
		    cdb0 != SCMD_PERSISTENT_RESERVE_OUT &&
		    cdb0 != SCMD_PERSISTENT_RESERVE_IN &&
		    cdb0 != SCMD_REQUEST_SENSE &&
		    cdb0 != SCMD_READ_CAPACITY &&
		    !(cdb0 == SCMD_SVC_ACTION_IN_G4 &&
		    cdb1 == SSVC_ACTION_READ_CAPACITY_G4) &&
		    !(cdb0 == SCMD_MAINTENANCE_IN &&
		    (cdb1 & 0x1F) == 0x0A)) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_LU_NO_ACCESS_STANDBY);
			return;
		}

		/*
		 * is this a short write?
		 * if so, we'll need to wait until we have the buffer
		 * before proxying the command
		 */
		switch (cdb0) {
			case SCMD_MODE_SELECT:
			case SCMD_MODE_SELECT_G1:
			case SCMD_PERSISTENT_RESERVE_OUT:
				break;
			default:
				st_ret = stmf_proxy_scsi_cmd(task,
				    initial_dbuf);
				if (st_ret != STMF_SUCCESS) {
					stmf_scsilib_send_status(task,
					    STATUS_CHECK,
					    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
				}
				return;
		}
	}

	/* Mask off the group code so all READ/WRITE CDB sizes match. */
	cdb0 = task->task_cdb[0] & 0x1F;

	if ((cdb0 == SCMD_READ) || (cdb0 == SCMD_WRITE)) {
		if (task->task_additional_flags & TASK_AF_PORT_LOAD_HIGH) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
		if (cdb0 == SCMD_READ) {
			sbd_handle_read(task, initial_dbuf);
			return;
		}
		sbd_handle_write(task, initial_dbuf);
		return;
	}

	cdb0 = task->task_cdb[0];
	cdb1 = task->task_cdb[1];

	if (cdb0 == SCMD_INQUIRY) {		/* Inquiry */
		sbd_handle_inquiry(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_OUT) {
		sbd_handle_pgr_out_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_IN) {
		sbd_handle_pgr_in_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_RELEASE) {
		/* SCSI-2 RELEASE; byte 1 must be zero. */
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner don't release it, just return good */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_GOOD, 0);
				return;
			}
		}
		sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_RESERVE) {
		/* SCSI-2 RESERVE; byte 1 must be zero. */
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner, return conflict status */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		}
		sl->sl_flags |= SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags |= SBD_IT_HAS_SCSI2_RESERVATION;
		sl->sl_rs_owner_session_id = it->sbd_it_session_id;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_REQUEST_SENSE) {
		/*
		 * LU provider needs to store unretrieved sense data
		 * (e.g. after power-on/reset).  For now, we'll just
		 * return good status with no sense.
		 */

		if ((cdb1 & ~1) || task->task_cdb[2] || task->task_cdb[3] ||
		    task->task_cdb[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}

		return;
	}

	/* Report Target Port Groups */
	if ((cdb0 == SCMD_MAINTENANCE_IN) &&
	    ((cdb1 & 0x1F) == 0x0A)) {
		stmf_scsilib_handle_report_tpgs(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_START_STOP) {			/* Start stop */
		task->task_cmd_xfer_length = 0;
		if (task->task_cdb[4] & 0xFC) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/* LOEJ (media eject) bit is not supported. */
		if (task->task_cdb[4] & 2) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}
		return;

	}

	if ((cdb0 == SCMD_MODE_SENSE) || (cdb0 == SCMD_MODE_SENSE_G1)) {
		uint8_t *p;
		p = kmem_zalloc(512, KM_SLEEP);
		sbd_handle_mode_sense(task, initial_dbuf, p);
		kmem_free(p, 512);
		return;
	}

	if ((cdb0 == SCMD_MODE_SELECT) || (cdb0 == SCMD_MODE_SELECT_G1)) {
		sbd_handle_mode_select(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_TEST_UNIT_READY) {	/* Test unit ready */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_READ_CAPACITY) {		/* Read Capacity */
		sbd_handle_read_capacity(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_SVC_ACTION_IN_G4) { 	/* Read Capacity or read long */
		if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
			sbd_handle_read_capacity(task, initial_dbuf);
			return;
		/*
		 * } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
		 * 	sbd_handle_read(task, initial_dbuf);
		 * 	return;
		 */
		}
	}

	/*
	 * if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
	 *	if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
	 *		 sbd_handle_write(task, initial_dbuf);
	 * 		return;
	 *	}
	 * }
	 */

	if (cdb0 == SCMD_VERIFY) {
		/*
		 * Something more likely needs to be done here.
		 */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
	    cdb0 == SCMD_SYNCHRONIZE_CACHE_G4) {
		sbd_handle_sync_cache(task, initial_dbuf);
		return;
	}

	/* No handler for this opcode. */
	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
1867 
1868 void
1869 sbd_dbuf_xfer_done(struct scsi_task *task, struct stmf_data_buf *dbuf)
1870 {
1871 	sbd_cmd_t *scmd = NULL;
1872 
1873 	scmd = (sbd_cmd_t *)task->task_lu_private;
1874 	if ((scmd == NULL) || ((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0))
1875 		return;
1876 
1877 	switch (scmd->cmd_type) {
1878 	case (SBD_CMD_SCSI_READ):
1879 		sbd_handle_read_xfer_completion(task, scmd, dbuf);
1880 		break;
1881 
1882 	case (SBD_CMD_SCSI_WRITE):
1883 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 1);
1884 		break;
1885 
1886 	case (SBD_CMD_SMALL_READ):
1887 		sbd_handle_short_read_xfer_completion(task, scmd, dbuf);
1888 		break;
1889 
1890 	case (SBD_CMD_SMALL_WRITE):
1891 		sbd_handle_short_write_xfer_completion(task, dbuf);
1892 		break;
1893 
1894 	default:
1895 		cmn_err(CE_PANIC, "Unknown cmd type, task = %p", (void *)task);
1896 		break;
1897 	}
1898 }
1899 
/*
 * LU status-done entry point.  sbd never expects this callback, so
 * reaching it is a fatal programming error.
 */
/* ARGSUSED */
void
sbd_send_status_done(struct scsi_task *task)
{
	cmn_err(CE_PANIC,
	    "sbd_send_status_done: this should not have been called");
}
1907 
1908 void
1909 sbd_task_free(struct scsi_task *task)
1910 {
1911 	if (task->task_lu_private) {
1912 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1913 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1914 			cmn_err(CE_PANIC, "cmd is active, task = %p",
1915 			    (void *)task);
1916 		}
1917 		kmem_free(scmd, sizeof (sbd_cmd_t));
1918 	}
1919 }
1920 
1921 /*
1922  * Aborts are synchronus w.r.t. I/O AND
1923  * All the I/O which SBD does is synchronous AND
1924  * Everything within a task is single threaded.
1925  *   IT MEANS
1926  * If this function is called, we are doing nothing with this task
1927  * inside of sbd module.
1928  */
1929 /* ARGSUSED */
1930 stmf_status_t
1931 sbd_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
1932 {
1933 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1934 	scsi_task_t *task;
1935 
1936 	if (abort_cmd == STMF_LU_RESET_STATE) {
1937 		return (sbd_lu_reset_state(lu));
1938 	}
1939 
1940 	if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
1941 		sbd_check_and_clear_scsi2_reservation(sl, (sbd_it_data_t *)arg);
1942 		sbd_remove_it_handle(sl, (sbd_it_data_t *)arg);
1943 		return (STMF_SUCCESS);
1944 	}
1945 
1946 	ASSERT(abort_cmd == STMF_LU_ABORT_TASK);
1947 	task = (scsi_task_t *)arg;
1948 	if (task->task_lu_private) {
1949 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1950 
1951 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1952 			scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
1953 			return (STMF_ABORT_SUCCESS);
1954 		}
1955 	}
1956 
1957 	return (STMF_NOT_FOUND);
1958 }
1959 
1960 /* ARGSUSED */
1961 void
1962 sbd_ctl(struct stmf_lu *lu, int cmd, void *arg)
1963 {
1964 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1965 	stmf_change_status_t st;
1966 
1967 	ASSERT((cmd == STMF_CMD_LU_ONLINE) ||
1968 	    (cmd == STMF_CMD_LU_OFFLINE) ||
1969 	    (cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
1970 	    (cmd == STMF_ACK_LU_OFFLINE_COMPLETE));
1971 
1972 	st.st_completion_status = STMF_SUCCESS;
1973 	st.st_additional_info = NULL;
1974 
1975 	switch (cmd) {
1976 	case STMF_CMD_LU_ONLINE:
1977 		if (sl->sl_state == STMF_STATE_ONLINE)
1978 			st.st_completion_status = STMF_ALREADY;
1979 		else if (sl->sl_state != STMF_STATE_OFFLINE)
1980 			st.st_completion_status = STMF_FAILURE;
1981 		if (st.st_completion_status == STMF_SUCCESS) {
1982 			sl->sl_state = STMF_STATE_ONLINE;
1983 			sl->sl_state_not_acked = 1;
1984 		}
1985 		(void) stmf_ctl(STMF_CMD_LU_ONLINE_COMPLETE, lu, &st);
1986 		break;
1987 
1988 	case STMF_CMD_LU_OFFLINE:
1989 		if (sl->sl_state == STMF_STATE_OFFLINE)
1990 			st.st_completion_status = STMF_ALREADY;
1991 		else if (sl->sl_state != STMF_STATE_ONLINE)
1992 			st.st_completion_status = STMF_FAILURE;
1993 		if (st.st_completion_status == STMF_SUCCESS) {
1994 			sl->sl_flags &= ~(SL_MEDIUM_REMOVAL_PREVENTED |
1995 			    SL_LU_HAS_SCSI2_RESERVATION);
1996 			sl->sl_state = STMF_STATE_OFFLINE;
1997 			sl->sl_state_not_acked = 1;
1998 			sbd_pgr_reset(sl);
1999 		}
2000 		(void) stmf_ctl(STMF_CMD_LU_OFFLINE_COMPLETE, lu, &st);
2001 		break;
2002 
2003 	case STMF_ACK_LU_ONLINE_COMPLETE:
2004 		/* Fallthrough */
2005 	case STMF_ACK_LU_OFFLINE_COMPLETE:
2006 		sl->sl_state_not_acked = 0;
2007 		break;
2008 
2009 	}
2010 }
2011 
/*
 * Extended LU information entry point; sbd exports none.
 */
/* ARGSUSED */
stmf_status_t
sbd_info(uint32_t cmd, stmf_lu_t *lu, void *arg, uint8_t *buf,
    uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}
2019 
2020 stmf_status_t
2021 sbd_lu_reset_state(stmf_lu_t *lu)
2022 {
2023 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
2024 
2025 	mutex_enter(&sl->sl_lock);
2026 	if (sl->sl_flags & SL_SAVED_WRITE_CACHE_DISABLE) {
2027 		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
2028 		mutex_exit(&sl->sl_lock);
2029 		if (sl->sl_access_state == SBD_LU_ACTIVE) {
2030 			(void) sbd_wcd_set(1, sl);
2031 		}
2032 	} else {
2033 		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
2034 		mutex_exit(&sl->sl_lock);
2035 		if (sl->sl_access_state == SBD_LU_ACTIVE) {
2036 			(void) sbd_wcd_set(0, sl);
2037 		}
2038 	}
2039 	sbd_pgr_reset(sl);
2040 	sbd_check_and_clear_scsi2_reservation(sl, NULL);
2041 	if (stmf_deregister_all_lu_itl_handles(lu) != STMF_SUCCESS) {
2042 		return (STMF_FAILURE);
2043 	}
2044 	return (STMF_SUCCESS);
2045 }
2046 
2047 sbd_status_t
2048 sbd_flush_data_cache(sbd_lu_t *sl, int fsync_done)
2049 {
2050 	int r = 0;
2051 	int ret;
2052 
2053 	if (fsync_done)
2054 		goto over_fsync;
2055 	if ((sl->sl_data_vtype == VREG) || (sl->sl_data_vtype == VBLK)) {
2056 		if (VOP_FSYNC(sl->sl_data_vp, FSYNC, kcred, NULL))
2057 			return (SBD_FAILURE);
2058 	}
2059 over_fsync:
2060 	if (((sl->sl_data_vtype == VCHR) || (sl->sl_data_vtype == VBLK)) &&
2061 	    ((sl->sl_flags & SL_NO_DATA_DKIOFLUSH) == 0)) {
2062 		ret = VOP_IOCTL(sl->sl_data_vp, DKIOCFLUSHWRITECACHE, NULL,
2063 		    FKIOCTL, kcred, &r, NULL);
2064 		if ((ret == ENOTTY) || (ret == ENOTSUP)) {
2065 			mutex_enter(&sl->sl_lock);
2066 			sl->sl_flags |= SL_NO_DATA_DKIOFLUSH;
2067 			mutex_exit(&sl->sl_lock);
2068 		} else if (ret != 0) {
2069 			return (SBD_FAILURE);
2070 		}
2071 	}
2072 
2073 	return (SBD_SUCCESS);
2074 }
2075 
2076 /* ARGSUSED */
2077 static void
2078 sbd_handle_sync_cache(struct scsi_task *task,
2079     struct stmf_data_buf *initial_dbuf)
2080 {
2081 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
2082 	uint64_t	lba, laddr;
2083 	sbd_status_t	sret;
2084 	uint32_t	len;
2085 	int		is_g4 = 0;
2086 	int		immed;
2087 
2088 	task->task_cmd_xfer_length = 0;
2089 	/*
2090 	 * Determine if this is a 10 or 16 byte CDB
2091 	 */
2092 
2093 	if (task->task_cdb[0] == SCMD_SYNCHRONIZE_CACHE_G4)
2094 		is_g4 = 1;
2095 
2096 	/*
2097 	 * Determine other requested parameters
2098 	 *
2099 	 * We don't have a non-volatile cache, so don't care about SYNC_NV.
2100 	 * Do not support the IMMED bit.
2101 	 */
2102 
2103 	immed = (task->task_cdb[1] & 0x02);
2104 
2105 	if (immed) {
2106 		stmf_scsilib_send_status(task, STATUS_CHECK,
2107 		    STMF_SAA_INVALID_FIELD_IN_CDB);
2108 		return;
2109 	}
2110 
2111 	/*
2112 	 * Check to be sure we're not being asked to sync an LBA
2113 	 * that is out of range.  While checking, verify reserved fields.
2114 	 */
2115 
2116 	if (is_g4) {
2117 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[14] ||
2118 		    task->task_cdb[15]) {
2119 			stmf_scsilib_send_status(task, STATUS_CHECK,
2120 			    STMF_SAA_INVALID_FIELD_IN_CDB);
2121 			return;
2122 		}
2123 
2124 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
2125 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
2126 	} else {
2127 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[6] ||
2128 		    task->task_cdb[9]) {
2129 			stmf_scsilib_send_status(task, STATUS_CHECK,
2130 			    STMF_SAA_INVALID_FIELD_IN_CDB);
2131 			return;
2132 		}
2133 
2134 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
2135 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
2136 	}
2137 
2138 	laddr = lba << sl->sl_data_blocksize_shift;
2139 	len <<= sl->sl_data_blocksize_shift;
2140 
2141 	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
2142 		stmf_scsilib_send_status(task, STATUS_CHECK,
2143 		    STMF_SAA_LBA_OUT_OF_RANGE);
2144 		return;
2145 	}
2146 
2147 	sret = sbd_flush_data_cache(sl, 0);
2148 	if (sret != SBD_SUCCESS) {
2149 		stmf_scsilib_send_status(task, STATUS_CHECK,
2150 		    STMF_SAA_WRITE_ERROR);
2151 		return;
2152 	}
2153 
2154 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
2155 }
2156