xref: /illumos-gate/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_scsi.c (revision 2264ca7f5db194583c672cb5779a67f52bcd92a9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/scsi/generic/mode.h>
34 #include <sys/disp.h>
35 #include <sys/byteorder.h>
36 #include <sys/atomic.h>
37 #include <sys/sdt.h>
38 
39 #include <stmf.h>
40 #include <lpif.h>
41 #include <portif.h>
42 #include <stmf_ioctl.h>
43 #include <stmf_sbd.h>
44 #include <sbd_impl.h>
45 
46 stmf_status_t sbd_lu_reset_state(stmf_lu_t *lu);
47 static void sbd_handle_sync_cache(struct scsi_task *task,
48     struct stmf_data_buf *initial_dbuf);
49 void sbd_handle_read_xfer_completion(struct scsi_task *task,
50     sbd_cmd_t *scmd, struct stmf_data_buf *dbuf);
51 
52 /*
53  * IMPORTANT NOTE:
54  * =================
55  * The whole world here is based on the assumption that everything within
56  * a SCSI task executes in a single-threaded manner, even the aborts.
57  * Don't ever change that. There won't be any performance gain, but there
58  * will be tons of race conditions.
59  */
60 
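/*
 * Fill the passed-in dbuf from the backing store (sst_data_read) at the
 * command's current offset and hand it to the port for transfer; while
 * more data remains, pipeline additional dbufs up to bufs_to_take. A
 * backing store read failure marks the command with SBD_SCSI_CMD_XFER_FAIL
 * and lets the completion path report the error.
 */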
61 void
62 sbd_do_read_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
63 					struct stmf_data_buf *dbuf)
64 {
65 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
66 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
67 	uint64_t laddr;
68 	uint32_t len, buflen, iolen;
69 	int ndx;
70 	int bufs_to_take;
71 
72 	/* Let's try not to hog all the buffers the port has. */
73 	bufs_to_take = ((task->task_max_nbufs > 2) &&
74 	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
75 	    task->task_max_nbufs;
76 
77 	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
78 	laddr = scmd->addr + scmd->current_ro + slu->sl_sli->sli_lu_data_offset;
79 
80 	for (buflen = 0, ndx = 0; (buflen < len) &&
81 	    (ndx < dbuf->db_sglist_length); ndx++) {
82 		iolen = min(len - buflen, dbuf->db_sglist[ndx].seg_length);
83 		if (iolen == 0)
84 			break;
85 		if (sst->sst_data_read(sst, laddr, (uint64_t)iolen,
86 		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
87 			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
88 			/* Do not need to do xfer anymore, just complete it */
89 			dbuf->db_data_size = 0;
90 			dbuf->db_xfer_status = STMF_SUCCESS;
91 			sbd_handle_read_xfer_completion(task, scmd, dbuf);
92 			return;
93 		}
94 		buflen += iolen;
95 		laddr += (uint64_t)iolen;
96 	}
97 	dbuf->db_relative_offset = scmd->current_ro;
98 	dbuf->db_data_size = buflen;
99 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
100 	(void) stmf_xfer_data(task, dbuf, 0);
101 	scmd->len -= buflen;
102 	scmd->current_ro += buflen;
103 	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
104 		uint32_t maxsize, minsize, old_minsize;
105 
106 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
107 		minsize = maxsize >> 2;
108 		do {
109 			/*
110 			 * A bad port implementation can keep on failing the
111 			 * request but keep on sending us a false
112 			 * minsize.
113 			 */
114 			old_minsize = minsize;
115 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
116 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
117 		    (minsize >= 512));
118 		if (dbuf == NULL) {
119 			return;
120 		}
121 		scmd->nbufs++;
122 		sbd_do_read_xfer(task, scmd, dbuf);
123 	}
124 }
125 
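/*
 * Completion callback for a READ data transfer to the remote port. A
 * failed transfer aborts the task. Otherwise the transferred bytes are
 * accounted for; when the command is finished (or has failed) and the
 * last outstanding dbuf completes, final status is sent, else the next
 * chunk is issued via sbd_do_read_xfer().
 */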
126 void
127 sbd_handle_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
128 				struct stmf_data_buf *dbuf)
129 {
130 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
131 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
132 		    dbuf->db_xfer_status, NULL);
133 		return;
134 	}
135 	task->task_nbytes_transferred += dbuf->db_data_size;
136 	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
137 		stmf_free_dbuf(task, dbuf);
138 		scmd->nbufs--;
139 		if (scmd->nbufs)
140 			return;	/* wait for all buffers to complete */
141 		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
142 		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
143 			stmf_scsilib_send_status(task, STATUS_CHECK,
144 			    STMF_SAA_READ_ERROR);
145 		else
146 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
147 		return;
148 	}
149 	sbd_do_read_xfer(task, scmd, dbuf);
150 }
151 
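/*
 * Entry point for READ(6), READ(10), READ(12) and READ(16). Decodes the
 * LBA and transfer length from the CDB, range-checks the request against
 * the LU size and then either services it with a single fast-path
 * transfer (when one dbuf with a single sglist segment covers the whole
 * request) or sets up an sbd_cmd_t and calls sbd_do_read_xfer().
 */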
152 void
153 sbd_handle_read(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
154 {
155 	uint64_t lba, laddr;
156 	uint32_t len;
157 	uint8_t op = task->task_cdb[0];
158 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
159 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
160 	sbd_cmd_t *scmd;
161 	stmf_data_buf_t *dbuf;
162 	int fast_path;
163 
164 	if (op == SCMD_READ) {
165 		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
166 		len = (uint32_t)task->task_cdb[4];
167 
168 		if (len == 0) {
169 			len = 256;
170 		}
171 	} else if (op == SCMD_READ_G1) {
172 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
173 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
174 	} else if (op == SCMD_READ_G5) {
175 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
176 		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
177 	} else if (op == SCMD_READ_G4) {
178 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
179 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
180 	} else {
181 		stmf_scsilib_send_status(task, STATUS_CHECK,
182 		    STMF_SAA_INVALID_OPCODE);
183 		return;
184 	}
185 
186 	laddr = lba << slu->sl_shift_count;
187 	len <<= slu->sl_shift_count;
188 
189 	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
190 		stmf_scsilib_send_status(task, STATUS_CHECK,
191 		    STMF_SAA_LBA_OUT_OF_RANGE);
192 		return;
193 	}
194 
195 	task->task_cmd_xfer_length = len;
196 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
197 		task->task_expected_xfer_length = len;
198 	}
199 
200 	if (len != task->task_expected_xfer_length) {
201 		fast_path = 0;
202 		len = (len > task->task_expected_xfer_length) ?
203 		    task->task_expected_xfer_length : len;
204 	} else {
205 		fast_path = 1;
206 	}
207 
208 	if (len == 0) {
209 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
210 		return;
211 	}
212 
213 	if (initial_dbuf == NULL) {
214 		uint32_t maxsize, minsize, old_minsize;
215 
216 		maxsize = (len > (128*1024)) ? 128*1024 : len;
217 		minsize = maxsize >> 2;
218 		do {
219 			old_minsize = minsize;
220 			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
221 			    &minsize, 0);
222 		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
223 		    (minsize >= 512));
224 		if (initial_dbuf == NULL) {
225 			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
226 			return;
227 		}
228 	}
229 	dbuf = initial_dbuf;
230 
231 	if ((dbuf->db_buf_size >= len) && fast_path &&
232 	    (dbuf->db_sglist_length == 1)) {
233 		if (sst->sst_data_read(sst,
234 		    laddr + slu->sl_sli->sli_lu_data_offset, (uint64_t)len,
235 		    dbuf->db_sglist[0].seg_addr) == STMF_SUCCESS) {
236 			dbuf->db_relative_offset = 0;
237 			dbuf->db_data_size = len;
238 			dbuf->db_flags = DB_SEND_STATUS_GOOD |
239 			    DB_DIRECTION_TO_RPORT;
240 			(void) stmf_xfer_data(task, dbuf, STMF_IOF_LU_DONE);
241 		} else {
242 			stmf_scsilib_send_status(task, STATUS_CHECK,
243 			    STMF_SAA_READ_ERROR);
244 		}
245 		return;
246 	}
247 
248 	if (task->task_lu_private) {
249 		scmd = (sbd_cmd_t *)task->task_lu_private;
250 	} else {
251 		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
252 		task->task_lu_private = scmd;
253 	}
254 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
255 	scmd->cmd_type = SBD_CMD_SCSI_READ;
256 	scmd->nbufs = 1;
257 	scmd->addr = laddr;
258 	scmd->len = len;
259 	scmd->current_ro = 0;
260 
261 	sbd_do_read_xfer(task, scmd, dbuf);
262 }
263 
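/*
 * Request the next chunk of WRITE data from the remote port into the
 * passed-in dbuf and, while more data remains, pipeline further dbufs up
 * to bufs_to_take. The data is written to the backing store later, in
 * sbd_handle_write_xfer_completion(), once it arrives.
 */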
264 void
265 sbd_do_write_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
266 					struct stmf_data_buf *dbuf)
267 {
268 	uint32_t len;
269 	int bufs_to_take;
270 
271 	/* Let's try not to hog all the buffers the port has. */
272 	bufs_to_take = ((task->task_max_nbufs > 2) &&
273 	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
274 	    task->task_max_nbufs;
275 
276 	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
277 
278 	dbuf->db_relative_offset = scmd->current_ro;
279 	dbuf->db_data_size = len;
280 	dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
281 	(void) stmf_xfer_data(task, dbuf, 0);
282 	scmd->len -= len;
283 	scmd->current_ro += len;
284 	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
285 		uint32_t maxsize, minsize, old_minsize;
286 
287 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
288 		minsize = maxsize >> 2;
289 		do {
290 			old_minsize = minsize;
291 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
292 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
293 		    (minsize >= 512));
294 		if (dbuf == NULL) {
295 			return;
296 		}
297 		scmd->nbufs++;
298 		sbd_do_write_xfer(task, scmd, dbuf);
299 	}
300 }
301 
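/*
 * Completion callback for a WRITE data transfer from the remote port. A
 * failed transfer aborts the task. Otherwise the received data is written
 * to the backing store (sst_data_write); the function then either sends
 * final status (WRITE_ERROR on a backing store failure), requests more
 * data, or allocates a fresh dbuf first if the current one is not
 * reusable.
 */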
302 void
303 sbd_handle_write_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
304     struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
305 {
306 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
307 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
308 	uint64_t laddr;
309 	uint32_t buflen, iolen;
310 	int ndx;
311 
312 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
313 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
314 		    dbuf->db_xfer_status, NULL);
315 		return;
316 	}
317 
318 	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
319 		goto WRITE_XFER_DONE;
320 	}
321 
322 	laddr = scmd->addr + dbuf->db_relative_offset +
323 	    slu->sl_sli->sli_lu_data_offset;
324 
325 	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
326 	    (ndx < dbuf->db_sglist_length); ndx++) {
327 		iolen = min(dbuf->db_data_size - buflen,
328 		    dbuf->db_sglist[ndx].seg_length);
329 		if (iolen == 0)
330 			break;
331 		if (sst->sst_data_write(sst, laddr, (uint64_t)iolen,
332 		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
333 			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
334 			break;
335 		}
336 		buflen += iolen;
337 		laddr += (uint64_t)iolen;
338 	}
339 	task->task_nbytes_transferred += buflen;
340 WRITE_XFER_DONE:
341 	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
342 		stmf_free_dbuf(task, dbuf);
343 		scmd->nbufs--;
344 		if (scmd->nbufs)
345 			return;	/* wait for all buffers to complete */
346 		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
347 		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
348 			stmf_scsilib_send_status(task, STATUS_CHECK,
349 			    STMF_SAA_WRITE_ERROR);
350 		else
351 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
352 		return;
353 	}
354 	if (dbuf_reusable == 0) {
355 		uint32_t maxsize, minsize, old_minsize;
356 		/* free current dbuf and allocate a new one */
357 		stmf_free_dbuf(task, dbuf);
358 
359 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
360 		minsize = maxsize >> 2;
361 		do {
362 			old_minsize = minsize;
363 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
364 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
365 		    (minsize >= 512));
366 		if (dbuf == NULL) {
367 			scmd->nbufs--;
368 			if (scmd->nbufs == 0) {
369 				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
370 				    STMF_ALLOC_FAILURE, NULL);
371 			}
372 			return;
373 		}
374 	}
375 	sbd_do_write_xfer(task, scmd, dbuf);
376 }
377 
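/*
 * Entry point for WRITE(6), WRITE(10), WRITE(12) and WRITE(16). Decodes
 * the LBA and transfer length, range-checks the request, consumes any
 * immediate (initial burst) data delivered along with the command and
 * then drives the remainder of the transfer through sbd_do_write_xfer().
 */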
378 void
379 sbd_handle_write(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
380 {
381 	uint64_t lba, laddr;
382 	uint32_t len;
383 	uint8_t op = task->task_cdb[0], do_immediate_data = 0;
384 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
385 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
386 	sbd_cmd_t *scmd;
387 	stmf_data_buf_t *dbuf;
388 
389 	if (op == SCMD_WRITE) {
390 		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
391 		len = (uint32_t)task->task_cdb[4];
392 
393 		if (len == 0) {
394 			len = 256;
395 		}
396 	} else if (op == SCMD_WRITE_G1) {
397 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
398 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
399 	} else if (op == SCMD_WRITE_G5) {
400 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
401 		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
402 	} else if (op == SCMD_WRITE_G4) {
403 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
404 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
405 	} else {
406 		stmf_scsilib_send_status(task, STATUS_CHECK,
407 		    STMF_SAA_INVALID_OPCODE);
408 		return;
409 	}
410 
411 	laddr = lba << slu->sl_shift_count;
412 	len <<= slu->sl_shift_count;
413 
414 	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
415 		stmf_scsilib_send_status(task, STATUS_CHECK,
416 		    STMF_SAA_LBA_OUT_OF_RANGE);
417 		return;
418 	}
419 
420 	task->task_cmd_xfer_length = len;
421 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
422 		task->task_expected_xfer_length = len;
423 	}
424 
425 	len = (len > task->task_expected_xfer_length) ?
426 	    task->task_expected_xfer_length : len;
427 
428 	if (len == 0) {
429 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
430 		return;
431 	}
432 
433 	if (initial_dbuf == NULL) {
434 		uint32_t maxsize, minsize, old_minsize;
435 
436 		maxsize = (len > (128*1024)) ? 128*1024 : len;
437 		minsize = maxsize >> 2;
438 		do {
439 			old_minsize = minsize;
440 			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
441 			    &minsize, 0);
442 		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
443 		    (minsize >= 512));
444 		if (initial_dbuf == NULL) {
445 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
446 			    STMF_ALLOC_FAILURE, NULL);
447 			return;
448 		}
449 	} else if (task->task_flags & TF_INITIAL_BURST) {
450 		if (initial_dbuf->db_data_size > len) {
451 			if (initial_dbuf->db_data_size >
452 			    task->task_expected_xfer_length) {
453 				/* protocol error */
454 				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
455 				    STMF_INVALID_ARG, NULL);
456 				return;
457 			}
458 			initial_dbuf->db_data_size = len;
459 		}
460 		do_immediate_data = 1;
461 	}
462 	dbuf = initial_dbuf;
463 
464 	if (task->task_lu_private) {
465 		scmd = (sbd_cmd_t *)task->task_lu_private;
466 	} else {
467 		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
468 		task->task_lu_private = scmd;
469 	}
470 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
471 	scmd->cmd_type = SBD_CMD_SCSI_WRITE;
472 	scmd->nbufs = 1;
473 	scmd->addr = laddr;
474 	scmd->len = len;
475 	scmd->current_ro = 0;
476 
477 	if (do_immediate_data) {
478 		scmd->len -= dbuf->db_data_size;
479 		scmd->current_ro += dbuf->db_data_size;
480 		dbuf->db_xfer_status = STMF_SUCCESS;
481 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 0);
482 	} else {
483 		sbd_do_write_xfer(task, scmd, dbuf);
484 	}
485 }
486 
487 /*
488  * Utility routine to handle small, non-performance-critical data transfers
489  * to the initiators. dbuf is an initial data buf (if any), 'p' points to the
490  * data buffer which is the source of the transfer, cdb_xfer_size is the
491  * transfer size based on the CDB, and cmd_xfer_size is the actual amount of
492  * data which this command would transfer (the size of data pointed to by 'p').
493  */
494 void
495 sbd_handle_short_read_transfers(scsi_task_t *task, stmf_data_buf_t *dbuf,
496     uint8_t *p, uint32_t cdb_xfer_size, uint32_t cmd_xfer_size)
497 {
498 	uint32_t bufsize, ndx;
499 	sbd_cmd_t *scmd;
500 
501 	cmd_xfer_size = min(cmd_xfer_size, cdb_xfer_size);
502 
503 	task->task_cmd_xfer_length = cmd_xfer_size;
504 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
505 		task->task_expected_xfer_length = cmd_xfer_size;
506 	} else {
507 		cmd_xfer_size = min(cmd_xfer_size,
508 		    task->task_expected_xfer_length);
509 	}
510 
511 	if (cmd_xfer_size == 0) {
512 		stmf_scsilib_send_status(task, STATUS_CHECK,
513 		    STMF_SAA_INVALID_FIELD_IN_CDB);
514 		return;
515 	}
516 	if (dbuf == NULL) {
517 		uint32_t minsize = cmd_xfer_size;
518 
519 		dbuf = stmf_alloc_dbuf(task, cmd_xfer_size, &minsize, 0);
520 	}
521 	if (dbuf == NULL) {
522 		stmf_scsilib_send_status(task, STATUS_QFULL, 0);
523 		return;
524 	}
525 
526 	for (bufsize = 0, ndx = 0; bufsize < cmd_xfer_size; ndx++) {
527 		uint8_t *d;
528 		uint32_t s;
529 
530 		d = dbuf->db_sglist[ndx].seg_addr;
531 		s = min((cmd_xfer_size - bufsize),
532 		    dbuf->db_sglist[ndx].seg_length);
533 		bcopy(p+bufsize, d, s);
534 		bufsize += s;
535 	}
536 	dbuf->db_relative_offset = 0;
537 	dbuf->db_data_size = cmd_xfer_size;
538 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
539 
540 	if (task->task_lu_private == NULL) {
541 		task->task_lu_private =
542 		    kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
543 	}
544 	scmd = (sbd_cmd_t *)task->task_lu_private;
545 
546 	scmd->cmd_type = SBD_CMD_SMALL_READ;
547 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
548 	(void) stmf_xfer_data(task, dbuf, 0);
549 }
550 
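/*
 * Completion callback for the single data transfer issued by
 * sbd_handle_short_read_transfers().
 */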
551 void
552 sbd_handle_short_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
553 				struct stmf_data_buf *dbuf)
554 {
555 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
556 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
557 		    dbuf->db_xfer_status, NULL);
558 		return;
559 	}
560 	task->task_nbytes_transferred = dbuf->db_data_size;
561 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
562 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
563 }
564 
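/*
 * READ CAPACITY(10) and READ CAPACITY(16) (SERVICE ACTION IN). Reports
 * the LBA of the last logical block and the block size; the 10-byte form
 * returns 0xFFFFFFFF when the capacity does not fit in 32 bits.
 */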
565 void
566 sbd_handle_read_capacity(struct scsi_task *task,
567     struct stmf_data_buf *initial_dbuf)
568 {
569 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
570 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
571 	sbd_lu_info_t *sli = slu->sl_sli;
572 	uint32_t cdb_len;
573 	uint8_t p[32];
574 	uint64_t s;
575 
576 	s = sli->sli_lu_data_size >> slu->sl_shift_count;
577 	s--;
578 	switch (task->task_cdb[0]) {
579 	case SCMD_READ_CAPACITY:
580 		if (s & 0xffffffff00000000ull) {
581 			p[0] = p[1] = p[2] = p[3] = 0xFF;
582 		} else {
583 			p[0] = (s >> 24) & 0xff;
584 			p[1] = (s >> 16) & 0xff;
585 			p[2] = (s >> 8) & 0xff;
586 			p[3] = s & 0xff;
587 		}
588 		p[4] = 0; p[5] = 0;
589 		p[6] = (sli->sli_blocksize >> 8) & 0xff;
590 		p[7] = sli->sli_blocksize & 0xff;
591 		sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
592 		return;
593 
594 	case SCMD_SVC_ACTION_IN_G4:
595 		cdb_len = READ_SCSI32(&task->task_cdb[10], uint32_t);
596 		bzero(p, 32);
597 		p[0] = (s >> 56) & 0xff;
598 		p[1] = (s >> 48) & 0xff;
599 		p[2] = (s >> 40) & 0xff;
600 		p[3] = (s >> 32) & 0xff;
601 		p[4] = (s >> 24) & 0xff;
602 		p[5] = (s >> 16) & 0xff;
603 		p[6] = (s >> 8) & 0xff;
604 		p[7] = s & 0xff;
605 		p[10] = (sli->sli_blocksize >> 8) & 0xff;
606 		p[11] = sli->sli_blocksize & 0xff;
607 		sbd_handle_short_read_transfers(task, initial_dbuf, p,
608 		    cdb_len, 32);
609 		return;
610 	}
611 }
612 
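/*
 * Canned mode pages returned by MODE SENSE: page 0x03 (format device),
 * page 0x04 (rigid disk geometry), page 0x0A (control mode) and a short
 * block descriptor used with the ALL PAGES request.
 */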
613 static uint8_t sbd_p3[] =
614 	{3, 0x16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 2, 0, 0, 0,
615 	    0, 0, 0, 0, 0x80, 0, 0, 0};
616 static uint8_t sbd_p4[] =
617 	{4, 0x16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
618 	    0, 0, 0, 0, 0x15, 0x18, 0, 0};
619 static uint8_t sbd_pa[] = {0xa, 0xa, 0, 0x10, 0, 0, 0, 0, 0, 0, 0, 0};
620 static uint8_t sbd_bd[] = {0, 0, 0, 0, 0, 0, 0x02, 0};
621 
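/*
 * MODE SENSE(6) and MODE SENSE(10). Supports pages 0x03, 0x04, 0x0A and
 * ALL PAGES (with an optional block descriptor); the geometry fields of
 * page 0x04 are synthesized from the LU size.
 */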
622 void
623 sbd_handle_mode_sense(struct scsi_task *task,
624     struct stmf_data_buf *initial_dbuf)
625 {
626 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
627 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
628 	sbd_lu_info_t *sli = slu->sl_sli;
629 	uint32_t cmd_size, hdrsize, xfer_size, ncyl;
630 	uint8_t payload_buf[8 + 8 + 24 + 24 + 12];
631 	uint8_t *payload, *p;
632 	uint8_t ctrl, page;
633 	uint16_t ps;
634 	uint64_t s = sli->sli_lu_data_size;
635 	uint8_t dbd;
636 
637 	p = &task->task_cdb[0];
638 	page = p[2] & 0x3F;
639 	ctrl = (p[2] >> 6) & 3;
640 	dbd = p[1] & 0x08;
641 
642 	hdrsize = (p[0] == SCMD_MODE_SENSE) ? 4 : 8;
643 
644 	cmd_size = (p[0] == SCMD_MODE_SENSE) ? p[4] :
645 	    READ_SCSI16(&p[7], uint32_t);
646 
647 	switch (page) {
648 	case 0x03:
649 		ps = hdrsize + sizeof (sbd_p3);
650 		break;
651 	case 0x04:
652 		ps = hdrsize + sizeof (sbd_p4);
653 		break;
654 	case 0x0A:
655 		ps = hdrsize + sizeof (sbd_pa);
656 		break;
657 	case MODEPAGE_ALLPAGES:
658 		ps = hdrsize + sizeof (sbd_p3) + sizeof (sbd_p4)
659 		    + sizeof (sbd_pa);
660 
661 		/*
662 		 * If the buffer is big enough, include the block
663 		 * descriptor; otherwise, leave it out.
664 		 */
665 		if (cmd_size < ps) {
666 			dbd = 1;
667 		}
668 
669 		if (dbd == 0) {
670 			ps += 8;
671 		}
672 
673 		break;
674 	default:
675 		stmf_scsilib_send_status(task, STATUS_CHECK,
676 		    STMF_SAA_INVALID_FIELD_IN_CDB);
677 		return;
678 	}
679 
680 	xfer_size = min(cmd_size, ps);
681 
682 	if ((xfer_size < hdrsize) || (ctrl == 1) ||
683 	    (((task->task_additional_flags &
684 	    TASK_AF_NO_EXPECTED_XFER_LENGTH) == 0) &&
685 	    (xfer_size > task->task_expected_xfer_length))) {
686 		stmf_scsilib_send_status(task, STATUS_CHECK,
687 		    STMF_SAA_INVALID_FIELD_IN_CDB);
688 		return;
689 	}
690 
691 	bzero(payload_buf, xfer_size);
692 
693 	if (p[0] == SCMD_MODE_SENSE) {
694 		payload_buf[0] = ps - 1;
695 	} else {
696 		ps -= 2;
697 		*((uint16_t *)payload_buf) = BE_16(ps);
698 	}
699 
700 	payload = payload_buf + hdrsize;
701 
702 	switch (page) {
703 	case 0x03:
704 		bcopy(sbd_p3, payload, sizeof (sbd_p3));
705 		break;
706 
707 	case 0x0A:
708 		bcopy(sbd_pa, payload, sizeof (sbd_pa));
709 		break;
710 
711 	case MODEPAGE_ALLPAGES:
712 		if (dbd == 0) {
713 			payload_buf[3] = sizeof (sbd_bd);
714 			bcopy(sbd_bd, payload, sizeof (sbd_bd));
715 			payload += sizeof (sbd_bd);
716 		}
717 
718 		bcopy(sbd_p3, payload, sizeof (sbd_p3));
719 		payload += sizeof (sbd_p3);
720 		bcopy(sbd_pa, payload, sizeof (sbd_pa));
721 		payload += sizeof (sbd_pa);
722 		/* FALLTHROUGH */
723 
724 	case 0x04:
725 		bcopy(sbd_p4, payload, sizeof (sbd_p4));
726 
727 		if (s > 1024 * 1024 * 1024) {
728 			payload[5] = 16;
729 		} else {
730 			payload[5] = 2;
731 		}
732 		ncyl = (uint32_t)((s/(((uint64_t)payload[5]) * 32 * 512)) + 1);
733 		payload[4] = (uchar_t)ncyl;
734 		payload[3] = (uchar_t)(ncyl >> 8);
735 		payload[2] = (uchar_t)(ncyl >> 16);
736 		break;
737 
738 	}
739 
740 	sbd_handle_short_read_transfers(task, initial_dbuf, payload_buf,
741 	    cmd_size, xfer_size);
742 }
743 
744 
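/*
 * INQUIRY. Returns either standard inquiry data or one of the supported
 * EVPD pages: 0x00 (supported pages), 0x83 (device identification, built
 * by stmf_scsilib_prepare_vpd_page83()) and 0x86 (extended inquiry).
 */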
745 void
746 sbd_handle_inquiry(struct scsi_task *task, struct stmf_data_buf *initial_dbuf,
747 			uint8_t *p, int bsize)
748 {
749 	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
750 	uint32_t cmd_size;
751 	uint8_t page_length;
752 
753 	/*
754 	 * Basic protocol checks.
755 	 */
756 
757 	if ((((cdbp[1] & 1) == 0) && cdbp[2]) || cdbp[5]) {
758 		stmf_scsilib_send_status(task, STATUS_CHECK,
759 		    STMF_SAA_INVALID_FIELD_IN_CDB);
760 		return;
761 	}
762 
763 	/*
764 	 * Zero byte allocation length is not an error.  Just
765 	 * return success.
766 	 */
767 
768 	cmd_size = (((uint32_t)cdbp[3]) << 8) | cdbp[4];
769 
770 	if (cmd_size == 0) {
771 		task->task_cmd_xfer_length = 0;
772 		if (task->task_additional_flags &
773 		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
774 			task->task_expected_xfer_length = 0;
775 		}
776 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
777 		return;
778 	}
779 
780 	/*
781 	 * Standard inquiry
782 	 */
783 
784 	if ((cdbp[1] & 1) == 0) {
785 		struct scsi_inquiry *inq = (struct scsi_inquiry *)p;
786 
787 		page_length = 31;
788 		bzero(inq, page_length + 5);
789 
790 		inq->inq_dtype = 0;
791 		inq->inq_ansi = 5;	/* SPC-3 */
792 		inq->inq_hisup = 1;
793 		inq->inq_rdf = 2;	/* Response data format for SPC-3 */
794 		inq->inq_len = page_length;
795 
796 		inq->inq_tpgs = 1;
797 
798 		inq->inq_cmdque = 1;
799 
800 		(void) strncpy((char *)inq->inq_vid, "SUN     ", 8);
801 		(void) strncpy((char *)inq->inq_pid, "COMSTAR         ", 16);
802 		(void) strncpy((char *)inq->inq_revision, "1.0 ", 4);
803 
804 		sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
805 		    min(cmd_size, page_length + 5));
806 
807 		return;
808 	}
809 
810 	/*
811 	 * EVPD handling
812 	 */
813 
814 	switch (cdbp[2]) {
815 	case 0x00:
816 		page_length = 3;
817 
818 		bzero(p, page_length + 4);
819 
820 		p[0] = 0;
821 		p[3] = page_length;	/* we support 3 pages, 0, 0x83, 0x86 */
822 		p[5] = 0x83;
823 		p[6] = 0x86;
824 
825 		break;
826 
827 	case 0x83:
828 
829 		page_length = stmf_scsilib_prepare_vpd_page83(task, p,
830 		    bsize, 0, STMF_VPD_LU_ID|STMF_VPD_TARGET_ID|
831 		    STMF_VPD_TP_GROUP|STMF_VPD_RELATIVE_TP_ID) - 4;
832 		break;
833 
834 	case 0x86:
835 		page_length = 0x3c;
836 
837 		bzero(p, page_length + 4);
838 
839 		p[0] = 0;
840 		p[1] = 0x86;		/* Page 86 response */
841 		p[3] = page_length;
842 
843 		/*
844 		 * Bits 0, 1, and 2 will need to be updated
845 		 * to reflect the queue tag handling if/when
846 		 * that is implemented.  For now, we're going
847 		 * to claim support only for Simple TA.
848 		 */
849 		p[5] = 1;
850 
851 		break;
852 
853 	default:
854 		stmf_scsilib_send_status(task, STATUS_CHECK,
855 		    STMF_SAA_INVALID_FIELD_IN_CDB);
856 		return;
857 	}
858 
859 	sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
860 	    min(cmd_size, page_length + 4));
861 }
862 
863 stmf_status_t
864 sbd_task_alloc(struct scsi_task *task)
865 {
866 	if ((task->task_lu_private =
867 	    kmem_alloc(sizeof (sbd_cmd_t), KM_NOSLEEP)) != NULL) {
868 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
869 		scmd->flags = 0;
870 		return (STMF_SUCCESS);
871 	}
872 	return (STMF_ALLOC_FAILURE);
873 }
874 
875 void
876 sbd_remove_it_handle(sbd_lu_t *slu, sbd_it_data_t *it)
877 {
878 	sbd_it_data_t **ppit;
879 
880 	mutex_enter(&slu->sl_it_list_lock);
881 	for (ppit = &slu->sl_it_list; *ppit != NULL;
882 	    ppit = &((*ppit)->sbd_it_next)) {
883 		if ((*ppit) == it) {
884 			*ppit = it->sbd_it_next;
885 			break;
886 		}
887 	}
888 	mutex_exit(&slu->sl_it_list_lock);
889 
890 	DTRACE_PROBE2(itl__nexus__end, stmf_lu_t *, slu->sl_lu,
891 	    sbd_it_data_t *, it);
892 
893 	kmem_free(it, sizeof (*it));
894 }
895 
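/*
 * Clear any SCSI-2 reservation held on this LU. If 'it' is NULL the
 * reservation is cleared regardless of which I_T nexus holds it;
 * otherwise it is cleared only if that nexus is the current owner.
 */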
896 void
897 sbd_check_and_clear_scsi2_reservation(sbd_lu_t *slu, sbd_it_data_t *it)
898 {
899 	mutex_enter(&slu->sl_it_list_lock);
900 	if ((slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) == 0) {
901 		/* If we don't have any reservations, just get out. */
902 		mutex_exit(&slu->sl_it_list_lock);
903 		return;
904 	}
905 
906 	if (it == NULL) {
907 		/* Find the I_T nexus which is holding the reservation. */
908 		for (it = slu->sl_it_list; it != NULL; it = it->sbd_it_next) {
909 			if (it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) {
910 				ASSERT(it->sbd_it_session_id ==
911 				    slu->sl_rs_owner_session_id);
912 				break;
913 			}
914 		}
915 		ASSERT(it != NULL);
916 	} else {
917 		/*
918 		 * We were passed an I_T nexus. If this nexus does not hold
919 		 * the reservation, do nothing. This is why this function is
920 		 * called "check_and_clear".
921 		 */
922 		if ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0) {
923 			mutex_exit(&slu->sl_it_list_lock);
924 			return;
925 		}
926 	}
927 	it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
928 	slu->sl_flags &= ~SBD_LU_HAS_SCSI2_RESERVATION;
929 	mutex_exit(&slu->sl_it_list_lock);
930 }
931 
932 /*
933  * Returns non-zero if this command can be allowed to run even if the
934  * LU has been reserved by another initiator.
935  */
936 int
937 sbd_reserve_allow(scsi_task_t *task)
938 {
939 	uint8_t cdb0 = task->task_cdb[0];
940 	uint8_t cdb1 = task->task_cdb[1];
941 
942 	if ((cdb0 == SCMD_INQUIRY) || (cdb0 == SCMD_READ_CAPACITY) ||
943 	    ((cdb0 == SCMD_SVC_ACTION_IN_G4) &&
944 	    (cdb1 == SSVC_ACTION_READ_CAPACITY_G4))) {
945 		return (1);
946 	}
947 	return (0);
948 }
949 
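/*
 * Main command entry point for the LU. Sets up per-I_T-nexus state on
 * first use, handles task management functions, enforces SCSI-2
 * reservations and pending unit attention conditions, and then
 * dispatches the CDB to the appropriate handler.
 */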
950 void
951 sbd_new_task(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
952 {
953 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
954 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
955 	sbd_it_data_t *it;
956 	uint8_t cdb0, cdb1;
957 
958 	if ((it = task->task_lu_itl_handle) == NULL) {
959 		mutex_enter(&slu->sl_it_list_lock);
960 		for (it = slu->sl_it_list; it != NULL; it = it->sbd_it_next) {
961 			if (it->sbd_it_session_id ==
962 			    task->task_session->ss_session_id) {
963 				mutex_exit(&slu->sl_it_list_lock);
964 				stmf_scsilib_send_status(task, STATUS_BUSY, 0);
965 				return;
966 			}
967 		}
968 		it = (sbd_it_data_t *)kmem_zalloc(sizeof (*it), KM_NOSLEEP);
969 		if (it == NULL) {
970 			mutex_exit(&slu->sl_it_list_lock);
971 			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
972 			return;
973 		}
974 		it->sbd_it_session_id = task->task_session->ss_session_id;
975 		bcopy(task->task_lun_no, it->sbd_it_lun, 8);
976 		it->sbd_it_next = slu->sl_it_list;
977 		slu->sl_it_list = it;
978 		mutex_exit(&slu->sl_it_list_lock);
979 
980 		DTRACE_PROBE1(itl__nexus__start, scsi_task *, task);
981 
982 		if (stmf_register_itl_handle(task->task_lu, task->task_lun_no,
983 		    task->task_session, it->sbd_it_session_id, it)
984 		    != STMF_SUCCESS) {
985 			sbd_remove_it_handle(slu, it);
986 			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
987 			return;
988 		}
989 		task->task_lu_itl_handle = it;
990 		it->sbd_it_ua_conditions = SBD_UA_POR;
991 	}
992 
993 	if (task->task_mgmt_function) {
994 		stmf_scsilib_handle_task_mgmt(task);
995 		return;
996 	}
997 
998 	if ((slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) &&
999 	    ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0)) {
1000 		if (!sbd_reserve_allow(task)) {
1001 			stmf_scsilib_send_status(task,
1002 			    STATUS_RESERVATION_CONFLICT, 0);
1003 			return;
1004 		}
1005 	}
1006 
1007 	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
1008 		uint32_t saa = 0;
1009 
1010 		mutex_enter(&slu->sl_it_list_lock);
1011 		if (it->sbd_it_ua_conditions & SBD_UA_POR) {
1012 			it->sbd_it_ua_conditions &= ~SBD_UA_POR;
1013 			saa = STMF_SAA_POR;
1014 		} else if (it->sbd_it_ua_conditions & SBD_UA_CAPACITY_CHANGED) {
1015 			it->sbd_it_ua_conditions &= ~SBD_UA_CAPACITY_CHANGED;
1016 			if ((task->task_cdb[0] == SCMD_READ_CAPACITY) ||
1017 			    ((task->task_cdb[0] == SCMD_SVC_ACTION_IN_G4) &&
1018 			    (task->task_cdb[1] ==
1019 			    SSVC_ACTION_READ_CAPACITY_G4))) {
1020 				saa = 0;
1021 			} else {
1022 				saa = STMF_SAA_CAPACITY_DATA_HAS_CHANGED;
1023 			}
1024 		} else {
1025 			it->sbd_it_ua_conditions = 0;
1026 			saa = 0;
1027 		}
1028 		mutex_exit(&slu->sl_it_list_lock);
1029 		if (saa) {
1030 			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
1031 			return;
1032 		}
1033 	}
1034 
1035 
1036 	cdb0 = task->task_cdb[0] & 0x1F;
1037 
1038 	if ((cdb0 == SCMD_READ) || (cdb0 == SCMD_WRITE)) {
1039 		if (task->task_additional_flags & TASK_AF_PORT_LOAD_HIGH) {
1040 			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
1041 			return;
1042 		}
1043 		if (cdb0 == SCMD_READ) {
1044 			sbd_handle_read(task, initial_dbuf);
1045 			return;
1046 		}
1047 		sbd_handle_write(task, initial_dbuf);
1048 		return;
1049 	}
1050 
1051 	cdb0 = task->task_cdb[0];
1052 	cdb1 = task->task_cdb[1];
1053 
1054 	if (cdb0 == SCMD_TEST_UNIT_READY) {	/* Test unit ready */
1055 		task->task_cmd_xfer_length = 0;
1056 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1057 		return;
1058 	}
1059 
1060 	if (cdb0 == SCMD_READ_CAPACITY) {		/* Read Capacity */
1061 		sbd_handle_read_capacity(task, initial_dbuf);
1062 		return;
1063 	}
1064 
1065 	if (cdb0 == SCMD_INQUIRY) {		/* Inquiry */
1066 		uint8_t *p;
1067 
1068 		p = (uint8_t *)kmem_zalloc(512, KM_SLEEP);
1069 		sbd_handle_inquiry(task, initial_dbuf, p, 512);
1070 		kmem_free(p, 512);
1071 		return;
1072 	}
1073 
1074 	if (cdb0 == SCMD_SVC_ACTION_IN_G4) { 	/* Read Capacity or read long */
1075 		if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
1076 			sbd_handle_read_capacity(task, initial_dbuf);
1077 			return;
1078 		/*
1079 		 * } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
1080 		 * 	sbd_handle_read(task, initial_dbuf);
1081 		 * 	return;
1082 		 */
1083 		}
1084 	}
1085 
1086 	/*
1087 	 * if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
1088 	 *	if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
1089 	 *		 sbd_handle_write(task, initial_dbuf);
1090 	 * 		return;
1091 	 *	}
1092 	 * }
1093 	 */
1094 
1095 	if (cdb0 == SCMD_START_STOP) {			/* Start stop */
1096 		/* XXX Implement power management */
1097 		task->task_cmd_xfer_length = 0;
1098 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1099 		return;
1100 	}
1101 #if 0
1102 	/* XXX Remove #if 0 above */
1103 	if ((cdb0 == SCMD_MODE_SELECT) || (cdb0 == SCMD_MODE_SELECT_G1)) {
1104 		sbd_handle_mode_select(task, initial_dbuf);
1105 		return;
1106 	}
1107 #endif
1108 	if ((cdb0 == SCMD_MODE_SENSE) || (cdb0 == SCMD_MODE_SENSE_G1)) {
1109 		sbd_handle_mode_sense(task, initial_dbuf);
1110 		return;
1111 	}
1112 
1113 	if (cdb0 == SCMD_REQUEST_SENSE) {
1114 		/*
1115 		 * LU provider needs to store unretrieved sense data
1116 		 * (e.g. after power-on/reset).  For now, we'll just
1117 		 * return good status with no sense.
1118 		 */
1119 
1120 		if ((cdb1 & ~1) || task->task_cdb[2] || task->task_cdb[3] ||
1121 		    task->task_cdb[5]) {
1122 			stmf_scsilib_send_status(task, STATUS_CHECK,
1123 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1124 		} else {
1125 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1126 		}
1127 
1128 		return;
1129 	}
1130 
1131 	if (cdb0 == SCMD_VERIFY) {
1132 		/*
1133 		 * More work likely needs to be done here.
1134 		 */
1135 		task->task_cmd_xfer_length = 0;
1136 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1137 		return;
1138 	}
1139 
1140 	if ((cdb0 == SCMD_RESERVE) || (cdb0 == SCMD_RELEASE)) {
1141 		if (cdb1) {
1142 			stmf_scsilib_send_status(task, STATUS_CHECK,
1143 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1144 			return;
1145 		}
1146 		mutex_enter(&slu->sl_it_list_lock);
1147 		if (slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) {
1148 			if (it->sbd_it_session_id !=
1149 			    slu->sl_rs_owner_session_id) {
1150 				/*
1151 				 * This can only happen if things were in
1152 				 * flux.
1153 				 */
1154 				mutex_exit(&slu->sl_it_list_lock);
1155 				stmf_scsilib_send_status(task,
1156 				    STATUS_RESERVATION_CONFLICT, 0);
1157 				return;
1158 			}
1159 		}
1160 	}
1161 
1162 	if (cdb0 == SCMD_RELEASE) {
1163 		slu->sl_flags &= ~SBD_LU_HAS_SCSI2_RESERVATION;
1164 		it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
1165 		mutex_exit(&slu->sl_it_list_lock);
1166 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1167 		return;
1168 	}
1169 	if (cdb0 == SCMD_RESERVE) {
1170 		slu->sl_flags |= SBD_LU_HAS_SCSI2_RESERVATION;
1171 		it->sbd_it_flags |= SBD_IT_HAS_SCSI2_RESERVATION;
1172 		slu->sl_rs_owner_session_id = it->sbd_it_session_id;
1173 		mutex_exit(&slu->sl_it_list_lock);
1174 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1175 		return;
1176 	}
1177 
1178 	if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
1179 	    cdb0 == SCMD_SYNCHRONIZE_CACHE_G4) {
1180 		sbd_handle_sync_cache(task, initial_dbuf);
1181 		return;
1182 	}
1183 
1184 	/* Report Target Port Groups */
1185 	if ((cdb0 == SCMD_MAINTENANCE_IN) &&
1186 	    ((cdb1 & 0x1F) == 0x0A)) {
1187 		stmf_scsilib_handle_report_tpgs(task, initial_dbuf);
1188 		return;
1189 	}
1190 
1191 	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
1192 }
1193 
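/*
 * Data transfer completion entry point; routes the dbuf to the handler
 * matching the command type recorded in the sbd_cmd_t.
 */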
1194 void
1195 sbd_dbuf_xfer_done(struct scsi_task *task, struct stmf_data_buf *dbuf)
1196 {
1197 	sbd_cmd_t *scmd = NULL;
1198 
1199 	scmd = (sbd_cmd_t *)task->task_lu_private;
1200 	if ((scmd == NULL) || ((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0))
1201 		return;
1202 
1203 	if (scmd->cmd_type == SBD_CMD_SCSI_READ) {
1204 		sbd_handle_read_xfer_completion(task, scmd, dbuf);
1205 	} else if (scmd->cmd_type == SBD_CMD_SCSI_WRITE) {
1206 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 1);
1207 	} else if (scmd->cmd_type == SBD_CMD_SMALL_READ) {
1208 		sbd_handle_short_read_xfer_completion(task, scmd, dbuf);
1209 	} else {
1210 		cmn_err(CE_PANIC, "Unknown cmd type, task = %p", (void *)task);
1211 	}
1212 }
1213 
1214 /* ARGSUSED */
1215 void
1216 sbd_send_status_done(struct scsi_task *task)
1217 {
1218 	cmn_err(CE_PANIC,
1219 	    "sbd_send_status_done: this should not have been called");
1220 }
1221 
1222 void
1223 sbd_task_free(struct scsi_task *task)
1224 {
1225 	if (task->task_lu_private) {
1226 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1227 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1228 			cmn_err(CE_PANIC, "cmd is active, task = %p",
1229 			    (void *)task);
1230 		}
1231 		kmem_free(scmd, sizeof (sbd_cmd_t));
1232 	}
1233 }
1234 
1235 /*
1236  * Aborts are synchronous w.r.t. I/O AND
1237  * All the I/O which SBD does is synchronous AND
1238  * Everything within a task is single-threaded.
1239  *   IT MEANS
1240  * If this function is called, we are doing nothing with this task
1241  * inside the sbd module.
1242  */
1243 /* ARGSUSED */
1244 stmf_status_t
1245 sbd_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
1246 {
1247 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1248 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1249 	scsi_task_t *task;
1250 
1251 	if (abort_cmd == STMF_LU_RESET_STATE) {
1252 		return (sbd_lu_reset_state(lu));
1253 	}
1254 
1255 	if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
1256 		sbd_check_and_clear_scsi2_reservation(slu,
1257 		    (sbd_it_data_t *)arg);
1258 		sbd_remove_it_handle(slu, (sbd_it_data_t *)arg);
1259 		return (STMF_SUCCESS);
1260 	}
1261 
1262 	ASSERT(abort_cmd == STMF_LU_ABORT_TASK);
1263 	task = (scsi_task_t *)arg;
1264 	if (task->task_lu_private) {
1265 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1266 
1267 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1268 			scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
1269 			return (STMF_ABORT_SUCCESS);
1270 		}
1271 	}
1272 
1273 	return (STMF_NOT_FOUND);
1274 }
1275 
1276 /* ARGSUSED */
1277 void
1278 sbd_ctl(struct stmf_lu *lu, int cmd, void *arg)
1279 {
1280 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1281 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1282 	stmf_change_status_t st;
1283 
1284 	ASSERT((cmd == STMF_CMD_LU_ONLINE) ||
1285 	    (cmd == STMF_CMD_LU_OFFLINE) ||
1286 	    (cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
1287 	    (cmd == STMF_ACK_LU_OFFLINE_COMPLETE));
1288 
1289 	st.st_completion_status = STMF_SUCCESS;
1290 	st.st_additional_info = NULL;
1291 
1292 	switch (cmd) {
1293 	case STMF_CMD_LU_ONLINE:
1294 		if (slu->sl_state == STMF_STATE_ONLINE)
1295 			st.st_completion_status = STMF_ALREADY;
1296 		else if (slu->sl_state != STMF_STATE_OFFLINE)
1297 			st.st_completion_status = STMF_FAILURE;
1298 		if (st.st_completion_status == STMF_SUCCESS) {
1299 			slu->sl_state = STMF_STATE_ONLINING;
1300 			slu->sl_state_not_acked = 1;
1301 			st.st_completion_status = sst->sst_online(sst);
1302 			if (st.st_completion_status != STMF_SUCCESS) {
1303 				slu->sl_state = STMF_STATE_OFFLINE;
1304 				slu->sl_state_not_acked = 0;
1305 			} else {
1306 				slu->sl_state = STMF_STATE_ONLINE;
1307 			}
1308 		}
1309 		(void) stmf_ctl(STMF_CMD_LU_ONLINE_COMPLETE, lu, &st);
1310 		break;
1311 
1312 	case STMF_CMD_LU_OFFLINE:
1313 		if (slu->sl_state == STMF_STATE_OFFLINE)
1314 			st.st_completion_status = STMF_ALREADY;
1315 		else if (slu->sl_state != STMF_STATE_ONLINE)
1316 			st.st_completion_status = STMF_FAILURE;
1317 		if (st.st_completion_status == STMF_SUCCESS) {
1318 			slu->sl_state = STMF_STATE_OFFLINING;
1319 			slu->sl_state_not_acked = 1;
1320 			st.st_completion_status = sst->sst_offline(sst);
1321 			if (st.st_completion_status != STMF_SUCCESS) {
1322 				slu->sl_state = STMF_STATE_ONLINE;
1323 				slu->sl_state_not_acked = 0;
1324 			} else {
1325 				slu->sl_state = STMF_STATE_OFFLINE;
1326 			}
1327 		}
1328 		(void) stmf_ctl(STMF_CMD_LU_OFFLINE_COMPLETE, lu, &st);
1329 		break;
1330 
1331 	case STMF_ACK_LU_ONLINE_COMPLETE:
1332 		/* Fallthrough */
1333 	case STMF_ACK_LU_OFFLINE_COMPLETE:
1334 		slu->sl_state_not_acked = 0;
1335 		break;
1336 
1337 	}
1338 }
1339 
1340 /* ARGSUSED */
1341 stmf_status_t
1342 sbd_info(uint32_t cmd, stmf_lu_t *lu, void *arg, uint8_t *buf,
1343 						uint32_t *bufsizep)
1344 {
1345 	return (STMF_NOT_SUPPORTED);
1346 }
1347 
1348 stmf_status_t
1349 sbd_lu_reset_state(stmf_lu_t *lu)
1350 {
1351 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1352 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1353 
1354 	sbd_check_and_clear_scsi2_reservation(slu, NULL);
1355 	if (stmf_deregister_all_lu_itl_handles(lu) != STMF_SUCCESS) {
1356 		return (STMF_FAILURE);
1357 	}
1358 	return (STMF_SUCCESS);
1359 }
1360 
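/*
 * SYNCHRONIZE CACHE(10) and SYNCHRONIZE CACHE(16). The IMMED bit is not
 * supported; the LBA range is validated and then the backing store is
 * flushed via sst_data_flush().
 */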
1361 /* ARGSUSED */
1362 static void
1363 sbd_handle_sync_cache(struct scsi_task *task,
1364     struct stmf_data_buf *initial_dbuf)
1365 {
1366 	sbd_store_t	*sst =
1367 	    (sbd_store_t *)task->task_lu->lu_provider_private;
1368 	sbd_lu_t	*slu = (sbd_lu_t *)sst->sst_sbd_private;
1369 	uint64_t	lba, laddr;
1370 	uint32_t	len;
1371 	int		is_g4 = 0;
1372 	int		immed;
1373 
1374 	/*
1375 	 * Determine if this is a 10 or 16 byte CDB
1376 	 */
1377 
1378 	if (task->task_cdb[0] == SCMD_SYNCHRONIZE_CACHE_G4)
1379 		is_g4 = 1;
1380 
1381 	/*
1382 	 * Determine other requested parameters
1383 	 *
1384 	 * We don't have a non-volatile cache, so don't care about SYNC_NV.
1385 	 * Do not support the IMMED bit.
1386 	 */
1387 
1388 	immed = (task->task_cdb[1] & 0x02);
1389 
1390 	if (immed) {
1391 		stmf_scsilib_send_status(task, STATUS_CHECK,
1392 		    STMF_SAA_INVALID_FIELD_IN_CDB);
1393 		return;
1394 	}
1395 
1396 	/*
1397 	 * Check to be sure we're not being asked to sync an LBA
1398 	 * that is out of range.  While checking, verify reserved fields.
1399 	 */
1400 
1401 	if (is_g4) {
1402 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[14] ||
1403 		    task->task_cdb[15]) {
1404 			stmf_scsilib_send_status(task, STATUS_CHECK,
1405 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1406 			return;
1407 		}
1408 
1409 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
1410 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
1411 	} else {
1412 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[6] ||
1413 		    task->task_cdb[9]) {
1414 			stmf_scsilib_send_status(task, STATUS_CHECK,
1415 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1416 			return;
1417 		}
1418 
1419 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
1420 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
1421 	}
1422 
1423 	laddr = lba << slu->sl_shift_count;
1424 	len <<= slu->sl_shift_count;
1425 
1426 	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
1427 		stmf_scsilib_send_status(task, STATUS_CHECK,
1428 		    STMF_SAA_LBA_OUT_OF_RANGE);
1429 		return;
1430 	}
1431 
1432 	if (sst->sst_data_flush(sst) != STMF_SUCCESS) {
1433 		stmf_scsilib_send_status(task, STATUS_CHECK,
1434 		    STMF_SAA_WRITE_ERROR);
1435 		return;
1436 	}
1437 
1438 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1439 }
1440