1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 #pragma ident	"Copyright 2010 QLogic Corporation; ql_iocb.c"
29 
30 /*
31  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32  *
33  * ***********************************************************************
34  * *									**
35  * *				NOTICE					**
36  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
37  * *			ALL RIGHTS RESERVED				**
38  * *									**
39  * ***********************************************************************
40  *
41  */
42 
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_iocb.h>
47 #include <ql_isr.h>
48 #include <ql_xioctl.h>
49 
50 /*
51  * Local Function Prototypes.
52  */
53 static int ql_req_pkt(ql_adapter_state_t *, request_t **);
54 static void ql_continuation_iocb(ql_adapter_state_t *, ddi_dma_cookie_t *,
55     uint16_t, boolean_t);
56 static void ql_isp24xx_rcvbuf(ql_adapter_state_t *);
57 static void ql_cmd_24xx_type_6_iocb(ql_adapter_state_t *, ql_srb_t *, void *);
58 
59 /*
60  * ql_start_iocb
61  *	The start IOCB is responsible for building request packets
62  *	on the request ring and modifying the ISP input pointer.
63  *
64  * Input:
65  *	ha:	adapter state pointer.
66  *	sp:	srb structure pointer.
67  *
68  * Context:
69  *	Interrupt or Kernel context, no mailbox commands allowed.
70  */
71 void
72 ql_start_iocb(ql_adapter_state_t *vha, ql_srb_t *sp)
73 {
74 	ql_link_t		*link;
75 	request_t		*pkt;
76 	uint64_t		*ptr64;
77 	uint32_t		cnt;
78 	ql_adapter_state_t	*ha = vha->pha;
79 
80 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
81 
82 	/* Acquire ring lock. */
83 	REQUEST_RING_LOCK(ha);
84 
85 	if (sp != NULL) {
86 		/*
87 		 * If the pending queue is not empty, maintain order
88 		 * by putting this srb at the tail and getting the head.
89 		 */
90 		if ((link = ha->pending_cmds.first) != NULL) {
91 			ql_add_link_b(&ha->pending_cmds, &sp->cmd);
92 			/* Remove command from pending command queue */
93 			sp = link->base_address;
94 			ql_remove_link(&ha->pending_cmds, &sp->cmd);
95 		}
96 	} else {
97 		/* Get command from pending command queue if not empty. */
98 		if ((link = ha->pending_cmds.first) == NULL) {
99 			/* Release ring specific lock */
100 			REQUEST_RING_UNLOCK(ha);
101 			QL_PRINT_3(CE_CONT, "(%d): empty done\n",
102 			    ha->instance);
103 			return;
104 		}
105 		/* Remove command from pending command queue */
106 		sp = link->base_address;
107 		ql_remove_link(&ha->pending_cmds, &sp->cmd);
108 	}
109 
110 	/* start this request and as many others as possible */
111 	for (;;) {
112 		if (ha->req_q_cnt < sp->req_cnt) {
113 			/* Calculate number of free request entries. */
114 			cnt = RD16_IO_REG(ha, req_out);
115 			if (ha->req_ring_index < cnt)  {
116 				ha->req_q_cnt = (uint16_t)
117 				    (cnt - ha->req_ring_index);
118 			} else {
119 				ha->req_q_cnt = (uint16_t)(REQUEST_ENTRY_CNT -
120 				    (ha->req_ring_index - cnt));
121 			}
122 			if (ha->req_q_cnt != 0) {
123 				ha->req_q_cnt--;
124 			}
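			/*
			 * req_out is the ISP's request queue out (consumer)
			 * index; the free count is the distance from our in
			 * (producer) index to it, modulo REQUEST_ENTRY_CNT.
			 * One entry is held back above, presumably so a
			 * completely full ring is never mistaken for an
			 * empty one (in == out).
			 */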
125 
126 			/*
127 			 * If no room in request ring put this srb at
128 			 * the head of the pending queue and exit.
129 			 */
130 			if (ha->req_q_cnt < sp->req_cnt) {
131 				QL_PRINT_8(CE_CONT, "(%d): request ring full,"
132 				    " req_q_cnt=%d, req_ring_index=%d\n",
133 				    ha->instance, ha->req_q_cnt,
134 				    ha->req_ring_index);
135 				ql_add_link_t(&ha->pending_cmds, &sp->cmd);
136 				break;
137 			}
138 		}
139 
140 		/* Check for room in outstanding command list. */
141 		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
142 			ha->osc_index++;
143 			if (ha->osc_index == MAX_OUTSTANDING_COMMANDS) {
144 				ha->osc_index = 1;
145 			}
146 			if (ha->outstanding_cmds[ha->osc_index] == NULL) {
147 				break;
148 			}
149 		}
150 		/*
151 		 * If no room in outstanding array put this srb at
152 		 * the head of the pending queue and exit.
153 		 */
154 		if (cnt == MAX_OUTSTANDING_COMMANDS) {
155 			QL_PRINT_8(CE_CONT, "(%d): no room in outstanding "
156 			    "array\n", ha->instance);
157 			ql_add_link_t(&ha->pending_cmds, &sp->cmd);
158 			break;
159 		}
160 
161 		/* nothing to stop us now. */
162 		ha->outstanding_cmds[ha->osc_index] = sp;
163 		/* create and save a unique response identifier in the srb */
164 		sp->handle = ha->adapter_stats->ncmds << OSC_INDEX_SHIFT |
165 		    ha->osc_index;
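		/*
		 * The handle packs the outstanding command array index into
		 * the low bits and the running command count above
		 * OSC_INDEX_SHIFT, presumably so a completion that carries a
		 * stale, since-reused index can be detected on lookup.
		 */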
166 		ha->req_q_cnt -= sp->req_cnt;
167 
168 		/* build the iocb in the request ring */
169 		pkt = ha->request_ring_ptr;
170 		sp->request_ring_ptr = pkt;
171 		sp->flags |= SRB_IN_TOKEN_ARRAY;
172 
173 		/* Zero out packet. */
174 		ptr64 = (uint64_t *)pkt;
175 		*ptr64++ = 0; *ptr64++ = 0;
176 		*ptr64++ = 0; *ptr64++ = 0;
177 		*ptr64++ = 0; *ptr64++ = 0;
178 		*ptr64++ = 0; *ptr64 = 0;
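		/* The eight 64-bit stores above clear the 64-byte entry. */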
179 
180 		/* Setup IOCB common data. */
181 		pkt->entry_count = (uint8_t)sp->req_cnt;
182 		pkt->sys_define = (uint8_t)ha->req_ring_index;
183 		/* mark the iocb with the response identifier */
184 		ddi_put32(ha->hba_buf.acc_handle, &pkt->handle,
185 		    (uint32_t)sp->handle);
186 
187 		/* Setup IOCB unique data. */
188 		(sp->iocb)(vha, sp, pkt);
189 
190 		sp->flags |= SRB_ISP_STARTED;
191 
192 		QL_PRINT_5(CE_CONT, "(%d,%d): req packet, sp=%p\n",
193 		    ha->instance, vha->vp_index, (void *)sp);
194 		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
195 
196 		/* Sync DMA buffer. */
197 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
198 		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
199 		    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
200 		    DDI_DMA_SYNC_FORDEV);
201 
202 		/* Adjust ring index. */
203 		ha->req_ring_index++;
204 		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
205 			ha->req_ring_index = 0;
206 			ha->request_ring_ptr = ha->request_ring_bp;
207 		} else {
208 			ha->request_ring_ptr++;
209 		}
210 
211 		/* Reset watchdog timer */
212 		sp->wdg_q_time = sp->init_wdg_q_time;
213 
214 		/*
215 		 * Send it by setting the new ring index in the ISP Request
216 		 * Ring In Pointer register.  This is the mechanism
217 		 * used to notify the ISP that a new IOCB has been
218 		 * placed on the request ring.
219 		 */
220 		if (CFG_IST(ha, CFG_CTRL_8021)) {
221 			uint32_t	w32;
222 
223 			w32 = ha->req_ring_index << 16 |
224 			    ha->function_number << 5 | 4;
225 			do {
226 				ddi_put32(ha->db_dev_handle, ha->nx_req_in,
227 				    w32);
228 			} while (RD_REG_DWORD(ha, ha->db_read) != w32);
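			/*
			 * On the ISP8021 the new in-pointer goes to a
			 * doorbell register, with the ring index in the upper
			 * half-word and the PCI function number in the low
			 * bits; the write is repeated until the doorbell
			 * read-back matches, presumably to ensure the update
			 * actually reached the chip.
			 */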
229 
230 		} else {
231 			WRT16_IO_REG(ha, req_in, ha->req_ring_index);
232 		}
233 
234 		/* Update outstanding command count statistic. */
235 		ha->adapter_stats->ncmds++;
236 
237 		/* if there is a pending command, try to start it. */
238 		if ((link = ha->pending_cmds.first) == NULL) {
239 			break;
240 		}
241 
242 		/* Remove command from pending command queue */
243 		sp = link->base_address;
244 		ql_remove_link(&ha->pending_cmds, &sp->cmd);
245 	}
246 
247 	/* Release ring specific lock */
248 	REQUEST_RING_UNLOCK(ha);
249 
250 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
251 }
252 
253 /*
254  * ql_req_pkt
255  *	Function is responsible for locking the ring and
256  *	getting a zeroed-out request packet.
257  *
258  * Input:
259  *	ha:	adapter state pointer.
260  *	pkt:	address for packet pointer.
261  *
262  * Returns:
263  *	ql local function return status code.
264  *
265  * Context:
266  *	Interrupt or Kernel context, no mailbox commands allowed.
267  */
268 static int
269 ql_req_pkt(ql_adapter_state_t *vha, request_t **pktp)
270 {
271 	uint16_t		cnt;
272 	uint32_t		*long_ptr;
273 	uint32_t		timer;
274 	int			rval = QL_FUNCTION_TIMEOUT;
275 	ql_adapter_state_t	*ha = vha->pha;
276 
277 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
278 
279 	/* Wait for 30 seconds for slot. */
280 	for (timer = 30000; timer != 0; timer--) {
281 		/* Acquire ring lock. */
282 		REQUEST_RING_LOCK(ha);
283 
284 		if (ha->req_q_cnt == 0) {
285 			/* Calculate number of free request entries. */
286 			cnt = RD16_IO_REG(ha, req_out);
287 			if (ha->req_ring_index < cnt) {
288 				ha->req_q_cnt = (uint16_t)
289 				    (cnt - ha->req_ring_index);
290 			} else {
291 				ha->req_q_cnt = (uint16_t)
292 				    (REQUEST_ENTRY_CNT -
293 				    (ha->req_ring_index - cnt));
294 			}
295 			if (ha->req_q_cnt != 0) {
296 				ha->req_q_cnt--;
297 			}
298 		}
299 
300 		/* Found empty request ring slot? */
301 		if (ha->req_q_cnt != 0) {
302 			ha->req_q_cnt--;
303 			*pktp = ha->request_ring_ptr;
304 
305 			/* Zero out packet. */
306 			long_ptr = (uint32_t *)ha->request_ring_ptr;
307 			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE/4; cnt++) {
308 				*long_ptr++ = 0;
309 			}
310 
311 			/* Setup IOCB common data. */
312 			ha->request_ring_ptr->entry_count = 1;
313 			ha->request_ring_ptr->sys_define =
314 			    (uint8_t)ha->req_ring_index;
315 			ddi_put32(ha->hba_buf.acc_handle,
316 			    &ha->request_ring_ptr->handle,
317 			    (uint32_t)QL_FCA_BRAND);
318 
319 			rval = QL_SUCCESS;
320 
321 			break;
322 		}
323 
324 		/* Release request queue lock. */
325 		REQUEST_RING_UNLOCK(ha);
326 
327 		drv_usecwait(MILLISEC);
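		/*
		 * Poll roughly once per millisecond; 30000 passes of the
		 * loop give the 30 second timeout noted above.
		 */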
328 
329 		/* Check for pending interrupts. */
330 		/*
331 		 * XXX protect interrupt routine from calling itself.
332 		 * Need to revisit this routine. So far we have never
333 		 * hit this case because a req slot was available.
334 		 */
335 		if ((!(curthread->t_flag & T_INTR_THREAD)) &&
336 		    INTERRUPT_PENDING(ha)) {
337 			(void) ql_isr((caddr_t)ha);
338 			INTR_LOCK(ha);
339 			ha->intr_claimed = TRUE;
340 			INTR_UNLOCK(ha);
341 		}
342 	}
343 
344 	if (rval != QL_SUCCESS) {
345 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
346 		EL(ha, "failed, rval = %xh, isp_abort_needed\n", rval);
347 	} else {
348 		/*EMPTY*/
349 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
350 	}
351 	return (rval);
352 }
353 
354 /*
355  * ql_isp_cmd
356  *	Function is responsible for modifying the ISP input pointer.
357  *	This action notifies the ISP that a new request has been
358  *	added to the request ring.
359  *
360  *	Releases ring lock.
361  *
362  * Input:
363  *	ha:	adapter state pointer.
364  *
365  * Context:
366  *	Interrupt or Kernel context, no mailbox commands allowed.
367  */
368 void
369 ql_isp_cmd(ql_adapter_state_t *vha)
370 {
371 	ql_adapter_state_t	*ha = vha->pha;
372 
373 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
374 
375 	QL_PRINT_5(CE_CONT, "(%d): req packet:\n", ha->instance);
376 	QL_DUMP_5((uint8_t *)ha->request_ring_ptr, 8, REQUEST_ENTRY_SIZE);
377 
378 	/* Sync DMA buffer. */
379 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
380 	    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
381 	    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
382 	    DDI_DMA_SYNC_FORDEV);
383 
384 	/* Adjust ring index. */
385 	ha->req_ring_index++;
386 	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
387 		ha->req_ring_index = 0;
388 		ha->request_ring_ptr = ha->request_ring_bp;
389 	} else {
390 		ha->request_ring_ptr++;
391 	}
392 
393 	/* Set chip new ring index. */
394 	if (CFG_IST(ha, CFG_CTRL_8021)) {
395 		uint32_t	w32;
396 
397 		w32 = ha->req_ring_index << 16 |
398 		    ha->function_number << 5 | 4;
399 		do {
400 			ddi_put32(ha->db_dev_handle, ha->nx_req_in, w32);
401 		} while (RD_REG_DWORD(ha, ha->db_read) != w32);
402 
403 	} else {
404 		WRT16_IO_REG(ha, req_in, ha->req_ring_index);
405 	}
406 
407 	/* Release ring lock. */
408 	REQUEST_RING_UNLOCK(ha);
409 
410 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
411 }
412 
413 /*
414  * ql_command_iocb
415  *	Setup of command IOCB.
416  *
417  * Input:
418  *	ha:	adapter state pointer.
419  *	sp:	srb structure pointer.
420  *
421  *	arg:	request queue packet.
422  *
423  * Context:
424  *	Interrupt or Kernel context, no mailbox commands allowed.
425  */
426 void
427 ql_command_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
428 {
429 	ddi_dma_cookie_t	*cp;
430 	uint32_t		*ptr32, cnt;
431 	uint16_t		seg_cnt;
432 	fcp_cmd_t		*fcp = sp->fcp;
433 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
434 	cmd_entry_t		*pkt = arg;
435 
436 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
437 
438 	/* Set LUN number */
439 	pkt->lun_l = LSB(sp->lun_queue->lun_no);
440 	pkt->lun_h = MSB(sp->lun_queue->lun_no);
441 
442 	/* Set target ID */
443 	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
444 		pkt->target_l = LSB(tq->loop_id);
445 		pkt->target_h = MSB(tq->loop_id);
446 	} else {
447 		pkt->target_h = LSB(tq->loop_id);
448 	}
449 
450 	/* Set tag queue control flags */
451 	if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
452 		pkt->control_flags_l = (uint8_t)
453 		    (pkt->control_flags_l | CF_HTAG);
454 	} else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
455 		pkt->control_flags_l = (uint8_t)
456 		    (pkt->control_flags_l | CF_OTAG);
457 	/* else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) */
458 	} else {
459 		pkt->control_flags_l = (uint8_t)
460 		    (pkt->control_flags_l | CF_STAG);
461 	}
462 
463 	/* Set ISP command timeout. */
464 	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);
465 
466 	/* Load SCSI CDB */
467 	ddi_rep_put8(ha->hba_buf.acc_handle, fcp->fcp_cdb,
468 	    pkt->scsi_cdb, MAX_CMDSZ, DDI_DEV_AUTOINCR);
469 
470 	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
471 		pkt->entry_type = IOCB_CMD_TYPE_3;
472 		cnt = CMD_TYPE_3_DATA_SEGMENTS;
473 	} else {
474 		pkt->entry_type = IOCB_CMD_TYPE_2;
475 		cnt = CMD_TYPE_2_DATA_SEGMENTS;
476 	}
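	/*
	 * Type 3 entries carry 64-bit data segment descriptors while type 2
	 * entries carry 32-bit ones, so the number of descriptors that fit
	 * in the command entry itself differs; cnt tracks how many may be
	 * loaded here before continuation entries are needed.
	 */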
477 
478 	if (fcp->fcp_data_len == 0) {
479 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
480 		ha->xioctl->IOControlRequests++;
481 		return;
482 	}
483 
484 	/*
485 	 * Set transfer direction. Load Data segments.
486 	 */
487 	if (fcp->fcp_cntl.cntl_write_data) {
488 		pkt->control_flags_l = (uint8_t)
489 		    (pkt->control_flags_l | CF_DATA_OUT);
490 		ha->xioctl->IOOutputRequests++;
491 		ha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
492 	} else if (fcp->fcp_cntl.cntl_read_data) {
493 		pkt->control_flags_l = (uint8_t)
494 		    (pkt->control_flags_l | CF_DATA_IN);
495 		ha->xioctl->IOInputRequests++;
496 		ha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
497 	}
498 
499 	/* Set data segment count. */
500 	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
501 	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
502 
503 	/* Load total byte count. */
504 	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count, fcp->fcp_data_len);
505 
506 	/* Load command data segment. */
507 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
508 	cp = sp->pkt->pkt_data_cookie;
509 	while (cnt && seg_cnt) {
510 		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
511 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
512 			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
513 			    cp->dmac_notused);
514 		}
515 		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
516 		    (uint32_t)cp->dmac_size);
517 		seg_cnt--;
518 		cnt--;
519 		cp++;
520 	}
521 
522 	/*
523 	 * Build continuation packets.
524 	 */
525 	if (seg_cnt) {
526 		ql_continuation_iocb(ha, cp, seg_cnt,
527 		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
528 	}
529 
530 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
531 }
532 
533 /*
534  * ql_continuation_iocb
535  *	Setup of continuation IOCB.
536  *
537  * Input:
538  *	ha:		adapter state pointer.
539  *	cp:		cookie list pointer.
540  *	seg_cnt:	number of segments.
541  *	addr64:		64 bit addresses.
542  *
543  * Context:
544  *	Interrupt or Kernel context, no mailbox commands allowed.
545  */
546 static void
547 ql_continuation_iocb(ql_adapter_state_t *ha, ddi_dma_cookie_t *cp,
548     uint16_t seg_cnt, boolean_t addr64)
549 {
550 	cont_entry_t	*pkt;
551 	uint64_t	*ptr64;
552 	uint32_t	*ptr32, cnt;
553 
554 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
555 
556 	/*
557 	 * Build continuation packets.
558 	 */
559 	while (seg_cnt) {
560 		/* Sync DMA buffer. */
561 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
562 		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
563 		    REQUEST_Q_BUFFER_OFFSET), REQUEST_ENTRY_SIZE,
564 		    DDI_DMA_SYNC_FORDEV);
565 
566 		/* Adjust ring pointer, and deal with wrap. */
567 		ha->req_ring_index++;
568 		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
569 			ha->req_ring_index = 0;
570 			ha->request_ring_ptr = ha->request_ring_bp;
571 		} else {
572 			ha->request_ring_ptr++;
573 		}
574 		pkt = (cont_entry_t *)ha->request_ring_ptr;
575 
576 		/* Zero out packet. */
577 		ptr64 = (uint64_t *)pkt;
578 		*ptr64++ = 0; *ptr64++ = 0;
579 		*ptr64++ = 0; *ptr64++ = 0;
580 		*ptr64++ = 0; *ptr64++ = 0;
581 		*ptr64++ = 0; *ptr64 = 0;
582 
583 		/*
584 		 * Build continuation packet.
585 		 */
586 		pkt->entry_count = 1;
587 		pkt->sys_define = (uint8_t)ha->req_ring_index;
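		/*
		 * A type 1 continuation holds 64-bit data segment
		 * descriptors and a type 0 continuation holds 32-bit ones,
		 * matching the addressing mode of the command entry this
		 * list continues.
		 */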
588 		if (addr64) {
589 			pkt->entry_type = CONTINUATION_TYPE_1;
590 			cnt = CONT_TYPE_1_DATA_SEGMENTS;
591 			ptr32 = (uint32_t *)
592 			    &((cont_type_1_entry_t *)pkt)->dseg_0_address;
593 			while (cnt && seg_cnt) {
594 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
595 				    cp->dmac_address);
596 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
597 				    cp->dmac_notused);
598 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
599 				    (uint32_t)cp->dmac_size);
600 				seg_cnt--;
601 				cnt--;
602 				cp++;
603 			}
604 		} else {
605 			pkt->entry_type = CONTINUATION_TYPE_0;
606 			cnt = CONT_TYPE_0_DATA_SEGMENTS;
607 			ptr32 = (uint32_t *)&pkt->dseg_0_address;
608 			while (cnt && seg_cnt) {
609 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
610 				    cp->dmac_address);
611 				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
612 				    (uint32_t)cp->dmac_size);
613 				seg_cnt--;
614 				cnt--;
615 				cp++;
616 			}
617 		}
618 
619 		QL_PRINT_5(CE_CONT, "(%d): packet:\n", ha->instance);
620 		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
621 	}
622 
623 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
624 }
625 
626 /*
627  * ql_command_24xx_iocb
628  *	Setup of ISP24xx command IOCB.
629  *
630  * Input:
631  *	ha:	adapter state pointer.
632  *	sp:	srb structure pointer.
633  *	arg:	request queue packet.
634  *
635  * Context:
636  *	Interrupt or Kernel context, no mailbox commands allowed.
637  */
638 void
639 ql_command_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
640 {
641 	ddi_dma_cookie_t	*cp;
642 	uint32_t		*ptr32, cnt;
643 	uint16_t		seg_cnt;
644 	fcp_cmd_t		*fcp = sp->fcp;
645 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
646 	cmd7_24xx_entry_t	*pkt = arg;
647 	ql_adapter_state_t	*pha = ha->pha;
648 
649 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
650 
651 	if (fcp->fcp_data_len != 0 && sp->sg_dma.dma_handle != NULL &&
652 	    sp->pkt->pkt_data_cookie_cnt > 1) {
653 		ql_cmd_24xx_type_6_iocb(ha, sp, arg);
654 		QL_PRINT_3(CE_CONT, "(%d): cmd6 exit\n", ha->instance);
655 		return;
656 	}
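	/*
	 * Commands with data spread over multiple DMA cookies, and for which
	 * a scatter/gather DMA area (sg_dma) was set up, are sent as type 6
	 * entries so the descriptor list can live in host memory; all other
	 * commands use the type 7 entry built below.
	 */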
657 
658 	pkt->entry_type = IOCB_CMD_TYPE_7;
659 
660 	/* Set LUN number */
661 	pkt->fcp_lun[2] = LSB(sp->lun_queue->lun_no);
662 	pkt->fcp_lun[3] = MSB(sp->lun_queue->lun_no);
663 
664 	/* Set N_port handle */
665 	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);
666 
667 	/* Set target ID */
668 	pkt->target_id[0] = tq->d_id.b.al_pa;
669 	pkt->target_id[1] = tq->d_id.b.area;
670 	pkt->target_id[2] = tq->d_id.b.domain;
671 
672 	pkt->vp_index = ha->vp_index;
673 
674 	/* Set ISP command timeout. */
675 	if (sp->isp_timeout < 0x1999) {
676 		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
677 		    sp->isp_timeout);
678 	}
679 
680 	/* Load SCSI CDB */
681 	ddi_rep_put8(pha->hba_buf.acc_handle, fcp->fcp_cdb, pkt->scsi_cdb,
682 	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
683 	for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
684 		ql_chg_endian((uint8_t *)&pkt->scsi_cdb + cnt, 4);
685 	}
686 
687 	/*
688 	 * Set tag queue control flags
689 	 * Note:
690 	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
691 	 *	problem with x86 in 32bit kernel mode
692 	 */
693 	switch (fcp->fcp_cntl.cntl_qtype) {
694 	case FCP_QTYPE_SIMPLE:
695 		pkt->task = TA_STAG;
696 		break;
697 	case FCP_QTYPE_HEAD_OF_Q:
698 		pkt->task = TA_HTAG;
699 		break;
700 	case FCP_QTYPE_ORDERED:
701 		pkt->task = TA_OTAG;
702 		break;
703 	case FCP_QTYPE_ACA_Q_TAG:
704 		pkt->task = TA_ACA;
705 		break;
706 	case FCP_QTYPE_UNTAGGED:
707 		pkt->task = TA_UNTAGGED;
708 		break;
709 	default:
710 		break;
711 	}
712 
713 	if (fcp->fcp_data_len == 0) {
714 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
715 		pha->xioctl->IOControlRequests++;
716 		return;
717 	}
718 
719 	/* Set transfer direction. */
720 	if (fcp->fcp_cntl.cntl_write_data) {
721 		pkt->control_flags = CF_WR;
722 		pha->xioctl->IOOutputRequests++;
723 		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
724 	} else if (fcp->fcp_cntl.cntl_read_data) {
725 		pkt->control_flags = CF_RD;
726 		pha->xioctl->IOInputRequests++;
727 		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
728 	}
729 
730 	/* Set data segment count. */
731 	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
732 	ddi_put16(pha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
733 
734 	/* Load total byte count. */
735 	ddi_put32(pha->hba_buf.acc_handle, &pkt->total_byte_count,
736 	    fcp->fcp_data_len);
737 
738 	/* Load command data segment. */
739 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
740 	cp = sp->pkt->pkt_data_cookie;
741 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
742 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
743 	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
744 	seg_cnt--;
745 	cp++;
746 
747 	/*
748 	 * Build continuation packets.
749 	 */
750 	if (seg_cnt) {
751 		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
752 	}
753 
754 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
755 }
756 
757 /*
758  * ql_cmd_24xx_type_6_iocb
759  *	Setup of ISP24xx command type 6 IOCB.
760  *
761  * Input:
762  *	ha:	adapter state pointer.
763  *	sp:	srb structure pointer.
764  *	arg:	request queue packet.
765  *
766  * Context:
767  *	Interrupt or Kernel context, no mailbox commands allowed.
768  */
769 static void
770 ql_cmd_24xx_type_6_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
771 {
772 	uint64_t		addr;
773 	ddi_dma_cookie_t	*cp;
774 	uint32_t		*ptr32;
775 	uint16_t		seg_cnt;
776 	fcp_cmd_t		*fcp = sp->fcp;
777 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
778 	cmd6_24xx_entry_t	*pkt = arg;
779 	ql_adapter_state_t	*pha = ha->pha;
780 	dma_mem_t		*cmem = &sp->sg_dma;
781 	cmd6_2400_dma_t		*cdma = cmem->bp;
782 
783 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
784 
785 	pkt->entry_type = IOCB_CMD_TYPE_6;
786 
787 	bzero(cdma, sizeof (cmd6_2400_dma_t));
788 
789 	/* Set LUN number */
790 	pkt->fcp_lun[2] = cdma->cmd.fcp_lun[1] = LSB(sp->lun_queue->lun_no);
791 	pkt->fcp_lun[3] = cdma->cmd.fcp_lun[0] = MSB(sp->lun_queue->lun_no);
792 
793 	/* Set N_port handle */
794 	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);
795 
796 	/* Set target ID */
797 	pkt->target_id[0] = tq->d_id.b.al_pa;
798 	pkt->target_id[1] = tq->d_id.b.area;
799 	pkt->target_id[2] = tq->d_id.b.domain;
800 
801 	pkt->vp_index = ha->vp_index;
802 
803 	/* Set ISP command timeout. */
804 	if (sp->isp_timeout < 0x1999) {
805 		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
806 		    sp->isp_timeout);
807 	}
808 
809 	/* Load SCSI CDB */
810 	ddi_rep_put8(cmem->acc_handle, fcp->fcp_cdb, cdma->cmd.scsi_cdb,
811 	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
812 
813 	/*
814 	 * Set tag queue control flags
815 	 * Note:
816 	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
817 	 *	problem with x86 in 32bit kernel mode
818 	 */
819 	switch (fcp->fcp_cntl.cntl_qtype) {
820 	case FCP_QTYPE_SIMPLE:
821 		cdma->cmd.task = TA_STAG;
822 		break;
823 	case FCP_QTYPE_HEAD_OF_Q:
824 		cdma->cmd.task = TA_HTAG;
825 		break;
826 	case FCP_QTYPE_ORDERED:
827 		cdma->cmd.task = TA_OTAG;
828 		break;
829 	case FCP_QTYPE_ACA_Q_TAG:
830 		cdma->cmd.task = TA_ACA;
831 		break;
832 	case FCP_QTYPE_UNTAGGED:
833 		cdma->cmd.task = TA_UNTAGGED;
834 		break;
835 	default:
836 		break;
837 	}
838 
839 	/*
840 	 * FCP_CMND Payload Data Segment
841 	 */
842 	cp = cmem->cookies;
843 	ddi_put16(pha->hba_buf.acc_handle, &pkt->cmnd_length,
844 	    sizeof (fcp_cmnd_t));
845 	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmnd_address[0],
846 	    cp->dmac_address);
847 	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmnd_address[1],
848 	    cp->dmac_notused);
849 
850 	/* Set transfer direction. */
851 	if (fcp->fcp_cntl.cntl_write_data) {
852 		pkt->control_flags = (uint8_t)(CF_DSD_PTR | CF_WR);
853 		cdma->cmd.control_flags = CF_WR;
854 		pha->xioctl->IOOutputRequests++;
855 		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
856 	} else if (fcp->fcp_cntl.cntl_read_data) {
857 		pkt->control_flags = (uint8_t)(CF_DSD_PTR | CF_RD);
858 		cdma->cmd.control_flags = CF_RD;
859 		pha->xioctl->IOInputRequests++;
860 		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
861 	}
862 
863 	/*
864 	 * FCP_DATA Data Segment Descriptor.
865 	 */
866 	addr = cp->dmac_laddress + sizeof (fcp_cmnd_t);
867 	ddi_put32(pha->hba_buf.acc_handle, &pkt->dseg_0_address[0], LSD(addr));
868 	ddi_put32(pha->hba_buf.acc_handle, &pkt->dseg_0_address[1], MSD(addr));
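	/*
	 * The sg_dma buffer holds the FCP_CMND payload first, followed by
	 * the external data segment (cookie) list filled in below, so the
	 * FCP_DATA descriptor points sizeof (fcp_cmnd_t) bytes past the
	 * start of the same cookie.
	 */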
869 
870 	/* Set data segment count. */
871 	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
872 	ddi_put16(pha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
873 	ddi_put32(pha->hba_buf.acc_handle, &pkt->dseg_0_length,
874 	    seg_cnt * 12 + 12);
875 
876 	/* Load total byte count. */
877 	ddi_put32(pha->hba_buf.acc_handle, &pkt->total_byte_count,
878 	    fcp->fcp_data_len);
879 	ddi_put32(cmem->acc_handle, &cdma->cmd.dl, (uint32_t)fcp->fcp_data_len);
880 	ql_chg_endian((uint8_t *)&cdma->cmd.dl, 4);
881 
882 	/* Load command data segments. */
883 	ptr32 = (uint32_t *)cdma->cookie_list;
884 	cp = sp->pkt->pkt_data_cookie;
885 	while (seg_cnt--) {
886 		ddi_put32(cmem->acc_handle, ptr32++, cp->dmac_address);
887 		ddi_put32(cmem->acc_handle, ptr32++, cp->dmac_notused);
888 		ddi_put32(cmem->acc_handle, ptr32++, (uint32_t)cp->dmac_size);
889 		cp++;
890 	}
891 
892 	/* Sync DMA buffer. */
893 	(void) ddi_dma_sync(cmem->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
894 
895 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
896 }
897 
898 /*
899  * ql_marker
900  *	Function issues marker IOCB.
901  *
902  * Input:
903  *	ha:		adapter state pointer.
904  *	loop_id:	device loop ID
905  *	lun:		device LUN
906  *	type:		marker modifier
907  *
908  * Returns:
909  *	ql local function return status code.
910  *
911  * Context:
912  *	Interrupt or Kernel context, no mailbox commands allowed.
913  */
914 int
915 ql_marker(ql_adapter_state_t *ha, uint16_t loop_id, uint16_t lun,
916     uint8_t type)
917 {
918 	mrk_entry_t	*pkt;
919 	int		rval;
920 
921 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
922 
923 	rval = ql_req_pkt(ha, (request_t **)&pkt);
924 	if (rval == QL_SUCCESS) {
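		/*
		 * ql_req_pkt() returned with the request ring lock held;
		 * ql_isp_cmd() below updates the ring-in pointer and
		 * releases it.
		 */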
925 		pkt->entry_type = MARKER_TYPE;
926 
927 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
928 			marker_24xx_entry_t	*pkt24 =
929 			    (marker_24xx_entry_t *)pkt;
930 
931 			pkt24->modifier = type;
932 
933 			/* Set LUN number */
934 			pkt24->fcp_lun[2] = LSB(lun);
935 			pkt24->fcp_lun[3] = MSB(lun);
936 
937 			pkt24->vp_index = ha->vp_index;
938 
939 			/* Set N_port handle */
940 			ddi_put16(ha->pha->hba_buf.acc_handle,
941 			    &pkt24->n_port_hdl, loop_id);
942 
943 		} else {
944 			pkt->modifier = type;
945 
946 			pkt->lun_l = LSB(lun);
947 			pkt->lun_h = MSB(lun);
948 
949 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
950 				pkt->target_l = LSB(loop_id);
951 				pkt->target_h = MSB(loop_id);
952 			} else {
953 				pkt->target_h = LSB(loop_id);
954 			}
955 		}
956 
957 		/* Issue command to ISP */
958 		ql_isp_cmd(ha);
959 	}
960 
961 	if (rval != QL_SUCCESS) {
962 		EL(ha, "failed, rval = %xh\n", rval);
963 	} else {
964 		/*EMPTY*/
965 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
966 	}
967 	return (rval);
968 }
969 
970 /*
971  * ql_ms_iocb
972  *	Setup of name/management server IOCB.
973  *
974  * Input:
975  *	ha:	adapter state pointer.
976  *	sp:	srb structure pointer.
977  *	arg:	request queue packet.
978  *
979  * Context:
980  *	Interrupt or Kernel context, no mailbox commands allowed.
981  */
982 void
983 ql_ms_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
984 {
985 	ddi_dma_cookie_t	*cp;
986 	uint32_t		*ptr32;
987 	uint16_t		seg_cnt;
988 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
989 	ms_entry_t		*pkt = arg;
990 
991 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
992 	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
993 	/*
994 	 * Build command packet.
995 	 */
996 	pkt->entry_type = MS_TYPE;
997 
998 	/* Set loop ID */
999 	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1000 		pkt->loop_id_l = LSB(tq->loop_id);
1001 		pkt->loop_id_h = MSB(tq->loop_id);
1002 	} else {
1003 		pkt->loop_id_h = LSB(tq->loop_id);
1004 	}
1005 
1006 	/* Set ISP command timeout. */
1007 	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);
1008 
1009 	/* Set cmd data segment count. */
1010 	pkt->cmd_dseg_count_l = 1;
1011 
1012 	/* Set total data segment count */
1013 	seg_cnt = (uint16_t)(sp->pkt->pkt_resp_cookie_cnt + 1);
1014 	ddi_put16(ha->hba_buf.acc_handle, &pkt->total_dseg_count, seg_cnt);
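	/*
	 * Total segments: one for the CT command buffer plus one per
	 * response cookie.  The entry itself carries the command descriptor
	 * and the first response descriptor; remaining response cookies
	 * spill into continuation entries below.
	 */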
1015 
1016 	/* Load ct cmd byte count. */
1017 	ddi_put32(ha->hba_buf.acc_handle, &pkt->cmd_byte_count,
1018 	    (uint32_t)sp->pkt->pkt_cmdlen);
1019 
1020 	/* Load ct rsp byte count. */
1021 	ddi_put32(ha->hba_buf.acc_handle, &pkt->resp_byte_count,
1022 	    (uint32_t)sp->pkt->pkt_rsplen);
1023 
1024 	/* Load MS command data segments. */
1025 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
1026 	cp = sp->pkt->pkt_cmd_cookie;
1027 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
1028 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
1029 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
1030 	seg_cnt--;
1031 
1032 	/* Load MS response entry data segments. */
1033 	cp = sp->pkt->pkt_resp_cookie;
1034 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
1035 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
1036 	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
1037 	seg_cnt--;
1038 	cp++;
1039 
1040 	/*
1041 	 * Build continuation packets.
1042 	 */
1043 	if (seg_cnt) {
1044 		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
1045 	}
1046 
1047 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1048 }
1049 
1050 /*
1051  * ql_ms_24xx_iocb
1052  *	Setup of name/management server IOCB.
1053  *
1054  * Input:
1055  *	ha:	adapter state pointer.
1056  *	sp:	srb structure pointer.
1057  *	arg:	request queue packet.
1058  *
1059  * Context:
1060  *	Interrupt or Kernel context, no mailbox commands allowed.
1061  */
1062 void
1063 ql_ms_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
1064 {
1065 	ddi_dma_cookie_t	*cp;
1066 	uint32_t		*ptr32;
1067 	uint16_t		seg_cnt;
1068 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
1069 	ct_passthru_entry_t	*pkt = arg;
1070 	ql_adapter_state_t	*pha = ha->pha;
1071 
1072 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1073 	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
1074 	/*
1075 	 * Build command packet.
1076 	 */
1077 	pkt->entry_type = CT_PASSTHRU_TYPE;
1078 
1079 	/* Set loop ID */
1080 	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);
1081 
1082 	pkt->vp_index = ha->vp_index;
1083 
1084 	/* Set ISP command timeout. */
1085 	if (sp->isp_timeout < 0x1999) {
1086 		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
1087 		    sp->isp_timeout);
1088 	}
1089 
1090 	/* Set cmd/response data segment counts. */
1091 	ddi_put16(pha->hba_buf.acc_handle, &pkt->cmd_dseg_count, 1);
1092 	seg_cnt = (uint16_t)sp->pkt->pkt_resp_cookie_cnt;
1093 	ddi_put16(pha->hba_buf.acc_handle, &pkt->resp_dseg_count, seg_cnt);
1094 
1095 	/* Load ct cmd byte count. */
1096 	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmd_byte_count,
1097 	    (uint32_t)sp->pkt->pkt_cmdlen);
1098 
1099 	/* Load ct rsp byte count. */
1100 	ddi_put32(pha->hba_buf.acc_handle, &pkt->resp_byte_count,
1101 	    (uint32_t)sp->pkt->pkt_rsplen);
1102 
1103 	/* Load MS command entry data segments. */
1104 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
1105 	cp = sp->pkt->pkt_cmd_cookie;
1106 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
1107 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
1108 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
1109 
1110 	/* Load MS response entry data segments. */
1111 	cp = sp->pkt->pkt_resp_cookie;
1112 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
1113 	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
1114 	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
1115 	seg_cnt--;
1116 	cp++;
1117 
1118 	/*
1119 	 * Build continuation packets.
1120 	 */
1121 	if (seg_cnt) {
1122 		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
1123 	}
1124 
1125 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1126 }
1127 
1128 /*
1129  * ql_ip_iocb
1130  *	Setup of IP IOCB.
1131  *
1132  * Input:
1133  *	ha:	adapter state pointer.
1134  *	sp:	srb structure pointer.
1135  *	arg:	request queue packet.
1136  *
1137  * Context:
1138  *	Interrupt or Kernel context, no mailbox commands allowed.
1139  */
1140 void
1141 ql_ip_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
1142 {
1143 	ddi_dma_cookie_t	*cp;
1144 	uint32_t		*ptr32, cnt;
1145 	uint16_t		seg_cnt;
1146 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
1147 	ip_entry_t		*pkt = arg;
1148 
1149 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1150 
1151 	/* Set loop ID */
1152 	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1153 		pkt->loop_id_l = LSB(tq->loop_id);
1154 		pkt->loop_id_h = MSB(tq->loop_id);
1155 	} else {
1156 		pkt->loop_id_h = LSB(tq->loop_id);
1157 	}
1158 
1159 	/* Set control flags */
1160 	pkt->control_flags_l = BIT_6;
1161 	if (sp->pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
1162 		pkt->control_flags_h = BIT_7;
1163 	}
1164 
1165 	/* Set ISP command timeout. */
1166 	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);
1167 
1168 	/* Set data segment count. */
1169 	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
1170 	/* Load total byte count. */
1171 	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
1172 	    (uint32_t)sp->pkt->pkt_cmdlen);
1173 	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
1174 
1175 	/*
1176 	 * Build command packet.
1177 	 */
1178 	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1179 		pkt->entry_type = IP_A64_TYPE;
1180 		cnt = IP_A64_DATA_SEGMENTS;
1181 	} else {
1182 		pkt->entry_type = IP_TYPE;
1183 		cnt = IP_DATA_SEGMENTS;
1184 	}
1185 
1186 	/* Load command entry data segments. */
1187 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
1188 	cp = sp->pkt->pkt_cmd_cookie;
1189 	while (cnt && seg_cnt) {
1190 		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
1191 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1192 			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
1193 			    cp->dmac_notused);
1194 		}
1195 		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
1196 		    (uint32_t)cp->dmac_size);
1197 		seg_cnt--;
1198 		cnt--;
1199 		cp++;
1200 	}
1201 
1202 	/*
1203 	 * Build continuation packets.
1204 	 */
1205 	if (seg_cnt) {
1206 		ql_continuation_iocb(ha, cp, seg_cnt,
1207 		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
1208 	}
1209 
1210 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1211 }
1212 
1213 /*
1214  * ql_ip_24xx_iocb
1215  *	Setup of IP IOCB for ISP24xx.
1216  *
1217  * Input:
1218  *	ha:	adapter state pointer.
1219  *	sp:	srb structure pointer.
1220  *	arg:	request queue packet.
1221  *
1222  * Context:
1223  *	Interrupt or Kernel context, no mailbox commands allowed.
1224  */
1225 void
1226 ql_ip_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
1227 {
1228 	ddi_dma_cookie_t	*cp;
1229 	uint32_t		*ptr32;
1230 	uint16_t		seg_cnt;
1231 	ql_tgt_t		*tq = sp->lun_queue->target_queue;
1232 	ip_cmd_entry_t		*pkt = arg;
1233 
1234 	pkt->entry_type = IP_CMD_TYPE;
1235 
1236 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1237 
1238 	/* Set N_port handle */
1239 	ddi_put16(ha->hba_buf.acc_handle, &pkt->hdl_status, tq->loop_id);
1240 
1241 	/* Set ISP command timeout. */
1242 	if (sp->isp_timeout < 0x1999) {
1243 		ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout_hdl,
1244 		    sp->isp_timeout);
1245 	}
1246 
1247 	/* Set data segment count. */
1248 	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
1249 	/* Load total byte count. */
1250 	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
1251 	    (uint32_t)sp->pkt->pkt_cmdlen);
1252 	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
1253 
1254 	/* Set control flags */
1255 	ddi_put16(ha->hba_buf.acc_handle, &pkt->control_flags,
1256 	    (uint16_t)(BIT_0));
1257 
1258 	/* Set frame header control flags */
1259 	ddi_put16(ha->hba_buf.acc_handle, &pkt->frame_hdr_cntrl_flgs,
1260 	    (uint16_t)(IPCF_LAST_SEQ | IPCF_FIRST_SEQ));
1261 
1262 	/* Load command data segment. */
1263 	ptr32 = (uint32_t *)&pkt->dseg_0_address;
1264 	cp = sp->pkt->pkt_cmd_cookie;
1265 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
1266 	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
1267 	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
1268 	seg_cnt--;
1269 	cp++;
1270 
1271 	/*
1272 	 * Build continuation packets.
1273 	 */
1274 	if (seg_cnt) {
1275 		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
1276 	}
1277 
1278 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1279 }
1280 
1281 /*
1282  * ql_isp_rcvbuf
1283  *	Locates free buffers and places them on the receive buffer queue.
1284  *
1285  * Input:
1286  *	ha:	adapter state pointer.
1287  *
1288  * Context:
1289  *	Interrupt or Kernel context, no mailbox commands allowed.
1290  */
1291 void
1292 ql_isp_rcvbuf(ql_adapter_state_t *ha)
1293 {
1294 	rcvbuf_t	*container;
1295 	uint16_t	rcv_q_cnt;
1296 	uint16_t	index = 0;
1297 	uint16_t	index1 = 1;
1298 	int		debounce_count = QL_MAX_DEBOUNCE;
1299 	ql_srb_t	*sp;
1300 	fc_unsol_buf_t	*ubp;
1301 	int		ring_updated = FALSE;
1302 
1303 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
1304 		ql_isp24xx_rcvbuf(ha);
1305 		return;
1306 	}
1307 
1308 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1309 
1310 	/* Acquire adapter state lock. */
1311 	ADAPTER_STATE_LOCK(ha);
1312 
1313 	/* Calculate number of free receive buffer entries. */
1314 	index = RD16_IO_REG(ha, mailbox_out[8]);
1315 	do {
1316 		index1 = RD16_IO_REG(ha, mailbox_out[8]);
1317 		if (index1 == index) {
1318 			break;
1319 		} else {
1320 			index = index1;
1321 		}
1322 	} while (debounce_count --);
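	/*
	 * Mailbox 8 holds the ISP's receive buffer queue out-pointer; it is
	 * re-read until two consecutive reads agree (up to QL_MAX_DEBOUNCE
	 * attempts) to debounce a value that may be changing while it is
	 * sampled.
	 */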
1323 
1324 	if (debounce_count < 0) {
1325 		/* This should never happen */
1326 		EL(ha, "max mb8 debounce retries exceeded\n");
1327 	}
1328 
1329 	rcv_q_cnt = (uint16_t)(ha->rcvbuf_ring_index < index ?
1330 	    index - ha->rcvbuf_ring_index : RCVBUF_CONTAINER_CNT -
1331 	    (ha->rcvbuf_ring_index - index));
1332 
1333 	if (rcv_q_cnt == RCVBUF_CONTAINER_CNT) {
1334 		rcv_q_cnt--;
1335 	}
1336 
1337 	/* Load all free buffers in ISP receive buffer ring. */
1338 	index = 0;
1339 	while (rcv_q_cnt > (uint16_t)0 && index < QL_UB_LIMIT) {
1340 		/* Locate a buffer to give. */
1341 		QL_UB_LOCK(ha);
1342 		while (index < QL_UB_LIMIT) {
1343 			ubp = ha->ub_array[index];
1344 			if (ubp != NULL) {
1345 				sp = ubp->ub_fca_private;
1346 				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
1347 				    (ha->flags & IP_INITIALIZED) &&
1348 				    (sp->flags & SRB_UB_IN_FCA) &&
1349 				    (!(sp->flags & (SRB_UB_IN_ISP |
1350 				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
1351 				    SRB_UB_ACQUIRED)))) {
1352 					sp->flags |= SRB_UB_IN_ISP;
1353 					break;
1354 				}
1355 			}
1356 			index++;
1357 		}
1358 
1359 		if (index < QL_UB_LIMIT) {
1360 			rcv_q_cnt--;
1361 			index++;
1362 			container = ha->rcvbuf_ring_ptr;
1363 
1364 			/*
1365 			 * Build container.
1366 			 */
1367 			ddi_put32(ha->hba_buf.acc_handle,
1368 			    (uint32_t *)(void *)&container->bufp[0],
1369 			    sp->ub_buffer.cookie.dmac_address);
1370 
1371 			ddi_put32(ha->hba_buf.acc_handle,
1372 			    (uint32_t *)(void *)&container->bufp[1],
1373 			    sp->ub_buffer.cookie.dmac_notused);
1374 
1375 			ddi_put16(ha->hba_buf.acc_handle, &container->handle,
1376 			    LSW(sp->handle));
1377 
1378 			ha->ub_outcnt++;
1379 
1380 			/* Adjust ring index. */
1381 			ha->rcvbuf_ring_index++;
1382 			if (ha->rcvbuf_ring_index == RCVBUF_CONTAINER_CNT) {
1383 				ha->rcvbuf_ring_index = 0;
1384 				ha->rcvbuf_ring_ptr = ha->rcvbuf_ring_bp;
1385 			} else {
1386 				ha->rcvbuf_ring_ptr++;
1387 			}
1388 
1389 			ring_updated = TRUE;
1390 		}
1391 		QL_UB_UNLOCK(ha);
1392 	}
1393 
1394 	if (ring_updated) {
1395 		/* Sync queue. */
1396 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1397 		    (off_t)RCVBUF_Q_BUFFER_OFFSET, (size_t)RCVBUF_QUEUE_SIZE,
1398 		    DDI_DMA_SYNC_FORDEV);
1399 
1400 		/* Set chip new ring index. */
1401 		WRT16_IO_REG(ha, mailbox_in[8], ha->rcvbuf_ring_index);
1402 	}
1403 
1404 	/* Release adapter state lock. */
1405 	ADAPTER_STATE_UNLOCK(ha);
1406 
1407 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1408 }
1409 
1410 /*
1411  * ql_isp24xx_rcvbuf
1412  *	Locates free buffers and sends them to the adapter.
1413  *
1414  * Input:
1415  *	ha:	adapter state pointer.
1416  *
1417  * Context:
1418  *	Interrupt or Kernel context, no mailbox commands allowed.
1419  */
1420 static void
1421 ql_isp24xx_rcvbuf(ql_adapter_state_t *ha)
1422 {
1423 	rcvbuf_t		*container;
1424 	uint16_t		index;
1425 	ql_srb_t		*sp;
1426 	fc_unsol_buf_t		*ubp;
1427 	int			rval;
1428 	ip_buf_pool_entry_t	*pkt = NULL;
1429 
1430 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1431 
1432 	for (;;) {
1433 		/* Locate a buffer to give. */
1434 		QL_UB_LOCK(ha);
1435 		for (index = 0; index < QL_UB_LIMIT; index++) {
1436 			ubp = ha->ub_array[index];
1437 			if (ubp != NULL) {
1438 				sp = ubp->ub_fca_private;
1439 				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
1440 				    (ha->flags & IP_INITIALIZED) &&
1441 				    (sp->flags & SRB_UB_IN_FCA) &&
1442 				    (!(sp->flags & (SRB_UB_IN_ISP |
1443 				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
1444 				    SRB_UB_ACQUIRED)))) {
1445 					ha->ub_outcnt++;
1446 					sp->flags |= SRB_UB_IN_ISP;
1447 					break;
1448 				}
1449 			}
1450 		}
1451 		QL_UB_UNLOCK(ha);
1452 		if (index == QL_UB_LIMIT) {
1453 			break;
1454 		}
1455 
1456 		/* Get IOCB packet for buffers. */
1457 		if (pkt == NULL) {
1458 			rval = ql_req_pkt(ha, (request_t **)&pkt);
1459 			if (rval != QL_SUCCESS) {
1460 				EL(ha, "failed, ql_req_pkt=%x\n", rval);
1461 				QL_UB_LOCK(ha);
1462 				ha->ub_outcnt--;
1463 				sp->flags &= ~SRB_UB_IN_ISP;
1464 				QL_UB_UNLOCK(ha);
1465 				break;
1466 			}
1467 			pkt->entry_type = IP_BUF_POOL_TYPE;
1468 			container = &pkt->buffers[0];
1469 		}
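		/*
		 * Containers are packed IP_POOL_BUFFERS per IP buffer pool
		 * entry; a full entry is issued below and a partially filled
		 * one is issued after the loop ends.
		 */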
1470 
1471 		/*
1472 		 * Build container.
1473 		 */
1474 		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[0],
1475 		    sp->ub_buffer.cookie.dmac_address);
1476 		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[1],
1477 		    sp->ub_buffer.cookie.dmac_notused);
1478 		ddi_put16(ha->hba_buf.acc_handle, &container->handle,
1479 		    LSW(sp->handle));
1480 
1481 		pkt->buffer_count++;
1482 		container++;
1483 
1484 		if (pkt->buffer_count == IP_POOL_BUFFERS) {
1485 			ql_isp_cmd(ha);
1486 			pkt = NULL;
1487 		}
1488 	}
1489 
1490 	if (pkt != NULL) {
1491 		ql_isp_cmd(ha);
1492 	}
1493 
1494 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1495 }
1496