xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_iocb.c (revision 0a586cea3ceec7e5e50e7e54c745082a7a333ac2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_iocb.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_xioctl.h>
50 
51 /*
52  * Local Function Prototypes.
53  */
54 static int ql_req_pkt(ql_adapter_state_t *, request_t **);
55 static void ql_continuation_iocb(ql_adapter_state_t *, ddi_dma_cookie_t *,
56     uint16_t, boolean_t);
57 static void ql_isp24xx_rcvbuf(ql_adapter_state_t *);
58 
/*
 * ql_start_iocb
 *	The start IOCB is responsible for building request packets
 *	on request ring and modifying ISP input pointer.
 *
 * Input:
 *	vha:	adapter state pointer (physical or virtual port).
 *	sp:	srb structure pointer, or NULL to start commands
 *		waiting on the pending queue.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_start_iocb(ql_adapter_state_t *vha, ql_srb_t *sp)
{
	ql_link_t		*link;
	request_t		*pkt;
	uint64_t		*ptr64;
	uint32_t		cnt;
	ql_adapter_state_t	*ha = vha->pha;	/* ring state lives on the physical adapter */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire ring lock. */
	REQUEST_RING_LOCK(ha);

	if (sp != NULL) {
		/*
		 * If the pending queue is not empty maintain order
		 * by putting this srb at the tail and getting the head.
		 */
		if ((link = ha->pending_cmds.first) != NULL) {
			ql_add_link_b(&ha->pending_cmds, &sp->cmd);
			/* Remove command from pending command queue */
			sp = link->base_address;
			ql_remove_link(&ha->pending_cmds, &sp->cmd);
		}
	} else {
		/* Get command from pending command queue if not empty. */
		if ((link = ha->pending_cmds.first) == NULL) {
			/* Release ring specific lock */
			REQUEST_RING_UNLOCK(ha);
			QL_PRINT_3(CE_CONT, "(%d): empty done\n",
			    ha->instance);
			return;
		}
		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* start this request and as many others as possible */
	for (;;) {
		if (ha->req_q_cnt < sp->req_cnt) {
			/*
			 * Cached free count is too small; recompute it from
			 * the chip's current request-out index.
			 */
			cnt = RD16_IO_REG(ha, req_out);
			if (ha->req_ring_index < cnt)  {
				ha->req_q_cnt = (uint16_t)
				    (cnt - ha->req_ring_index);
			} else {
				ha->req_q_cnt = (uint16_t)(REQUEST_ENTRY_CNT -
				    (ha->req_ring_index - cnt));
			}
			/*
			 * Leave one entry unused (presumably so a full ring
			 * is distinguishable from an empty one — standard
			 * ring-buffer convention; confirm against fw spec).
			 */
			if (ha->req_q_cnt != 0) {
				ha->req_q_cnt--;
			}

			/*
			 * If no room in request ring put this srb at
			 * the head of the pending queue and exit.
			 */
			if (ha->req_q_cnt < sp->req_cnt) {
				QL_PRINT_8(CE_CONT, "(%d): request ring full,"
				    " req_q_cnt=%d, req_ring_index=%d\n",
				    ha->instance, ha->req_q_cnt,
				    ha->req_ring_index);
				ql_add_link_t(&ha->pending_cmds, &sp->cmd);
				break;
			}
		}

		/*
		 * Check for room in outstanding command list.
		 * Index 0 is never used: the scan starts at 1 and
		 * wraps back to 1.
		 */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			ha->osc_index++;
			if (ha->osc_index == MAX_OUTSTANDING_COMMANDS) {
				ha->osc_index = 1;
			}
			if (ha->outstanding_cmds[ha->osc_index] == NULL) {
				break;
			}
		}
		/*
		 * If no room in outstanding array put this srb at
		 * the head of the pending queue and exit.
		 */
		if (cnt == MAX_OUTSTANDING_COMMANDS) {
			QL_PRINT_8(CE_CONT, "(%d): no room in outstanding "
			    "array\n", ha->instance);
			ql_add_link_t(&ha->pending_cmds, &sp->cmd);
			break;
		}

		/* nothing to stop us now. */
		ha->outstanding_cmds[ha->osc_index] = sp;
		/* create and save a unique response identifier in the srb */
		sp->handle = ha->adapter_stats->ncmds << OSC_INDEX_SHIFT |
		    ha->osc_index;
		ha->req_q_cnt -= sp->req_cnt;

		/* build the iocb in the request ring */
		pkt = ha->request_ring_ptr;
		sp->flags |= SRB_IN_TOKEN_ARRAY;

		/* Zero out packet (eight 8-byte stores = 64 bytes). */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/* Setup IOCB common data. */
		pkt->entry_count = (uint8_t)sp->req_cnt;
		pkt->sys_define = (uint8_t)ha->req_ring_index;
		/* mark the iocb with the response identifier */
		ddi_put32(ha->hba_buf.acc_handle, &pkt->handle,
		    (uint32_t)sp->handle);

		/* Setup IOCB unique data via the srb's build callback. */
		(sp->iocb)(vha, sp, pkt);

		sp->flags |= SRB_ISP_STARTED;

		QL_PRINT_5(CE_CONT, "(%d,%d): req packet, sp=%p\n",
		    ha->instance, vha->vp_index, (void *)sp);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);

		/* Sync DMA buffer before handing the entry to the chip. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
		    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Adjust ring index, wrapping at end of ring. */
		ha->req_ring_index++;
		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
			ha->req_ring_index = 0;
			ha->request_ring_ptr = ha->request_ring_bp;
		} else {
			ha->request_ring_ptr++;
		}

		/* Reset watchdog timer */
		sp->wdg_q_time = sp->init_wdg_q_time;

		/*
		 * Send it by setting the new ring index in the ISP Request
		 * Ring In Pointer register.  This is the mechanism
		 * used to notify the isp that a new iocb has been
		 * placed on the request ring.
		 */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			uint32_t	w32;

			/*
			 * ISP8021: write the doorbell and spin until the
			 * read-back register echoes the written value.
			 */
			w32 = ha->req_ring_index << 16 |
			    ha->function_number << 5 | 4;
			do {
				ddi_put32(ha->db_dev_handle, ha->nx_req_in,
				    w32);
			} while (RD_REG_DWORD(ha, ha->db_read) != w32);

		} else {
			WRT16_IO_REG(ha, req_in, ha->req_ring_index);
		}

		/* Update outstanding command count statistic. */
		ha->adapter_stats->ncmds++;

		/* if there is a pending command, try to start it. */
		if ((link = ha->pending_cmds.first) == NULL) {
			break;
		}

		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* Release ring specific lock */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
251 
/*
 * ql_req_pkt
 *	Function is responsible for locking ring and
 *	getting a zeroed out request packet.
 *
 * Input:
 *	vha:	adapter state pointer.
 *	pktp:	address for packet pointer.
 *
 * Returns:
 *	ql local function return status code.  On QL_SUCCESS the
 *	request ring lock is still HELD and *pktp points at a zeroed
 *	ring entry; the lock is released later by ql_isp_cmd().
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_req_pkt(ql_adapter_state_t *vha, request_t **pktp)
{
	uint16_t		cnt;
	uint32_t		*long_ptr;
	uint32_t		timer;
	int			rval = QL_FUNCTION_TIMEOUT;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for 30 seconds for slot (30000 polls of 1ms each). */
	for (timer = 30000; timer != 0; timer--) {
		/* Acquire ring lock. */
		REQUEST_RING_LOCK(ha);

		if (ha->req_q_cnt == 0) {
			/*
			 * Calculate number of free request entries from
			 * the chip's current request-out index.
			 */
			cnt = RD16_IO_REG(ha, req_out);
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = (uint16_t)
				    (cnt - ha->req_ring_index);
			} else {
				ha->req_q_cnt = (uint16_t)
				    (REQUEST_ENTRY_CNT -
				    (ha->req_ring_index - cnt));
			}
			/* Keep one ring entry in reserve. */
			if (ha->req_q_cnt != 0) {
				ha->req_q_cnt--;
			}
		}

		/* Found empty request ring slot? */
		if (ha->req_q_cnt != 0) {
			/* Consume one entry for the packet we hand out. */
			ha->req_q_cnt--;
			*pktp = ha->request_ring_ptr;

			/* Zero out packet, one 32-bit word at a time. */
			long_ptr = (uint32_t *)ha->request_ring_ptr;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE/4; cnt++) {
				*long_ptr++ = 0;
			}

			/* Setup IOCB common data. */
			ha->request_ring_ptr->entry_count = 1;
			ha->request_ring_ptr->sys_define =
			    (uint8_t)ha->req_ring_index;
			ddi_put32(ha->hba_buf.acc_handle,
			    &ha->request_ring_ptr->handle,
			    (uint32_t)QL_FCA_BRAND);

			rval = QL_SUCCESS;

			/* NOTE: break with the ring lock still held. */
			break;
		}

		/* Release request queue lock. */
		REQUEST_RING_UNLOCK(ha);

		drv_usecwait(MILLISEC);

		/* Check for pending interrupts. */
		/*
		 * XXX protect interrupt routine from calling itself.
		 * Need to revisit this routine. So far we never
		 * hit this case as req slot was available
		 */
		if ((!(curthread->t_flag & T_INTR_THREAD)) &&
		    INTERRUPT_PENDING(ha)) {
			(void) ql_isr((caddr_t)ha);
			INTR_LOCK(ha);
			ha->intr_claimed = TRUE;
			INTR_UNLOCK(ha);
		}
	}

	if (rval != QL_SUCCESS) {
		/* Timed out; request a full adapter reset. */
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
		EL(ha, "failed, rval = %xh, isp_abort_needed\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
352 
/*
 * ql_isp_cmd
 *	Function is responsible for modifying ISP input pointer.
 *	This action notifies the isp that a new request has been
 *	added to the request ring.
 *
 *	Releases the ring lock acquired earlier by ql_req_pkt().
 *
 * Input:
 *	vha:	adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_isp_cmd(ql_adapter_state_t *vha)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PRINT_5(CE_CONT, "(%d): req packet:\n", ha->instance);
	QL_DUMP_5((uint8_t *)ha->request_ring_ptr, 8, REQUEST_ENTRY_SIZE);

	/* Sync DMA buffer so the chip sees the completed entry. */
	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
	    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
	    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
	    DDI_DMA_SYNC_FORDEV);

	/* Adjust ring index, wrapping at end of ring. */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring_bp;
	} else {
		ha->request_ring_ptr++;
	}

	/* Set chip new ring index. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		uint32_t	w32;

		/*
		 * ISP8021: write the doorbell and spin until the
		 * read-back register echoes the written value.
		 */
		w32 = ha->req_ring_index << 16 |
		    ha->function_number << 5 | 4;
		do {
			ddi_put32(ha->db_dev_handle, ha->nx_req_in, w32);
		} while (RD_REG_DWORD(ha, ha->db_read) != w32);

	} else {
		WRT16_IO_REG(ha, req_in, ha->req_ring_index);
	}

	/* Release ring lock. */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
411 
/*
 * ql_command_iocb
 *	Setup of SCSI FCP command IOCB (pre-24xx command type 2/3).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_command_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set LUN number */
	pkt->lun_l = LSB(sp->lun_queue->lun_no);
	pkt->lun_h = MSB(sp->lun_queue->lun_no);

	/* Set target ID; extended firmware carries a 16-bit loop ID. */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->target_l = LSB(tq->loop_id);
		pkt->target_h = MSB(tq->loop_id);
	} else {
		pkt->target_h = LSB(tq->loop_id);
	}

	/* Set tag queue control flags; simple tag is the default. */
	if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_HTAG);
	} else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_OTAG);
	/* else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) */
	} else {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_STAG);
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Load SCSI CDB */
	ddi_rep_put8(ha->hba_buf.acc_handle, fcp->fcp_cdb,
	    pkt->scsi_cdb, MAX_CMDSZ, DDI_DEV_AUTOINCR);

	/*
	 * Choose entry type by addressing mode; type 3 entries carry
	 * 64-bit data segment addresses, type 2 entries 32-bit.
	 */
	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		pkt->entry_type = IOCB_CMD_TYPE_3;
		cnt = CMD_TYPE_3_DATA_SEGMENTS;
	} else {
		pkt->entry_type = IOCB_CMD_TYPE_2;
		cnt = CMD_TYPE_2_DATA_SEGMENTS;
	}

	/* No data transfer: nothing more to fill in. */
	if (fcp->fcp_data_len == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->IOControlRequests++;
		return;
	}

	/*
	 * Set transfer direction. Load Data segments.
	 */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_OUT);
		ha->xioctl->IOOutputRequests++;
		ha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_IN);
		ha->xioctl->IOInputRequests++;
		ha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count, fcp->fcp_data_len);

	/*
	 * Load command data segments: as many DMA cookies as fit in
	 * this entry (cnt); any remainder goes to continuation IOCBs.
	 */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_data_cookie;
	while (cnt && seg_cnt) {
		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* Upper 32 bits of the 64-bit DMA address. */
			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
			    cp->dmac_notused);
		}
		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
		    (uint32_t)cp->dmac_size);
		seg_cnt--;
		cnt--;
		cp++;
	}

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
531 
/*
 * ql_continuation_iocb
 *	Setup of continuation IOCB(s) carrying the data segments that
 *	did not fit into the preceding command IOCB.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	cp:		cookie list pointer (next unloaded DMA cookie).
 *	seg_cnt:	number of segments remaining.
 *	addr64:		B_TRUE for 64 bit segment addresses.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 *	Caller holds the request ring lock (entries are taken directly
 *	from the ring here).
 */
static void
ql_continuation_iocb(ql_adapter_state_t *ha, ddi_dma_cookie_t *cp,
    uint16_t seg_cnt, boolean_t addr64)
{
	cont_entry_t	*pkt;
	uint64_t	*ptr64;
	uint32_t	*ptr32, cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Build continuation packets.
	 */
	while (seg_cnt) {
		/*
		 * Sync the entry at the current index (the one the
		 * caller just finished) before advancing past it.
		 */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
		    REQUEST_Q_BUFFER_OFFSET), REQUEST_ENTRY_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Adjust ring pointer, and deal with wrap. */
		ha->req_ring_index++;
		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
			ha->req_ring_index = 0;
			ha->request_ring_ptr = ha->request_ring_bp;
		} else {
			ha->request_ring_ptr++;
		}
		pkt = (cont_entry_t *)ha->request_ring_ptr;

		/* Zero out packet (eight 8-byte stores = 64 bytes). */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/*
		 * Build continuation packet.
		 */
		pkt->entry_count = 1;
		pkt->sys_define = (uint8_t)ha->req_ring_index;
		if (addr64) {
			/* Type 1: 64-bit address/length triplets. */
			pkt->entry_type = CONTINUATION_TYPE_1;
			cnt = CONT_TYPE_1_DATA_SEGMENTS;
			ptr32 = (uint32_t *)
			    &((cont_type_1_entry_t *)pkt)->dseg_0_address;
			while (cnt && seg_cnt) {
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_notused);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		} else {
			/* Type 0: 32-bit address/length pairs. */
			pkt->entry_type = CONTINUATION_TYPE_0;
			cnt = CONT_TYPE_0_DATA_SEGMENTS;
			ptr32 = (uint32_t *)&pkt->dseg_0_address;
			while (cnt && seg_cnt) {
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		}

		QL_PRINT_5(CE_CONT, "(%d): packet:\n", ha->instance);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
624 
/*
 * ql_command_24xx_iocb
 *	Setup of ISP24xx command IOCB (command type 7).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_command_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd_24xx_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;	/* DMA/access handles live on physical HBA */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	pkt->entry_type = IOCB_CMD_TYPE_7;

	/* Set LUN number (bytes 2/3 of the 8-byte FCP LUN field). */
	pkt->fcp_lun[2] = LSB(sp->lun_queue->lun_no);
	pkt->fcp_lun[3] = MSB(sp->lun_queue->lun_no);

	/* Set N_port handle */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID (24-bit FC D_ID, little-endian byte order). */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout; values >= 0x1999 are left at the
	 * zeroed default (presumably "no firmware timeout" — confirm
	 * against the ISP24xx firmware interface spec).
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB, then byte-swap each 32-bit word in place. */
	ddi_rep_put8(pha->hba_buf.acc_handle, fcp->fcp_cdb, pkt->scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
	for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
		ql_chg_endian((uint8_t *)&pkt->scsi_cdb + cnt, 4);
	}

	/*
	 * Set tag queue control flags
	 * Note:
	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 *	problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		pkt->task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		pkt->task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		pkt->task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		pkt->task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		pkt->task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	/* No data transfer: nothing more to fill in. */
	if (fcp->fcp_data_len == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		pha->xioctl->IOControlRequests++;
		return;
	}

	/* Set transfer direction. */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);

	/*
	 * Load command data segment.  A type 7 entry holds exactly one
	 * segment (64-bit address + length); the rest go to
	 * continuation IOCBs.
	 */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_data_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
748 
749 /*
750  * ql_marker
751  *	Function issues marker IOCB.
752  *
753  * Input:
754  *	ha:		adapter state pointer.
755  *	loop_id:	device loop ID
756  *	lun:		device LUN
757  *	type:		marker modifier
758  *
759  * Returns:
760  *	ql local function return status code.
761  *
762  * Context:
763  *	Interrupt or Kernel context, no mailbox commands allowed.
764  */
765 int
766 ql_marker(ql_adapter_state_t *ha, uint16_t loop_id, uint16_t lun,
767     uint8_t type)
768 {
769 	mrk_entry_t	*pkt;
770 	int		rval;
771 
772 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
773 
774 	rval = ql_req_pkt(ha, (request_t **)&pkt);
775 	if (rval == QL_SUCCESS) {
776 		pkt->entry_type = MARKER_TYPE;
777 
778 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
779 			marker_24xx_entry_t	*pkt24 =
780 			    (marker_24xx_entry_t *)pkt;
781 
782 			pkt24->modifier = type;
783 
784 			/* Set LUN number */
785 			pkt24->fcp_lun[2] = LSB(lun);
786 			pkt24->fcp_lun[3] = MSB(lun);
787 
788 			pkt24->vp_index = ha->vp_index;
789 
790 			/* Set N_port handle */
791 			ddi_put16(ha->pha->hba_buf.acc_handle,
792 			    &pkt24->n_port_hdl, loop_id);
793 
794 		} else {
795 			pkt->modifier = type;
796 
797 			pkt->lun_l = LSB(lun);
798 			pkt->lun_h = MSB(lun);
799 
800 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
801 				pkt->target_l = LSB(loop_id);
802 				pkt->target_h = MSB(loop_id);
803 			} else {
804 				pkt->target_h = LSB(loop_id);
805 			}
806 		}
807 
808 		/* Issue command to ISP */
809 		ql_isp_cmd(ha);
810 	}
811 
812 	if (rval != QL_SUCCESS) {
813 		EL(ha, "failed, rval = %xh\n", rval);
814 	} else {
815 		/*EMPTY*/
816 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
817 	}
818 	return (rval);
819 }
820 
/*
 * ql_ms_iocb
 *	Setup of name/management server IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ms_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = MS_TYPE;

	/* Set loop ID; extended firmware carries a 16-bit loop ID. */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set cmd data segment count. */
	pkt->cmd_dseg_count_l = 1;

	/* Set total data segment count: all response cookies + 1 cmd. */
	seg_cnt = (uint16_t)(sp->pkt->pkt_resp_cookie_cnt + 1);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->total_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command data segment (single cookie). */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
	seg_cnt--;

	/* Load first MS response data segment. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for remaining response cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
900 
/*
 * ql_ms_24xx_iocb
 *	Setup of ISP24xx name/management server (CT passthru) IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ct_passthru_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;	/* access handles live on physical HBA */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = CT_PASSTHRU_TYPE;

	/* Set loop ID */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout; values >= 0x1999 are left at the
	 * zeroed default (presumably "no firmware timeout" — confirm
	 * against the ISP24xx firmware interface spec).
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/*
	 * Set cmd/response data segment counts.  Unlike ql_ms_iocb(),
	 * seg_cnt here counts response cookies only; the single
	 * command segment has its own count field.
	 */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->cmd_dseg_count, 1);
	seg_cnt = (uint16_t)sp->pkt->pkt_resp_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->resp_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command entry data segment (single cookie). */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);

	/* Load first MS response entry data segment. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for remaining response cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
978 
/*
 * ql_ip_iocb
 *	Setup of IP over FC IOCB (pre-24xx).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set loop ID; extended firmware carries a 16-bit loop ID. */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set control flags; BIT_7 marks a high-priority transfer. */
	pkt->control_flags_l = BIT_6;
	if (sp->pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
		pkt->control_flags_h = BIT_7;
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/*
	 * Build command packet.  Choose entry type by addressing mode;
	 * A64 entries carry 64-bit data segment addresses.
	 */
	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		pkt->entry_type = IP_A64_TYPE;
		cnt = IP_A64_DATA_SEGMENTS;
	} else {
		pkt->entry_type = IP_TYPE;
		cnt = IP_DATA_SEGMENTS;
	}

	/*
	 * Load command entry data segments: as many DMA cookies as fit
	 * in this entry (cnt); any remainder goes to continuation IOCBs.
	 */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	while (cnt && seg_cnt) {
		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* Upper 32 bits of the 64-bit DMA address. */
			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
			    cp->dmac_notused);
		}
		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
		    (uint32_t)cp->dmac_size);
		seg_cnt--;
		cnt--;
		cp++;
	}

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
1063 
/*
 * ql_ip_24xx_iocb
 *	Setup of IP over FC IOCB for ISP24xx.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_cmd_entry_t		*pkt = arg;

	pkt->entry_type = IP_CMD_TYPE;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set N_port handle */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->hdl_status, tq->loop_id);

	/*
	 * Set ISP command timeout; values >= 0x1999 are left at the
	 * zeroed default (presumably "no firmware timeout" — confirm
	 * against the ISP24xx firmware interface spec).
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout_hdl,
		    sp->isp_timeout);
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Set control flags */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->control_flags,
	    (uint16_t)(BIT_0));

	/* Set frame header control flags: first and last of sequence. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->frame_hdr_cntrl_flgs,
	    (uint16_t)(IPCF_LAST_SEQ | IPCF_FIRST_SEQ));

	/*
	 * Load command data segment.  This entry holds exactly one
	 * segment (64-bit address + length); the rest go to
	 * continuation IOCBs.
	 */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
1131 
1132 /*
1133  * ql_isp_rcvbuf
1134  *	Locates free buffers and places it on the receive buffer queue.
1135  *
1136  * Input:
1137  *	ha = adapter state pointer.
1138  *
1139  * Context:
1140  *	Interrupt or Kernel context, no mailbox commands allowed.
1141  */
1142 void
1143 ql_isp_rcvbuf(ql_adapter_state_t *ha)
1144 {
1145 	rcvbuf_t	*container;
1146 	uint16_t	rcv_q_cnt;
1147 	uint16_t	index = 0;
1148 	uint16_t	index1 = 1;
1149 	int		debounce_count = QL_MAX_DEBOUNCE;
1150 	ql_srb_t	*sp;
1151 	fc_unsol_buf_t	*ubp;
1152 	int		ring_updated = FALSE;
1153 
1154 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
1155 		ql_isp24xx_rcvbuf(ha);
1156 		return;
1157 	}
1158 
1159 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1160 
1161 	/* Acquire adapter state lock. */
1162 	ADAPTER_STATE_LOCK(ha);
1163 
1164 	/* Calculate number of free receive buffer entries. */
1165 	index = RD16_IO_REG(ha, mailbox_out[8]);
1166 	do {
1167 		index1 = RD16_IO_REG(ha, mailbox_out[8]);
1168 		if (index1 == index) {
1169 			break;
1170 		} else {
1171 			index = index1;
1172 		}
1173 	} while (debounce_count --);
1174 
1175 	if (debounce_count < 0) {
1176 		/* This should never happen */
1177 		EL(ha, "max mb8 debounce retries exceeded\n");
1178 	}
1179 
1180 	rcv_q_cnt = (uint16_t)(ha->rcvbuf_ring_index < index ?
1181 	    index - ha->rcvbuf_ring_index : RCVBUF_CONTAINER_CNT -
1182 	    (ha->rcvbuf_ring_index - index));
1183 
1184 	if (rcv_q_cnt == RCVBUF_CONTAINER_CNT) {
1185 		rcv_q_cnt--;
1186 	}
1187 
1188 	/* Load all free buffers in ISP receive buffer ring. */
1189 	index = 0;
1190 	while (rcv_q_cnt > (uint16_t)0 && index < QL_UB_LIMIT) {
1191 		/* Locate a buffer to give. */
1192 		QL_UB_LOCK(ha);
1193 		while (index < QL_UB_LIMIT) {
1194 			ubp = ha->ub_array[index];
1195 			if (ubp != NULL) {
1196 				sp = ubp->ub_fca_private;
1197 				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
1198 				    (ha->flags & IP_INITIALIZED) &&
1199 				    (sp->flags & SRB_UB_IN_FCA) &&
1200 				    (!(sp->flags & (SRB_UB_IN_ISP |
1201 				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
1202 				    SRB_UB_ACQUIRED)))) {
1203 					sp->flags |= SRB_UB_IN_ISP;
1204 					break;
1205 				}
1206 			}
1207 			index++;
1208 		}
1209 
1210 		if (index < QL_UB_LIMIT) {
1211 			rcv_q_cnt--;
1212 			index++;
1213 			container = ha->rcvbuf_ring_ptr;
1214 
1215 			/*
1216 			 * Build container.
1217 			 */
1218 			ddi_put32(ha->hba_buf.acc_handle,
1219 			    (uint32_t *)(void *)&container->bufp[0],
1220 			    sp->ub_buffer.cookie.dmac_address);
1221 
1222 			ddi_put32(ha->hba_buf.acc_handle,
1223 			    (uint32_t *)(void *)&container->bufp[1],
1224 			    sp->ub_buffer.cookie.dmac_notused);
1225 
1226 			ddi_put16(ha->hba_buf.acc_handle, &container->handle,
1227 			    LSW(sp->handle));
1228 
1229 			ha->ub_outcnt++;
1230 
1231 			/* Adjust ring index. */
1232 			ha->rcvbuf_ring_index++;
1233 			if (ha->rcvbuf_ring_index == RCVBUF_CONTAINER_CNT) {
1234 				ha->rcvbuf_ring_index = 0;
1235 				ha->rcvbuf_ring_ptr = ha->rcvbuf_ring_bp;
1236 			} else {
1237 				ha->rcvbuf_ring_ptr++;
1238 			}
1239 
1240 			ring_updated = TRUE;
1241 		}
1242 		QL_UB_UNLOCK(ha);
1243 	}
1244 
1245 	if (ring_updated) {
1246 		/* Sync queue. */
1247 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1248 		    (off_t)RCVBUF_Q_BUFFER_OFFSET, (size_t)RCVBUF_QUEUE_SIZE,
1249 		    DDI_DMA_SYNC_FORDEV);
1250 
1251 		/* Set chip new ring index. */
1252 		WRT16_IO_REG(ha, mailbox_in[8], ha->rcvbuf_ring_index);
1253 	}
1254 
1255 	/* Release adapter state lock. */
1256 	ADAPTER_STATE_UNLOCK(ha);
1257 
1258 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1259 }
1260 
1261 /*
1262  * ql_isp24xx_rcvbuf
1263  *	Locates free buffers and send it to adapter.
1264  *
1265  * Input:
1266  *	ha = adapter state pointer.
1267  *
1268  * Context:
1269  *	Interrupt or Kernel context, no mailbox commands allowed.
1270  */
1271 static void
1272 ql_isp24xx_rcvbuf(ql_adapter_state_t *ha)
1273 {
1274 	rcvbuf_t		*container;
1275 	uint16_t		index;
1276 	ql_srb_t		*sp;
1277 	fc_unsol_buf_t		*ubp;
1278 	int			rval;
1279 	ip_buf_pool_entry_t	*pkt = NULL;
1280 
1281 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1282 
1283 	for (;;) {
1284 		/* Locate a buffer to give. */
1285 		QL_UB_LOCK(ha);
1286 		for (index = 0; index < QL_UB_LIMIT; index++) {
1287 			ubp = ha->ub_array[index];
1288 			if (ubp != NULL) {
1289 				sp = ubp->ub_fca_private;
1290 				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
1291 				    (ha->flags & IP_INITIALIZED) &&
1292 				    (sp->flags & SRB_UB_IN_FCA) &&
1293 				    (!(sp->flags & (SRB_UB_IN_ISP |
1294 				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
1295 				    SRB_UB_ACQUIRED)))) {
1296 					ha->ub_outcnt++;
1297 					sp->flags |= SRB_UB_IN_ISP;
1298 					break;
1299 				}
1300 			}
1301 		}
1302 		QL_UB_UNLOCK(ha);
1303 		if (index == QL_UB_LIMIT) {
1304 			break;
1305 		}
1306 
1307 		/* Get IOCB packet for buffers. */
1308 		if (pkt == NULL) {
1309 			rval = ql_req_pkt(ha, (request_t **)&pkt);
1310 			if (rval != QL_SUCCESS) {
1311 				EL(ha, "failed, ql_req_pkt=%x\n", rval);
1312 				QL_UB_LOCK(ha);
1313 				ha->ub_outcnt--;
1314 				sp->flags &= ~SRB_UB_IN_ISP;
1315 				QL_UB_UNLOCK(ha);
1316 				break;
1317 			}
1318 			pkt->entry_type = IP_BUF_POOL_TYPE;
1319 			container = &pkt->buffers[0];
1320 		}
1321 
1322 		/*
1323 		 * Build container.
1324 		 */
1325 		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[0],
1326 		    sp->ub_buffer.cookie.dmac_address);
1327 		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[1],
1328 		    sp->ub_buffer.cookie.dmac_notused);
1329 		ddi_put16(ha->hba_buf.acc_handle, &container->handle,
1330 		    LSW(sp->handle));
1331 
1332 		pkt->buffer_count++;
1333 		container++;
1334 
1335 		if (pkt->buffer_count == IP_POOL_BUFFERS) {
1336 			ql_isp_cmd(ha);
1337 			pkt = NULL;
1338 		}
1339 	}
1340 
1341 	if (pkt != NULL) {
1342 		ql_isp_cmd(ha);
1343 	}
1344 
1345 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1346 }
1347