1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2015 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 /*
29 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
30 *
31 * ***********************************************************************
32 * * **
33 * * NOTICE **
34 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
35 * * ALL RIGHTS RESERVED **
36 * * **
37 * ***********************************************************************
38 *
39 */
40
41 #include <ql_apps.h>
42 #include <ql_api.h>
43 #include <ql_debug.h>
44 #include <ql_iocb.h>
45 #include <ql_isr.h>
46 #include <ql_nx.h>
47 #include <ql_xioctl.h>
48 #include <ql_fm.h>
49
50
51 /*
52 * Local Function Prototypes.
53 */
54 static int ql_req_pkt(ql_adapter_state_t *, ql_request_q_t *, request_t **);
55 static void ql_isp_cmd(ql_adapter_state_t *, ql_request_q_t *);
56 static void ql_continuation_iocb(ql_adapter_state_t *, ql_request_q_t *,
57 ddi_dma_cookie_t *, uint16_t, boolean_t);
58 static void ql_isp24xx_rcvbuf(ql_adapter_state_t *);
59 static void ql_cmd_24xx_type_6_iocb(ql_adapter_state_t *, ql_request_q_t *,
60 ql_srb_t *, void *);
61
62 /*
63 * ql_start_iocb
64 * The start IOCB is responsible for building request packets
65 * on request ring and modifying ISP input pointer.
66 *
67 * Input:
68 * ha: adapter state pointer.
69 * sp: srb structure pointer.
70 *
71 * Context:
72 * Interrupt or Kernel context, no mailbox commands allowed.
73 */
void
ql_start_iocb(ql_adapter_state_t *vha, ql_srb_t *sp)
{
	ql_link_t		*link;
	ql_request_q_t		*req_q;
	request_t		*pkt;
	uint64_t		*ptr64;
	uint32_t		cnt;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/* Acquire ring lock. */
	REQUEST_RING_LOCK(ha);

	if (sp != NULL) {
		/*
		 * If the pending queue is not empty maintain order
		 * by putting this srb at the tail and getting the head.
		 */
		if ((link = ha->pending_cmds.first) != NULL) {
			ql_add_link_b(&ha->pending_cmds, &sp->cmd);
			/* Remove command from pending command queue */
			sp = link->base_address;
			ql_remove_link(&ha->pending_cmds, &sp->cmd);
		}
	} else {
		/* Get command from pending command queue if not empty. */
		if ((link = ha->pending_cmds.first) == NULL) {
			/* Release ring specific lock */
			REQUEST_RING_UNLOCK(ha);
			QL_PRINT_3(ha, "empty done\n");
			return;
		}
		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* start this request and as many others as possible */
	for (;;) {
		/*
		 * Commands bound to a secondary response queue go on
		 * request queue 1 when one exists; everything else
		 * uses the base request queue.
		 */
		if (ha->req_q[1] != NULL && sp->rsp_q_number != 0) {
			req_q = ha->req_q[1];
		} else {
			req_q = ha->req_q[0];
		}

		if (req_q->req_q_cnt < sp->req_cnt) {
			/* Calculate number of free request entries. */
			if (ha->flags & QUEUE_SHADOW_PTRS) {
				/*
				 * Firmware maintains its out-pointer in a
				 * host-memory shadow; sync and read that
				 * instead of doing a register access.
				 */
				(void) ddi_dma_sync(req_q->req_ring.dma_handle,
				    (off_t)req_q->req_out_shadow_ofst,
				    SHADOW_ENTRY_SIZE, DDI_DMA_SYNC_FORCPU);
				cnt = ddi_get32(req_q->req_ring.acc_handle,
				    req_q->req_out_shadow_ptr);
			} else if (ha->flags & MULTI_QUEUE) {
				cnt = RD16_MBAR_REG(ha, req_q->mbar_req_out);
			} else {
				cnt = RD16_IO_REG(ha, req_out);
			}
			if (req_q->req_ring_index < cnt) {
				req_q->req_q_cnt = (uint16_t)
				    (cnt - req_q->req_ring_index);
			} else {
				req_q->req_q_cnt =
				    (uint16_t)(req_q->req_entry_cnt -
				    (req_q->req_ring_index - cnt));
			}
			/*
			 * Keep one entry in reserve so the in-pointer can
			 * never catch up to the out-pointer (a full ring
			 * would otherwise be indistinguishable from empty).
			 */
			if (req_q->req_q_cnt != 0) {
				req_q->req_q_cnt--;
			}

			/*
			 * If no room in request ring put this srb at
			 * the head of the pending queue and exit.
			 */
			if (req_q->req_q_cnt < sp->req_cnt) {
				QL_PRINT_8(ha, "request ring full,"
				    " req_q_cnt=%d, req_ring_index=%d\n",
				    req_q->req_q_cnt, req_q->req_ring_index);
				ql_add_link_t(&ha->pending_cmds, &sp->cmd);
				break;
			}
		}

		/*
		 * Check for room in outstanding command list.
		 * Slot 0 is never used; the scan starts after the last
		 * slot handed out and wraps from osc_max_cnt back to 1.
		 */
		for (cnt = 1; cnt < ha->osc_max_cnt; cnt++) {
			ha->osc_index++;
			if (ha->osc_index == ha->osc_max_cnt) {
				ha->osc_index = 1;
			}
			if (ha->outstanding_cmds[ha->osc_index] == NULL) {
				break;
			}
		}
		/*
		 * If no room in outstanding array put this srb at
		 * the head of the pending queue and exit.
		 */
		if (cnt == ha->osc_max_cnt) {
			QL_PRINT_8(ha, "no room in outstanding array\n");
			ql_add_link_t(&ha->pending_cmds, &sp->cmd);
			break;
		}

		/* nothing to stop us now. */
		ha->outstanding_cmds[ha->osc_index] = sp;
		/*
		 * create and save a unique response identifier in the srb:
		 * the running command count is folded in above the slot
		 * index so a recycled slot still yields a fresh handle.
		 */
		sp->handle = ha->adapter_stats->ncmds << OSC_INDEX_SHIFT |
		    ha->osc_index;
		req_q->req_q_cnt = (uint16_t)(req_q->req_q_cnt - sp->req_cnt);

		/* build the iocb in the request ring */
		pkt = req_q->req_ring_ptr;
		sp->request_ring_ptr = pkt;
		sp->req_q_number = req_q->req_q_number;
		sp->flags |= SRB_IN_TOKEN_ARRAY;

		/* Zero out packet (8 * 8 = 64 bytes, one request entry). */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/* Setup IOCB common data. */
		pkt->entry_count = (uint8_t)sp->req_cnt;
		if (ha->req_q[1] != NULL && sp->rsp_q_number != 0) {
			/*
			 * entry_status is repurposed on the way in to carry
			 * the response queue number for multi-queue mode.
			 */
			pkt->entry_status = sp->rsp_q_number;
		}
		pkt->sys_define = (uint8_t)req_q->req_ring_index;

		/* mark the iocb with the response identifier */
		ddi_put32(req_q->req_ring.acc_handle, &pkt->handle,
		    (uint32_t)sp->handle);

		/* Setup IOCB unique data via the srb's iocb build routine. */
		(sp->iocb)(vha, req_q, sp, pkt);

		sp->flags |= SRB_ISP_STARTED;

		QL_PRINT_5(ha, "req packet, sp=%p\n", (void *)sp);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);

		/* Sync DMA buffer so the ISP sees the completed IOCB. */
		(void) ddi_dma_sync(req_q->req_ring.dma_handle,
		    (off_t)(req_q->req_ring_index * REQUEST_ENTRY_SIZE),
		    (size_t)REQUEST_ENTRY_SIZE, DDI_DMA_SYNC_FORDEV);

		/*
		 * Adjust ring index.
		 * NOTE(review): wrap check uses the global REQUEST_ENTRY_CNT
		 * while the free-count math above uses req_q->req_entry_cnt;
		 * confirm the two always match for every request queue.
		 */
		req_q->req_ring_index++;
		if (req_q->req_ring_index == REQUEST_ENTRY_CNT) {
			req_q->req_ring_index = 0;
			req_q->req_ring_ptr = req_q->req_ring.bp;
		} else {
			req_q->req_ring_ptr++;
		}

		/* Reset watchdog timer */
		sp->wdg_q_time = sp->init_wdg_q_time;

		/*
		 * Send it by setting the new ring index in the ISP Request
		 * Ring In Pointer register.  This is the mechanism
		 * used to notify the isp that a new iocb has been
		 * placed on the request ring.
		 */
		if (ha->flags & MULTI_QUEUE) {
			WR16_MBAR_REG(ha, req_q->mbar_req_in,
			    req_q->req_ring_index);
		} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ql_8021_wr_req_in(ha, req_q->req_ring_index);
		} else {
			WRT16_IO_REG(ha, req_in, req_q->req_ring_index);
		}

		/* Update outstanding command count statistic. */
		ha->adapter_stats->ncmds++;

		/* if there is a pending command, try to start it. */
		if ((link = ha->pending_cmds.first) == NULL) {
			break;
		}

		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* Fault-management: verify register accesses above didn't fault. */
	if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
	    != DDI_FM_OK) {
		qlc_fm_report_err_impact(ha,
		    QL_FM_EREPORT_ACC_HANDLE_CHECK);
	}

	/* Release ring specific lock */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");
}
274
275 /*
276 * ql_req_pkt
277 * Function is responsible for locking ring and
278 * getting a zeroed out request packet.
279 *
280 * Input:
281 * ha: adapter state pointer.
282 * req_q: request queue structure pointer.
283 * pkt: address for packet pointer.
284 *
285 * Returns:
286 * ql local function return status code.
287 *
288 * Context:
289 * Interrupt or Kernel context, no mailbox commands allowed.
290 */
291 static int
ql_req_pkt(ql_adapter_state_t * vha,ql_request_q_t * req_q,request_t ** pktp)292 ql_req_pkt(ql_adapter_state_t *vha, ql_request_q_t *req_q, request_t **pktp)
293 {
294 uint16_t cnt;
295 uint64_t *ptr64;
296 uint32_t timer;
297 int rval = QL_FUNCTION_TIMEOUT;
298 ql_adapter_state_t *ha = vha->pha;
299
300 QL_PRINT_3(ha, "started\n");
301
302 /* Wait for 30 seconds for slot. */
303 for (timer = 30000; timer != 0; timer--) {
304 /* Acquire ring lock. */
305 REQUEST_RING_LOCK(ha);
306
307 if (req_q->req_q_cnt == 0) {
308 /* Calculate number of free request entries. */
309 if (ha->flags & QUEUE_SHADOW_PTRS) {
310 (void) ddi_dma_sync(req_q->req_ring.dma_handle,
311 (off_t)req_q->req_out_shadow_ofst,
312 SHADOW_ENTRY_SIZE, DDI_DMA_SYNC_FORCPU);
313 cnt = ddi_get32(req_q->req_ring.acc_handle,
314 req_q->req_out_shadow_ptr);
315 } else if (ha->flags & MULTI_QUEUE) {
316 cnt = RD16_MBAR_REG(ha, req_q->mbar_req_out);
317 } else {
318 cnt = RD16_IO_REG(ha, req_out);
319 }
320 if (req_q->req_ring_index < cnt) {
321 req_q->req_q_cnt = (uint16_t)
322 (cnt - req_q->req_ring_index);
323 } else {
324 req_q->req_q_cnt = (uint16_t)
325 (REQUEST_ENTRY_CNT -
326 (req_q->req_ring_index - cnt));
327 }
328 if (req_q->req_q_cnt != 0) {
329 req_q->req_q_cnt--;
330 }
331 }
332
333 /* Found empty request ring slot? */
334 if (req_q->req_q_cnt != 0) {
335 req_q->req_q_cnt--;
336 *pktp = req_q->req_ring_ptr;
337
338 /* Zero out packet. */
339 ptr64 = (uint64_t *)req_q->req_ring_ptr;
340 *ptr64++ = 0; *ptr64++ = 0;
341 *ptr64++ = 0; *ptr64++ = 0;
342 *ptr64++ = 0; *ptr64++ = 0;
343 *ptr64++ = 0; *ptr64 = 0;
344
345 /* Setup IOCB common data. */
346 req_q->req_ring_ptr->entry_count = 1;
347 req_q->req_ring_ptr->sys_define =
348 (uint8_t)req_q->req_ring_index;
349 ddi_put32(req_q->req_ring.acc_handle,
350 &req_q->req_ring_ptr->handle,
351 (uint32_t)QL_FCA_BRAND);
352
353 rval = QL_SUCCESS;
354
355 break;
356 }
357
358 /* Release request queue lock. */
359 REQUEST_RING_UNLOCK(ha);
360
361 drv_usecwait(MILLISEC);
362
363 /* Check for pending interrupts. */
364 /*
365 * XXX protect interrupt routine from calling itself.
366 * Need to revisit this routine. So far we never
367 * hit this case as req slot was available
368 */
369 if ((!(curthread->t_flag & T_INTR_THREAD)) &&
370 INTERRUPT_PENDING(ha)) {
371 (void) ql_isr((caddr_t)ha);
372 INTR_LOCK(ha);
373 ha->intr_claimed = TRUE;
374 INTR_UNLOCK(ha);
375 }
376 }
377
378 if (rval != QL_SUCCESS) {
379 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
380 EL(ha, "failed, rval = %xh, isp_abort_needed\n", rval);
381 } else {
382 /*EMPTY*/
383 QL_PRINT_3(ha, "done\n");
384 }
385 return (rval);
386 }
387
388 /*
389 * ql_isp_cmd
390 * Function is responsible for modifying ISP input pointer.
391 * This action notifies the isp that a new request has been
392 * added to the request ring.
393 *
394 * Releases ring lock.
395 *
396 * Input:
397 * vha: adapter state pointer.
398 * req_q: request queue structure pointer.
399 *
400 * Context:
401 * Interrupt or Kernel context, no mailbox commands allowed.
402 */
403 static void
ql_isp_cmd(ql_adapter_state_t * vha,ql_request_q_t * req_q)404 ql_isp_cmd(ql_adapter_state_t *vha, ql_request_q_t *req_q)
405 {
406 ql_adapter_state_t *ha = vha->pha;
407
408 QL_PRINT_3(ha, "started\n");
409
410 QL_PRINT_5(ha, "req packet:\n");
411 QL_DUMP_5((uint8_t *)req_q->req_ring_ptr, 8, REQUEST_ENTRY_SIZE);
412
413 /* Sync DMA buffer. */
414 (void) ddi_dma_sync(req_q->req_ring.dma_handle,
415 (off_t)(req_q->req_ring_index * REQUEST_ENTRY_SIZE),
416 (size_t)REQUEST_ENTRY_SIZE, DDI_DMA_SYNC_FORDEV);
417
418 /* Adjust ring index. */
419 req_q->req_ring_index++;
420 if (req_q->req_ring_index == REQUEST_ENTRY_CNT) {
421 req_q->req_ring_index = 0;
422 req_q->req_ring_ptr = req_q->req_ring.bp;
423 } else {
424 req_q->req_ring_ptr++;
425 }
426
427 /* Set chip new ring index. */
428 if (ha->flags & MULTI_QUEUE) {
429 WR16_MBAR_REG(ha, req_q->mbar_req_in,
430 req_q->req_ring_index);
431 } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
432 ql_8021_wr_req_in(ha, req_q->req_ring_index);
433 } else {
434 WRT16_IO_REG(ha, req_in, req_q->req_ring_index);
435 }
436
437 /* Release ring lock. */
438 REQUEST_RING_UNLOCK(ha);
439
440 QL_PRINT_3(ha, "done\n");
441 }
442
443 /*
444 * ql_command_iocb
445 * Setup of command IOCB.
446 *
447 * Input:
448 * ha: adapter state pointer.
449 * req_q: request queue structure pointer.
450 * sp: srb structure pointer.
451 * arg: request queue packet.
452 *
453 * Context:
454 * Interrupt or Kernel context, no mailbox commands allowed.
455 */
void
ql_command_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
    void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd_entry_t		*pkt = arg;
	cmd_3_entry_t		*pkt3 = arg;

	QL_PRINT_3(ha, "started\n");

	/* Set LUN number */
	pkt->lun_l = LSB(sp->lun_queue->lun_no);
	pkt->lun_h = MSB(sp->lun_queue->lun_no);

	/* Set target ID (extended firmware uses 16-bit loop IDs). */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->target_l = LSB(tq->loop_id);
		pkt->target_h = MSB(tq->loop_id);
	} else {
		pkt->target_h = LSB(tq->loop_id);
	}

	/* Set tag queue control flags; simple tag is the default. */
	if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_HTAG);
	} else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_OTAG);
	/* else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) */
	} else {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_STAG);
	}

	/* Set ISP command timeout. */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Load SCSI CDB */
	ddi_rep_put8(req_q->req_ring.acc_handle, fcp->fcp_cdb,
	    pkt->scsi_cdb, MAX_CMDSZ, DDI_DEV_AUTOINCR);

	/* Non-data command: type 2 entry with no data segments. */
	if (fcp->fcp_data_len == 0) {
		QL_PRINT_3(ha, "done\n");
		pkt->entry_type = IOCB_CMD_TYPE_2;
		ha->xioctl->IOControlRequests++;
		return;
	}

	/*
	 * Set transfer direction. Load Data segments.
	 */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_OUT);
		ha->xioctl->IOOutputRequests++;
		ha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_IN);
		ha->xioctl->IOInputRequests++;
		ha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->byte_count,
	    fcp->fcp_data_len);

	/* Load command data segment. */
	cp = sp->pkt->pkt_data_cookie;

	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		/* Type 3 entry: 64-bit (address, address-high, length). */
		pkt3->entry_type = IOCB_CMD_TYPE_3;
		cnt = CMD_TYPE_3_DATA_SEGMENTS;

		ptr32 = (uint32_t *)&pkt3->dseg;
		while (cnt && seg_cnt) {
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    cp->dmac_address);
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    cp->dmac_notused);
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    (uint32_t)cp->dmac_size);
			seg_cnt--;
			cnt--;
			cp++;
		}
	} else {
		/* Type 2 entry: 32-bit (address, length) descriptors. */
		pkt->entry_type = IOCB_CMD_TYPE_2;
		cnt = CMD_TYPE_2_DATA_SEGMENTS;

		ptr32 = (uint32_t *)&pkt->dseg;
		while (cnt && seg_cnt) {
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    cp->dmac_address);
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    (uint32_t)cp->dmac_size);
			seg_cnt--;
			cnt--;
			cp++;
		}
	}

	/*
	 * Build continuation packets for any cookies that did not fit
	 * in the command entry itself.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, req_q, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(ha, "done\n");
}
577
578 /*
579 * ql_continuation_iocb
580 * Setup of continuation IOCB.
581 *
582 * Input:
583 * ha: adapter state pointer.
584 * req_q: request queue structure pointer.
585 * cp: cookie list pointer.
586 * seg_cnt: number of segments.
587 * addr64: 64 bit addresses.
588 *
589 * Context:
590 * Interrupt or Kernel context, no mailbox commands allowed.
591 */
592 /* ARGSUSED */
static void
ql_continuation_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q,
    ddi_dma_cookie_t *cp, uint16_t seg_cnt, boolean_t addr64)
{
	cont_entry_t		*pkt;
	cont_type_1_entry_t	*pkt1;
	uint64_t		*ptr64;
	uint32_t		*ptr32, cnt;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Build continuation packets.
	 *
	 * The caller (ql_start_iocb path) has already reserved enough
	 * ring entries for the whole IOCB chain, so each step below may
	 * advance the ring without re-checking for free space.
	 */
	while (seg_cnt) {
		/* Sync the previous entry out to the DMA buffer. */
		(void) ddi_dma_sync(req_q->req_ring.dma_handle,
		    (off_t)(req_q->req_ring_index * REQUEST_ENTRY_SIZE),
		    REQUEST_ENTRY_SIZE, DDI_DMA_SYNC_FORDEV);

		/* Adjust ring pointer, and deal with wrap. */
		req_q->req_ring_index++;
		if (req_q->req_ring_index == REQUEST_ENTRY_CNT) {
			req_q->req_ring_index = 0;
			req_q->req_ring_ptr = req_q->req_ring.bp;
		} else {
			req_q->req_ring_ptr++;
		}
		pkt = (cont_entry_t *)req_q->req_ring_ptr;
		pkt1 = (cont_type_1_entry_t *)req_q->req_ring_ptr;

		/* Zero out packet (one 64-byte request entry). */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/*
		 * Build continuation packet.
		 */
		pkt->entry_count = 1;
		pkt->sys_define = (uint8_t)req_q->req_ring_index;
		if (addr64) {
			/* Type 1: 64-bit (addr-lo, addr-hi, len) triplets. */
			pkt1->entry_type = CONTINUATION_TYPE_1;
			cnt = CONT_TYPE_1_DATA_SEGMENTS;
			ptr32 = (uint32_t *)&pkt1->dseg;
			while (cnt && seg_cnt) {
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    cp->dmac_notused);
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		} else {
			/* Type 0: 32-bit (addr, len) pairs. */
			pkt->entry_type = CONTINUATION_TYPE_0;
			cnt = CONT_TYPE_0_DATA_SEGMENTS;
			ptr32 = (uint32_t *)&pkt->dseg;
			while (cnt && seg_cnt) {
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		}

		QL_PRINT_5(ha, "packet:\n");
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
	}

	QL_PRINT_3(ha, "done\n");
}
672
673 /*
674 * ql_command_24xx_iocb
675 * Setup of ISP24xx command IOCB.
676 *
677 * Input:
678 * ha: adapter state pointer.
679 * req_q: request queue structure pointer.
680 * sp: srb structure pointer.
681 * arg: request queue packet.
682 *
683 * Context:
684 * Interrupt or Kernel context, no mailbox commands allowed.
685 */
void
ql_command_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q,
    ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd7_24xx_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;
	fcp_ent_addr_t		*fcp_ent_addr;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Multi-cookie data commands with a scatter/gather DMA buffer
	 * are built as command type 6 (indirect cookie list) instead.
	 */
	if (fcp->fcp_data_len != 0 && sp->sg_dma.dma_handle != NULL &&
	    sp->pkt->pkt_data_cookie_cnt > 1) {
		ql_cmd_24xx_type_6_iocb(ha, req_q, sp, arg);
		QL_PRINT_3(ha, "cmd6 exit\n");
		return;
	}

	pkt->entry_type = IOCB_CMD_TYPE_7;

	/*
	 * Set LUN number - the 8-byte FCP LUN address is byte-swapped
	 * into the firmware's expected layout.
	 */
	fcp_ent_addr = (fcp_ent_addr_t *)&sp->lun_queue->lun_addr;
	pkt->fcp_lun[2] = lobyte(fcp_ent_addr->ent_addr_0);
	pkt->fcp_lun[3] = hibyte(fcp_ent_addr->ent_addr_0);
	pkt->fcp_lun[0] = lobyte(fcp_ent_addr->ent_addr_1);
	pkt->fcp_lun[1] = hibyte(fcp_ent_addr->ent_addr_1);
	pkt->fcp_lun[6] = lobyte(fcp_ent_addr->ent_addr_2);
	pkt->fcp_lun[7] = hibyte(fcp_ent_addr->ent_addr_2);
	pkt->fcp_lun[4] = lobyte(fcp_ent_addr->ent_addr_3);
	pkt->fcp_lun[5] = hibyte(fcp_ent_addr->ent_addr_3);

	/* Set N_port handle */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID (24-bit D_ID, little-end first). */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout.
	 * Values >= 0x1999 are left at 0 (no firmware timeout) -
	 * presumably a firmware limit; confirm against the ISP24xx spec.
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB; firmware wants it in big-endian 32-bit words. */
	ddi_rep_put8(req_q->req_ring.acc_handle, fcp->fcp_cdb, pkt->scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
	for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
		ql_chg_endian((uint8_t *)&pkt->scsi_cdb + cnt, 4);
	}

	/*
	 * Set tag queue control flags
	 * Note:
	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 *	problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		pkt->task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		pkt->task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		pkt->task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		pkt->task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		pkt->task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	/* Non-data command: done, no data segments. */
	if (fcp->fcp_data_len == 0) {
		QL_PRINT_3(ha, "done\n");
		pha->xioctl->IOControlRequests++;
		return;
	}

	/* Set transfer direction. */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);

	/* Load command data segment (type 7 holds exactly one). */
	ptr32 = (uint32_t *)&pkt->dseg;
	cp = sp->pkt->pkt_data_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for the remaining cookies
	 * (always 64-bit descriptors on ISP24xx).
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, req_q, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(ha, "done\n");
}
813
814 /*
815 * ql_cmd_24xx_type_6_iocb
816 * Setup of ISP24xx command type 6 IOCB.
817 *
818 * Input:
819 * ha: adapter state pointer.
820 * req_q: request queue structure pointer.
821 * sp: srb structure pointer.
822 * arg: request queue packet.
823 *
824 * Context:
825 * Interrupt or Kernel context, no mailbox commands allowed.
826 */
static void
ql_cmd_24xx_type_6_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q,
    ql_srb_t *sp, void *arg)
{
	uint64_t		addr;
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd6_24xx_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;
	dma_mem_t		*cmem = &sp->sg_dma;
	cmd6_2400_dma_t		*cdma = cmem->bp;
	fcp_ent_addr_t		*fcp_ent_addr;

	QL_PRINT_3(ha, "started\n");

	pkt->entry_type = IOCB_CMD_TYPE_6;

	/*
	 * Type 6 keeps the FCP_CMND payload and the data-segment cookie
	 * list in a side DMA buffer (cmem/cdma); the ring entry itself
	 * only carries pointers to them.  LUN/tag data is written to
	 * both the ring entry and the DMA copy.
	 */
	bzero(cdma, sizeof (cmd6_2400_dma_t));

	/* Set LUN number (byte-swapped FCP LUN address, both copies). */
	fcp_ent_addr = (fcp_ent_addr_t *)&sp->lun_queue->lun_addr;
	pkt->fcp_lun[2] = cdma->cmd.fcp_lun[2] =
	    lobyte(fcp_ent_addr->ent_addr_0);
	pkt->fcp_lun[3] = cdma->cmd.fcp_lun[3] =
	    hibyte(fcp_ent_addr->ent_addr_0);
	pkt->fcp_lun[0] = cdma->cmd.fcp_lun[0] =
	    lobyte(fcp_ent_addr->ent_addr_1);
	pkt->fcp_lun[1] = cdma->cmd.fcp_lun[1] =
	    hibyte(fcp_ent_addr->ent_addr_1);
	pkt->fcp_lun[6] = cdma->cmd.fcp_lun[6] =
	    lobyte(fcp_ent_addr->ent_addr_2);
	pkt->fcp_lun[7] = cdma->cmd.fcp_lun[7] =
	    hibyte(fcp_ent_addr->ent_addr_2);
	pkt->fcp_lun[4] = cdma->cmd.fcp_lun[4] =
	    lobyte(fcp_ent_addr->ent_addr_3);
	pkt->fcp_lun[5] = cdma->cmd.fcp_lun[5] =
	    hibyte(fcp_ent_addr->ent_addr_3);

	/* Set N_port handle */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID (24-bit D_ID). */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout (skipped for values >= 0x1999 -
	 * presumably a firmware limit; confirm against the ISP24xx spec).
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB into the DMA copy of the FCP_CMND. */
	ddi_rep_put8(cmem->acc_handle, fcp->fcp_cdb, cdma->cmd.scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);

	/*
	 * Set tag queue control flags
	 * Note:
	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 *	problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		cdma->cmd.task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		cdma->cmd.task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		cdma->cmd.task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		cdma->cmd.task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		cdma->cmd.task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	/*
	 * FCP_CMND Payload Data Segment - points at the FCP_CMND copy
	 * at the start of the side DMA buffer.
	 */
	cp = cmem->cookies;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->cmnd_length,
	    sizeof (fcp_cmnd_t));
	ddi_put32(req_q->req_ring.acc_handle, &pkt->cmnd_address[0],
	    cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, &pkt->cmnd_address[1],
	    cp->dmac_notused);

	/* Set transfer direction (CF_DSD_PTR: dseg points at a list). */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = (uint8_t)(CF_DSD_PTR | CF_WR);
		cdma->cmd.control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = (uint8_t)(CF_DSD_PTR | CF_RD);
		cdma->cmd.control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/*
	 * FCP_DATA Data Segment Descriptor.
	 * The cookie list lives immediately after the FCP_CMND copy.
	 */
	addr = cp->dmac_laddress + sizeof (fcp_cmnd_t);
	ddi_put32(req_q->req_ring.acc_handle, &pkt->dseg.address[0], LSD(addr));
	ddi_put32(req_q->req_ring.acc_handle, &pkt->dseg.address[1], MSD(addr));

	/*
	 * Set data segment count and the byte length of the cookie list
	 * (12 bytes per 64-bit descriptor, plus 12 - presumably a
	 * terminator entry; confirm against the ISP24xx firmware spec).
	 */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);
	ddi_put32(req_q->req_ring.acc_handle, &pkt->dseg.length,
	    seg_cnt * 12 + 12);

	/* Load total byte count (dl in the FCP_CMND is big-endian). */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);
	ddi_put32(cmem->acc_handle, &cdma->cmd.dl, (uint32_t)fcp->fcp_data_len);
	ql_chg_endian((uint8_t *)&cdma->cmd.dl, 4);

	/* Load command data segments into the side-buffer cookie list. */
	ptr32 = (uint32_t *)cdma->cookie_list;
	cp = sp->pkt->pkt_data_cookie;
	while (seg_cnt--) {
		ddi_put32(cmem->acc_handle, ptr32++, cp->dmac_address);
		ddi_put32(cmem->acc_handle, ptr32++, cp->dmac_notused);
		ddi_put32(cmem->acc_handle, ptr32++, (uint32_t)cp->dmac_size);
		cp++;
	}

	/* Sync DMA buffer so the firmware sees the FCP_CMND + list. */
	(void) ddi_dma_sync(cmem->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

	QL_PRINT_3(ha, "done\n");
}
972
973 /*
974 * ql_marker
975 * Function issues marker IOCB.
976 *
977 * Input:
978 * ha: adapter state pointer.
979 * loop_id: device loop ID
980 * lq: LUN queue pointer.
981 * type: marker modifier
982 *
983 * Returns:
984 * ql local function return status code.
985 *
986 * Context:
987 * Interrupt or Kernel context, no mailbox commands allowed.
988 */
989 int
ql_marker(ql_adapter_state_t * ha,uint16_t loop_id,ql_lun_t * lq,uint8_t type)990 ql_marker(ql_adapter_state_t *ha, uint16_t loop_id, ql_lun_t *lq,
991 uint8_t type)
992 {
993 mrk_entry_t *pkt;
994 int rval;
995 ql_request_q_t *req_q = ha->req_q[0];
996 fcp_ent_addr_t *fcp_ent_addr;
997
998 QL_PRINT_3(ha, "started\n");
999
1000 rval = ql_req_pkt(ha, req_q, (request_t **)&pkt);
1001 if (rval == QL_SUCCESS) {
1002 pkt->entry_type = MARKER_TYPE;
1003
1004 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
1005 marker_24xx_entry_t *pkt24 =
1006 (marker_24xx_entry_t *)pkt;
1007
1008 pkt24->modifier = type;
1009
1010 /* Set LUN number */
1011 if (lq) {
1012 fcp_ent_addr = (fcp_ent_addr_t *)&lq->lun_addr;
1013 pkt24->fcp_lun[2] =
1014 lobyte(fcp_ent_addr->ent_addr_0);
1015 pkt24->fcp_lun[3] =
1016 hibyte(fcp_ent_addr->ent_addr_0);
1017 pkt24->fcp_lun[0] =
1018 lobyte(fcp_ent_addr->ent_addr_1);
1019 pkt24->fcp_lun[1] =
1020 hibyte(fcp_ent_addr->ent_addr_1);
1021 pkt24->fcp_lun[6] =
1022 lobyte(fcp_ent_addr->ent_addr_2);
1023 pkt24->fcp_lun[7] =
1024 hibyte(fcp_ent_addr->ent_addr_2);
1025 pkt24->fcp_lun[4] =
1026 lobyte(fcp_ent_addr->ent_addr_3);
1027 pkt24->fcp_lun[5] =
1028 hibyte(fcp_ent_addr->ent_addr_3);
1029 }
1030
1031 pkt24->vp_index = ha->vp_index;
1032
1033 /* Set N_port handle */
1034 ddi_put16(req_q->req_ring.acc_handle,
1035 &pkt24->n_port_hdl, loop_id);
1036
1037 } else {
1038 pkt->modifier = type;
1039
1040 if (lq) {
1041 pkt->lun_l = LSB(lq->lun_no);
1042 pkt->lun_h = MSB(lq->lun_no);
1043 }
1044
1045 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1046 pkt->target_l = LSB(loop_id);
1047 pkt->target_h = MSB(loop_id);
1048 } else {
1049 pkt->target_h = LSB(loop_id);
1050 }
1051 }
1052
1053 /* Issue command to ISP */
1054 ql_isp_cmd(ha, req_q);
1055 }
1056
1057 if (rval != QL_SUCCESS) {
1058 EL(ha, "failed, rval = %xh\n", rval);
1059 } else {
1060 /*EMPTY*/
1061 QL_PRINT_3(ha, "done\n");
1062 }
1063 return (rval);
1064 }
1065
1066 /*
1067 * ql_ms_iocb
1068 * Setup of name/management server IOCB.
1069 *
1070 * Input:
1071 * ha: adapter state pointer.
1072 * req_q: request queue structure pointer.
1073 * sp: srb structure pointer.
1074 * arg: request queue packet.
1075 *
1076 * Context:
1077 * Interrupt or Kernel context, no mailbox commands allowed.
1078 */
void
ql_ms_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
    void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ms_entry_t		*pkt = arg;

	QL_PRINT_3(ha, "started\n");
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = MS_TYPE;

	/* Set loop ID (extended firmware uses 16-bit loop IDs). */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set ISP command timeout. */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set cmd data segment count (the CT command is one segment). */
	pkt->cmd_dseg_count_l = 1;

	/*
	 * Set total data segment count:
	 * one command segment plus all response cookies.
	 */
	seg_cnt = (uint16_t)(sp->pkt->pkt_resp_cookie_cnt + 1);
	ddi_put16(req_q->req_ring.acc_handle, &pkt->total_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command data segment (64-bit addr + length). */
	ptr32 = (uint32_t *)&pkt->dseg;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
	seg_cnt--;

	/* Load first MS response entry data segment. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for any remaining response cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, req_q, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(ha, "done\n");
}
1147
/*
 * ql_ms_24xx_iocb
 *	Setup of name/management server IOCB for ISP24xx and later
 *	(CT pass-through entry).  Fills in N_port handle, VP index,
 *	timeout, byte counts and the first command/response DMA
 *	segments; remaining response cookies go into continuation IOCBs.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	req_q:	request queue structure pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
    void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ct_passthru_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;	/* physical (parent) adapter */

	QL_PRINT_3(ha, "started\n");
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = CT_PASSTHRU_TYPE;

	/* Set loop ID (full 16-bit N_port handle on 24xx). */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout.  NOTE(review): values >= 0x1999 leave
	 * the field zero — presumably letting firmware apply its own
	 * default; confirm against the 24xx IOCB specification.
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Set cmd/response data segment counts (command is one cookie). */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->cmd_dseg_count, 1);
	seg_cnt = (uint16_t)sp->pkt->pkt_resp_cookie_cnt;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->resp_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/*
	 * Load MS command entry data segments: addr-lo, addr-hi, length —
	 * this ddi_put32 order is the hardware layout; do not reorder.
	 */
	ptr32 = (uint32_t *)&pkt->dseg;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, (uint32_t)cp->dmac_size);

	/* Load MS response entry data segments (first response cookie). */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for leftover response cookies.
	 * NOTE(review): continuation is issued against the physical
	 * adapter (pha), unlike ql_ms_iocb which uses ha — presumably
	 * because the request ring belongs to the parent; confirm.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, req_q, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(ha, "done\n");
}
1227
1228 /*
1229 * ql_ip_iocb
1230 * Setup of IP IOCB.
1231 *
1232 * Input:
1233 * ha: adapter state pointer.
1234 * req_q: request queue structure pointer.
1235 * sp: srb structure pointer.
1236 * arg: request queue packet.
1237 *
1238 * Context:
1239 * Interrupt or Kernel context, no mailbox commands allowed.
1240 */
1241 void
ql_ip_iocb(ql_adapter_state_t * ha,ql_request_q_t * req_q,ql_srb_t * sp,void * arg)1242 ql_ip_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
1243 void *arg)
1244 {
1245 ddi_dma_cookie_t *cp;
1246 uint32_t *ptr32, cnt;
1247 uint16_t seg_cnt;
1248 ql_tgt_t *tq = sp->lun_queue->target_queue;
1249 ip_entry_t *pkt = arg;
1250 ip_a64_entry_t *pkt64 = arg;
1251
1252 QL_PRINT_3(ha, "started\n");
1253
1254 /* Set loop ID */
1255 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1256 pkt->loop_id_l = LSB(tq->loop_id);
1257 pkt->loop_id_h = MSB(tq->loop_id);
1258 } else {
1259 pkt->loop_id_h = LSB(tq->loop_id);
1260 }
1261
1262 /* Set control flags */
1263 pkt->control_flags_l = BIT_6;
1264 if (sp->pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
1265 pkt->control_flags_h = BIT_7;
1266 }
1267
1268 /* Set ISP command timeout. */
1269 ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout, sp->isp_timeout);
1270
1271 /* Set data segment count. */
1272 seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
1273 /* Load total byte count. */
1274 ddi_put32(req_q->req_ring.acc_handle, &pkt->byte_count,
1275 (uint32_t)sp->pkt->pkt_cmdlen);
1276 ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);
1277
1278 /*
1279 * Build command packet.
1280 */
1281
1282 /* Load command entry data segments. */
1283 cp = sp->pkt->pkt_cmd_cookie;
1284
1285 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1286 pkt64->entry_type = IP_A64_TYPE;
1287 cnt = IP_A64_DATA_SEGMENTS;
1288 ptr32 = (uint32_t *)&pkt64->dseg;
1289 while (cnt && seg_cnt) {
1290 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1291 cp->dmac_address);
1292 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1293 cp->dmac_notused);
1294 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1295 (uint32_t)cp->dmac_size);
1296 seg_cnt--;
1297 cnt--;
1298 cp++;
1299 }
1300 } else {
1301 pkt->entry_type = IP_TYPE;
1302 cnt = IP_DATA_SEGMENTS;
1303 ptr32 = (uint32_t *)&pkt->dseg;
1304 while (cnt && seg_cnt) {
1305 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1306 cp->dmac_address);
1307 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1308 (uint32_t)cp->dmac_size);
1309 seg_cnt--;
1310 cnt--;
1311 cp++;
1312 }
1313 }
1314
1315 /*
1316 * Build continuation packets.
1317 */
1318 if (seg_cnt) {
1319 ql_continuation_iocb(ha, req_q, cp, seg_cnt,
1320 (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
1321 }
1322
1323 QL_PRINT_3(ha, "done\n");
1324 }
1325
/*
 * ql_ip_24xx_iocb
 *	Setup of IP IOCB for ISP24xx.  Fills in the IP command entry with
 *	the N_port handle, timeout, byte/segment counts, control and frame
 *	header flags, plus the first DMA data segment; remaining cookies
 *	are emitted as continuation IOCBs.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	req_q:	request queue structure pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
    void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_cmd_entry_t		*pkt = arg;

	pkt->entry_type = IP_CMD_TYPE;

	QL_PRINT_3(ha, "started\n");

	/* Set N_port handle (field doubles as status on completion). */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->hdl_status, tq->loop_id);

	/*
	 * Set ISP command timeout.  NOTE(review): values >= 0x1999 leave
	 * the field zero — presumably deferring to the firmware default;
	 * confirm against the 24xx IOCB specification.
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout_hdl,
		    sp->isp_timeout);
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Set control flags */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->control_flags,
	    (uint16_t)(BIT_0));

	/* Set frame header control flags: single-sequence transfer. */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->frame_hdr_cntrl_flgs,
	    (uint16_t)(IPCF_LAST_SEQ | IPCF_FIRST_SEQ));

	/*
	 * Load the first command data segment: addr-lo, addr-hi, length —
	 * this ddi_put32 order is the hardware layout; do not reorder.
	 */
	ptr32 = (uint32_t *)&pkt->dseg;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for cookies beyond the first.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, req_q, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(ha, "done\n");
}
1395
/*
 * ql_isp_rcvbuf
 *	Locates free unsolicited buffers and places them on the receive
 *	buffer queue (pre-24xx path).  Reads the firmware's consumer index
 *	from mailbox 8 (with debounce) to size the free space, loads
 *	eligible IP buffers into the ring, then notifies the chip of the
 *	new producer index.  24xx adapters are redirected to
 *	ql_isp24xx_rcvbuf, which uses IOCBs instead of a ring.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_isp_rcvbuf(ql_adapter_state_t *ha)
{
	rcvbuf_t	*container;
	uint16_t	rcv_q_cnt;
	uint16_t	index = 0;
	uint16_t	index1 = 1;
	int		debounce_count = QL_MAX_DEBOUNCE;
	ql_srb_t	*sp;
	fc_unsol_buf_t	*ubp;
	int		ring_updated = FALSE;

	if (CFG_IST(ha, CFG_CTRL_24XX)) {
		ql_isp24xx_rcvbuf(ha);
		return;
	}

	QL_PRINT_3(ha, "started\n");

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Calculate number of free receive buffer entries.  The mailbox
	 * register is re-read until two consecutive reads agree (debounce),
	 * bounded by QL_MAX_DEBOUNCE attempts.
	 */
	index = RD16_IO_REG(ha, mailbox_out[8]);
	do {
		index1 = RD16_IO_REG(ha, mailbox_out[8]);
		if (index1 == index) {
			break;
		} else {
			index = index1;
		}
	} while (debounce_count--);

	/* debounce_count is -1 only when the retry budget was exhausted. */
	if (debounce_count < 0) {
		/* This should never happen */
		EL(ha, "max mb8 debounce retries exceeded\n");
	}

	/* Free slots between our producer index and the chip's consumer. */
	rcv_q_cnt = (uint16_t)(ha->rcvbuf_ring_index < index ?
	    index - ha->rcvbuf_ring_index : RCVBUF_CONTAINER_CNT -
	    (ha->rcvbuf_ring_index - index));

	/* Keep one slot open so a full ring is distinguishable from empty. */
	if (rcv_q_cnt == RCVBUF_CONTAINER_CNT) {
		rcv_q_cnt--;
	}

	/* Load all free buffers in ISP receive buffer ring. */
	index = 0;
	while (rcv_q_cnt > (uint16_t)0 && index < QL_UB_LIMIT) {
		/* Locate a buffer to give. */
		QL_UB_LOCK(ha);
		while (index < QL_UB_LIMIT) {
			ubp = ha->ub_array[index];
			if (ubp != NULL) {
				sp = ubp->ub_fca_private;
				/*
				 * Eligible: IP (802.2 SNAP) buffer, IP is
				 * up, buffer owned by the FCA and not
				 * already in use/being torn down.
				 */
				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
				    (ha->flags & IP_INITIALIZED) &&
				    (sp->flags & SRB_UB_IN_FCA) &&
				    (!(sp->flags & (SRB_UB_IN_ISP |
				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED)))) {
					sp->flags |= SRB_UB_IN_ISP;
					break;
				}
			}
			index++;
		}

		if (index < QL_UB_LIMIT) {
			rcv_q_cnt--;
			index++;
			container = ha->rcvbuf_ring_ptr;

			/*
			 * Build container: 64-bit buffer address as two
			 * 32-bit words plus the srb handle for completion
			 * lookup.
			 */
			ddi_put32(ha->rcv_ring.acc_handle,
			    (uint32_t *)(void *)&container->bufp[0],
			    sp->ub_buffer.cookie.dmac_address);

			ddi_put32(ha->rcv_ring.acc_handle,
			    (uint32_t *)(void *)&container->bufp[1],
			    sp->ub_buffer.cookie.dmac_notused);

			ddi_put16(ha->rcv_ring.acc_handle, &container->handle,
			    LSW(sp->handle));

			ha->ub_outcnt++;

			/* Adjust ring index, wrapping at the ring end. */
			ha->rcvbuf_ring_index++;
			if (ha->rcvbuf_ring_index == RCVBUF_CONTAINER_CNT) {
				ha->rcvbuf_ring_index = 0;
				ha->rcvbuf_ring_ptr = ha->rcv_ring.bp;
			} else {
				ha->rcvbuf_ring_ptr++;
			}

			ring_updated = TRUE;
		}
		QL_UB_UNLOCK(ha);
	}

	if (ring_updated) {
		/* Sync queue so the device sees the new containers. */
		(void) ddi_dma_sync(ha->rcv_ring.dma_handle, 0,
		    (size_t)RCVBUF_QUEUE_SIZE, DDI_DMA_SYNC_FORDEV);

		/* Set chip new ring index. */
		WRT16_IO_REG(ha, mailbox_in[8], ha->rcvbuf_ring_index);
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");
}
1523
/*
 * ql_isp24xx_rcvbuf
 *	Locates free unsolicited buffers and sends them to the adapter.
 *	On 24xx hardware, buffers are handed to firmware via
 *	IP_BUF_POOL_TYPE IOCBs (up to IP_POOL_BUFFERS containers per
 *	IOCB) rather than a dedicated receive-buffer ring.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_isp24xx_rcvbuf(ql_adapter_state_t *ha)
{
	rcvbuf_t		*container;
	uint16_t		index;
	ql_srb_t		*sp;
	fc_unsol_buf_t		*ubp;
	int			rval;
	ip_buf_pool_entry_t	*pkt = NULL;
	ql_request_q_t		*req_q = ha->req_q[0];

	QL_PRINT_3(ha, "started\n");

	for (;;) {
		/* Locate a buffer to give (same eligibility as pre-24xx). */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			ubp = ha->ub_array[index];
			if (ubp != NULL) {
				sp = ubp->ub_fca_private;
				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
				    (ha->flags & IP_INITIALIZED) &&
				    (sp->flags & SRB_UB_IN_FCA) &&
				    (!(sp->flags & (SRB_UB_IN_ISP |
				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED)))) {
					ha->ub_outcnt++;
					sp->flags |= SRB_UB_IN_ISP;
					break;
				}
			}
		}
		QL_UB_UNLOCK(ha);
		/* No eligible buffer found: done scanning. */
		if (index == QL_UB_LIMIT) {
			break;
		}

		/* Get IOCB packet for buffers (reused until full). */
		if (pkt == NULL) {
			rval = ql_req_pkt(ha, req_q, (request_t **)&pkt);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed, ql_req_pkt=%x\n", rval);
				/* Undo the claim made on 'sp' above. */
				QL_UB_LOCK(ha);
				ha->ub_outcnt--;
				sp->flags &= ~SRB_UB_IN_ISP;
				QL_UB_UNLOCK(ha);
				break;
			}
			pkt->entry_type = IP_BUF_POOL_TYPE;
			container = &pkt->buffers[0];
		}

		/*
		 * Build container: 64-bit buffer address as two 32-bit
		 * words plus the srb handle for completion lookup.
		 */
		ddi_put32(req_q->req_ring.acc_handle, &container->bufp[0],
		    sp->ub_buffer.cookie.dmac_address);
		ddi_put32(req_q->req_ring.acc_handle, &container->bufp[1],
		    sp->ub_buffer.cookie.dmac_notused);
		ddi_put16(req_q->req_ring.acc_handle, &container->handle,
		    LSW(sp->handle));

		pkt->buffer_count++;
		container++;

		/* IOCB full: issue it and start a new one next iteration. */
		if (pkt->buffer_count == IP_POOL_BUFFERS) {
			ql_isp_cmd(ha, req_q);
			pkt = NULL;
		}
	}

	/* Flush a partially filled IOCB, if any. */
	if (pkt != NULL) {
		ql_isp_cmd(ha, req_q);
	}

	QL_PRINT_3(ha, "done\n");
}
1611