1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2010 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 #pragma ident "Copyright 2010 QLogic Corporation; ql_init.c"
29
30 /*
31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 *
33 * ***********************************************************************
34 * * **
35 * * NOTICE **
36 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
37 * * ALL RIGHTS RESERVED **
38 * * **
39 * ***********************************************************************
40 *
41 */
42
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_init.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_mbx.h>
50 #include <ql_nx.h>
51 #include <ql_xioctl.h>
52
53 /*
54 * Local data
55 */
56
57 /*
58 * Local prototypes
59 */
60 static uint16_t ql_nvram_request(ql_adapter_state_t *, uint32_t);
61 static int ql_nvram_24xx_config(ql_adapter_state_t *);
62 static void ql_23_properties(ql_adapter_state_t *, nvram_t *);
63 static void ql_24xx_properties(ql_adapter_state_t *, nvram_24xx_t *);
64 static int ql_check_isp_firmware(ql_adapter_state_t *);
65 static int ql_chip_diag(ql_adapter_state_t *);
66 static int ql_load_flash_fw(ql_adapter_state_t *);
67 static int ql_configure_loop(ql_adapter_state_t *);
68 static int ql_configure_hba(ql_adapter_state_t *);
69 static int ql_configure_fabric(ql_adapter_state_t *);
70 static int ql_configure_device_d_id(ql_adapter_state_t *);
71 static void ql_set_max_read_req(ql_adapter_state_t *);
72 static void ql_configure_n_port_info(ql_adapter_state_t *);
73 static void ql_clear_mcp(ql_adapter_state_t *);
74 static void ql_mps_reset(ql_adapter_state_t *);
75
76 /*
77 * ql_initialize_adapter
78 * Initialize board.
79 *
80 * Input:
81 * ha = adapter state pointer.
82 *
83 * Returns:
84 * ql local function return status code.
85 *
86 * Context:
87 * Kernel context.
88 */
89 int
90 ql_initialize_adapter(ql_adapter_state_t *ha)
91 {
92 int rval;
93 class_svc_param_t *class3_param;
94 caddr_t msg;
95 la_els_logi_t *els = &ha->loginparams;
96 int retries = 5;
97
98 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
99
100 do {
101 /* Clear adapter flags. */
102 TASK_DAEMON_LOCK(ha);
103 ha->task_daemon_flags &= TASK_DAEMON_STOP_FLG |
104 TASK_DAEMON_SLEEPING_FLG | TASK_DAEMON_ALIVE_FLG |
105 TASK_DAEMON_IDLE_CHK_FLG;
106 ha->task_daemon_flags |= LOOP_DOWN;
107 TASK_DAEMON_UNLOCK(ha);
108
109 ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
110 ADAPTER_STATE_LOCK(ha);
111 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
112 ha->flags &= ~ONLINE;
113 ADAPTER_STATE_UNLOCK(ha);
114
115 ha->state = FC_STATE_OFFLINE;
116 msg = "Loop OFFLINE";
117
118 rval = ql_pci_sbus_config(ha);
119 if (rval != QL_SUCCESS) {
120 TASK_DAEMON_LOCK(ha);
121 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
122 EL(ha, "ql_pci_sbus_cfg, isp_abort_needed\n");
123 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
124 }
125 TASK_DAEMON_UNLOCK(ha);
126 continue;
127 }
128
129 (void) ql_setup_fcache(ha);
130
131 /* Reset ISP chip. */
132 ql_reset_chip(ha);
133
134 /* Get NVRAM configuration if needed. */
135 if (ha->init_ctrl_blk.cb.version == 0) {
136 (void) ql_nvram_config(ha);
137 }
138
139 /* Set login parameters. */
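/*
 * A note on the byte handling below (a sketch of the assumed macro
 * behavior): CHAR_TO_SHORT(lsb, msb) is expected to combine the two
 * NVRAM bytes into one 16-bit value, so max_frame_length bytes of
 * {0x00, 0x08} would yield an rx_bufsize of 0x0800 (2048).
 */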
140 if (CFG_IST(ha, CFG_CTRL_24258081)) {
141 els->common_service.rx_bufsize = CHAR_TO_SHORT(
142 ha->init_ctrl_blk.cb24.max_frame_length[0],
143 ha->init_ctrl_blk.cb24.max_frame_length[1]);
144 bcopy((void *)&ha->init_ctrl_blk.cb24.port_name[0],
145 (void *)&els->nport_ww_name.raw_wwn[0], 8);
146 bcopy((void *)&ha->init_ctrl_blk.cb24.node_name[0],
147 (void *)&els->node_ww_name.raw_wwn[0], 8);
148 } else {
149 els->common_service.rx_bufsize = CHAR_TO_SHORT(
150 ha->init_ctrl_blk.cb.max_frame_length[0],
151 ha->init_ctrl_blk.cb.max_frame_length[1]);
152 bcopy((void *)&ha->init_ctrl_blk.cb.port_name[0],
153 (void *)&els->nport_ww_name.raw_wwn[0], 8);
154 bcopy((void *)&ha->init_ctrl_blk.cb.node_name[0],
155 (void *)&els->node_ww_name.raw_wwn[0], 8);
156 }
157 bcopy(QL_VERSION, ha->adapter_stats->revlvl.qlddv,
158 strlen(QL_VERSION));
159
160 /* Determine which RISC code to use. */
161 if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
162 if ((rval = ql_chip_diag(ha)) == QL_SUCCESS) {
163 rval = ql_load_isp_firmware(ha);
164 }
165 }
166
167 if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
168 QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS) {
169
170 (void) ql_fw_ready(ha, ha->fwwait);
171
172 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
173 ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
174 if (ha->topology & QL_LOOP_CONNECTION) {
175 ha->state = ha->state | FC_STATE_LOOP;
176 msg = "Loop ONLINE";
177 ha->task_daemon_flags |= STATE_ONLINE;
178 } else if (ha->topology & QL_P2P_CONNECTION) {
179 ha->state = ha->state |
180 FC_STATE_ONLINE;
181 msg = "Link ONLINE";
182 ha->task_daemon_flags |= STATE_ONLINE;
183 } else {
184 msg = "Unknown Link state";
185 }
186 }
187 } else {
188 TASK_DAEMON_LOCK(ha);
189 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
190 EL(ha, "failed, isp_abort_needed\n");
191 ha->task_daemon_flags |= ISP_ABORT_NEEDED |
192 LOOP_DOWN;
193 }
194 TASK_DAEMON_UNLOCK(ha);
195 }
196
197 } while (retries-- != 0 && ha->task_daemon_flags & ISP_ABORT_NEEDED);
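/*
 * The loop above re-runs the whole bring-up sequence whenever one of the
 * steps flagged ISP_ABORT_NEEDED, giving the adapter a limited number of
 * chances (retries starts at 5) before the final state is reported below.
 */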
198
199 cmn_err(CE_NOTE, "!Qlogic %s(%d): %s", QL_NAME, ha->instance, msg);
200
201 /* Enable ISP interrupts and login parameters. */
202 if (CFG_IST(ha, CFG_CTRL_8021)) {
203 ql_8021_enable_intrs(ha);
204 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
205 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
206 } else {
207 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
208 }
209
210 ADAPTER_STATE_LOCK(ha);
211 ha->flags |= (INTERRUPTS_ENABLED | ONLINE);
212 ADAPTER_STATE_UNLOCK(ha);
213
214 ha->task_daemon_flags &= ~(FC_STATE_CHANGE | RESET_MARKER_NEEDED |
215 COMMAND_WAIT_NEEDED);
216
217 /*
218 * Setup login parameters.
219 */
220 els->common_service.fcph_version = 0x2006;
221 els->common_service.btob_credit = 3;
222 els->common_service.cmn_features = 0x8800;
223 els->common_service.conc_sequences = 0xff;
224 els->common_service.relative_offset = 3;
225 els->common_service.e_d_tov = 0x07d0;
226
227 class3_param = (class_svc_param_t *)&els->class_3;
228 class3_param->class_valid_svc_opt = 0x8800;
229 class3_param->rcv_data_size = els->common_service.rx_bufsize;
230 class3_param->conc_sequences = 0xff;
231
232 if (rval != QL_SUCCESS) {
233 EL(ha, "failed, rval = %xh\n", rval);
234 } else {
235 /*EMPTY*/
236 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
237 }
238 return (rval);
239 }
240
241 /*
242 * ql_pci_sbus_config
243 * Setup device PCI/SBUS configuration registers.
244 *
245 * Input:
246 * ha = adapter state pointer.
247 *
248 * Returns:
249 * ql local function return status code.
250 *
251 * Context:
252 * Kernel context.
253 */
254 int
255 ql_pci_sbus_config(ql_adapter_state_t *ha)
256 {
257 uint32_t timer;
258 uint16_t cmd, w16;
259
260 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
261
262 if (CFG_IST(ha, CFG_SBUS_CARD)) {
263 w16 = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
264 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_REVISION));
265 EL(ha, "FPGA rev is %d.%d", (w16 & 0xf0) >> 4,
266 w16 & 0xf);
267 } else {
268 /*
269 * We want to respect the framework's setting of the PCI
270 * configuration space command register and also want to
271 * make sure that all bits of interest to us are properly
272 * set in the command register.
273 */
274 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
275 cmd = (uint16_t)(cmd | PCI_COMM_IO | PCI_COMM_MAE |
276 PCI_COMM_ME | PCI_COMM_MEMWR_INVAL |
277 PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
278
279 /*
280 * If this is a 2300 card and not 2312, reset the
281 * MEMWR_INVAL due to a bug in the 2300. Unfortunately, the
282 * 2310 also reports itself as a 2300 so we need to get the
283 * fb revision level -- a 6 indicates it really is a 2300 and
284 * not a 2310.
285 */
286
287 if (ha->device_id == 0x2300) {
288 /* Pause RISC. */
289 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
290 for (timer = 0; timer < 30000; timer++) {
291 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) !=
292 0) {
293 break;
294 } else {
295 drv_usecwait(MILLISEC);
296 }
297 }
298
299 /* Select FPM registers. */
300 WRT16_IO_REG(ha, ctrl_status, 0x20);
301
302 /* Get the fb rev level */
303 if (RD16_IO_REG(ha, fb_cmd) == 6) {
304 cmd = (uint16_t)(cmd & ~PCI_COMM_MEMWR_INVAL);
305 }
306
307 /* Deselect FPM registers. */
308 WRT16_IO_REG(ha, ctrl_status, 0x0);
309
310 /* Release RISC module. */
311 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
312 for (timer = 0; timer < 30000; timer++) {
313 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) ==
314 0) {
315 break;
316 } else {
317 drv_usecwait(MILLISEC);
318 }
319 }
320 } else if (ha->device_id == 0x2312) {
321 /*
322 * cPCI ISP2312 specific code to service function 1
323 * hot-swap registers.
324 */
325 if ((RD16_IO_REG(ha, ctrl_status) & ISP_FUNC_NUM_MASK)
326 != 0) {
327 ql_pci_config_put8(ha, 0x66, 0xc2);
328 }
329 }
330
331 if (!(CFG_IST(ha, CFG_CTRL_8021)) &&
332 ha->pci_max_read_req != 0) {
333 ql_set_max_read_req(ha);
334 }
335
336 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
337
338 /* Set cache line register. */
339 ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ, 0x10);
340
341 /* Set latency register. */
342 ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER, 0x40);
343
344 /* Reset expansion ROM address decode enable. */
345 w16 = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_ROM);
346 w16 = (uint16_t)(w16 & ~BIT_0);
347 ql_pci_config_put16(ha, PCI_CONF_ROM, w16);
348 }
349
350 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
351
352 return (QL_SUCCESS);
353 }
354
355 /*
356 * Set the PCI max read request value.
357 *
358 * Input:
359 * ha: adapter state pointer.
360 *
361 * Output:
362 * none.
363 *
364 * Returns:
365 *
366 * Context:
367 * Kernel context.
368 */
369
370 static void
371 ql_set_max_read_req(ql_adapter_state_t *ha)
372 {
373 uint16_t read_req, w16;
374 uint16_t tmp = ha->pci_max_read_req;
375
376 if ((ha->device_id == 0x2422) ||
377 ((ha->device_id & 0xff00) == 0x2300)) {
378 /* check for valid override value */
379 if (tmp == 512 || tmp == 1024 || tmp == 2048 ||
380 tmp == 4096) {
381 /* shift away the don't cares */
382 tmp = (uint16_t)(tmp >> 10);
383 /* convert bit pos to request value */
384 for (read_req = 0; tmp != 0; read_req++) {
385 tmp = (uint16_t)(tmp >> 1);
386 }
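/*
 * Worked example (illustrative values): an override of 2048 gives
 * tmp = 2048 >> 10 = 2; two right shifts reach zero, so read_req = 2
 * is programmed into bits <3:2> of config space offset 0x4e below,
 * which appears to follow the PCI-X 0=512 ... 3=4096 encoding.
 */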
387 w16 = (uint16_t)ql_pci_config_get16(ha, 0x4e);
388 w16 = (uint16_t)(w16 & ~(BIT_3 | BIT_2));
389 w16 = (uint16_t)(w16 | (read_req << 2));
390 ql_pci_config_put16(ha, 0x4e, w16);
391 } else {
392 EL(ha, "invalid parameter value for "
393 "'pci-max-read-request': %d; using system "
394 "default\n", tmp);
395 }
396 } else if ((ha->device_id == 0x2432) || ((ha->device_id & 0xff00) ==
397 0x2500) || (ha->device_id == 0x8432)) {
398 /* check for valid override value */
399 if (tmp == 128 || tmp == 256 || tmp == 512 ||
400 tmp == 1024 || tmp == 2048 || tmp == 4096) {
401 /* shift away the don't cares */
402 tmp = (uint16_t)(tmp >> 8);
403 /* convert bit pos to request value */
404 for (read_req = 0; tmp != 0; read_req++) {
405 tmp = (uint16_t)(tmp >> 1);
406 }
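/*
 * Illustrative example: an override of 512 gives tmp = 512 >> 8 = 2,
 * so read_req = 2 is written into bits <14:12> of config space offset
 * 0x54, which appears to follow the standard PCIe max read request
 * encoding (000b=128 bytes up through 101b=4096 bytes).
 */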
407 w16 = (uint16_t)ql_pci_config_get16(ha, 0x54);
408 w16 = (uint16_t)(w16 & ~(BIT_14 | BIT_13 |
409 BIT_12));
410 w16 = (uint16_t)(w16 | (read_req << 12));
411 ql_pci_config_put16(ha, 0x54, w16);
412 } else {
413 EL(ha, "invalid parameter value for "
414 "'pci-max-read-request': %d; using system "
415 "default\n", tmp);
416 }
417 }
418 }
419
420 /*
421 * NVRAM configuration.
422 *
423 * Input:
424 * ha: adapter state pointer.
425 * ha->hba_buf = request and response rings
426 *
427 * Output:
428 * ha->init_ctrl_blk = initialization control block
429 * host adapter parameters in host adapter block
430 *
431 * Returns:
432 * ql local function return status code.
433 *
434 * Context:
435 * Kernel context.
436 */
437 int
438 ql_nvram_config(ql_adapter_state_t *ha)
439 {
440 uint32_t cnt;
441 caddr_t dptr1, dptr2;
442 ql_init_cb_t *icb = &ha->init_ctrl_blk.cb;
443 ql_ip_init_cb_t *ip_icb = &ha->ip_init_ctrl_blk.cb;
444 nvram_t *nv = (nvram_t *)ha->request_ring_bp;
445 uint16_t *wptr = (uint16_t *)ha->request_ring_bp;
446 uint8_t chksum = 0;
447 int rval;
448 int idpromlen;
449 char idprombuf[32];
450 uint32_t start_addr;
451
452 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
453
454 if (CFG_IST(ha, CFG_CTRL_24258081)) {
455 return (ql_nvram_24xx_config(ha));
456 }
457
458 start_addr = 0;
459 if ((rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) ==
460 QL_SUCCESS) {
461 /* Verify valid NVRAM checksum. */
462 for (cnt = 0; cnt < sizeof (nvram_t)/2; cnt++) {
463 *wptr = (uint16_t)ql_get_nvram_word(ha,
464 (uint32_t)(cnt + start_addr));
465 chksum = (uint8_t)(chksum + (uint8_t)*wptr);
466 chksum = (uint8_t)(chksum + (uint8_t)(*wptr >> 8));
467 wptr++;
468 }
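/*
 * If the image is intact the byte-wise sum wraps to zero, on the
 * presumption that the stored checksum byte is the two's complement of
 * the remaining bytes; any nonzero result marks the NVRAM data bad below.
 */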
469 ql_release_nvram(ha);
470 }
471
472 /* Bad NVRAM data, set default parameters. */
473 if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
474 nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
475 nv->nvram_version < 1) {
476
477 EL(ha, "failed, rval=%xh, checksum=%xh, "
478 "id=%02x%02x%02x%02xh, flsz=%xh, pciconfvid=%xh, "
479 "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
480 nv->id[2], nv->id[3], ha->xioctl->fdesc.flash_size,
481 ha->subven_id, nv->nvram_version);
482
483 /* Don't print nvram message if it's an on-board 2200 */
484 if (!((CFG_IST(ha, CFG_CTRL_2200)) &&
485 (ha->xioctl->fdesc.flash_size == 0))) {
486 cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed,"
487 " using driver defaults.", QL_NAME, ha->instance);
488 }
489
490 /* Reset NVRAM data. */
491 bzero((void *)nv, sizeof (nvram_t));
492
493 /*
494 * Set default initialization control block.
495 */
496 nv->parameter_block_version = ICB_VERSION;
497 nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
498 nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
499
500 nv->max_frame_length[1] = 4;
501
502 /*
503 * Allow 2048 byte frames for 2300
504 */
505 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
506 nv->max_frame_length[1] = 8;
507 }
508 nv->max_iocb_allocation[1] = 1;
509 nv->execution_throttle[0] = 16;
510 nv->login_retry_count = 8;
511
512 idpromlen = 32;
513
514 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
515 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
516 DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
517 &idpromlen) != DDI_PROP_SUCCESS) {
518
519 QL_PRINT_3(CE_CONT, "(%d): Unable to read idprom "
520 "property\n", ha->instance);
521 cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
522 "property", QL_NAME, ha->instance);
523
524 nv->port_name[2] = 33;
525 nv->port_name[3] = 224;
526 nv->port_name[4] = 139;
527 nv->port_name[7] = (uint8_t)
528 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
529 } else {
530
531 nv->port_name[2] = idprombuf[2];
532 nv->port_name[3] = idprombuf[3];
533 nv->port_name[4] = idprombuf[4];
534 nv->port_name[5] = idprombuf[5];
535 nv->port_name[6] = idprombuf[6];
536 nv->port_name[7] = idprombuf[7];
537 nv->port_name[0] = (uint8_t)
538 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
539 }
540
541 /* Don't print nvram message if it's an on-board 2200 */
542 if (!(CFG_IST(ha, CFG_CTRL_2200)) &&
543 (ha->xioctl->fdesc.flash_size == 0)) {
544 cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using"
545 " default HBA parameters and temporary WWPN:"
546 " %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
547 ha->instance, nv->port_name[0], nv->port_name[1],
548 nv->port_name[2], nv->port_name[3],
549 nv->port_name[4], nv->port_name[5],
550 nv->port_name[6], nv->port_name[7]);
551 }
552
553 nv->login_timeout = 4;
554
555 /* Set default connection options for the 23xx to 2 */
556 if (!(CFG_IST(ha, CFG_CTRL_2200))) {
557 nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
558 BIT_5);
559 }
560
561 /*
562 * Set default host adapter parameters
563 */
564 nv->host_p[0] = BIT_1;
565 nv->host_p[1] = BIT_2;
566 nv->reset_delay = 5;
567 nv->port_down_retry_count = 8;
568 nv->maximum_luns_per_target[0] = 8;
569
570 rval = QL_FUNCTION_FAILED;
571 }
572
573 /* Check for adapter node name (big endian). */
574 for (cnt = 0; cnt < 8; cnt++) {
575 if (nv->node_name[cnt] != 0) {
576 break;
577 }
578 }
579
580 /* Copy port name if no node name (big endian). */
581 if (cnt == 8) {
582 bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
583 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
584 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
585 }
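/*
 * The derived names differ only in bit 0 of the first byte, e.g. a node
 * name beginning with 0x20 pairs with a port name beginning with 0x21,
 * so the WWNN and WWPN stay distinct even when built from the same bytes.
 */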
586
587 /* Reset initialization control blocks. */
588 bzero((void *)icb, sizeof (ql_init_cb_t));
589
590 /* Get driver properties. */
591 ql_23_properties(ha, nv);
592
593 cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
594 "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
595 QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
596 nv->port_name[2], nv->port_name[3], nv->port_name[4],
597 nv->port_name[5], nv->port_name[6], nv->port_name[7],
598 nv->node_name[0], nv->node_name[1], nv->node_name[2],
599 nv->node_name[3], nv->node_name[4], nv->node_name[5],
600 nv->node_name[6], nv->node_name[7]);
601
602 /*
603 * Copy over NVRAM RISC parameter block
604 * to initialization control block.
605 */
606 dptr1 = (caddr_t)icb;
607 dptr2 = (caddr_t)&nv->parameter_block_version;
608 cnt = (uint32_t)((uintptr_t)&icb->request_q_outpointer[0] -
609 (uintptr_t)&icb->version);
610 while (cnt-- != 0) {
611 *dptr1++ = *dptr2++;
612 }
613
614 /* Copy 2nd half. */
615 dptr1 = (caddr_t)&icb->add_fw_opt[0];
616 cnt = (uint32_t)((uintptr_t)&icb->reserved_3[0] -
617 (uintptr_t)&icb->add_fw_opt[0]);
618
619 while (cnt-- != 0) {
620 *dptr1++ = *dptr2++;
621 }
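/*
 * The copy is intentionally done in two pieces; the queue pointer and
 * queue address fields that lie between the two spans are left zeroed
 * here and are filled in from the DMA addresses further below.
 */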
622
623 /*
624 * Setup driver firmware options.
625 */
626 icb->firmware_options[0] = (uint8_t)
627 (icb->firmware_options[0] | BIT_6 | BIT_1);
628
629 /*
630 * There is no use enabling fast post for SBUS or 2300 cards.
631 * Always enable 64-bit addressing, except for SBUS cards.
632 */
633 ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
634 if (CFG_IST(ha, (CFG_SBUS_CARD | CFG_CTRL_2300 | CFG_CTRL_6322))) {
635 icb->firmware_options[0] = (uint8_t)
636 (icb->firmware_options[0] & ~BIT_3);
637 if (CFG_IST(ha, CFG_SBUS_CARD)) {
638 icb->special_options[0] = (uint8_t)
639 (icb->special_options[0] | BIT_5);
640 ha->cfg_flags &= ~CFG_ENABLE_64BIT_ADDRESSING;
641 }
642 } else {
643 icb->firmware_options[0] = (uint8_t)
644 (icb->firmware_options[0] | BIT_3);
645 }
646 /* RIO and ZIO not supported. */
647 icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] &
648 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
649
650 icb->firmware_options[1] = (uint8_t)(icb->firmware_options[1] |
651 BIT_7 | BIT_6 | BIT_5 | BIT_2 | BIT_0);
652 icb->firmware_options[0] = (uint8_t)
653 (icb->firmware_options[0] & ~(BIT_5 | BIT_4));
654 icb->firmware_options[1] = (uint8_t)
655 (icb->firmware_options[1] & ~BIT_4);
656
657 icb->add_fw_opt[1] = (uint8_t)(icb->add_fw_opt[1] & ~(BIT_5 | BIT_4));
658 icb->special_options[0] = (uint8_t)(icb->special_options[0] | BIT_1);
659
660 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
661 if ((icb->special_options[1] & 0x20) == 0) {
662 EL(ha, "50 ohm is not set\n");
663 }
664 }
665 icb->execution_throttle[0] = 0xff;
666 icb->execution_throttle[1] = 0xff;
667
668 if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
669 icb->firmware_options[1] = (uint8_t)
670 (icb->firmware_options[1] | BIT_7 | BIT_6);
671 icb->add_fw_opt[1] = (uint8_t)
672 (icb->add_fw_opt[1] | BIT_5 | BIT_4);
673 }
674
675 /*
676 * Set host adapter parameters
677 */
678 ADAPTER_STATE_LOCK(ha);
679 ha->nvram_version = nv->nvram_version;
680 ha->adapter_features = CHAR_TO_SHORT(nv->adapter_features[0],
681 nv->adapter_features[1]);
682
683 nv->host_p[0] & BIT_4 ? (ha->cfg_flags |= CFG_DISABLE_RISC_CODE_LOAD) :
684 (ha->cfg_flags &= ~CFG_DISABLE_RISC_CODE_LOAD);
685 nv->host_p[0] & BIT_5 ? (ha->cfg_flags |= CFG_SET_CACHE_LINE_SIZE_1) :
686 (ha->cfg_flags &= ~CFG_SET_CACHE_LINE_SIZE_1);
687
688 nv->host_p[1] & BIT_1 ? (ha->cfg_flags |= CFG_ENABLE_LIP_RESET) :
689 (ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET);
690 nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
691 (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
692 nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
693 (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
694
695 nv->adapter_features[0] & BIT_3 ?
696 (ha->cfg_flags |= CFG_MULTI_CHIP_ADAPTER) :
697 (ha->cfg_flags &= ~CFG_MULTI_CHIP_ADAPTER);
698
699 ADAPTER_STATE_UNLOCK(ha);
700
701 ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
702 nv->execution_throttle[1]);
703 ha->loop_reset_delay = nv->reset_delay;
704 ha->port_down_retry_count = nv->port_down_retry_count;
705 ha->r_a_tov = (uint16_t)(icb->login_timeout < R_A_TOV_DEFAULT ?
706 R_A_TOV_DEFAULT : icb->login_timeout);
707 ha->maximum_luns_per_target = CHAR_TO_SHORT(
708 nv->maximum_luns_per_target[0], nv->maximum_luns_per_target[1]);
709 if (ha->maximum_luns_per_target == 0) {
710 ha->maximum_luns_per_target++;
711 }
712
713 /*
714 * Setup ring parameters in initialization control block
715 */
716 cnt = REQUEST_ENTRY_CNT;
717 icb->request_q_length[0] = LSB(cnt);
718 icb->request_q_length[1] = MSB(cnt);
719 cnt = RESPONSE_ENTRY_CNT;
720 icb->response_q_length[0] = LSB(cnt);
721 icb->response_q_length[1] = MSB(cnt);
722
723 icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
724 icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
725 icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
726 icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
727 icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
728 icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
729 icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
730 icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
731
732 icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
733 icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
734 icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
735 icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
736 icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
737 icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
738 icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
739 icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
740
741 /*
742 * Setup IP initialization control block
743 */
744 ip_icb->version = IP_ICB_VERSION;
745
746 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
747 ip_icb->ip_firmware_options[0] = (uint8_t)
748 (ip_icb->ip_firmware_options[0] | BIT_2 | BIT_0);
749 } else {
750 ip_icb->ip_firmware_options[0] = (uint8_t)
751 (ip_icb->ip_firmware_options[0] | BIT_2);
752 }
753
754 cnt = RCVBUF_CONTAINER_CNT;
755 ip_icb->queue_size[0] = LSB(cnt);
756 ip_icb->queue_size[1] = MSB(cnt);
757
758 ip_icb->queue_address[0] = LSB(LSW(LSD(ha->rcvbuf_dvma)));
759 ip_icb->queue_address[1] = MSB(LSW(LSD(ha->rcvbuf_dvma)));
760 ip_icb->queue_address[2] = LSB(MSW(LSD(ha->rcvbuf_dvma)));
761 ip_icb->queue_address[3] = MSB(MSW(LSD(ha->rcvbuf_dvma)));
762 ip_icb->queue_address[4] = LSB(LSW(MSD(ha->rcvbuf_dvma)));
763 ip_icb->queue_address[5] = MSB(LSW(MSD(ha->rcvbuf_dvma)));
764 ip_icb->queue_address[6] = LSB(MSW(MSD(ha->rcvbuf_dvma)));
765 ip_icb->queue_address[7] = MSB(MSW(MSD(ha->rcvbuf_dvma)));
766
767 if (rval != QL_SUCCESS) {
768 EL(ha, "failed, rval = %xh\n", rval);
769 } else {
770 /*EMPTY*/
771 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
772 }
773 return (rval);
774 }
775
776 /*
777 * Get NVRAM data word
778 * Calculates word position in NVRAM and calls request routine to
779 * get the word from NVRAM.
780 *
781 * Input:
782 * ha = adapter state pointer.
783 * address = NVRAM word address.
784 *
785 * Returns:
786 * data word.
787 *
788 * Context:
789 * Kernel context.
790 */
791 uint16_t
792 ql_get_nvram_word(ql_adapter_state_t *ha, uint32_t address)
793 {
794 uint32_t nv_cmd;
795 uint16_t rval;
796
797 QL_PRINT_4(CE_CONT, "(%d): started\n", ha->instance);
798
799 nv_cmd = address << 16;
800 nv_cmd = nv_cmd | NV_READ_OP;
801
802 rval = (uint16_t)ql_nvram_request(ha, nv_cmd);
803
804 QL_PRINT_4(CE_CONT, "(%d): NVRAM data = %xh\n", ha->instance, rval);
805
806 return (rval);
807 }
808
809 /*
810 * NVRAM request
811 * Sends read command to NVRAM and gets data from NVRAM.
812 *
813 * Input:
814 * ha = adapter state pointer.
815 * nv_cmd = Bit 26 = start bit
816 * Bit 25, 24 = opcode
817 * Bit 23-16 = address
818 * Bit 15-0 = write data
819 *
820 * Returns:
821 * data word.
822 *
823 * Context:
824 * Kernel context.
825 */
826 static uint16_t
827 ql_nvram_request(ql_adapter_state_t *ha, uint32_t nv_cmd)
828 {
829 uint8_t cnt;
830 uint16_t reg_data;
831 uint16_t data = 0;
832
833 /* Send command to NVRAM. */
834
835 nv_cmd <<= 5;
836 for (cnt = 0; cnt < 11; cnt++) {
837 if (nv_cmd & BIT_31) {
838 ql_nv_write(ha, NV_DATA_OUT);
839 } else {
840 ql_nv_write(ha, 0);
841 }
842 nv_cmd <<= 1;
843 }
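/*
 * The earlier shift by 5 moves the start bit (bit 26 of nv_cmd) up to
 * bit 31, so the eleven iterations above clock out the start bit, the
 * two opcode bits and the eight address bits, most significant bit first.
 */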
844
845 /* Read data from NVRAM. */
846
847 for (cnt = 0; cnt < 16; cnt++) {
848 WRT16_IO_REG(ha, nvram, NV_SELECT+NV_CLOCK);
849 ql_nv_delay();
850 data <<= 1;
851 reg_data = RD16_IO_REG(ha, nvram);
852 if (reg_data & NV_DATA_IN) {
853 data = (uint16_t)(data | BIT_0);
854 }
855 WRT16_IO_REG(ha, nvram, NV_SELECT);
856 ql_nv_delay();
857 }
858
859 /* Deselect chip. */
860
861 WRT16_IO_REG(ha, nvram, NV_DESELECT);
862 ql_nv_delay();
863
864 return (data);
865 }
866
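/*
 * ql_nv_write
 *	Clocks one NVRAM data/control value out: the value is presented
 *	with the chip selected, then the clock line is raised and lowered
 *	while the data line is held steady.
 *
 * Input:
 *	ha = adapter state pointer.
 *	data = data bit to present (NV_SELECT is OR'd in by this routine).
 *
 * Context:
 *	Kernel context.
 */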
867 void
868 ql_nv_write(ql_adapter_state_t *ha, uint16_t data)
869 {
870 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
871 ql_nv_delay();
872 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT | NV_CLOCK));
873 ql_nv_delay();
874 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT));
875 ql_nv_delay();
876 }
877
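/*
 * ql_nv_delay
 *	Provides the bit-bang timing gap between NVRAM clock transitions
 *	(NV_DELAY_COUNT microseconds via drv_usecwait).
 *
 * Context:
 *	Kernel context.
 */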
878 void
879 ql_nv_delay(void)
880 {
881 drv_usecwait(NV_DELAY_COUNT);
882 }
883
884 /*
885 * ql_nvram_24xx_config
886 * ISP2400 nvram.
887 *
888 * Input:
889 * ha: adapter state pointer.
890 * ha->hba_buf = request and response rings
891 *
892 * Output:
893 * ha->init_ctrl_blk = initialization control block
894 * host adapter parameters in host adapter block
895 *
896 * Returns:
897 * ql local function return status code.
898 *
899 * Context:
900 * Kernel context.
901 */
902 int
903 ql_nvram_24xx_config(ql_adapter_state_t *ha)
904 {
905 uint32_t index, addr, chksum, saved_chksum;
906 uint32_t *longptr;
907 nvram_24xx_t nvram;
908 int idpromlen;
909 char idprombuf[32];
910 caddr_t src, dst;
911 uint16_t w1;
912 int rval;
913 nvram_24xx_t *nv = (nvram_24xx_t *)&nvram;
914 ql_init_24xx_cb_t *icb =
915 (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24;
916 ql_ip_init_24xx_cb_t *ip_icb = &ha->ip_init_ctrl_blk.cb24;
917
918 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
919
920 if ((rval = ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA)) == QL_SUCCESS) {
921
922 /* Get NVRAM data and calculate checksum. */
923 longptr = (uint32_t *)nv;
924 chksum = saved_chksum = 0;
925 for (index = 0; index < sizeof (nvram_24xx_t) / 4; index++) {
926 rval = ql_24xx_read_flash(ha, addr++, longptr);
927 if (rval != QL_SUCCESS) {
928 EL(ha, "24xx_read_flash failed=%xh\n", rval);
929 break;
930 }
931 saved_chksum = chksum;
932 chksum += *longptr;
933 LITTLE_ENDIAN_32(longptr);
934 longptr++;
935 }
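/*
 * saved_chksum holds the running sum excluding the last word read; its
 * two's complement taken below is the value the checksum word would have
 * needed for the whole image to sum to zero, and that is what gets logged
 * when the data is rejected.
 */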
936
937 ql_release_nvram(ha);
938 }
939
940 /* Bad NVRAM data, set default parameters. */
941 if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' ||
942 nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' ||
943 (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
944
945 cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed, using "
946 "driver defaults.", QL_NAME, ha->instance);
947
948 EL(ha, "failed, rval=%xh, checksum=%xh, id=%c%c%c%c, "
949 "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1],
950 nv->id[2], nv->id[3], CHAR_TO_SHORT(nv->nvram_version[0],
951 nv->nvram_version[1]));
952
953 saved_chksum = ~saved_chksum + 1;
954
955 (void) ql_flash_errlog(ha, FLASH_ERRLOG_NVRAM_CHKSUM_ERR, 0,
956 MSW(saved_chksum), LSW(saved_chksum));
957
958 /* Reset NVRAM data. */
959 bzero((void *)nv, sizeof (nvram_24xx_t));
960
961 /*
962 * Set default initialization control block.
963 */
964 nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
965 nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
966
967 nv->version[0] = 1;
968 nv->max_frame_length[1] = 8;
969 nv->execution_throttle[0] = 16;
970 nv->exchange_count[0] = 128;
971 nv->max_luns_per_target[0] = 8;
972
973 idpromlen = 32;
974
975 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
976 if (rval = ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip,
977 DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf,
978 &idpromlen) != DDI_PROP_SUCCESS) {
979
980 cmn_err(CE_WARN, "%s(%d) : Unable to read idprom "
981 "property, rval=%x", QL_NAME, ha->instance, rval);
982
983 nv->port_name[0] = 33;
984 nv->port_name[3] = 224;
985 nv->port_name[4] = 139;
986 nv->port_name[7] = (uint8_t)
987 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
988 } else {
989 nv->port_name[2] = idprombuf[2];
990 nv->port_name[3] = idprombuf[3];
991 nv->port_name[4] = idprombuf[4];
992 nv->port_name[5] = idprombuf[5];
993 nv->port_name[6] = idprombuf[6];
994 nv->port_name[7] = idprombuf[7];
995 nv->port_name[0] = (uint8_t)
996 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance);
997 }
998
999 cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using default "
1000 "HBA parameters and temporary "
1001 "WWPN: %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME,
1002 ha->instance, nv->port_name[0], nv->port_name[1],
1003 nv->port_name[2], nv->port_name[3], nv->port_name[4],
1004 nv->port_name[5], nv->port_name[6], nv->port_name[7]);
1005
1006 nv->login_retry_count[0] = 8;
1007
1008 nv->firmware_options_1[0] = BIT_2 | BIT_1;
1009 nv->firmware_options_1[1] = BIT_5;
1010 nv->firmware_options_2[0] = BIT_5;
1011 nv->firmware_options_2[1] = BIT_4;
1012 nv->firmware_options_3[1] = BIT_6;
1013
1014 /*
1015 * Set default host adapter parameters
1016 */
1017 nv->host_p[0] = BIT_4 | BIT_1;
1018 nv->host_p[1] = BIT_3 | BIT_2;
1019 nv->reset_delay = 5;
1020 nv->max_luns_per_target[0] = 128;
1021 nv->port_down_retry_count[0] = 30;
1022 nv->link_down_timeout[0] = 30;
1023
1024 if (CFG_IST(ha, CFG_CTRL_8081)) {
1025 nv->firmware_options_3[2] = BIT_4;
1026 nv->feature_mask_l[0] = 9;
1027 nv->ext_blk.version[0] = 1;
1028 nv->ext_blk.fcf_vlan_match = 1;
1029 nv->ext_blk.fcf_vlan_id[0] = LSB(1002);
1030 nv->ext_blk.fcf_vlan_id[1] = MSB(1002);
1031 nv->fw.isp8001.e_node_mac_addr[1] = 2;
1032 nv->fw.isp8001.e_node_mac_addr[2] = 3;
1033 nv->fw.isp8001.e_node_mac_addr[3] = 4;
1034 nv->fw.isp8001.e_node_mac_addr[4] = MSB(ha->instance);
1035 nv->fw.isp8001.e_node_mac_addr[5] = LSB(ha->instance);
1036 }
1037
1038 rval = QL_FUNCTION_FAILED;
1039 }
1040
1041 /* Check for adapter node name (big endian). */
1042 for (index = 0; index < 8; index++) {
1043 if (nv->node_name[index] != 0) {
1044 break;
1045 }
1046 }
1047
1048 /* Copy port name if no node name (big endian). */
1049 if (index == 8) {
1050 bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8);
1051 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
1052 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
1053 }
1054
1055 /* Reset initialization control blocks. */
1056 bzero((void *)icb, sizeof (ql_init_24xx_cb_t));
1057
1058 /* Get driver properties. */
1059 ql_24xx_properties(ha, nv);
1060
1061 cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x"
1062 "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
1063 QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1],
1064 nv->port_name[2], nv->port_name[3], nv->port_name[4],
1065 nv->port_name[5], nv->port_name[6], nv->port_name[7],
1066 nv->node_name[0], nv->node_name[1], nv->node_name[2],
1067 nv->node_name[3], nv->node_name[4], nv->node_name[5],
1068 nv->node_name[6], nv->node_name[7]);
1069
1070 /*
1071 * Copy over NVRAM Firmware Initialization Control Block.
1072 */
1073 dst = (caddr_t)icb;
1074 src = (caddr_t)&nv->version;
1075 index = (uint32_t)((uintptr_t)&icb->response_q_inpointer[0] -
1076 (uintptr_t)icb);
1077 while (index--) {
1078 *dst++ = *src++;
1079 }
1080 icb->login_retry_count[0] = nv->login_retry_count[0];
1081 icb->login_retry_count[1] = nv->login_retry_count[1];
1082 icb->link_down_on_nos[0] = nv->link_down_on_nos[0];
1083 icb->link_down_on_nos[1] = nv->link_down_on_nos[1];
1084
1085 dst = (caddr_t)&icb->interrupt_delay_timer;
1086 src = (caddr_t)&nv->interrupt_delay_timer;
1087 index = (uint32_t)((uintptr_t)&icb->qos -
1088 (uintptr_t)&icb->interrupt_delay_timer);
1089 while (index--) {
1090 *dst++ = *src++;
1091 }
1092
1093 /*
1094 * Setup driver firmware options.
1095 */
1096 if (CFG_IST(ha, CFG_CTRL_8081)) {
1097 dst = (caddr_t)icb->enode_mac_addr;
1098 src = (caddr_t)nv->fw.isp8001.e_node_mac_addr;
1099 index = sizeof (nv->fw.isp8001.e_node_mac_addr);
1100 while (index--) {
1101 *dst++ = *src++;
1102 }
1103 dst = (caddr_t)&icb->ext_blk;
1104 src = (caddr_t)&nv->ext_blk;
1105 index = sizeof (ql_ext_icb_8100_t);
1106 while (index--) {
1107 *dst++ = *src++;
1108 }
1109 EL(ha, "e_node_mac_addr=%02x-%02x-%02x-%02x-%02x-%02x\n",
1110 icb->enode_mac_addr[0], icb->enode_mac_addr[1],
1111 icb->enode_mac_addr[2], icb->enode_mac_addr[3],
1112 icb->enode_mac_addr[4], icb->enode_mac_addr[5]);
1113 } else {
1114 icb->firmware_options_1[0] = (uint8_t)
1115 (icb->firmware_options_1[0] | BIT_1);
1116 icb->firmware_options_1[1] = (uint8_t)
1117 (icb->firmware_options_1[1] | BIT_5 | BIT_2);
1118 icb->firmware_options_3[0] = (uint8_t)
1119 (icb->firmware_options_3[0] | BIT_1);
1120 }
1121 icb->firmware_options_1[0] = (uint8_t)(icb->firmware_options_1[0] &
1122 ~(BIT_5 | BIT_4));
1123 icb->firmware_options_1[1] = (uint8_t)(icb->firmware_options_1[1] |
1124 BIT_6);
1125 icb->firmware_options_2[0] = (uint8_t)(icb->firmware_options_2[0] &
1126 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
1127 if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) {
1128 icb->firmware_options_2[1] = (uint8_t)
1129 (icb->firmware_options_2[1] | BIT_4);
1130 } else {
1131 icb->firmware_options_2[1] = (uint8_t)
1132 (icb->firmware_options_2[1] & ~BIT_4);
1133 }
1134
1135 icb->firmware_options_3[0] = (uint8_t)(icb->firmware_options_3[0] &
1136 ~BIT_7);
1137
1138 /* Enable special N_Port-to-N_Port login behavior. */
1139 if (CFG_IST(ha, CFG_CTRL_2425)) {
1140 icb->firmware_options_3[1] =
1141 (uint8_t)(icb->firmware_options_3[1] | BIT_0);
1142 }
1143
1144 icb->execution_throttle[0] = 0xff;
1145 icb->execution_throttle[1] = 0xff;
1146
1147 /*
1148 * Set host adapter parameters
1149 */
1150 ADAPTER_STATE_LOCK(ha);
1151 ha->nvram_version = CHAR_TO_SHORT(nv->nvram_version[0],
1152 nv->nvram_version[1]);
1153 nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) :
1154 (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN);
1155 nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) :
1156 (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET);
1157 ha->cfg_flags &= ~(CFG_DISABLE_RISC_CODE_LOAD | CFG_LR_SUPPORT |
1158 CFG_SET_CACHE_LINE_SIZE_1 | CFG_MULTI_CHIP_ADAPTER);
1159 ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING;
1160 if (CFG_IST(ha, CFG_CTRL_81XX) && nv->enhanced_features[0] & BIT_0) {
1161 ha->cfg_flags |= CFG_LR_SUPPORT;
1162 }
1163 ADAPTER_STATE_UNLOCK(ha);
1164
1165 ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0],
1166 nv->execution_throttle[1]);
1167 ha->loop_reset_delay = nv->reset_delay;
1168 ha->port_down_retry_count = CHAR_TO_SHORT(nv->port_down_retry_count[0],
1169 nv->port_down_retry_count[1]);
1170 w1 = CHAR_TO_SHORT(icb->login_timeout[0], icb->login_timeout[1]);
1171 ha->r_a_tov = (uint16_t)(w1 < R_A_TOV_DEFAULT ? R_A_TOV_DEFAULT : w1);
1172 ha->maximum_luns_per_target = CHAR_TO_SHORT(
1173 nv->max_luns_per_target[0], nv->max_luns_per_target[1]);
1174 if (ha->maximum_luns_per_target == 0) {
1175 ha->maximum_luns_per_target++;
1176 }
1177
1178 /* ISP2422 Serial Link Control */
1179 if (CFG_IST(ha, CFG_CTRL_2422)) {
1180 ha->serdes_param[0] = CHAR_TO_SHORT(nv->fw.isp2400.swing_opt[0],
1181 nv->fw.isp2400.swing_opt[1]);
1182 ha->serdes_param[1] = CHAR_TO_SHORT(nv->fw.isp2400.swing_1g[0],
1183 nv->fw.isp2400.swing_1g[1]);
1184 ha->serdes_param[2] = CHAR_TO_SHORT(nv->fw.isp2400.swing_2g[0],
1185 nv->fw.isp2400.swing_2g[1]);
1186 ha->serdes_param[3] = CHAR_TO_SHORT(nv->fw.isp2400.swing_4g[0],
1187 nv->fw.isp2400.swing_4g[1]);
1188 }
1189
1190 /*
1191 * Setup ring parameters in initialization control block
1192 */
1193 w1 = REQUEST_ENTRY_CNT;
1194 icb->request_q_length[0] = LSB(w1);
1195 icb->request_q_length[1] = MSB(w1);
1196 w1 = RESPONSE_ENTRY_CNT;
1197 icb->response_q_length[0] = LSB(w1);
1198 icb->response_q_length[1] = MSB(w1);
1199
1200 icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma)));
1201 icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma)));
1202 icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma)));
1203 icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma)));
1204 icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma)));
1205 icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma)));
1206 icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma)));
1207 icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma)));
1208
1209 icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma)));
1210 icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma)));
1211 icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma)));
1212 icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma)));
1213 icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma)));
1214 icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma)));
1215 icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
1216 icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));
1217
1218 /*
1219 * Setup IP initialization control block
1220 */
1221 ip_icb->version = IP_ICB_24XX_VERSION;
1222
1223 ip_icb->ip_firmware_options[0] = (uint8_t)
1224 (ip_icb->ip_firmware_options[0] | BIT_2);
1225
1226 if (rval != QL_SUCCESS) {
1227 EL(ha, "failed, rval = %xh\n", rval);
1228 } else {
1229 /*EMPTY*/
1230 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1231 }
1232 return (rval);
1233 }
1234
1235 /*
1236 * ql_lock_nvram
1237 * Locks NVRAM access and returns starting address of NVRAM.
1238 *
1239 * Input:
1240 * ha: adapter state pointer.
1241 * addr: pointer for start address.
1242 * flags: mutually exclusive options:
1243 * LNF_NVRAM_DATA --> get nvram
1244 * LNF_VPD_DATA --> get vpd data (24/25xx only).
1245 *
1246 * Returns:
1247 * ql local function return status code.
1248 *
1249 * Context:
1250 * Kernel context.
1251 */
1252 int
1253 ql_lock_nvram(ql_adapter_state_t *ha, uint32_t *addr, uint32_t flags)
1254 {
1255 int i;
1256
1257 if ((flags & LNF_NVRAM_DATA) && (flags & LNF_VPD_DATA)) {
1258 EL(ha, "invalid options for function");
1259 return (QL_FUNCTION_FAILED);
1260 }
1261
1262 if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1263 if ((flags & LNF_NVRAM_DATA) == 0) {
1264 EL(ha, "invalid 2312/2322 option for HBA");
1265 return (QL_FUNCTION_FAILED);
1266 }
1267
1268 /* if function number is non-zero, then adjust offset */
1269 *addr = ha->flash_nvram_addr;
1270
1271 /* Try to get resource lock. Wait for 10 seconds max */
1272 for (i = 0; i < 10000; i++) {
1273 /* if nvram busy bit is reset, acquire sema */
1274 if ((RD16_IO_REG(ha, nvram) & 0x8000) == 0) {
1275 WRT16_IO_REG(ha, host_to_host_sema, 1);
1276 drv_usecwait(MILLISEC);
1277 if (RD16_IO_REG(ha, host_to_host_sema) & 1) {
1278 break;
1279 }
1280 }
1281 drv_usecwait(MILLISEC);
1282 }
1283 if ((RD16_IO_REG(ha, host_to_host_sema) & 1) == 0) {
1284 cmn_err(CE_WARN, "%s(%d): unable to get NVRAM lock",
1285 QL_NAME, ha->instance);
1286 return (QL_FUNCTION_FAILED);
1287 }
1288 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
1289 if (flags & LNF_VPD_DATA) {
1290 *addr = NVRAM_DATA_ADDR | ha->flash_vpd_addr;
1291 } else if (flags & LNF_NVRAM_DATA) {
1292 *addr = NVRAM_DATA_ADDR | ha->flash_nvram_addr;
1293 } else {
1294 EL(ha, "invalid 2422 option for HBA");
1295 return (QL_FUNCTION_FAILED);
1296 }
1297
1298 GLOBAL_HW_LOCK();
1299 } else if (CFG_IST(ha, CFG_CTRL_258081)) {
1300 if (flags & LNF_VPD_DATA) {
1301 *addr = ha->flash_data_addr | ha->flash_vpd_addr;
1302 } else if (flags & LNF_NVRAM_DATA) {
1303 *addr = ha->flash_data_addr | ha->flash_nvram_addr;
1304 } else {
1305 EL(ha, "invalid 2581 option for HBA");
1306 return (QL_FUNCTION_FAILED);
1307 }
1308
1309 GLOBAL_HW_LOCK();
1310 } else {
1311 if ((flags & LNF_NVRAM_DATA) == 0) {
1312 EL(ha, "invalid option for HBA");
1313 return (QL_FUNCTION_FAILED);
1314 }
1315 *addr = 0;
1316 GLOBAL_HW_LOCK();
1317 }
1318
1319 return (QL_SUCCESS);
1320 }
1321
1322 /*
1323 * ql_release_nvram
1324 * Releases NVRAM access.
1325 *
1326 * Input:
1327 * ha: adapter state pointer.
1328 *
1329 * Context:
1330 * Kernel context.
1331 */
1332 void
1333 ql_release_nvram(ql_adapter_state_t *ha)
1334 {
1335 if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
1336 /* Release resource lock */
1337 WRT16_IO_REG(ha, host_to_host_sema, 0);
1338 } else {
1339 GLOBAL_HW_UNLOCK();
1340 }
1341 }
1342
1343 /*
1344 * ql_23_properties
1345 * Copies driver properties to NVRAM or adapter structure.
1346 *
1347 * Driver properties are by design global variables and hidden
1348 * completely from administrators. Knowledgeable folks can
1349 * override the default values using driver.conf
1350 *
1351 * Input:
1352 * ha: adapter state pointer.
1353 * nv: NVRAM structure pointer.
1354 *
1355 * Context:
1356 * Kernel context.
1357 */
1358 static void
1359 ql_23_properties(ql_adapter_state_t *ha, nvram_t *nv)
1360 {
1361 uint32_t data, cnt;
1362
1363 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1364
1365 /* Get frame payload size. */
1366 if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1367 data = 2048;
1368 }
1369 if (data == 512 || data == 1024 || data == 2048) {
1370 nv->max_frame_length[0] = LSB(data);
1371 nv->max_frame_length[1] = MSB(data);
1372 } else {
1373 EL(ha, "invalid parameter value for 'max-frame-length': "
1374 "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1375 nv->max_frame_length[0], nv->max_frame_length[1]));
1376 }
1377
1378 /* Get max IOCB allocation. */
1379 nv->max_iocb_allocation[0] = 0;
1380 nv->max_iocb_allocation[1] = 1;
1381
1382 /* Get execution throttle. */
1383 if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1384 data = 32;
1385 }
1386 if (data != 0 && data < 65536) {
1387 nv->execution_throttle[0] = LSB(data);
1388 nv->execution_throttle[1] = MSB(data);
1389 } else {
1390 EL(ha, "invalid parameter value for 'execution-throttle': "
1391 "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1392 nv->execution_throttle[0], nv->execution_throttle[1]));
1393 }
1394
1395 /* Get Login timeout. */
1396 if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1397 data = 3;
1398 }
1399 if (data < 256) {
1400 nv->login_timeout = (uint8_t)data;
1401 } else {
1402 EL(ha, "invalid parameter value for 'login-timeout': "
1403 "%d; using nvram value of %d\n", data, nv->login_timeout);
1404 }
1405
1406 /* Get retry count. */
1407 if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1408 data = 4;
1409 }
1410 if (data < 256) {
1411 nv->login_retry_count = (uint8_t)data;
1412 } else {
1413 EL(ha, "invalid parameter value for 'login-retry-count': "
1414 "%d; using nvram value of %d\n", data,
1415 nv->login_retry_count);
1416 }
1417
1418 /* Get adapter hard loop ID enable. */
1419 data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1420 if (data == 0) {
1421 nv->firmware_options[0] =
1422 (uint8_t)(nv->firmware_options[0] & ~BIT_0);
1423 } else if (data == 1) {
1424 nv->firmware_options[0] =
1425 (uint8_t)(nv->firmware_options[0] | BIT_0);
1426 } else if (data != 0xffffffff) {
1427 EL(ha, "invalid parameter value for "
1428 "'enable-adapter-hard-loop-ID': %d; using nvram value "
1429 "of %d\n", data, nv->firmware_options[0] & BIT_0 ? 1 : 0);
1430 }
1431
1432 /* Get adapter hard loop ID. */
1433 data = ql_get_prop(ha, "adapter-hard-loop-ID");
1434 if (data < 126) {
1435 nv->hard_address[0] = (uint8_t)data;
1436 } else if (data != 0xffffffff) {
1437 EL(ha, "invalid parameter value for 'adapter-hard-loop-ID': "
1438 "%d; using nvram value of %d\n",
1439 data, nv->hard_address[0]);
1440 }
1441
1442 /* Get LIP reset. */
1443 if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1444 0xffffffff) {
1445 data = 0;
1446 }
1447 if (data == 0) {
1448 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_1);
1449 } else if (data == 1) {
1450 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_1);
1451 } else {
1452 EL(ha, "invalid parameter value for "
1453 "'enable-LIP-reset-on-bus-reset': %d; using nvram value "
1454 "of %d\n", data, nv->host_p[1] & BIT_1 ? 1 : 0);
1455 }
1456
1457 /* Get LIP full login. */
1458 if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1459 0xffffffff) {
1460 data = 1;
1461 }
1462 if (data == 0) {
1463 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1464 } else if (data == 1) {
1465 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1466 } else {
1467 EL(ha, "invalid parameter value for "
1468 "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1469 "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1470 }
1471
1472 /* Get target reset. */
1473 if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1474 0xffffffff) {
1475 data = 0;
1476 }
1477 if (data == 0) {
1478 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1479 } else if (data == 1) {
1480 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1481 } else {
1482 EL(ha, "invalid parameter value for "
1483 "'enable-target-reset-on-bus-reset': %d; using nvram "
1484 "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1485 }
1486
1487 /* Get reset delay. */
1488 if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1489 data = 5;
1490 }
1491 if (data != 0 && data < 256) {
1492 nv->reset_delay = (uint8_t)data;
1493 } else {
1494 EL(ha, "invalid parameter value for 'reset-delay': %d; "
1495 "using nvram value of %d", data, nv->reset_delay);
1496 }
1497
1498 /* Get port down retry count. */
1499 if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1500 data = 8;
1501 }
1502 if (data < 256) {
1503 nv->port_down_retry_count = (uint8_t)data;
1504 } else {
1505 EL(ha, "invalid parameter value for 'port-down-retry-count':"
1506 " %d; using nvram value of %d\n", data,
1507 nv->port_down_retry_count);
1508 }
1509
1510 /* Get connection mode setting. */
1511 if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
1512 data = 2;
1513 }
1514 cnt = CFG_IST(ha, CFG_CTRL_2200) ? 3 : 2;
1515 if (data <= cnt) {
1516 nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] &
1517 ~(BIT_6 | BIT_5 | BIT_4));
1518 nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
1519 (uint8_t)(data << 4));
1520 } else {
1521 EL(ha, "invalid parameter value for 'connection-options': "
1522 "%d; using nvram value of %d\n", data,
1523 (nv->add_fw_opt[0] >> 4) & 0x3);
1524 }
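/*
 * The selected value lands in bits <6:4> of additional firmware option
 * byte 0.  By the usual qlc convention (an assumption, not verified here)
 * 0 selects loop only, 1 point-to-point only, and 2 loop preferred,
 * otherwise point-to-point.
 */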
1525
1526 /* Get data rate setting. */
1527 if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
1528 if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1529 data = 2;
1530 }
1531 if (data < 3) {
1532 nv->special_options[1] = (uint8_t)
1533 (nv->special_options[1] & 0x3f);
1534 nv->special_options[1] = (uint8_t)
1535 (nv->special_options[1] | (uint8_t)(data << 6));
1536 } else {
1537 EL(ha, "invalid parameter value for 'fc-data-rate': "
1538 "%d; using nvram value of %d\n", data,
1539 (nv->special_options[1] >> 6) & 0x3);
1540 }
1541 }
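/*
 * Likewise, the data rate value occupies bits <7:6> of special option
 * byte 1; the conventional qlc meanings (assumed here) are 0 for 1 Gb/s,
 * 1 for 2 Gb/s and 2 for auto-negotiation.
 */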
1542
1543 /* Get adapter id string for Sun branded 23xx only */
1544 if ((CFG_IST(ha, CFG_CTRL_2300)) && nv->adapInfo[0] != 0) {
1545 (void) snprintf((int8_t *)ha->adapInfo, 16, "%s",
1546 nv->adapInfo);
1547 }
1548
1549 /* Get IP FW container count. */
1550 ha->ip_init_ctrl_blk.cb.cc[0] = LSB(ql_ip_buffer_count);
1551 ha->ip_init_ctrl_blk.cb.cc[1] = MSB(ql_ip_buffer_count);
1552
1553 /* Get IP low water mark. */
1554 ha->ip_init_ctrl_blk.cb.low_water_mark[0] = LSB(ql_ip_low_water);
1555 ha->ip_init_ctrl_blk.cb.low_water_mark[1] = MSB(ql_ip_low_water);
1556
1557 /* Get IP fast register post count. */
1558 ha->ip_init_ctrl_blk.cb.fast_post_reg_count[0] =
1559 ql_ip_fast_post_count;
1560
1561 ADAPTER_STATE_LOCK(ha);
1562
1563 ql_common_properties(ha);
1564
1565 ADAPTER_STATE_UNLOCK(ha);
1566
1567 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1568 }
1569
1570 /*
1571 * ql_common_properties
1572 * Driver properties adapter structure.
1573 *
1574 * Driver properties are by design global variables and hidden
1575 * completely from administrators. Knowledgeable folks can
1576 * override the default values using driver.conf
1577 *
1578 * Input:
1579 * ha: adapter state pointer.
1580 *
1581 * Context:
1582 * Kernel context.
1583 */
1584 void
1585 ql_common_properties(ql_adapter_state_t *ha)
1586 {
1587 uint32_t data;
1588
1589 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1590
1591 /* Get extended logging trace buffer size. */
1592 if ((data = ql_get_prop(ha, "set-ext-log-buffer-size")) !=
1593 0xffffffff && data != 0) {
1594 char *new_trace;
1595 uint32_t new_size;
1596
1597 if (ha->el_trace_desc->trace_buffer != NULL) {
1598 new_size = 1024 * data;
1599 new_trace = (char *)kmem_zalloc(new_size, KM_SLEEP);
1600
1601 if (new_trace == NULL) {
1602 cmn_err(CE_WARN, "%s(%d): can't get new"
1603 " trace buffer",
1604 QL_NAME, ha->instance);
1605 } else {
1606 /* free the previous */
1607 kmem_free(ha->el_trace_desc->trace_buffer,
1608 ha->el_trace_desc->trace_buffer_size);
1609 /* Use the new one */
1610 ha->el_trace_desc->trace_buffer = new_trace;
1611 ha->el_trace_desc->trace_buffer_size = new_size;
1612 }
1613 }
1614
1615 }
1616
1617 /* Get extended logging enable. */
1618 if ((data = ql_get_prop(ha, "extended-logging")) == 0xffffffff ||
1619 data == 0) {
1620 ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1621 } else if (data == 1) {
1622 ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1623 } else {
1624 EL(ha, "invalid parameter value for 'extended-logging': %d;"
1625 " using default value of 0\n", data);
1626 ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
1627 }
1628
1629 /* Get extended logging trace disable. */
1630 if ((data = ql_get_prop(ha, "disable-extended-logging-trace")) ==
1631 0xffffffff || data == 0) {
1632 ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1633 } else if (data == 1) {
1634 ha->cfg_flags |= CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1635 } else {
1636 EL(ha, "invalid parameter value for "
1637 "'disable-extended-logging-trace': %d;"
1638 " using default value of 0\n", data);
1639 ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
1640 }
1641
1642 /* Get FCP 2 Error Recovery. */
1643 if ((data = ql_get_prop(ha, "enable-FCP-2-error-recovery")) ==
1644 0xffffffff || data == 1) {
1645 ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1646 } else if (data == 0) {
1647 ha->cfg_flags &= ~CFG_ENABLE_FCP_2_SUPPORT;
1648 } else {
1649 EL(ha, "invalid parameter value for "
1650 "'enable-FCP-2-error-recovery': %d; using nvram value of "
1651 "1\n", data);
1652 ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
1653 }
1654
1655 #ifdef QL_DEBUG_LEVEL_2
1656 ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
1657 #endif
1658
1659 /* Get port down retry delay. */
1660 if ((data = ql_get_prop(ha, "port-down-retry-delay")) == 0xffffffff) {
1661 ha->port_down_retry_delay = PORT_RETRY_TIME;
1662 } else if (data < 256) {
1663 ha->port_down_retry_delay = (uint8_t)data;
1664 } else {
1665 EL(ha, "invalid parameter value for 'port-down-retry-delay':"
1666 " %d; using default value of %d", data, PORT_RETRY_TIME);
1667 ha->port_down_retry_delay = PORT_RETRY_TIME;
1668 }
1669
1670 /* Get queue full retry count. */
1671 if ((data = ql_get_prop(ha, "queue-full-retry-count")) == 0xffffffff) {
1672 ha->qfull_retry_count = 16;
1673 } else if (data < 256) {
1674 ha->qfull_retry_count = (uint8_t)data;
1675 } else {
1676 EL(ha, "invalid parameter value for 'queue-full-retry-count':"
1677 " %d; using default value of 16", data);
1678 ha->qfull_retry_count = 16;
1679 }
1680
1681 /* Get queue full retry delay. */
1682 if ((data = ql_get_prop(ha, "queue-full-retry-delay")) == 0xffffffff) {
1683 ha->qfull_retry_delay = PORT_RETRY_TIME;
1684 } else if (data < 256) {
1685 ha->qfull_retry_delay = (uint8_t)data;
1686 } else {
1687 EL(ha, "invalid parameter value for 'queue-full-retry-delay':"
1688 " %d; using default value of %d", data, PORT_RETRY_TIME);
1689 ha->qfull_retry_delay = PORT_RETRY_TIME;
1690 }
1691
1692 /* Get loop down timeout. */
1693 if ((data = ql_get_prop(ha, "link-down-timeout")) == 0xffffffff) {
1694 data = 0;
1695 } else if (data > 255) {
1696 EL(ha, "invalid parameter value for 'link-down-timeout': %d;"
1697 " using nvram value of 0\n", data);
1698 data = 0;
1699 }
1700 ha->loop_down_abort_time = (uint8_t)(LOOP_DOWN_TIMER_START - data);
1701 if (ha->loop_down_abort_time == LOOP_DOWN_TIMER_START) {
1702 ha->loop_down_abort_time--;
1703 } else if (ha->loop_down_abort_time <= LOOP_DOWN_TIMER_END) {
1704 ha->loop_down_abort_time = LOOP_DOWN_TIMER_END + 1;
1705 }
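/*
 * loop_down_abort_time is derived as LOOP_DOWN_TIMER_START minus the
 * property value and then clamped so it stays strictly between
 * LOOP_DOWN_TIMER_END and LOOP_DOWN_TIMER_START; a larger
 * link-down-timeout therefore yields a smaller abort threshold, i.e. a
 * longer wait before loop-down handling kicks in.
 */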
1706
1707 /* Get link down error enable. */
1708 if ((data = ql_get_prop(ha, "enable-link-down-error")) == 0xffffffff ||
1709 data == 1) {
1710 ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
1711 } else if (data == 0) {
1712 ha->cfg_flags &= ~CFG_ENABLE_LINK_DOWN_REPORTING;
1713 } else {
1714 EL(ha, "invalid parameter value for 'link-down-error': %d;"
1715 " using default value of 1\n", data);
1716 }
1717
1718 /*
1719 * Get firmware dump flags.
1720 * TAKE_FW_DUMP_ON_MAILBOX_TIMEOUT BIT_0
1721 * TAKE_FW_DUMP_ON_ISP_SYSTEM_ERROR BIT_1
1722 * TAKE_FW_DUMP_ON_DRIVER_COMMAND_TIMEOUT BIT_2
1723 * TAKE_FW_DUMP_ON_LOOP_OFFLINE_TIMEOUT BIT_3
1724 */
1725 ha->cfg_flags &= ~(CFG_DUMP_MAILBOX_TIMEOUT |
1726 CFG_DUMP_ISP_SYSTEM_ERROR | CFG_DUMP_DRIVER_COMMAND_TIMEOUT |
1727 CFG_DUMP_LOOP_OFFLINE_TIMEOUT);
1728 if ((data = ql_get_prop(ha, "firmware-dump-flags")) != 0xffffffff) {
1729 if (data & BIT_0) {
1730 ha->cfg_flags |= CFG_DUMP_MAILBOX_TIMEOUT;
1731 }
1732 if (data & BIT_1) {
1733 ha->cfg_flags |= CFG_DUMP_ISP_SYSTEM_ERROR;
1734 }
1735 if (data & BIT_2) {
1736 ha->cfg_flags |= CFG_DUMP_DRIVER_COMMAND_TIMEOUT;
1737 }
1738 if (data & BIT_3) {
1739 ha->cfg_flags |= CFG_DUMP_LOOP_OFFLINE_TIMEOUT;
1740 }
1741 }
1742
1743 /* Get the PCI max read request size override. */
1744 ha->pci_max_read_req = 0;
1745 if ((data = ql_get_prop(ha, "pci-max-read-request")) != 0xffffffff &&
1746 data != 0) {
1747 ha->pci_max_read_req = (uint16_t)(data);
1748 }
1749
1750 /*
1751 * Set the default firmware wait, adjusted for slow FCFs.
1752 * Revisit when FCFs are as fast as FC switches.
1753 */
1754 ha->fwwait = (uint8_t)(CFG_IST(ha, CFG_CTRL_8081) ? 45 : 10);
1755 /* Get the attach fw_ready override value. */
1756 if ((data = ql_get_prop(ha, "init-loop-sync-wait")) != 0xffffffff) {
1757 if (data > 0 && data <= 240) {
1758 ha->fwwait = (uint8_t)data;
1759 } else {
1760 EL(ha, "invalid parameter value for "
1761 "'init-loop-sync-wait': %d; using default "
1762 "value of %d\n", data, ha->fwwait);
1763 }
1764 }
1765
1766 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1767 }
1768
1769 /*
1770 * ql_24xx_properties
1771 * Copies driver properties to NVRAM or adapter structure.
1772 *
1773 * Driver properties are by design global variables and hidden
1774 * completely from administrators. Knowledgeable folks can
1775 * override the default values using /etc/system.
1776 *
1777 * Input:
1778 * ha: adapter state pointer.
1779 * nv: NVRAM structure pointer.
1780 *
1781 * Context:
1782 * Kernel context.
1783 */
1784 static void
1785 ql_24xx_properties(ql_adapter_state_t *ha, nvram_24xx_t *nv)
1786 {
1787 uint32_t data;
1788
1789 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1790
1791 /* Get frame size */
1792 if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
1793 data = 2048;
1794 }
1795 if (data == 512 || data == 1024 || data == 2048 || data == 2112) {
1796 nv->max_frame_length[0] = LSB(data);
1797 nv->max_frame_length[1] = MSB(data);
1798 } else {
1799 EL(ha, "invalid parameter value for 'max-frame-length': %d;"
1800 " using nvram default of %d\n", data, CHAR_TO_SHORT(
1801 nv->max_frame_length[0], nv->max_frame_length[1]));
1802 }
1803
1804 /* Get execution throttle. */
1805 if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
1806 data = 32;
1807 }
1808 if (data != 0 && data < 65536) {
1809 nv->execution_throttle[0] = LSB(data);
1810 nv->execution_throttle[1] = MSB(data);
1811 } else {
1812 EL(ha, "invalid parameter value for 'execution-throttle':"
1813 " %d; using nvram default of %d\n", data, CHAR_TO_SHORT(
1814 nv->execution_throttle[0], nv->execution_throttle[1]));
1815 }
1816
1817 /* Get Login timeout. */
1818 if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
1819 data = 3;
1820 }
1821 if (data < 65536) {
1822 nv->login_timeout[0] = LSB(data);
1823 nv->login_timeout[1] = MSB(data);
1824 } else {
1825 EL(ha, "invalid parameter value for 'login-timeout': %d; "
1826 "using nvram value of %d\n", data, CHAR_TO_SHORT(
1827 nv->login_timeout[0], nv->login_timeout[1]));
1828 }
1829
1830 /* Get retry count. */
1831 if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
1832 data = 4;
1833 }
1834 if (data < 65536) {
1835 nv->login_retry_count[0] = LSB(data);
1836 nv->login_retry_count[1] = MSB(data);
1837 } else {
1838 EL(ha, "invalid parameter value for 'login-retry-count': "
1839 "%d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1840 nv->login_retry_count[0], nv->login_retry_count[1]));
1841 }
1842
1843 /* Get adapter hard loop ID enable. */
1844 data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
1845 if (data == 0) {
1846 nv->firmware_options_1[0] =
1847 (uint8_t)(nv->firmware_options_1[0] & ~BIT_0);
1848 } else if (data == 1) {
1849 nv->firmware_options_1[0] =
1850 (uint8_t)(nv->firmware_options_1[0] | BIT_0);
1851 } else if (data != 0xffffffff) {
1852 EL(ha, "invalid parameter value for "
1853 "'enable-adapter-hard-loop-ID': %d; using nvram value "
1854 "of %d\n", data,
1855 nv->firmware_options_1[0] & BIT_0 ? 1 : 0);
1856 }
1857
1858 /* Get adapter hard loop ID. */
1859 data = ql_get_prop(ha, "adapter-hard-loop-ID");
1860 if (data < 126) {
1861 nv->hard_address[0] = LSB(data);
1862 nv->hard_address[1] = MSB(data);
1863 } else if (data != 0xffffffff) {
1864 EL(ha, "invalid parameter value for 'adapter-hard-loop-ID':"
1865 " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1866 nv->hard_address[0], nv->hard_address[1]));
1867 }
1868
1869 /* Get LIP reset. */
1870 if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
1871 0xffffffff) {
1872 data = 0;
1873 }
1874 if (data == 0) {
1875 ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
1876 } else if (data == 1) {
1877 ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
1878 } else {
1879 EL(ha, "invalid parameter value for "
1880 "'enable-LIP-reset-on-bus-reset': %d; using value of 0\n",
1881 data);
1882 }
1883
1884 /* Get LIP full login. */
1885 if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
1886 0xffffffff) {
1887 data = 1;
1888 }
1889 if (data == 0) {
1890 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
1891 } else if (data == 1) {
1892 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
1893 } else {
1894 EL(ha, "invalid parameter value for "
1895 "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
1896 "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
1897 }
1898
1899 /* Get target reset. */
1900 if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
1901 0xffffffff) {
1902 data = 0;
1903 }
1904 if (data == 0) {
1905 nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
1906 } else if (data == 1) {
1907 nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
1908 } else {
1909 EL(ha, "invalid parameter value for "
1910 "'enable-target-reset-on-bus-reset': %d; using nvram "
1911 "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
1912 }
1913
1914 /* Get reset delay. */
1915 if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
1916 data = 5;
1917 }
1918 if (data != 0 && data < 256) {
1919 nv->reset_delay = (uint8_t)data;
1920 } else {
1921 EL(ha, "invalid parameter value for 'reset-delay': %d; "
1922 "using nvram value of %d", data, nv->reset_delay);
1923 }
1924
1925 /* Get port down retry count. */
1926 if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
1927 data = 8;
1928 }
1929 if (data < 256) {
1930 nv->port_down_retry_count[0] = LSB(data);
1931 nv->port_down_retry_count[1] = MSB(data);
1932 } else {
1933 EL(ha, "invalid parameter value for 'port-down-retry-count':"
1934 " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
1935 nv->port_down_retry_count[0],
1936 nv->port_down_retry_count[1]));
1937 }
1938
1939 if (!(CFG_IST(ha, CFG_CTRL_8081))) {
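		/*
		 * Connection options select the port mode (by the usual
		 * QLogic convention, assumed here: 0 = loop only, 1 = point
		 * to point only, 2 = loop preferred, otherwise point to
		 * point); the value is placed in bits 4-6 of
		 * firmware_options_2[0] below.
		 */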
1940 /* Get connection mode setting. */
1941 if ((data = ql_get_prop(ha, "connection-options")) ==
1942 0xffffffff) {
1943 data = 2;
1944 }
1945 if (data <= 2) {
1946 nv->firmware_options_2[0] = (uint8_t)
1947 (nv->firmware_options_2[0] &
1948 ~(BIT_6 | BIT_5 | BIT_4));
1949 nv->firmware_options_2[0] = (uint8_t)
1950 (nv->firmware_options_2[0] | (uint8_t)(data << 4));
1951 } else {
1952 EL(ha, "invalid parameter value for 'connection-"
1953 "options': %d; using nvram value of %d\n", data,
1954 (nv->firmware_options_2[0] >> 4) & 0x3);
1955 }
1956
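		/*
		 * fc-data-rate selects the link speed (mapping assumed from
		 * common QLogic usage: 0 = 1Gb, 1 = 2Gb, 2 = auto-negotiate,
		 * 3 = 4Gb, 4 = 8Gb); the value is placed in bits 5-7 of
		 * firmware_options_3[1] below.
		 */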
1957 /* Get data rate setting. */
1958 if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
1959 data = 2;
1960 }
1961 if ((CFG_IST(ha, CFG_CTRL_2422) && data < 4) ||
1962 (CFG_IST(ha, CFG_CTRL_258081) && data < 5)) {
1963 nv->firmware_options_3[1] = (uint8_t)
1964 (nv->firmware_options_3[1] & 0x1f);
1965 nv->firmware_options_3[1] = (uint8_t)
1966 (nv->firmware_options_3[1] | (uint8_t)(data << 5));
1967 } else {
1968 EL(ha, "invalid parameter value for 'fc-data-rate': "
1969 "%d; using nvram value of %d\n", data,
1970 (nv->firmware_options_3[1] >> 5) & 0x7);
1971 }
1972 }
1973
1974 /* Get IP FW container count. */
1975 ha->ip_init_ctrl_blk.cb24.cc[0] = LSB(ql_ip_buffer_count);
1976 ha->ip_init_ctrl_blk.cb24.cc[1] = MSB(ql_ip_buffer_count);
1977
1978 /* Get IP low water mark. */
1979 ha->ip_init_ctrl_blk.cb24.low_water_mark[0] = LSB(ql_ip_low_water);
1980 ha->ip_init_ctrl_blk.cb24.low_water_mark[1] = MSB(ql_ip_low_water);
1981
1982 ADAPTER_STATE_LOCK(ha);
1983
1984 /* Get enable flash load. */
1985 if ((data = ql_get_prop(ha, "enable-flash-load")) == 0xffffffff ||
1986 data == 0) {
1987 ha->cfg_flags &= ~CFG_LOAD_FLASH_FW;
1988 } else if (data == 1) {
1989 ha->cfg_flags |= CFG_LOAD_FLASH_FW;
1990 } else {
1991 EL(ha, "invalid parameter value for 'enable-flash-load': "
1992 "%d; using default value of 0\n", data);
1993 }
1994
1995 /* Enable firmware extended tracing */
1996 if ((data = ql_get_prop(ha, "enable-fwexttrace")) != 0xffffffff) {
1997 if (data != 0) {
1998 ha->cfg_flags |= CFG_ENABLE_FWEXTTRACE;
1999 }
2000 }
2001
2002 /* Enable firmware fc tracing */
2003 if ((data = ql_get_prop(ha, "enable-fwfcetrace")) != 0xffffffff) {
2004 ha->cfg_flags |= CFG_ENABLE_FWFCETRACE;
2005 ha->fwfcetraceopt = data;
2006 }
2007
2008 /* Enable fast timeout */
2009 if ((data = ql_get_prop(ha, "enable-fasttimeout")) != 0xffffffff) {
2010 if (data != 0) {
2011 ha->cfg_flags |= CFG_FAST_TIMEOUT;
2012 }
2013 }
2014
2015 ql_common_properties(ha);
2016
2017 ADAPTER_STATE_UNLOCK(ha);
2018
2019 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2020 }
2021
2022 /*
2023 * ql_get_prop
2024 * Get property value from configuration file.
2025 *
2026 * Input:
2027  *	ha = adapter state pointer.
2028 * string = property string pointer.
2029 *
2030 * Returns:
2031 * 0xFFFFFFFF = no property else property value.
2032 *
2033 * Context:
2034 * Kernel context.
2035 */
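/*
 * Property lookup precedence, illustrated with hypothetical driver.conf
 * entries for the "login-timeout" property (the names are built by the
 * sprintf() calls below):
 *
 *	hba0-vp2-login-timeout=4;	adapter instance + NPIV port specific
 *	hba0-login-timeout=4;		adapter instance specific
 *	login-timeout=4;		global default
 */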
2036 uint32_t
2037 ql_get_prop(ql_adapter_state_t *ha, char *string)
2038 {
2039 char buf[256];
2040 uint32_t data = 0xffffffff;
2041
2042 /*
2043 	 * Look for an adapter instance NPIV (virtual port) specific parameter
2044 */
2045 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2046 (void) sprintf(buf, "hba%d-vp%d-%s", ha->instance,
2047 ha->vp_index, string);
2048 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2049 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2050 buf, (int)0xffffffff);
2051 }
2052
2053 /*
2054 * Get adapter instance parameter if a vp specific one isn't found.
2055 */
2056 if (data == 0xffffffff) {
2057 (void) sprintf(buf, "hba%d-%s", ha->instance, string);
2058 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2059 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip,
2060 0, buf, (int)0xffffffff);
2061 }
2062
2063 /* Adapter instance parameter found? */
2064 if (data == 0xffffffff) {
2065 /* No, get default parameter. */
2066 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2067 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
2068 string, (int)0xffffffff);
2069 }
2070
2071 return (data);
2072 }
2073
2074 /*
2075 * ql_check_isp_firmware
2076  *	Checks whether to use the already loaded RISC code or the driver's copy.
2077 * If using already loaded code, save a copy of it.
2078 *
2079 * Input:
2080 * ha = adapter state pointer.
2081 *
2082 * Returns:
2083 * ql local function return status code.
2084 *
2085 * Context:
2086 * Kernel context.
2087 */
2088 static int
2089 ql_check_isp_firmware(ql_adapter_state_t *ha)
2090 {
2091 int rval;
2092 uint16_t word_count;
2093 uint32_t byte_count;
2094 uint32_t fw_size, *lptr;
2095 caddr_t bufp;
2096 uint16_t risc_address = (uint16_t)ha->risc_fw[0].addr;
2097
2098 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2099
2100 /* Test for firmware running. */
2101 if (CFG_IST(ha, CFG_CTRL_8021)) {
2102 if (ql_8021_idc_handler(ha) != NX_DEV_READY) {
2103 rval = QL_FUNCTION_FAILED;
2104 } else {
2105 rval = ql_start_firmware(ha);
2106 }
2107 } else if (CFG_IST(ha, CFG_DISABLE_RISC_CODE_LOAD)) {
2108 if (ha->risc_code != NULL) {
2109 kmem_free(ha->risc_code, ha->risc_code_size);
2110 ha->risc_code = NULL;
2111 ha->risc_code_size = 0;
2112 }
2113
2114 /* Get RISC code length. */
2115 rval = ql_rd_risc_ram(ha, risc_address + 3, ha->request_dvma,
2116 1);
2117 if (rval == QL_SUCCESS) {
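			/*
			 * The single word read above (at risc_address + 3)
			 * holds the firmware image length; it is doubled into
			 * a byte count used to size and drive the copy loop
			 * below.
			 */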
2118 lptr = (uint32_t *)ha->request_ring_bp;
2119 fw_size = *lptr << 1;
2120
2121 if ((bufp = kmem_alloc(fw_size, KM_SLEEP)) != NULL) {
2122 ha->risc_code_size = fw_size;
2123 ha->risc_code = bufp;
2124 ha->fw_transfer_size = 128;
2125
2126 /* Dump RISC code. */
2127 do {
2128 if (fw_size > ha->fw_transfer_size) {
2129 byte_count =
2130 ha->fw_transfer_size;
2131 } else {
2132 byte_count = fw_size;
2133 }
2134
2135 word_count =
2136 (uint16_t)(byte_count >> 1);
2137
2138 rval = ql_rd_risc_ram(ha, risc_address,
2139 ha->request_dvma, word_count);
2140 if (rval != QL_SUCCESS) {
2141 kmem_free(ha->risc_code,
2142 ha->risc_code_size);
2143 ha->risc_code = NULL;
2144 ha->risc_code_size = 0;
2145 break;
2146 }
2147
2148 (void) ddi_dma_sync(
2149 ha->hba_buf.dma_handle,
2150 REQUEST_Q_BUFFER_OFFSET,
2151 byte_count,
2152 DDI_DMA_SYNC_FORKERNEL);
2153 ddi_rep_get16(ha->hba_buf.acc_handle,
2154 (uint16_t *)bufp,
2155 (uint16_t *)ha->request_ring_bp,
2156 word_count, DDI_DEV_AUTOINCR);
2157
2158 risc_address += word_count;
2159 fw_size -= byte_count;
2160 bufp += byte_count;
2161 } while (fw_size != 0);
2162 }
2163 rval = QL_FUNCTION_FAILED;
2164 }
2165 } else {
2166 rval = QL_FUNCTION_FAILED;
2167 }
2168
2169 if (rval != QL_SUCCESS) {
2170 EL(ha, "Load RISC code\n");
2171 } else {
2172 /*EMPTY*/
2173 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2174 }
2175 return (rval);
2176 }
2177
2178 /*
2179  * ql_chip_diag
2180  *	Chip diagnostics; tests the chip for proper operation.
2181 *
2182 * Input:
2183 * ha = adapter state pointer.
2184 *
2185 * Returns:
2186 * ql local function return status code.
2187 *
2188 * Context:
2189 * Kernel context.
2190 */
2191 static int
2192 ql_chip_diag(ql_adapter_state_t *ha)
2193 {
2194 ql_mbx_data_t mr;
2195 int rval;
2196 int32_t retries = 4;
2197 uint16_t id;
2198
2199 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2200
2201 do {
2202 /* Reset ISP chip. */
2203 TASK_DAEMON_LOCK(ha);
2204 ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
2205 TASK_DAEMON_UNLOCK(ha);
2206
2207 /* For ISP2200A reduce firmware load size. */
2208 if (CFG_IST(ha, CFG_CTRL_2200) &&
2209 RD16_IO_REG(ha, mailbox_out[7]) == 4) {
2210 ha->fw_transfer_size = 128;
2211 } else {
2212 ha->fw_transfer_size = REQUEST_QUEUE_SIZE;
2213 }
2214
2215 rval = QL_SUCCESS;
2216 if (!(CFG_IST(ha, CFG_CTRL_8021))) {
2217 ql_reset_chip(ha);
2218
2219 /* Check product ID of chip */
2220 mr.mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
2221 mr.mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
2222 mr.mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
2223
2224 if (ha->device_id == 0x5432 ||
2225 ha->device_id == 0x8432) {
2226 id = 0x2432;
2227 } else if (ha->device_id == 0x5422 ||
2228 ha->device_id == 0x8422) {
2229 id = 0x2422;
2230 } else {
2231 id = ha->device_id;
2232 }
2233
2234 if (mr.mb[1] == PROD_ID_1 &&
2235 (mr.mb[2] == PROD_ID_2 || mr.mb[2] == PROD_ID_2a) &&
2236 (mr.mb[3] == PROD_ID_3 || mr.mb[3] == id)) {
2237 ha->adapter_stats->revlvl.isp2200 =
2238 RD16_IO_REG(ha, mailbox_out[4]);
2239 ha->adapter_stats->revlvl.risc =
2240 RD16_IO_REG(ha, mailbox_out[5]);
2241 ha->adapter_stats->revlvl.frmbfr =
2242 RD16_IO_REG(ha, mailbox_out[6]);
2243 ha->adapter_stats->revlvl.riscrom =
2244 RD16_IO_REG(ha, mailbox_out[7]);
2245 } else {
2246 cmn_err(CE_WARN, "%s(%d) - prod id failed!, "
2247 "mb1=%xh, mb2=%xh, mb3=%xh", QL_NAME,
2248 ha->instance, mr.mb[1], mr.mb[2], mr.mb[3]);
2249 rval = QL_FUNCTION_FAILED;
2250 }
2251 } else if (!(ha->task_daemon_flags & FIRMWARE_LOADED)) {
2252 break;
2253 }
2254
2255 if (rval == QL_SUCCESS) {
2256 /* Wrap Incoming Mailboxes Test. */
2257 mr.mb[1] = 0xAAAA;
2258 mr.mb[2] = 0x5555;
2259 mr.mb[3] = 0xAA55;
2260 mr.mb[4] = 0x55AA;
2261 mr.mb[5] = 0xA5A5;
2262 mr.mb[6] = 0x5A5A;
2263 mr.mb[7] = 0x2525;
2264 rval = ql_mbx_wrap_test(ha, &mr);
2265 if (rval == QL_SUCCESS) {
2266 if (mr.mb[1] != 0xAAAA ||
2267 mr.mb[2] != 0x5555 ||
2268 mr.mb[3] != 0xAA55 ||
2269 mr.mb[4] != 0x55AA ||
2270 mr.mb[5] != 0xA5A5 ||
2271 mr.mb[6] != 0x5A5A ||
2272 mr.mb[7] != 0x2525) {
2273 rval = QL_FUNCTION_FAILED;
2274 (void) ql_flash_errlog(ha,
2275 FLASH_ERRLOG_ISP_ERR, 0,
2276 RD16_IO_REG(ha, hccr),
2277 RD16_IO_REG(ha, istatus));
2278 }
2279 } else {
2280 cmn_err(CE_WARN, "%s(%d) - reg test failed="
2281 "%xh!", QL_NAME, ha->instance, rval);
2282 }
2283 }
2284 } while ((retries-- != 0) && (rval != QL_SUCCESS));
2285
2286 if (rval != QL_SUCCESS) {
2287 EL(ha, "failed, rval = %xh\n", rval);
2288 } else {
2289 /*EMPTY*/
2290 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2291 }
2292 return (rval);
2293 }
2294
2295 /*
2296 * ql_load_isp_firmware
2297 * Load and start RISC firmware.
2298 * Uses request ring for DMA buffer.
2299 *
2300 * Input:
2301 * ha = adapter state pointer.
2302 *
2303 * Returns:
2304 * ql local function return status code.
2305 *
2306 * Context:
2307 * Kernel context.
2308 */
2309 int
2310 ql_load_isp_firmware(ql_adapter_state_t *vha)
2311 {
2312 caddr_t risc_code_address;
2313 uint32_t risc_address, risc_code_size;
2314 int rval;
2315 uint32_t word_count, cnt;
2316 size_t byte_count;
2317 ql_adapter_state_t *ha = vha->pha;
2318
2319 if (CFG_IST(ha, CFG_CTRL_8021)) {
2320 rval = ql_8021_load_risc(ha);
2321 } else {
2322 if (CFG_IST(ha, CFG_CTRL_81XX)) {
2323 ql_mps_reset(ha);
2324 }
2325
2326 if (CFG_IST(ha, CFG_LOAD_FLASH_FW)) {
2327 return (ql_load_flash_fw(ha));
2328 }
2329
2330 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2331
2332 /* Load firmware segments */
2333 for (cnt = 0; cnt < MAX_RISC_CODE_SEGMENTS &&
2334 ha->risc_fw[cnt].code != NULL; cnt++) {
2335
2336 risc_code_address = ha->risc_fw[cnt].code;
2337 risc_address = ha->risc_fw[cnt].addr;
2338 risc_code_size = ha->risc_fw[cnt].length;
2339
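			/*
			 * Segment lengths are in words, so the per-chunk
			 * transfer size (bytes) is converted to a word count
			 * and capped against the remaining length: 32-bit
			 * words on 24xx/25xx/81xx parts, 16-bit words on
			 * earlier ISPs.
			 */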
2340 while (risc_code_size) {
2341 if (CFG_IST(ha, CFG_CTRL_242581)) {
2342 word_count = ha->fw_transfer_size >> 2;
2343 if (word_count > risc_code_size) {
2344 word_count = risc_code_size;
2345 }
2346 byte_count = word_count << 2;
2347
2348 ddi_rep_put32(ha->hba_buf.acc_handle,
2349 (uint32_t *)risc_code_address,
2350 (uint32_t *)ha->request_ring_bp,
2351 word_count, DDI_DEV_AUTOINCR);
2352 } else {
2353 word_count = ha->fw_transfer_size >> 1;
2354 if (word_count > risc_code_size) {
2355 word_count = risc_code_size;
2356 }
2357 byte_count = word_count << 1;
2358
2359 ddi_rep_put16(ha->hba_buf.acc_handle,
2360 (uint16_t *)risc_code_address,
2361 (uint16_t *)ha->request_ring_bp,
2362 word_count, DDI_DEV_AUTOINCR);
2363 }
2364
2365 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
2366 REQUEST_Q_BUFFER_OFFSET, byte_count,
2367 DDI_DMA_SYNC_FORDEV);
2368
2369 rval = ql_wrt_risc_ram(ha, risc_address,
2370 ha->request_dvma, word_count);
2371 if (rval != QL_SUCCESS) {
2372 EL(ha, "failed, load=%xh\n", rval);
2373 cnt = MAX_RISC_CODE_SEGMENTS;
2374 break;
2375 }
2376
2377 risc_address += word_count;
2378 risc_code_size -= word_count;
2379 risc_code_address += byte_count;
2380 }
2381 }
2382 }
2383
2384 /* Start firmware. */
2385 if (rval == QL_SUCCESS) {
2386 rval = ql_start_firmware(ha);
2387 }
2388
2389 if (rval != QL_SUCCESS) {
2390 EL(ha, "failed, rval = %xh\n", rval);
2391 } else {
2392 /*EMPTY*/
2393 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2394 }
2395
2396 return (rval);
2397 }
2398
2399 /*
2400 * ql_load_flash_fw
2401 * Gets ISP24xx firmware from flash and loads ISP.
2402 *
2403 * Input:
2404 * ha: adapter state pointer.
2405 *
2406 * Returns:
2407 * ql local function return status code.
2408 */
2409 static int
2410 ql_load_flash_fw(ql_adapter_state_t *ha)
2411 {
2412 int rval;
2413 uint8_t seg_cnt;
2414 uint32_t risc_address, xfer_size, count, *bp, faddr;
2415 uint32_t risc_code_size = 0;
2416
2417 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2418
2419 faddr = ha->flash_data_addr | ha->flash_fw_addr;
2420
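	/*
	 * The flash holds two firmware segments. Dwords 2 and 3 of each
	 * segment's first chunk supply the RISC load address and the segment
	 * length in 32-bit words, which drive the copy loop below.
	 */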
2421 for (seg_cnt = 0; seg_cnt < 2; seg_cnt++) {
2422 xfer_size = ha->fw_transfer_size >> 2;
2423 do {
2424 GLOBAL_HW_LOCK();
2425
2426 /* Read data from flash. */
2427 bp = (uint32_t *)ha->request_ring_bp;
2428 for (count = 0; count < xfer_size; count++) {
2429 rval = ql_24xx_read_flash(ha, faddr++, bp);
2430 if (rval != QL_SUCCESS) {
2431 break;
2432 }
2433 ql_chg_endian((uint8_t *)bp++, 4);
2434 }
2435
2436 GLOBAL_HW_UNLOCK();
2437
2438 if (rval != QL_SUCCESS) {
2439 EL(ha, "24xx_read_flash failed=%xh\n", rval);
2440 break;
2441 }
2442
2443 if (risc_code_size == 0) {
2444 bp = (uint32_t *)ha->request_ring_bp;
2445 risc_address = bp[2];
2446 risc_code_size = bp[3];
2447 ha->risc_fw[seg_cnt].addr = risc_address;
2448 }
2449
2450 if (risc_code_size < xfer_size) {
2451 faddr -= xfer_size - risc_code_size;
2452 xfer_size = risc_code_size;
2453 }
2454
2455 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
2456 REQUEST_Q_BUFFER_OFFSET, xfer_size << 2,
2457 DDI_DMA_SYNC_FORDEV);
2458
2459 rval = ql_wrt_risc_ram(ha, risc_address,
2460 ha->request_dvma, xfer_size);
2461 if (rval != QL_SUCCESS) {
2462 EL(ha, "ql_wrt_risc_ram failed=%xh\n", rval);
2463 break;
2464 }
2465
2466 risc_address += xfer_size;
2467 risc_code_size -= xfer_size;
2468 } while (risc_code_size);
2469
2470 if (rval != QL_SUCCESS) {
2471 break;
2472 }
2473 }
2474
2475 /* Start firmware. */
2476 if (rval == QL_SUCCESS) {
2477 rval = ql_start_firmware(ha);
2478 }
2479
2480 if (rval != QL_SUCCESS) {
2481 EL(ha, "failed, rval = %xh\n", rval);
2482 } else {
2483 /*EMPTY*/
2484 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2485 }
2486 return (rval);
2487 }
2488
2489 /*
2490 * ql_start_firmware
2491 * Starts RISC code.
2492 *
2493 * Input:
2494 * ha = adapter state pointer.
2495 *
2496 * Returns:
2497 * ql local function return status code.
2498 *
2499 * Context:
2500 * Kernel context.
2501 */
2502 int
2503 ql_start_firmware(ql_adapter_state_t *vha)
2504 {
2505 int rval, rval2;
2506 uint32_t data;
2507 ql_mbx_data_t mr;
2508 ql_adapter_state_t *ha = vha->pha;
2509
2510 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2511
2512 if (CFG_IST(ha, CFG_CTRL_8021)) {
2513 /* Save firmware version. */
2514 rval = ql_get_fw_version(ha, &mr, MAILBOX_TOV);
2515 ha->fw_major_version = mr.mb[1];
2516 ha->fw_minor_version = mr.mb[2];
2517 ha->fw_subminor_version = mr.mb[3];
2518 ha->fw_attributes = mr.mb[6];
2519 } else if ((rval = ql_verify_checksum(ha)) == QL_SUCCESS) {
2520 /* Verify checksum of loaded RISC code. */
2521 /* Start firmware execution. */
2522 (void) ql_execute_fw(ha);
2523
2524 /* Save firmware version. */
2525 (void) ql_get_fw_version(ha, &mr, MAILBOX_TOV);
2526 ha->fw_major_version = mr.mb[1];
2527 ha->fw_minor_version = mr.mb[2];
2528 ha->fw_subminor_version = mr.mb[3];
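		/*
		 * mb[4]/mb[5] report the top address of the firmware's
		 * extended memory region (based at 0x100000); the arithmetic
		 * below converts that inclusive range into a size in bytes.
		 */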
2529 ha->fw_ext_memory_size = ((SHORT_TO_LONG(mr.mb[4], mr.mb[5]) -
2530 0x100000) + 1) * 4;
2531 ha->fw_attributes = mr.mb[6];
2532
2533 if (CFG_IST(ha, CFG_CTRL_81XX)) {
2534 ha->phy_fw_major_version = LSB(mr.mb[8]);
2535 ha->phy_fw_minor_version = MSB(mr.mb[9]);
2536 ha->phy_fw_subminor_version = LSB(mr.mb[9]);
2537 ha->mpi_fw_major_version = LSB(mr.mb[10]);
2538 ha->mpi_fw_minor_version = MSB(mr.mb[11]);
2539 ha->mpi_fw_subminor_version = LSB(mr.mb[11]);
2540 ha->mpi_capability_list = SHORT_TO_LONG(mr.mb[13],
2541 mr.mb[12]);
2542 if ((rval2 = ql_flash_access(ha, FAC_GET_SECTOR_SIZE,
2543 0, 0, &data)) == QL_SUCCESS) {
2544 ha->xioctl->fdesc.block_size = data << 2;
2545 QL_PRINT_10(CE_CONT, "(%d): fdesc.block_size="
2546 "%xh\n", ha->instance,
2547 ha->xioctl->fdesc.block_size);
2548 } else {
2549 EL(ha, "flash_access status=%xh\n", rval2);
2550 }
2551 }
2552
2553 /* Set Serdes Transmit Parameters. */
2554 if (CFG_IST(ha, CFG_CTRL_2422) && ha->serdes_param[0] & BIT_0) {
2555 mr.mb[1] = ha->serdes_param[0];
2556 mr.mb[2] = ha->serdes_param[1];
2557 mr.mb[3] = ha->serdes_param[2];
2558 mr.mb[4] = ha->serdes_param[3];
2559 (void) ql_serdes_param(ha, &mr);
2560 }
2561 }
2562 /* ETS workaround */
2563 if (CFG_IST(ha, CFG_CTRL_81XX) && ql_enable_ets) {
2564 if (ql_get_firmware_option(ha, &mr) == QL_SUCCESS) {
2565 mr.mb[2] = (uint16_t)
2566 (mr.mb[2] | FO2_FCOE_512_MAX_MEM_WR_BURST);
2567 (void) ql_set_firmware_option(ha, &mr);
2568 }
2569 }
2570 if (rval != QL_SUCCESS) {
2571 ha->task_daemon_flags &= ~FIRMWARE_LOADED;
2572 EL(ha, "failed, rval = %xh\n", rval);
2573 } else {
2574 ha->task_daemon_flags |= FIRMWARE_LOADED;
2575 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2576 }
2577 return (rval);
2578 }
2579
2580 /*
2581 * ql_set_cache_line
2582 * Sets PCI cache line parameter.
2583 *
2584 * Input:
2585 * ha = adapter state pointer.
2586 *
2587 * Returns:
2588 * ql local function return status code.
2589 *
2590 * Context:
2591 * Kernel context.
2592 */
2593 int
2594 ql_set_cache_line(ql_adapter_state_t *ha)
2595 {
2596 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2597
2598 /* Set the cache line. */
2599 if (CFG_IST(ha->pha, CFG_SET_CACHE_LINE_SIZE_1)) {
2600 /* Set cache line register. */
2601 ql_pci_config_put8(ha->pha, PCI_CONF_CACHE_LINESZ, 1);
2602 }
2603
2604 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2605
2606 return (QL_SUCCESS);
2607 }
2608
2609 /*
2610 * ql_init_rings
2611 * Initializes firmware and ring pointers.
2612 *
2613 * Beginning of response ring has initialization control block
2614 * already built by nvram config routine.
2615 *
2616 * Input:
2617 * ha = adapter state pointer.
2618 * ha->hba_buf = request and response rings
2619 * ha->init_ctrl_blk = initialization control block
2620 *
2621 * Returns:
2622 * ql local function return status code.
2623 *
2624 * Context:
2625 * Kernel context.
2626 */
2627 int
2628 ql_init_rings(ql_adapter_state_t *vha2)
2629 {
2630 int rval, rval2;
2631 uint16_t index;
2632 ql_mbx_data_t mr;
2633 ql_adapter_state_t *ha = vha2->pha;
2634
2635 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2636
2637 /* Clear outstanding commands array. */
2638 for (index = 0; index < MAX_OUTSTANDING_COMMANDS; index++) {
2639 ha->outstanding_cmds[index] = NULL;
2640 }
2641 ha->osc_index = 1;
2642
2643 ha->pending_cmds.first = NULL;
2644 ha->pending_cmds.last = NULL;
2645
2646 /* Initialize firmware. */
2647 ha->request_ring_ptr = ha->request_ring_bp;
2648 ha->req_ring_index = 0;
2649 ha->req_q_cnt = REQUEST_ENTRY_CNT - 1;
2650 ha->response_ring_ptr = ha->response_ring_bp;
2651 ha->rsp_ring_index = 0;
2652
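	/*
	 * With NPIV enabled, the tail of the init control block is filled in
	 * with one entry per virtual port (node/port names plus enable and
	 * initiator-mode options) before the firmware is initialized.
	 */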
2653 if (ha->flags & VP_ENABLED) {
2654 ql_adapter_state_t *vha;
2655 uint16_t cnt;
2656 uint32_t max_vports;
2657 ql_init_24xx_cb_t *icb = &ha->init_ctrl_blk.cb24;
2658
2659 max_vports = (CFG_IST(ha, CFG_CTRL_2422) ?
2660 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS);
2661 bzero(icb->vp_count,
2662 ((uintptr_t)icb + sizeof (ql_init_24xx_cb_t)) -
2663 (uintptr_t)icb->vp_count);
2664 icb->vp_count[0] = (uint8_t)max_vports;
2665
2666 /* Allow connection option 2. */
2667 icb->global_vp_option[0] = BIT_1;
2668
2669 for (cnt = 0, vha = ha->vp_next; cnt < max_vports &&
2670 vha != NULL; vha = vha->vp_next, cnt++) {
2671
2672 index = (uint8_t)(vha->vp_index - 1);
2673 bcopy(vha->loginparams.node_ww_name.raw_wwn,
2674 icb->vpc[index].node_name, 8);
2675 bcopy(vha->loginparams.nport_ww_name.raw_wwn,
2676 icb->vpc[index].port_name, 8);
2677
2678 icb->vpc[index].options = VPO_TARGET_MODE_DISABLED |
2679 VPO_INITIATOR_MODE_ENABLED;
2680 if (vha->flags & VP_ENABLED) {
2681 icb->vpc[index].options = (uint8_t)
2682 (icb->vpc[index].options | VPO_ENABLED);
2683 }
2684 }
2685 }
2686
2687 for (index = 0; index < 2; index++) {
2688 rval = ql_init_firmware(ha);
2689 if (rval == QL_COMMAND_ERROR) {
2690 EL(ha, "stopping firmware\n");
2691 (void) ql_stop_firmware(ha);
2692 } else {
2693 break;
2694 }
2695 }
2696
2697 if (rval == QL_SUCCESS && (CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2698 /* Tell firmware to enable MBA_PORT_BYPASS_CHANGED event */
2699 rval = ql_get_firmware_option(ha, &mr);
2700 if (rval == QL_SUCCESS) {
2701 mr.mb[1] = (uint16_t)(mr.mb[1] | BIT_9);
2702 mr.mb[2] = 0;
2703 mr.mb[3] = BIT_10;
2704 rval = ql_set_firmware_option(ha, &mr);
2705 }
2706 }
2707
2708 if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWFCETRACE))) {
2709 /* Firmware Fibre Channel Event Trace Buffer */
2710 if ((rval2 = ql_get_dma_mem(ha, &ha->fwfcetracebuf, FWFCESIZE,
2711 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2712 EL(ha, "fcetrace buffer alloc failed: %xh\n", rval2);
2713 } else {
2714 if ((rval2 = ql_fw_etrace(ha, &ha->fwfcetracebuf,
2715 FTO_FCE_TRACE_ENABLE)) != QL_SUCCESS) {
2716 EL(ha, "fcetrace enable failed: %xh\n", rval2);
2717 ql_free_phys(ha, &ha->fwfcetracebuf);
2718 }
2719 }
2720 }
2721
2722 if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE))) {
2723 /* Firmware Extended Trace Buffer */
2724 if ((rval2 = ql_get_dma_mem(ha, &ha->fwexttracebuf, FWEXTSIZE,
2725 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) {
2726 EL(ha, "exttrace buffer alloc failed: %xh\n", rval2);
2727 } else {
2728 if ((rval2 = ql_fw_etrace(ha, &ha->fwexttracebuf,
2729 FTO_EXT_TRACE_ENABLE)) != QL_SUCCESS) {
2730 EL(ha, "exttrace enable failed: %xh\n", rval2);
2731 ql_free_phys(ha, &ha->fwexttracebuf);
2732 }
2733 }
2734 }
2735
2736 if (rval == QL_SUCCESS && CFG_IST(ha, CFG_CTRL_MENLO)) {
2737 ql_mbx_iocb_t *pkt;
2738 clock_t timer;
2739
2740 /* Wait for firmware login of menlo. */
2741 for (timer = 3000; timer; timer--) {
2742 if (ha->flags & MENLO_LOGIN_OPERATIONAL) {
2743 break;
2744 }
2745
2746 if (!(ha->flags & INTERRUPTS_ENABLED) ||
2747 ddi_in_panic()) {
2748 if (INTERRUPT_PENDING(ha)) {
2749 (void) ql_isr((caddr_t)ha);
2750 INTR_LOCK(ha);
2751 ha->intr_claimed = B_TRUE;
2752 INTR_UNLOCK(ha);
2753 }
2754 }
2755
2756 /* Delay for 1 tick (10 milliseconds). */
2757 ql_delay(ha, 10000);
2758 }
2759
2760 if (timer == 0) {
2761 rval = QL_FUNCTION_TIMEOUT;
2762 } else {
2763 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
2764 if (pkt == NULL) {
2765 EL(ha, "failed, kmem_zalloc\n");
2766 rval = QL_MEMORY_ALLOC_FAILED;
2767 } else {
2768 pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
2769 pkt->mvfy.entry_count = 1;
2770 pkt->mvfy.options_status =
2771 LE_16(VMF_DO_NOT_UPDATE_FW);
2772
2773 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
2774 sizeof (ql_mbx_iocb_t));
2775 LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
2776 LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
2777
2778 if (rval != QL_SUCCESS ||
2779 (pkt->mvfy.entry_status & 0x3c) != 0 ||
2780 pkt->mvfy.options_status != CS_COMPLETE) {
2781 EL(ha, "failed, status=%xh, es=%xh, "
2782 "cs=%xh, fc=%xh\n", rval,
2783 pkt->mvfy.entry_status & 0x3c,
2784 pkt->mvfy.options_status,
2785 pkt->mvfy.failure_code);
2786 if (rval == QL_SUCCESS) {
2787 rval = QL_FUNCTION_FAILED;
2788 }
2789 }
2790
2791 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
2792 }
2793 }
2794 }
2795
2796 if (rval != QL_SUCCESS) {
2797 TASK_DAEMON_LOCK(ha);
2798 ha->task_daemon_flags &= ~FIRMWARE_UP;
2799 TASK_DAEMON_UNLOCK(ha);
2800 EL(ha, "failed, rval = %xh\n", rval);
2801 } else {
2802 TASK_DAEMON_LOCK(ha);
2803 ha->task_daemon_flags |= FIRMWARE_UP;
2804 TASK_DAEMON_UNLOCK(ha);
2805 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2806 }
2807 return (rval);
2808 }
2809
2810 /*
2811 * ql_fw_ready
2812  *	Waits for the firmware to become ready. Once the firmware is ready,
2813  *	the device queues and RISC code are synchronized.
2814 *
2815 * Input:
2816 * ha = adapter state pointer.
2817 * secs = max wait time, in seconds (0-255).
2818 *
2819 * Returns:
2820 * ql local function return status code.
2821 *
2822 * Context:
2823 * Kernel context.
2824 */
2825 int
2826 ql_fw_ready(ql_adapter_state_t *ha, uint8_t secs)
2827 {
2828 ql_mbx_data_t mr;
2829 clock_t timer;
2830 clock_t dly = 250000;
2831 clock_t sec_delay = MICROSEC / dly;
2832 clock_t wait = secs * sec_delay;
2833 int rval = QL_FUNCTION_FAILED;
2834 uint16_t state = 0xffff;
2835
2836 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2837
2838 timer = ha->r_a_tov < secs ? secs : ha->r_a_tov;
2839 timer = (timer + 2) * sec_delay;
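	/*
	 * Poll in 250ms steps. "timer" bounds the overall wait by the larger
	 * of the caller's limit and R_A_TOV (plus two seconds); "wait" (the
	 * caller's limit) counts down only while the firmware is not yet
	 * ready or the loop is down/resyncing.
	 */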
2840
2841 /* Wait for ISP to finish LIP */
2842 while (timer != 0 && wait != 0 &&
2843 !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) {
2844
2845 rval = ql_get_firmware_state(ha, &mr);
2846 if (rval == QL_SUCCESS) {
2847 if (ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2848 LOOP_DOWN)) {
2849 wait--;
2850 } else if (mr.mb[1] != FSTATE_READY) {
2851 if (mr.mb[1] != FSTATE_WAIT_LOGIN) {
2852 wait--;
2853 }
2854 rval = QL_FUNCTION_FAILED;
2855 } else {
2856 /* Firmware is ready. Get 2 * R_A_TOV. */
2857 rval = ql_get_timeout_parameters(ha,
2858 &ha->r_a_tov);
2859 if (rval != QL_SUCCESS) {
2860 EL(ha, "failed, get_timeout_param"
2861 "=%xh\n", rval);
2862 }
2863
2864 /* Configure loop. */
2865 rval = ql_configure_loop(ha);
2866 (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
2867
2868 if (ha->task_daemon_flags &
2869 LOOP_RESYNC_NEEDED) {
2870 wait--;
2871 EL(ha, "loop trans; tdf=%xh\n",
2872 ha->task_daemon_flags);
2873 } else {
2874 break;
2875 }
2876 }
2877 } else {
2878 wait--;
2879 }
2880
2881 if (state != mr.mb[1]) {
2882 EL(ha, "mailbox_reg[1] = %xh\n", mr.mb[1]);
2883 state = mr.mb[1];
2884 }
2885
2886 /* Delay for a tick if waiting. */
2887 if (timer-- != 0 && wait != 0) {
2888 if (timer % 4 == 0) {
2889 delay(drv_usectohz(dly));
2890 } else {
2891 drv_usecwait(dly);
2892 }
2893 } else {
2894 rval = QL_FUNCTION_TIMEOUT;
2895 }
2896 }
2897
2898 if (rval != QL_SUCCESS) {
2899 EL(ha, "failed, rval = %xh\n", rval);
2900 } else {
2901 /*EMPTY*/
2902 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2903 }
2904 return (rval);
2905 }
2906
2907 /*
2908 * ql_configure_loop
2909 * Setup configurations based on loop.
2910 *
2911 * Input:
2912 * ha = adapter state pointer.
2913 *
2914 * Returns:
2915 * ql local function return status code.
2916 *
2917 * Context:
2918 * Kernel context.
2919 */
2920 static int
2921 ql_configure_loop(ql_adapter_state_t *ha)
2922 {
2923 int rval;
2924 ql_adapter_state_t *vha;
2925
2926 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2927
2928 for (vha = ha; vha != NULL; vha = vha->vp_next) {
2929 TASK_DAEMON_LOCK(ha);
2930 if (!(vha->task_daemon_flags & LOOP_RESYNC_NEEDED) &&
2931 vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2932 TASK_DAEMON_UNLOCK(ha);
2933 continue;
2934 }
2935 vha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED;
2936 TASK_DAEMON_UNLOCK(ha);
2937
2938 rval = ql_configure_hba(vha);
2939 if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2940 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2941 rval = ql_configure_device_d_id(vha);
2942 if (rval == QL_SUCCESS && !(ha->task_daemon_flags &
2943 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
2944 (void) ql_configure_fabric(vha);
2945 }
2946 }
2947 }
2948
2949 if (rval != QL_SUCCESS) {
2950 EL(ha, "failed, rval = %xh\n", rval);
2951 } else {
2952 /*EMPTY*/
2953 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2954 }
2955 return (rval);
2956 }
2957
2958 /*
2959 * ql_configure_n_port_info
2960  *	Sets up configuration for N_Port-to-N_Port (point-to-point) topology.
2961 *
2962 * Input:
2963 * ha = adapter state pointer.
2967 *
2968 * Context:
2969 * Kernel context.
2970 */
2971 static void
2972 ql_configure_n_port_info(ql_adapter_state_t *ha)
2973 {
2974 ql_tgt_t tmp_tq;
2975 ql_tgt_t *tq;
2976 uint8_t *cb_port_name;
2977 ql_link_t *link;
2978 int index, rval;
2979
2980 tq = &tmp_tq;
2981
2982 /* Free existing target queues. */
2983 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
2984 link = ha->dev[index].first;
2985 while (link != NULL) {
2986 tq = link->base_address;
2987 link = link->next;
2988 ql_remove_link(&ha->dev[index], &tq->device);
2989 ql_dev_free(ha, tq);
2990 }
2991 }
2992
2993 /*
2994 	 * If the N_Port's WWPN is larger than ours, then it has the
2995 * N_Port login initiative. It will have determined that and
2996 * logged in with the firmware. This results in a device
2997 * database entry. In this situation we will later send up a PLOGI
2998 * by proxy for the N_Port to get things going.
2999 *
3000 	 * If the N_Port's WWPN is smaller, then the firmware has the
3001 	 * N_Port login initiative and does a FLOGI in order to obtain the
3002 	 * N_Port's WWNN and WWPN. These names are required later
3003 	 * during Leadville's FLOGI. No PLOGI is done by the firmware in
3004 	 * anticipation of a PLOGI via the driver from the upper layers.
3005 	 * Upon receipt of said PLOGI the driver issues an ELS PLOGI
3006 * pass-through command and the firmware assumes the s_id
3007 * and the N_Port assumes the d_id and Bob's your uncle.
3008 */
3009
3010 /*
3011 	 * In N_Port-to-N_Port topology the FW provides a port database entry
3012 	 * at loop_id 0x7fe which allows us to acquire the N_Port's WWPN.
3013 */
	tq = &tmp_tq;	/* the free loop above may leave tq dangling */
3014 	tq->d_id.b.al_pa = 0;
3015 tq->d_id.b.area = 0;
3016 tq->d_id.b.domain = 0;
3017 tq->loop_id = 0x7fe;
3018
3019 rval = ql_get_port_database(ha, tq, PDF_NONE);
3020 if (rval == QL_SUCCESS || rval == QL_NOT_LOGGED_IN) {
3021 ql_dev_id_list_t *list;
3022 uint32_t list_size;
3023 ql_mbx_data_t mr;
3024 port_id_t d_id = {0, 0, 0, 0};
3025 uint16_t loop_id = 0;
3026
3027 cb_port_name = (uint8_t *)(CFG_IST(ha, CFG_CTRL_24258081) ?
3028 &ha->init_ctrl_blk.cb24.port_name[0] :
3029 &ha->init_ctrl_blk.cb.port_name[0]);
3030
3031 if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
3032 (la_wwn_t *)cb_port_name) == 1)) {
3033 EL(ha, "target port has N_Port login initiative\n");
3034 } else {
3035 EL(ha, "host port has N_Port login initiative\n");
3036 }
3037
3038 /* Capture the N Ports WWPN */
3039
3040 bcopy((void *)&tq->port_name[0],
3041 (void *)&ha->n_port->port_name[0], 8);
3042 bcopy((void *)&tq->node_name[0],
3043 (void *)&ha->n_port->node_name[0], 8);
3044
3045 /* Resolve an n_port_handle */
3046 ha->n_port->n_port_handle = 0x7fe;
3047
3048 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3049 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
3050
3051 if (list != NULL &&
3052 ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
3053 QL_SUCCESS) {
3054 if (mr.mb[1]) {
3055 EL(ha, "id list entries = %d\n", mr.mb[1]);
3056 for (index = 0; index < mr.mb[1]; index++) {
3057 ql_dev_list(ha, list, index,
3058 &d_id, &loop_id);
3059 ha->n_port->n_port_handle = loop_id;
3060 }
3061 } else {
3062 for (index = 0; index <= LAST_LOCAL_LOOP_ID;
3063 index++) {
3064 					/* reuse tq */
3065 tq->loop_id = (uint16_t)index;
3066 rval = ql_get_port_database(ha, tq,
3067 PDF_NONE);
3068 if (rval == QL_NOT_LOGGED_IN) {
3069 if (tq->master_state ==
3070 PD_STATE_PLOGI_PENDING) {
3071 ha->n_port->
3072 n_port_handle =
3073 tq->loop_id;
3074 break;
3075 }
3076 } else {
3077 ha->n_port->n_port_handle =
3078 tq->loop_id;
3079 break;
3080 }
3081 }
3082 }
3083 } else {
3084 cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
3085 QL_NAME, ha->instance, d_id.b24);
3086 }
3087 if (list != NULL) {
3088 kmem_free(list, list_size);
3089 }
3090 }
3091 }
3092
3093
3094 /*
3095 * ql_configure_hba
3096 * Setup adapter context.
3097 *
3098 * Input:
3099 * ha = adapter state pointer.
3100 *
3101 * Returns:
3102 * ql local function return status code.
3103 *
3104 * Context:
3105 * Kernel context.
3106 */
3107 static int
3108 ql_configure_hba(ql_adapter_state_t *ha)
3109 {
3110 uint8_t *bp;
3111 int rval;
3112 uint32_t state;
3113 ql_mbx_data_t mr;
3114
3115 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3116
3117 /* Get host addresses. */
3118 rval = ql_get_adapter_id(ha, &mr);
3119 if (rval == QL_SUCCESS) {
3120 ha->topology = (uint8_t)(ha->topology &
3121 ~(QL_N_PORT | QL_NL_PORT | QL_F_PORT | QL_FL_PORT));
3122
3123 /* Save Host d_id, alpa, loop ID. */
3124 ha->loop_id = mr.mb[1];
3125 ha->d_id.b.al_pa = LSB(mr.mb[2]);
3126 ha->d_id.b.area = MSB(mr.mb[2]);
3127 ha->d_id.b.domain = LSB(mr.mb[3]);
3128
3129 ADAPTER_STATE_LOCK(ha);
3130 ha->flags &= ~FDISC_ENABLED;
3131
3132 /* Get loop topology. */
3133 switch (mr.mb[6]) {
3134 case CNX_LOOP_NO_FABRIC:
3135 ha->topology = (uint8_t)(ha->topology | QL_NL_PORT);
3136 break;
3137 case CNX_FLPORT_IN_LOOP:
3138 ha->topology = (uint8_t)(ha->topology | QL_FL_PORT);
3139 break;
3140 case CNX_NPORT_2_NPORT_P2P:
3141 case CNX_NPORT_2_NPORT_NO_TGT_RSP:
3142 ha->flags |= POINT_TO_POINT;
3143 ha->topology = (uint8_t)(ha->topology | QL_N_PORT);
3144 if (CFG_IST(ha, CFG_CTRL_2425)) {
3145 ql_configure_n_port_info(ha);
3146 }
3147 break;
3148 case CNX_FLPORT_P2P:
3149 ha->flags |= POINT_TO_POINT;
3150 ha->topology = (uint8_t)(ha->topology | QL_F_PORT);
3151
3152 /* Get supported option. */
3153 if (CFG_IST(ha, CFG_CTRL_24258081) &&
3154 mr.mb[7] & GID_FP_NPIV_SUPPORT) {
3155 ha->flags |= FDISC_ENABLED;
3156 }
3157 /* Get VLAN ID, mac address */
3158 if (CFG_IST(ha, CFG_CTRL_8081)) {
3159 ha->fabric_params = mr.mb[7];
3160 ha->fcoe_vlan_id = (uint16_t)(mr.mb[9] & 0xfff);
3161 ha->fcoe_fcf_idx = mr.mb[10];
3162 ha->fcoe_vnport_mac[0] = MSB(mr.mb[11]);
3163 ha->fcoe_vnport_mac[1] = LSB(mr.mb[11]);
3164 ha->fcoe_vnport_mac[2] = MSB(mr.mb[12]);
3165 ha->fcoe_vnport_mac[3] = LSB(mr.mb[12]);
3166 ha->fcoe_vnport_mac[4] = MSB(mr.mb[13]);
3167 ha->fcoe_vnport_mac[5] = LSB(mr.mb[13]);
3168 }
3169 break;
3170 default:
3171 QL_PRINT_2(CE_CONT, "(%d,%d): UNKNOWN topology=%xh, "
3172 "d_id=%xh\n", ha->instance, ha->vp_index, mr.mb[6],
3173 ha->d_id.b24);
3174 rval = QL_FUNCTION_FAILED;
3175 break;
3176 }
3177 ADAPTER_STATE_UNLOCK(ha);
3178
3179 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
3180 CFG_CTRL_24258081))) {
3181 mr.mb[1] = 0;
3182 mr.mb[2] = 0;
3183 rval = ql_data_rate(ha, &mr);
3184 if (rval != QL_SUCCESS) {
3185 EL(ha, "data_rate status=%xh\n", rval);
3186 state = FC_STATE_FULL_SPEED;
3187 } else {
3188 ha->iidma_rate = mr.mb[1];
3189 if (mr.mb[1] == IIDMA_RATE_1GB) {
3190 state = FC_STATE_1GBIT_SPEED;
3191 } else if (mr.mb[1] == IIDMA_RATE_2GB) {
3192 state = FC_STATE_2GBIT_SPEED;
3193 } else if (mr.mb[1] == IIDMA_RATE_4GB) {
3194 state = FC_STATE_4GBIT_SPEED;
3195 } else if (mr.mb[1] == IIDMA_RATE_8GB) {
3196 state = FC_STATE_8GBIT_SPEED;
3197 } else if (mr.mb[1] == IIDMA_RATE_10GB) {
3198 state = FC_STATE_10GBIT_SPEED;
3199 } else {
3200 state = 0;
3201 }
3202 }
3203 } else {
3204 ha->iidma_rate = IIDMA_RATE_1GB;
3205 state = FC_STATE_FULL_SPEED;
3206 }
3207 ha->state = FC_PORT_STATE_MASK(ha->state) | state;
3208 } else if (rval == MBS_COMMAND_ERROR) {
3209 EL(ha, "mbox cmd error, rval = %xh, mr.mb[1]=%hx\n",
3210 rval, mr.mb[1]);
3211 }
3212
3213 if (rval != QL_SUCCESS) {
3214 EL(ha, "failed, rval = %xh\n", rval);
3215 } else {
3216 bp = ha->loginparams.nport_ww_name.raw_wwn;
3217 EL(ha, "topology=%xh, d_id=%xh, "
3218 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n",
3219 ha->topology, ha->d_id.b24, bp[0], bp[1],
3220 bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
3221 }
3222 return (rval);
3223 }
3224
3225 /*
3226 * ql_configure_device_d_id
3227 * Updates device loop ID.
3228 * Also adds to device queue any new devices found on private loop.
3229 *
3230 * Input:
3231 * ha = adapter state pointer.
3232 *
3233 * Returns:
3234 * ql local function return status code.
3235 *
3236 * Context:
3237 * Kernel context.
3238 */
3239 static int
3240 ql_configure_device_d_id(ql_adapter_state_t *ha)
3241 {
3242 port_id_t d_id;
3243 ql_link_t *link;
3244 int rval;
3245 int loop;
3246 ql_tgt_t *tq;
3247 ql_dev_id_list_t *list;
3248 uint32_t list_size;
3249 uint16_t index, loop_id;
3250 ql_mbx_data_t mr;
3251 uint8_t retries = MAX_DEVICE_LOST_RETRY;
3252
3253 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3254
3255 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
3256 list = kmem_zalloc(list_size, KM_SLEEP);
3257 if (list == NULL) {
3258 rval = QL_MEMORY_ALLOC_FAILED;
3259 EL(ha, "failed, rval = %xh\n", rval);
3260 return (rval);
3261 }
3262
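	/*
	 * Each pass marks existing queues lost by OR-ing PORT_LOST_ID into
	 * their loop IDs, then clears the mark for devices that reappear in
	 * the firmware's ID list. If any non-fabric device is still marked
	 * lost, wait a second and rescan, up to MAX_DEVICE_LOST_RETRY times.
	 */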
3263 do {
3264 /*
3265 * Get data from RISC code d_id list to init each device queue.
3266 */
3267 rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
3268 if (rval != QL_SUCCESS) {
3269 kmem_free(list, list_size);
3270 EL(ha, "failed, rval = %xh\n", rval);
3271 return (rval);
3272 }
3273
3274 /* Acquire adapter state lock. */
3275 ADAPTER_STATE_LOCK(ha);
3276
3277 /* Mark all queues as unusable. */
3278 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3279 for (link = ha->dev[index].first; link != NULL;
3280 link = link->next) {
3281 tq = link->base_address;
3282 DEVICE_QUEUE_LOCK(tq);
3283 if (!(tq->flags & TQF_PLOGI_PROGRS) &&
3284 !(ha->topology & QL_N_PORT)) {
3285 tq->loop_id = (uint16_t)
3286 (tq->loop_id | PORT_LOST_ID);
3287 }
3288 DEVICE_QUEUE_UNLOCK(tq);
3289 }
3290 }
3291
3292 /* If device not in queues add new queue. */
3293 for (index = 0; index < mr.mb[1]; index++) {
3294 ql_dev_list(ha, list, index, &d_id, &loop_id);
3295
3296 if (VALID_DEVICE_ID(ha, loop_id)) {
3297 tq = ql_dev_init(ha, d_id, loop_id);
3298 if (tq != NULL) {
3299 tq->loop_id = loop_id;
3300
3301 /* Test for fabric device. */
3302 if (d_id.b.domain !=
3303 ha->d_id.b.domain ||
3304 d_id.b.area != ha->d_id.b.area) {
3305 tq->flags |= TQF_FABRIC_DEVICE;
3306 }
3307
3308 ADAPTER_STATE_UNLOCK(ha);
3309 if (ql_get_port_database(ha, tq,
3310 PDF_NONE) == QL_SUCCESS) {
3311 ADAPTER_STATE_LOCK(ha);
3312 tq->loop_id = (uint16_t)
3313 (tq->loop_id &
3314 ~PORT_LOST_ID);
3315 } else {
3316 ADAPTER_STATE_LOCK(ha);
3317 }
3318 }
3319 }
3320 }
3321
3322 /* 24xx does not report switch devices in ID list. */
3323 if ((CFG_IST(ha, CFG_CTRL_24258081)) &&
3324 ha->topology & (QL_F_PORT | QL_FL_PORT)) {
3325 d_id.b24 = 0xfffffe;
3326 tq = ql_dev_init(ha, d_id, FL_PORT_24XX_HDL);
3327 if (tq != NULL) {
3328 tq->flags |= TQF_FABRIC_DEVICE;
3329 ADAPTER_STATE_UNLOCK(ha);
3330 (void) ql_get_port_database(ha, tq, PDF_NONE);
3331 ADAPTER_STATE_LOCK(ha);
3332 }
3333 d_id.b24 = 0xfffffc;
3334 tq = ql_dev_init(ha, d_id, SNS_24XX_HDL);
3335 if (tq != NULL) {
3336 tq->flags |= TQF_FABRIC_DEVICE;
3337 ADAPTER_STATE_UNLOCK(ha);
3338 if (ha->vp_index != 0) {
3339 (void) ql_login_fport(ha, tq,
3340 SNS_24XX_HDL, LFF_NONE, NULL);
3341 }
3342 (void) ql_get_port_database(ha, tq, PDF_NONE);
3343 ADAPTER_STATE_LOCK(ha);
3344 }
3345 }
3346
3347 /* If F_port exists, allocate queue for FL_Port. */
3348 index = ql_alpa_to_index[0xfe];
3349 d_id.b24 = 0;
3350 if (ha->dev[index].first != NULL) {
3351 tq = ql_dev_init(ha, d_id, (uint16_t)
3352 (CFG_IST(ha, CFG_CTRL_24258081) ?
3353 FL_PORT_24XX_HDL : FL_PORT_LOOP_ID));
3354 if (tq != NULL) {
3355 tq->flags |= TQF_FABRIC_DEVICE;
3356 ADAPTER_STATE_UNLOCK(ha);
3357 (void) ql_get_port_database(ha, tq, PDF_NONE);
3358 ADAPTER_STATE_LOCK(ha);
3359 }
3360 }
3361
3362 /* Allocate queue for broadcast. */
3363 d_id.b24 = 0xffffff;
3364 (void) ql_dev_init(ha, d_id, (uint16_t)
3365 (CFG_IST(ha, CFG_CTRL_24258081) ? BROADCAST_24XX_HDL :
3366 IP_BROADCAST_LOOP_ID));
3367
3368 /* Check for any devices lost. */
3369 loop = FALSE;
3370 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3371 for (link = ha->dev[index].first; link != NULL;
3372 link = link->next) {
3373 tq = link->base_address;
3374
3375 if ((tq->loop_id & PORT_LOST_ID) &&
3376 !(tq->flags & (TQF_INITIATOR_DEVICE |
3377 TQF_FABRIC_DEVICE))) {
3378 loop = TRUE;
3379 }
3380 }
3381 }
3382
3383 /* Release adapter state lock. */
3384 ADAPTER_STATE_UNLOCK(ha);
3385
3386 /* Give devices time to recover. */
3387 if (loop == TRUE) {
3388 drv_usecwait(1000000);
3389 }
3390 } while (retries-- && loop == TRUE &&
3391 !(ha->pha->task_daemon_flags & LOOP_RESYNC_NEEDED));
3392
3393 kmem_free(list, list_size);
3394
3395 if (rval != QL_SUCCESS) {
3396 EL(ha, "failed=%xh\n", rval);
3397 } else {
3398 /*EMPTY*/
3399 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3400 }
3401
3402 return (rval);
3403 }
3404
3405 /*
3406 * ql_dev_list
3407 * Gets device d_id and loop ID from firmware device list.
3408 *
3409 * Input:
3410 * ha: adapter state pointer.
3411  *	list:	device list pointer.
3412 * index: list index of device data.
3413 * d_id: pointer for d_id data.
3414 * id: pointer for loop ID.
3415 *
3416 * Context:
3417 * Kernel context.
3418 */
3419 void
3420 ql_dev_list(ql_adapter_state_t *ha, union ql_dev_id_list *list,
3421 uint32_t index, port_id_t *d_id, uint16_t *id)
3422 {
3423 if (CFG_IST(ha, CFG_CTRL_24258081)) {
3424 struct ql_24_dev_id *list24 = (struct ql_24_dev_id *)list;
3425
3426 d_id->b.al_pa = list24[index].al_pa;
3427 d_id->b.area = list24[index].area;
3428 d_id->b.domain = list24[index].domain;
3429 *id = CHAR_TO_SHORT(list24[index].n_port_hdl_l,
3430 list24[index].n_port_hdl_h);
3431
3432 } else if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3433 struct ql_ex_dev_id *list23 = (struct ql_ex_dev_id *)list;
3434
3435 d_id->b.al_pa = list23[index].al_pa;
3436 d_id->b.area = list23[index].area;
3437 d_id->b.domain = list23[index].domain;
3438 *id = CHAR_TO_SHORT(list23[index].loop_id_l,
3439 list23[index].loop_id_h);
3440
3441 } else {
3442 struct ql_dev_id *list22 = (struct ql_dev_id *)list;
3443
3444 d_id->b.al_pa = list22[index].al_pa;
3445 d_id->b.area = list22[index].area;
3446 d_id->b.domain = list22[index].domain;
3447 *id = (uint16_t)list22[index].loop_id;
3448 }
3449 }
3450
3451 /*
3452 * ql_configure_fabric
3453 * Setup fabric context.
3454 *
3455 * Input:
3456 * ha = adapter state pointer.
3457 *
3458 * Returns:
3459 * ql local function return status code.
3460 *
3461 * Context:
3462 * Kernel context.
3463 */
3464 static int
3465 ql_configure_fabric(ql_adapter_state_t *ha)
3466 {
3467 port_id_t d_id;
3468 ql_tgt_t *tq;
3469 int rval = QL_FUNCTION_FAILED;
3470
3471 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3472
3473 ha->topology = (uint8_t)(ha->topology & ~QL_SNS_CONNECTION);
3474
3475 /* Test switch fabric controller present. */
3476 d_id.b24 = FS_FABRIC_F_PORT;
3477 tq = ql_d_id_to_queue(ha, d_id);
3478 if (tq != NULL) {
3479 /* Get port/node names of F_Port. */
3480 (void) ql_get_port_database(ha, tq, PDF_NONE);
3481
3482 d_id.b24 = FS_NAME_SERVER;
3483 tq = ql_d_id_to_queue(ha, d_id);
3484 if (tq != NULL) {
3485 (void) ql_get_port_database(ha, tq, PDF_NONE);
3486 ha->topology = (uint8_t)
3487 (ha->topology | QL_SNS_CONNECTION);
3488 rval = QL_SUCCESS;
3489 }
3490 }
3491
3492 if (rval != QL_SUCCESS) {
3493 EL(ha, "failed=%xh\n", rval);
3494 } else {
3495 /*EMPTY*/
3496 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3497 }
3498 return (rval);
3499 }
3500
3501 /*
3502 * ql_reset_chip
3503 * Reset ISP chip.
3504 *
3505 * Input:
3506 * ha = adapter block pointer.
3507 * All activity on chip must be already stopped.
3508 * ADAPTER_STATE_LOCK must be released.
3509 *
3510 * Context:
3511 * Interrupt or Kernel context, no mailbox commands allowed.
3512 */
3513 void
3514 ql_reset_chip(ql_adapter_state_t *vha)
3515 {
3516 uint32_t cnt;
3517 uint16_t cmd;
3518 ql_adapter_state_t *ha = vha->pha;
3519
3520 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3521
3522 /*
3523 	 * Accessing PCI space while not powered can cause panics
3524 	 * on some platforms (e.g., Sun Blade 1000s).
3525 */
3526 if (ha->power_level == PM_LEVEL_D3) {
3527 QL_PRINT_2(CE_CONT, "(%d): Low Power exit\n", ha->instance);
3528 return;
3529 }
3530
3531 /* Reset all outbound mailbox registers */
3532 for (cnt = 0; cnt < ha->reg_off->mbox_cnt; cnt++) {
3533 WRT16_IO_REG(ha, mailbox_in[cnt], (uint16_t)0);
3534 }
3535
3536 if (CFG_IST(ha, CFG_CTRL_8021)) {
3537 ha->timeout_cnt = 0;
3538 ql_8021_reset_chip(ha);
3539 QL_PRINT_3(CE_CONT, "(%d): 8021 exit\n", ha->instance);
3540 return;
3541 }
3542
3543 /* Disable ISP interrupts. */
3544 WRT16_IO_REG(ha, ictrl, 0);
3545 ADAPTER_STATE_LOCK(ha);
3546 ha->flags &= ~INTERRUPTS_ENABLED;
3547 ADAPTER_STATE_UNLOCK(ha);
3548
3549 if (CFG_IST(ha, CFG_CTRL_242581)) {
3550 RD32_IO_REG(ha, ictrl);
3551 ql_reset_24xx_chip(ha);
3552 QL_PRINT_3(CE_CONT, "(%d): 24xx exit\n", ha->instance);
3553 return;
3554 }
3555
3556 /*
3557 * We are going to reset the chip in case of 2300. That might cause
3558 * a PBM ERR if a DMA transaction is in progress. One way of
3559 * avoiding it is to disable Bus Master operation before we start
3560 * the reset activity.
3561 */
3562 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3563 cmd = (uint16_t)(cmd & ~PCI_COMM_ME);
3564 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3565
3566 /* Pause RISC. */
3567 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3568 for (cnt = 0; cnt < 30000; cnt++) {
3569 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3570 break;
3571 }
3572 drv_usecwait(MILLISEC);
3573 }
3574
3575 /*
3576 * A call to ql_isr() can still happen through
3577 * ql_mailbox_command(). So Mark that we are/(will-be)
3578 * running from rom code now.
3579 */
3580 TASK_DAEMON_LOCK(ha);
3581 ha->task_daemon_flags &= ~(FIRMWARE_UP | FIRMWARE_LOADED);
3582 TASK_DAEMON_UNLOCK(ha);
3583
3584 /* Select FPM registers. */
3585 WRT16_IO_REG(ha, ctrl_status, 0x20);
3586
3587 /* FPM Soft Reset. */
3588 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
3589
3590 /* Toggle FPM reset for 2300 */
3591 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3592 WRT16_IO_REG(ha, fpm_diag_config, 0);
3593 }
3594
3595 /* Select frame buffer registers. */
3596 WRT16_IO_REG(ha, ctrl_status, 0x10);
3597
3598 /* Reset frame buffer FIFOs. */
3599 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3600 WRT16_IO_REG(ha, fb_cmd, 0x00fc);
3601 /* read back fb_cmd until zero or 3 seconds max */
3602 for (cnt = 0; cnt < 300000; cnt++) {
3603 if ((RD16_IO_REG(ha, fb_cmd) & 0xff) == 0) {
3604 break;
3605 }
3606 drv_usecwait(10);
3607 }
3608 } else {
3609 WRT16_IO_REG(ha, fb_cmd, 0xa000);
3610 }
3611
3612 /* Select RISC module registers. */
3613 WRT16_IO_REG(ha, ctrl_status, 0);
3614
3615 /* Reset RISC module. */
3616 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
3617
3618 /* Reset ISP semaphore. */
3619 WRT16_IO_REG(ha, semaphore, 0);
3620
3621 /* Release RISC module. */
3622 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3623
3624 	/* Ensure mailbox registers are free. */
3625 WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
3626 WRT16_IO_REG(ha, hccr, HC_CLR_HOST_INT);
3627
3628 /* clear the mailbox command pointer. */
3629 ql_clear_mcp(ha);
3630
3631 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3632 ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3633
3634 /* Bus Master is disabled so chip reset is safe. */
3635 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
3636 WRT16_IO_REG(ha, ctrl_status, ISP_RESET);
3637 drv_usecwait(MILLISEC);
3638
3639 /* Wait for reset to finish. */
3640 for (cnt = 0; cnt < 30000; cnt++) {
3641 if ((RD16_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3642 break;
3643 }
3644 drv_usecwait(MILLISEC);
3645 }
3646 }
3647
3648 /* Wait for RISC to recover from reset. */
3649 for (cnt = 0; cnt < 30000; cnt++) {
3650 if (RD16_IO_REG(ha, mailbox_out[0]) != MBS_BUSY) {
3651 break;
3652 }
3653 drv_usecwait(MILLISEC);
3654 }
3655
3656 /* restore bus master */
3657 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
3658 cmd = (uint16_t)(cmd | PCI_COMM_ME);
3659 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);
3660
3661 /* Disable RISC pause on FPM parity error. */
3662 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
3663
3664 /* Initialize probe registers */
3665 if (CFG_IST(ha, CFG_SBUS_CARD)) {
3666 /* Pause RISC. */
3667 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
3668 for (cnt = 0; cnt < 30000; cnt++) {
3669 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
3670 break;
3671 } else {
3672 drv_usecwait(MILLISEC);
3673 }
3674 }
3675
3676 /* Select FPM registers. */
3677 WRT16_IO_REG(ha, ctrl_status, 0x30);
3678
3679 /* Set probe register */
3680 WRT16_IO_REG(ha, mailbox_in[23], 0x204c);
3681
3682 /* Select RISC module registers. */
3683 WRT16_IO_REG(ha, ctrl_status, 0);
3684
3685 /* Release RISC module. */
3686 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
3687 }
3688
3689 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3690 }
3691
3692 /*
3693 * ql_reset_24xx_chip
3694 * Reset ISP24xx chip.
3695 *
3696 * Input:
3697 * ha = adapter block pointer.
3698 * All activity on chip must be already stopped.
3699 *
3700 * Context:
3701 * Interrupt or Kernel context, no mailbox commands allowed.
3702 */
3703 void
3704 ql_reset_24xx_chip(ql_adapter_state_t *ha)
3705 {
3706 uint32_t timer, stat;
3707
3708 /* Shutdown DMA. */
3709 WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN | MWB_4096_BYTES);
3710
3711 /* Wait for DMA to stop. */
3712 for (timer = 0; timer < 30000; timer++) {
3713 if ((RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE) == 0) {
3714 break;
3715 }
3716 drv_usecwait(100);
3717 }
3718
3719 /* Stop the firmware. */
3720 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3721 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
3722 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
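	/*
	 * risc2host BIT_15 indicates a pending RISC interrupt; a status value
	 * below 0x12 in the low byte is taken here to mean a (ROM) mailbox
	 * completion, i.e. the stop-firmware request has been accepted.
	 */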
3723 for (timer = 0; timer < 30000; timer++) {
3724 stat = RD32_IO_REG(ha, risc2host);
3725 if (stat & BIT_15) {
3726 if ((stat & 0xff) < 0x12) {
3727 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3728 break;
3729 }
3730 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3731 }
3732 drv_usecwait(100);
3733 }
3734
3735 /* Reset the chip. */
3736 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
3737 MWB_4096_BYTES);
3738 drv_usecwait(100);
3739
3740 /* Wait for idle status from ROM firmware. */
3741 for (timer = 0; timer < 30000; timer++) {
3742 if (RD16_IO_REG(ha, mailbox_out[0]) == 0) {
3743 break;
3744 }
3745 drv_usecwait(100);
3746 }
3747
3748 /* Wait for reset to finish. */
3749 for (timer = 0; timer < 30000; timer++) {
3750 if ((RD32_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
3751 break;
3752 }
3753 drv_usecwait(100);
3754 }
3755
3756 /* clear the mailbox command pointer. */
3757 ql_clear_mcp(ha);
3758
3759 	/* Ensure mailbox registers are free. */
3760 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
3761 ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));
3762
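	/* Restart the MPI firmware if an MPI reset was requested. */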
3763 if (ha->flags & MPI_RESET_NEEDED) {
3764 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3765 WRT16_IO_REG(ha, mailbox_in[0], MBC_RESTART_MPI);
3766 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
3767 for (timer = 0; timer < 30000; timer++) {
3768 stat = RD32_IO_REG(ha, risc2host);
3769 if (stat & BIT_15) {
3770 if ((stat & 0xff) < 0x12) {
3771 WRT32_IO_REG(ha, hccr,
3772 HC24_CLR_RISC_INT);
3773 break;
3774 }
3775 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
3776 }
3777 drv_usecwait(100);
3778 }
3779 ADAPTER_STATE_LOCK(ha);
3780 ha->flags &= ~MPI_RESET_NEEDED;
3781 ADAPTER_STATE_UNLOCK(ha);
3782 }
3783
3784 /*
3785 * Set flash write-protection.
3786 */
3787 if ((ha->flags & ONLINE) == 0) {
3788 ql_24xx_protect_flash(ha);
3789 }
3790 }
3791
3792 /*
3793 * ql_clear_mcp
3794 * Carefully clear the mailbox command pointer in the ha struct.
3795 *
3796 * Input:
3797 * ha = adapter block pointer.
3798 *
3799 * Context:
3800 * Interrupt or Kernel context, no mailbox commands allowed.
3801 */
3802
3803 static void
3804 ql_clear_mcp(ql_adapter_state_t *ha)
3805 {
3806 uint32_t cnt;
3807
3808 /* Don't null ha->mcp without the lock, but don't hang either. */
3809 if (MBX_REGISTER_LOCK_OWNER(ha) == curthread) {
3810 ha->mcp = NULL;
3811 } else {
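		/* Poll for the lock for up to 300000 * 10us, i.e. about 3 seconds. */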
3812 for (cnt = 0; cnt < 300000; cnt++) {
3813 if (TRY_MBX_REGISTER_LOCK(ha) != 0) {
3814 ha->mcp = NULL;
3815 MBX_REGISTER_UNLOCK(ha);
3816 break;
3817 } else {
3818 drv_usecwait(10);
3819 }
3820 }
3821 }
3822 }
3823
3824
3825 /*
3826 * ql_abort_isp
3827 * Resets ISP and aborts all outstanding commands.
3828 *
3829 * Input:
3830 * ha = adapter state pointer.
3831 * DEVICE_QUEUE_LOCK must be released.
3832 *
3833 * Returns:
3834 * ql local function return status code.
3835 *
3836 * Context:
3837 * Kernel context.
3838 */
3839 int
3840 ql_abort_isp(ql_adapter_state_t *vha)
3841 {
3842 ql_link_t *link, *link2;
3843 ddi_devstate_t state;
3844 uint16_t index;
3845 ql_tgt_t *tq;
3846 ql_lun_t *lq;
3847 ql_srb_t *sp;
3848 int rval = QL_SUCCESS;
3849 ql_adapter_state_t *ha = vha->pha;
3850
3851 QL_PRINT_2(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
3852
3853 TASK_DAEMON_LOCK(ha);
3854 ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
3855 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE ||
3856 (ha->flags & ONLINE) == 0 || ha->flags & ADAPTER_SUSPENDED) {
3857 TASK_DAEMON_UNLOCK(ha);
3858 return (rval);
3859 }
3860
3861 ha->task_daemon_flags |= ABORT_ISP_ACTIVE;
3862 ha->task_daemon_flags &= ~(RESET_MARKER_NEEDED | FIRMWARE_UP |
3863 FIRMWARE_LOADED);
3864 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3865 vha->task_daemon_flags |= LOOP_DOWN;
3866 vha->task_daemon_flags &= ~(COMMAND_WAIT_NEEDED |
3867 LOOP_RESYNC_NEEDED);
3868 }
3869
3870 TASK_DAEMON_UNLOCK(ha);
3871
3872 if (ha->mailbox_flags & MBX_BUSY_FLG) {
3873 /* Acquire mailbox register lock. */
3874 MBX_REGISTER_LOCK(ha);
3875
3876 		/* Wake up the mailbox routine. */
3877 ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_ABORT);
3878 cv_broadcast(&ha->cv_mbx_intr);
3879
3880 /* Release mailbox register lock. */
3881 MBX_REGISTER_UNLOCK(ha);
3882
3883 /* Wait for mailbox. */
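		/* Bounded at 100 iterations of 50ms, i.e. about 5 seconds. */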
3884 for (index = 100; index &&
3885 ha->mailbox_flags & MBX_ABORT; index--) {
3886 drv_usecwait(50000);
3887 }
3888 }
3889
3890 /* Wait for commands to end gracefully if not in panic. */
3891 if (ha->flags & PARITY_ERROR) {
3892 ADAPTER_STATE_LOCK(ha);
3893 ha->flags &= ~PARITY_ERROR;
3894 ADAPTER_STATE_UNLOCK(ha);
3895 } else if (ddi_in_panic() == 0) {
3896 ql_cmd_wait(ha);
3897 }
3898
3899 /* Shutdown IP. */
3900 if (ha->flags & IP_INITIALIZED) {
3901 (void) ql_shutdown_ip(ha);
3902 }
3903
3904 /* Reset the chip. */
3905 ql_reset_chip(ha);
3906
3907 /*
3908 * Even though we have waited for outstanding commands to complete,
3909 * except for ones marked SRB_COMMAND_TIMEOUT, and reset the ISP,
3910 * there could still be an interrupt thread active. The interrupt
3911 * lock will prevent us from getting an sp from the outstanding
3912 * cmds array that the ISR may be using.
3913 */
3914
3915 /* Place all commands in outstanding cmd list on device queue. */
3916 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
3917 REQUEST_RING_LOCK(ha);
3918 INTR_LOCK(ha);
3919 if ((link = ha->pending_cmds.first) != NULL) {
3920 sp = link->base_address;
3921 ql_remove_link(&ha->pending_cmds, &sp->cmd);
3922
3923 REQUEST_RING_UNLOCK(ha);
3924 index = 0;
3925 } else {
3926 REQUEST_RING_UNLOCK(ha);
3927 if ((sp = ha->outstanding_cmds[index]) == NULL) {
3928 INTR_UNLOCK(ha);
3929 continue;
3930 }
3931 }
3932
3933 /*
3934 		 * It is not obvious, but the index for commands pulled from the
3935 		 * pending list will be zero, and that entry in the outstanding array
3936 		 * is never used, so nulling it is harmless ("no harm, no foul").
3937 */
3938
3939 ha->outstanding_cmds[index] = NULL;
3940 sp->handle = 0;
3941 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
3942
3943 INTR_UNLOCK(ha);
3944
3945 		/* If the command timed out. */
3946 if (sp->flags & SRB_COMMAND_TIMEOUT) {
3947 sp->pkt->pkt_reason = CS_TIMEOUT;
3948 sp->flags &= ~SRB_RETRY;
3949 sp->flags |= SRB_ISP_COMPLETED;
3950
3951 /* Call done routine to handle completion. */
3952 ql_done(&sp->cmd);
3953 continue;
3954 }
3955
3956 /* Acquire target queue lock. */
3957 lq = sp->lun_queue;
3958 tq = lq->target_queue;
3959 DEVICE_QUEUE_LOCK(tq);
3960
3961 /* Reset watchdog time. */
3962 sp->wdg_q_time = sp->init_wdg_q_time;
3963
3964 /* Place request back on top of device queue. */
3965 sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED |
3966 SRB_RETRY);
3967
3968 ql_add_link_t(&lq->cmd, &sp->cmd);
3969 sp->flags |= SRB_IN_DEVICE_QUEUE;
3970
3971 /* Release target queue lock. */
3972 DEVICE_QUEUE_UNLOCK(tq);
3973 }
3974
3975 /*
3976 * Clear per LUN active count, because there should not be
3977 * any IO outstanding at this time.
3978 */
3979 for (vha = ha; vha != NULL; vha = vha->vp_next) {
3980 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
3981 link = vha->dev[index].first;
3982 while (link != NULL) {
3983 tq = link->base_address;
3984 link = link->next;
3985 DEVICE_QUEUE_LOCK(tq);
3986 tq->outcnt = 0;
3987 tq->flags &= ~TQF_QUEUE_SUSPENDED;
3988 for (link2 = tq->lun_queues.first;
3989 link2 != NULL; link2 = link2->next) {
3990 lq = link2->base_address;
3991 lq->lun_outcnt = 0;
3992 lq->flags &= ~LQF_UNTAGGED_PENDING;
3993 }
3994 DEVICE_QUEUE_UNLOCK(tq);
3995 }
3996 }
3997 }
3998
3999 if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
4000 if ((rval = ql_chip_diag(ha)) == QL_SUCCESS) {
4001 rval = ql_load_isp_firmware(ha);
4002 }
4003 }
4004
4005 if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
4006 QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS &&
4007 (rval = ql_fw_ready(ha, 10)) == QL_SUCCESS) {
4008
4009 		/* Clear any reset/abort flags that may have been set. */
4010 TASK_DAEMON_LOCK(ha);
4011 ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED |
4012 ABORT_ISP_ACTIVE);
4013 TASK_DAEMON_UNLOCK(ha);
4014
4015 /* Enable ISP interrupts. */
4016 if (CFG_IST(ha, CFG_CTRL_8021)) {
4017 ql_8021_enable_intrs(ha);
4018 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
4019 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
4020 } else {
4021 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
4022 }
4023
4024 ADAPTER_STATE_LOCK(ha);
4025 ha->flags |= INTERRUPTS_ENABLED;
4026 ADAPTER_STATE_UNLOCK(ha);
4027
4028 /* Set loop online, if it really is. */
4029 ql_loop_online(ha);
4030
4031 state = ddi_get_devstate(ha->dip);
4032 if (state != DDI_DEVSTATE_UP) {
4033 /*EMPTY*/
4034 ddi_dev_report_fault(ha->dip, DDI_SERVICE_RESTORED,
4035 DDI_DEVICE_FAULT, "Device reset succeeded");
4036 }
4037 } else {
4038 /* Enable ISP interrupts. */
4039 if (CFG_IST(ha, CFG_CTRL_8021)) {
4040 ql_8021_enable_intrs(ha);
4041 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
4042 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
4043 } else {
4044 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
4045 }
4046
4047 ADAPTER_STATE_LOCK(ha);
4048 ha->flags |= INTERRUPTS_ENABLED;
4049 ADAPTER_STATE_UNLOCK(ha);
4050
4051 TASK_DAEMON_LOCK(ha);
4052 ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE);
4053 ha->task_daemon_flags |= LOOP_DOWN;
4054 TASK_DAEMON_UNLOCK(ha);
4055
4056 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
4057 }
4058
4059 if (rval != QL_SUCCESS) {
4060 EL(ha, "failed, rval = %xh\n", rval);
4061 } else {
4062 /*EMPTY*/
4063 QL_PRINT_2(CE_CONT, "(%d): done\n", ha->instance);
4064 }
4065 return (rval);
4066 }
4067
4068 /*
4069 * ql_vport_control
4070 * Issue Virtual Port Control command.
4071 *
4072 * Input:
4073 * ha = virtual adapter state pointer.
4074 * cmd = control command.
4075 *
4076 * Returns:
4077 * ql local function return status code.
4078 *
4079 * Context:
4080 * Kernel context.
4081 */
4082 int
4083 ql_vport_control(ql_adapter_state_t *ha, uint8_t cmd)
4084 {
4085 ql_mbx_iocb_t *pkt;
4086 uint8_t bit;
4087 int rval;
4088 uint32_t pkt_size;
4089
4090 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4091
4092 if (ha->vp_index != 0) {
4093 pkt_size = sizeof (ql_mbx_iocb_t);
4094 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
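		/* kmem_zalloc() with KM_SLEEP does not return NULL; the check below is purely defensive. */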
4095 if (pkt == NULL) {
4096 EL(ha, "failed, kmem_zalloc\n");
4097 return (QL_MEMORY_ALLOC_FAILED);
4098 }
4099
4100 pkt->vpc.entry_type = VP_CONTROL_TYPE;
4101 pkt->vpc.entry_count = 1;
4102 pkt->vpc.command = cmd;
4103 pkt->vpc.vp_count = 1;
4104 bit = (uint8_t)(ha->vp_index - 1);
4105 pkt->vpc.vp_index[bit / 8] = (uint8_t)
4106 (pkt->vpc.vp_index[bit / 8] | BIT_0 << bit % 8);
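		/*
		 * Worked example: vp_index 10 gives bit = 9, so byte 1 of
		 * the vp_index bitmap is OR'ed with BIT_0 << 1 (0x02).
		 */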
4107
4108 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4109 if (rval == QL_SUCCESS && pkt->vpc.status != 0) {
4110 rval = QL_COMMAND_ERROR;
4111 }
4112
4113 kmem_free(pkt, pkt_size);
4114 } else {
4115 rval = QL_SUCCESS;
4116 }
4117
4118 if (rval != QL_SUCCESS) {
4119 EL(ha, "failed, rval = %xh\n", rval);
4120 } else {
4121 /*EMPTY*/
4122 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4123 ha->vp_index);
4124 }
4125 return (rval);
4126 }
4127
4128 /*
4129 * ql_vport_modify
4130  *	Issue Modify Virtual Port command.
4131 *
4132 * Input:
4133 * ha = virtual adapter state pointer.
4134 * cmd = command.
4135 * opt = option.
4136  *
 * Returns:
 *	ql local function return status code.
 *
4137 * Context:
4138 * Interrupt or Kernel context, no mailbox commands allowed.
4139 */
4140 int
4141 ql_vport_modify(ql_adapter_state_t *ha, uint8_t cmd, uint8_t opt)
4142 {
4143 ql_mbx_iocb_t *pkt;
4144 int rval;
4145 uint32_t pkt_size;
4146
4147 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4148
4149 pkt_size = sizeof (ql_mbx_iocb_t);
4150 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4151 if (pkt == NULL) {
4152 EL(ha, "failed, kmem_zalloc\n");
4153 return (QL_MEMORY_ALLOC_FAILED);
4154 }
4155
4156 pkt->vpm.entry_type = VP_MODIFY_TYPE;
4157 pkt->vpm.entry_count = 1;
4158 pkt->vpm.command = cmd;
4159 pkt->vpm.vp_count = 1;
4160 pkt->vpm.first_vp_index = ha->vp_index;
4161 pkt->vpm.first_options = opt;
4162 bcopy(ha->loginparams.nport_ww_name.raw_wwn, pkt->vpm.first_port_name,
4163 8);
4164 bcopy(ha->loginparams.node_ww_name.raw_wwn, pkt->vpm.first_node_name,
4165 8);
4166
4167 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
4168 if (rval == QL_SUCCESS && pkt->vpm.status != 0) {
4169 EL(ha, "failed, ql_issue_mbx_iocb=%xh, status=%xh\n", rval,
4170 pkt->vpm.status);
4171 rval = QL_COMMAND_ERROR;
4172 }
4173
4174 kmem_free(pkt, pkt_size);
4175
4176 if (rval != QL_SUCCESS) {
4177 EL(ha, "failed, rval = %xh\n", rval);
4178 } else {
4179 /*EMPTY*/
4180 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
4181 ha->vp_index);
4182 }
4183 return (rval);
4184 }
4185
4186 /*
4187 * ql_vport_enable
4188 * Enable virtual port.
4189 *
4190 * Input:
4191 * ha = virtual adapter state pointer.
4192  *
 * Returns:
 *	ql local function return status code.
 *
4193 * Context:
4194 * Kernel context.
4195 */
4196 int
4197 ql_vport_enable(ql_adapter_state_t *ha)
4198 {
4199 int timer;
4200
4201 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4202
4203 ha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4204 TASK_DAEMON_LOCK(ha);
4205 ha->task_daemon_flags |= LOOP_DOWN;
4206 ha->task_daemon_flags &= ~(FC_STATE_CHANGE | STATE_ONLINE);
4207 TASK_DAEMON_UNLOCK(ha);
4208
4209 ADAPTER_STATE_LOCK(ha);
4210 ha->flags |= VP_ENABLED;
4211 ADAPTER_STATE_UNLOCK(ha);
4212
4213 if (ql_vport_modify(ha, VPM_MODIFY_ENABLE, VPO_TARGET_MODE_DISABLED |
4214 VPO_INITIATOR_MODE_ENABLED | VPO_ENABLED) != QL_SUCCESS) {
4215 QL_PRINT_2(CE_CONT, "(%d): failed to enable virtual port=%d\n",
4216 ha->instance, ha->vp_index);
4217 return (QL_FUNCTION_FAILED);
4218 }
4219 if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
4220 /* Wait for loop to come up. */
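		/* delay(1) waits one clock tick; 3000 ticks is roughly 30 seconds at the default 100Hz clock. */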
4221 for (timer = 0; timer < 3000 &&
4222 !(ha->task_daemon_flags & STATE_ONLINE);
4223 timer++) {
4224 delay(1);
4225 }
4226 }
4227
4228 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4229
4230 return (QL_SUCCESS);
4231 }
4232
4233 /*
4234 * ql_vport_create
4235 * Create virtual port context.
4236 *
4237 * Input:
4238 * ha: parent adapter state pointer.
4239 * index: virtual port index number.
4240 *
4241 * Context:
4242 * Kernel context.
4243 */
4244 ql_adapter_state_t *
4245 ql_vport_create(ql_adapter_state_t *ha, uint8_t index)
4246 {
4247 ql_adapter_state_t *vha;
4248
4249 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4250
4251 	/* Inherit the parent's data. */
4252 vha = kmem_alloc(sizeof (ql_adapter_state_t), KM_SLEEP);
4253
4254 ADAPTER_STATE_LOCK(ha);
4255 bcopy(ha, vha, sizeof (ql_adapter_state_t));
4256 vha->pi_attrs = NULL;
4257 vha->ub_outcnt = 0;
4258 vha->ub_allocated = 0;
4259 vha->flags = 0;
4260 vha->task_daemon_flags = 0;
4261 ha->vp_next = vha;
4262 vha->pha = ha;
4263 vha->vp_index = index;
4264 ADAPTER_STATE_UNLOCK(ha);
4265
4266 vha->hba.next = NULL;
4267 vha->hba.prev = NULL;
4268 vha->hba.base_address = vha;
4269 vha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
4270 vha->dev = kmem_zalloc(sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE,
4271 KM_SLEEP);
4272 vha->ub_array = kmem_zalloc(sizeof (*vha->ub_array) * QL_UB_LIMIT,
4273 KM_SLEEP);
4274
4275 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4276
4277 return (vha);
4278 }
4279
4280 /*
4281 * ql_vport_destroy
4282 * Destroys virtual port context.
4283 *
4284 * Input:
4285 * ha = virtual adapter state pointer.
4286 *
4287 * Context:
4288 * Kernel context.
4289 */
4290 void
4291 ql_vport_destroy(ql_adapter_state_t *ha)
4292 {
4293 ql_adapter_state_t *vha;
4294
4295 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
4296
4297 /* Remove port from list. */
4298 ADAPTER_STATE_LOCK(ha);
4299 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
4300 if (vha->vp_next == ha) {
4301 vha->vp_next = ha->vp_next;
4302 break;
4303 }
4304 }
4305 ADAPTER_STATE_UNLOCK(ha);
4306
4307 if (ha->ub_array != NULL) {
4308 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
4309 }
4310 if (ha->dev != NULL) {
4311 kmem_free(ha->dev, sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE);
4312 }
4313 kmem_free(ha, sizeof (ql_adapter_state_t));
4314
4315 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
4316 }
4317
4318 /*
4319 * ql_mps_reset
4320 * Reset MPS for FCoE functions.
4321 *
4322 * Input:
4323 * ha = virtual adapter state pointer.
4324 *
4325 * Context:
4326 * Kernel context.
4327 */
4328 static void
4329 ql_mps_reset(ql_adapter_state_t *ha)
4330 {
4331 uint32_t data, dctl = 1000;
4332
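	/*
	 * The sequence below appears to acquire a semaphore at RISC RAM
	 * address 0x7c00 (write 1, poll for BIT_0), mirror the PCIe Device
	 * Control max payload bits (mask 0xe0) from config space offset 0x54
	 * into RISC RAM 0x7A15, and then release the semaphore by writing 0
	 * back to 0x7c00.
	 */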
4333 do {
4334 if (dctl-- == 0 || ql_wrt_risc_ram_word(ha, 0x7c00, 1) !=
4335 QL_SUCCESS) {
4336 return;
4337 }
4338 if (ql_rd_risc_ram_word(ha, 0x7c00, &data) != QL_SUCCESS) {
4339 (void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4340 return;
4341 }
4342 } while (!(data & BIT_0));
4343
4344 if (ql_rd_risc_ram_word(ha, 0x7A15, &data) == QL_SUCCESS) {
4345 dctl = (uint16_t)ql_pci_config_get16(ha, 0x54);
4346 if ((data & 0xe0) != (dctl & 0xe0)) {
4347 data &= 0xff1f;
4348 data |= dctl & 0xe0;
4349 (void) ql_wrt_risc_ram_word(ha, 0x7A15, data);
4350 }
4351 }
4352 (void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
4353 }
4354