1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org>
5 * Copyright (c) 1997-2009 by Matthew Jacob
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*
31 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
32 */
33 #include <sys/cdefs.h>
34 #include <dev/isp/isp_freebsd.h>
35 #include <sys/unistd.h>
36 #include <sys/kthread.h>
37 #include <sys/conf.h>
38 #include <sys/module.h>
39 #include <sys/ioccom.h>
40 #include <dev/isp/isp_ioctl.h>
41 #include <sys/devicestat.h>
42 #include <cam/cam_periph.h>
43 #include <cam/cam_xpt_periph.h>
44
45 MODULE_VERSION(isp, 1);
46 MODULE_DEPEND(isp, cam, 1, 1, 1);
47 int isp_announced = 0;
48 int isp_loop_down_limit = 60; /* default loop down limit */
49 int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */
50 int isp_gone_device_time = 30; /* grace time before reporting device lost */
51 static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s";
52
53 static void isp_freeze_loopdown(ispsoftc_t *, int);
54 static void isp_loop_changed(ispsoftc_t *isp, int chan);
55 static void isp_rq_check_above(ispsoftc_t *);
56 static void isp_rq_check_below(ispsoftc_t *);
57 static d_ioctl_t ispioctl;
58 static void isp_poll(struct cam_sim *);
59 static callout_func_t isp_watchdog;
60 static callout_func_t isp_gdt;
61 static task_fn_t isp_gdt_task;
62 static void isp_kthread(void *);
63 static void isp_action(struct cam_sim *, union ccb *);
64 static int isp_timer_count;
65 static void isp_timer(void *);
66
67 static struct cdevsw isp_cdevsw = {
68 .d_version = D_VERSION,
69 .d_ioctl = ispioctl,
70 .d_name = "isp",
71 };
72
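/*
 * Sysctl handler for the per-channel "role" knob.  Userland may change
 * only the initiator role bit here; the current target-mode bit is
 * preserved, and ISPCTL_CHANGE_ROLE is issued only when the effective
 * role actually changes.  (Illustrative, unverified usage: something
 * like `sysctl dev.isp.0.role=<n>` would exercise this path for the
 * first channel.)
 */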
73 static int
74 isp_role_sysctl(SYSCTL_HANDLER_ARGS)
75 {
76 ispsoftc_t *isp = (ispsoftc_t *)arg1;
77 int chan = arg2;
78 int error, old, value;
79
80 value = FCPARAM(isp, chan)->role;
81
82 error = sysctl_handle_int(oidp, &value, 0, req);
83 if ((error != 0) || (req->newptr == NULL))
84 return (error);
85
86 if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH)
87 return (EINVAL);
88
89 ISP_LOCK(isp);
90 old = FCPARAM(isp, chan)->role;
91
92 /* We don't allow target mode switch from here. */
93 value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR);
94
95 /* If nothing has changed -- we are done. */
96 if (value == old) {
97 ISP_UNLOCK(isp);
98 return (0);
99 }
100
101 /* Actually change the role. */
102 error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value);
103 ISP_UNLOCK(isp);
104 return (error);
105 }
106
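/*
 * Per-channel CAM attachment: allocate a SIM on the shared device queue,
 * register the bus, create a wildcard path, initialize the gone-device
 * timer and (in target mode) the ATIO/INOT pools, spawn the channel
 * kthread, and publish the per-channel sysctl nodes.
 */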
107 static int
108 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
109 {
110 fcparam *fcp = FCPARAM(isp, chan);
111 struct isp_fc *fc = ISP_FC_PC(isp, chan);
112 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev);
113 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev);
114 char name[16];
115 struct cam_sim *sim;
116 struct cam_path *path;
117 #ifdef ISP_TARGET_MODE
118 int i;
119 #endif
120
121 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
122 device_get_unit(isp->isp_dev), &isp->isp_lock,
123 isp->isp_maxcmds, isp->isp_maxcmds, devq);
124 if (sim == NULL)
125 return (ENOMEM);
126
127 if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) {
128 cam_sim_free(sim, FALSE);
129 return (EIO);
130 }
131 if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
132 xpt_bus_deregister(cam_sim_path(sim));
133 cam_sim_free(sim, FALSE);
134 return (ENXIO);
135 }
136
137 ISP_LOCK(isp);
138 fc->sim = sim;
139 fc->path = path;
140 fc->isp = isp;
141 fc->ready = 1;
142 fcp->isp_use_gft_id = 1;
143 fcp->isp_use_gff_id = 1;
144
145 callout_init_mtx(&fc->gdt, &isp->isp_lock, 0);
146 TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc);
147 #ifdef ISP_TARGET_MODE
148 TAILQ_INIT(&fc->waitq);
149 STAILQ_INIT(&fc->ntfree);
150 for (i = 0; i < ATPDPSIZE; i++)
151 STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next);
152 LIST_INIT(&fc->atfree);
153 for (i = ATPDPSIZE-1; i >= 0; i--)
154 LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next);
155 for (i = 0; i < ATPDPHASHSIZE; i++)
156 LIST_INIT(&fc->atused[i]);
157 #endif
158 isp_loop_changed(isp, chan);
159 ISP_UNLOCK(isp);
160 if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0,
161 "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
162 xpt_free_path(fc->path);
163 xpt_bus_deregister(cam_sim_path(fc->sim));
164 cam_sim_free(fc->sim, FALSE);
165 return (ENOMEM);
166 }
167 fc->num_threads += 1;
168 if (chan > 0) {
169 snprintf(name, sizeof(name), "chan%d", chan);
170 tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree),
171 OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
172 "Virtual channel");
173 }
174 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
175 "wwnn", CTLFLAG_RD, &fcp->isp_wwnn,
176 "World Wide Node Name");
177 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
178 "wwpn", CTLFLAG_RD, &fcp->isp_wwpn,
179 "World Wide Port Name");
180 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
181 "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0,
182 "Loop Down Limit");
183 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
184 "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0,
185 "Gone Device Time");
186 #if defined(ISP_TARGET_MODE) && defined(DEBUG)
187 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
188 "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0,
189 "Cause a Lost Frame on a Read");
190 #endif
191 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
192 "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
193 isp, chan, isp_role_sysctl, "I", "Current role");
194 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
195 "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0,
196 "Connection speed in gigabits");
197 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
198 "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0,
199 "Link state");
200 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
201 "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0,
202 "Firmware state");
203 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
204 "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0,
205 "Loop state");
206 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
207 "topo", CTLFLAG_RD, &fcp->isp_topo, 0,
208 "Connection topology");
209 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
210 "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0,
211 "Use GFT_ID during fabric scan");
212 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
213 "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0,
214 "Use GFF_ID during fabric scan");
215 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
216 "fw_version_flash", CTLFLAG_RD, fcp->fw_version_flash, 0,
217 "Firmware version in (active) flash region");
218 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
219 "fw_version_ispfw", CTLFLAG_RD, fcp->fw_version_ispfw, 0,
220 "Firmware version loaded from ispfw(4)");
221 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
222 "fw_version_run", CTLFLAG_RD, fcp->fw_version_run, 0,
223 "Firmware version currently running");
224 return (0);
225 }
226
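/*
 * Per-channel teardown: free the CAM path, deregister the bus, free the
 * SIM, and then reap the channel's kthread(s) once they notice that the
 * driver is exiting.
 */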
227 static void
228 isp_detach_chan(ispsoftc_t *isp, int chan)
229 {
230 struct isp_fc *fc = ISP_FC_PC(isp, chan);
231
232 xpt_free_path(fc->path);
233 xpt_bus_deregister(cam_sim_path(fc->sim));
234 cam_sim_free(fc->sim, FALSE);
235
236 /* Wait for the channel's spawned threads to exit. */
237 wakeup(fc);
238 while (fc->num_threads != 0)
239 mtx_sleep(&fc->num_threads, &isp->isp_lock, PRIBIO, "isp_reap", 0);
240 }
241
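/*
 * Top-level attach: create the shared CAM device queue, attach every
 * channel, arm the periodic driver timer, and create the /dev/isp<unit>
 * control device serviced by ispioctl().  Any per-channel failure
 * unwinds the channels attached so far.
 */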
242 int
243 isp_attach(ispsoftc_t *isp)
244 {
245 const char *nu = device_get_nameunit(isp->isp_osinfo.dev);
246 int du = device_get_unit(isp->isp_dev);
247 int chan;
248
249 /*
250 * Create the device queue for our SIM(s).
251 */
252 isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds);
253 if (isp->isp_osinfo.devq == NULL) {
254 return (EIO);
255 }
256
257 for (chan = 0; chan < isp->isp_nchan; chan++) {
258 if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) {
259 goto unwind;
260 }
261 }
262
263 callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0);
264 isp_timer_count = hz >> 2;
265 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
266
267 isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu);
268 if (isp->isp_osinfo.cdev) {
269 isp->isp_osinfo.cdev->si_drv1 = isp;
270 }
271 return (0);
272
273 unwind:
274 ISP_LOCK(isp);
275 isp->isp_osinfo.is_exiting = 1;
276 while (--chan >= 0)
277 isp_detach_chan(isp, chan);
278 ISP_UNLOCK(isp);
279 cam_simq_free(isp->isp_osinfo.devq);
280 isp->isp_osinfo.devq = NULL;
281 return (-1);
282 }
283
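/*
 * Top-level detach: destroy the control device, tell the spawned threads
 * to exit, tear the channels down in reverse order, then drain the timer
 * and release the shared device queue.
 */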
284 int
285 isp_detach(ispsoftc_t *isp)
286 {
287 int chan;
288
289 if (isp->isp_osinfo.cdev) {
290 destroy_dev(isp->isp_osinfo.cdev);
291 isp->isp_osinfo.cdev = NULL;
292 }
293 ISP_LOCK(isp);
294 /* Tell spawned threads that we're exiting. */
295 isp->isp_osinfo.is_exiting = 1;
296 for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1)
297 isp_detach_chan(isp, chan);
298 ISP_UNLOCK(isp);
299 callout_drain(&isp->isp_osinfo.tmo);
300 cam_simq_free(isp->isp_osinfo.devq);
301 return (0);
302 }
303
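/*
 * While the FC link/loop is down the channel's SIM queue is frozen (and
 * boot is held) so no new commands are started; isp_unfreeze_loopdown()
 * releases it again once the loopdown condition clears.
 */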
304 static void
305 isp_freeze_loopdown(ispsoftc_t *isp, int chan)
306 {
307 struct isp_fc *fc = ISP_FC_PC(isp, chan);
308
309 if (fc->sim == NULL)
310 return;
311 if (fc->simqfrozen == 0) {
312 isp_prt(isp, ISP_LOGDEBUG0,
313 "Chan %d Freeze simq (loopdown)", chan);
314 fc->simqfrozen = SIMQFRZ_LOOPDOWN;
315 xpt_hold_boot();
316 xpt_freeze_simq(fc->sim, 1);
317 } else {
318 isp_prt(isp, ISP_LOGDEBUG0,
319 "Chan %d Mark simq frozen (loopdown)", chan);
320 fc->simqfrozen |= SIMQFRZ_LOOPDOWN;
321 }
322 }
323
324 static void
325 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan)
326 {
327 struct isp_fc *fc = ISP_FC_PC(isp, chan);
328
329 if (fc->sim == NULL)
330 return;
331 int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN;
332 fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN;
333 if (wasfrozen && fc->simqfrozen == 0) {
334 isp_prt(isp, ISP_LOGDEBUG0,
335 "Chan %d Release simq", chan);
336 xpt_release_simq(fc->sim, 1);
337 xpt_release_boot();
338 }
339 }
340
341 /*
342 * Functions to protect from request queue overflow by freezing SIM queue.
343 * XXX: freezing only one arbitrary SIM, since they all share the queue.
344 */
345 static void
346 isp_rq_check_above(ispsoftc_t *isp)
347 {
348 struct isp_fc *fc = ISP_FC_PC(isp, 0);
349
350 if (isp->isp_rqovf || fc->sim == NULL)
351 return;
352 if (!isp_rqentry_avail(isp, QENTRY_MAX)) {
353 xpt_freeze_simq(fc->sim, 1);
354 isp->isp_rqovf = 1;
355 }
356 }
357
358 static void
359 isp_rq_check_below(ispsoftc_t *isp)
360 {
361 struct isp_fc *fc = ISP_FC_PC(isp, 0);
362
363 if (!isp->isp_rqovf || fc->sim == NULL)
364 return;
365 if (isp_rqentry_avail(isp, QENTRY_MAX)) {
366 xpt_release_simq(fc->sim, 0);
367 isp->isp_rqovf = 0;
368 }
369 }
370
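/*
 * ioctl handler for the /dev/isp<unit> control node: debug level, role
 * get/set, HBA reset, rescan, LIP, port database and HBA info queries,
 * and SCSI task management functions built as 24xx TMF IOCBs.
 */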
371 static int
372 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
373 {
374 ispsoftc_t *isp;
375 int nr, chan, retval = ENOTTY;
376
377 isp = dev->si_drv1;
378
379 switch (c) {
380 case ISP_SDBLEV:
381 {
382 int olddblev = isp->isp_dblev;
383 isp->isp_dblev = *(int *)addr;
384 *(int *)addr = olddblev;
385 retval = 0;
386 break;
387 }
388 case ISP_GETROLE:
389 chan = *(int *)addr;
390 if (chan < 0 || chan >= isp->isp_nchan) {
391 retval = -ENXIO;
392 break;
393 }
394 *(int *)addr = FCPARAM(isp, chan)->role;
395 retval = 0;
396 break;
397 case ISP_SETROLE:
398 nr = *(int *)addr;
399 chan = nr >> 8;
400 if (chan < 0 || chan >= isp->isp_nchan) {
401 retval = -ENXIO;
402 break;
403 }
404 nr &= 0xff;
405 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
406 retval = EINVAL;
407 break;
408 }
409 ISP_LOCK(isp);
410 *(int *)addr = FCPARAM(isp, chan)->role;
411 retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr);
412 ISP_UNLOCK(isp);
413 break;
414
415 case ISP_RESETHBA:
416 ISP_LOCK(isp);
417 isp_reinit(isp, 0);
418 ISP_UNLOCK(isp);
419 retval = 0;
420 break;
421
422 case ISP_RESCAN:
423 chan = *(intptr_t *)addr;
424 if (chan < 0 || chan >= isp->isp_nchan) {
425 retval = -ENXIO;
426 break;
427 }
428 ISP_LOCK(isp);
429 if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) {
430 retval = EIO;
431 } else {
432 retval = 0;
433 }
434 ISP_UNLOCK(isp);
435 break;
436
437 case ISP_FC_LIP:
438 chan = *(intptr_t *)addr;
439 if (chan < 0 || chan >= isp->isp_nchan) {
440 retval = -ENXIO;
441 break;
442 }
443 ISP_LOCK(isp);
444 if (isp_control(isp, ISPCTL_SEND_LIP, chan)) {
445 retval = EIO;
446 } else {
447 retval = 0;
448 }
449 ISP_UNLOCK(isp);
450 break;
451 case ISP_FC_GETDINFO:
452 {
453 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
454 fcportdb_t *lp;
455
456 if (ifc->loopid >= MAX_FC_TARG) {
457 retval = EINVAL;
458 break;
459 }
460 lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
461 if (lp->state != FC_PORTDB_STATE_NIL) {
462 ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
463 ifc->loopid = lp->handle;
464 ifc->portid = lp->portid;
465 ifc->node_wwn = lp->node_wwn;
466 ifc->port_wwn = lp->port_wwn;
467 retval = 0;
468 } else {
469 retval = ENODEV;
470 }
471 break;
472 }
473 case ISP_FC_GETHINFO:
474 {
475 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
476 int chan = hba->fc_channel;
477
478 if (chan < 0 || chan >= isp->isp_nchan) {
479 retval = ENXIO;
480 break;
481 }
482 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
483 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
484 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
485 hba->fc_nchannels = isp->isp_nchan;
486 hba->fc_nports = MAX_FC_TARG;
487 hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed;
488 hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1;
489 hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid;
490 hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram;
491 hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram;
492 hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn;
493 hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn;
494 retval = 0;
495 break;
496 }
497 case ISP_TSK_MGMT:
498 {
499 int needmarker;
500 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
501 uint16_t nphdl;
502 isp24xx_tmf_t tmf;
503 isp24xx_statusreq_t sp;
504 fcparam *fcp;
505 fcportdb_t *lp;
506 int i;
507
508 chan = fct->chan;
509 if (chan < 0 || chan >= isp->isp_nchan) {
510 retval = -ENXIO;
511 break;
512 }
513
514 needmarker = retval = 0;
515 nphdl = fct->loopid;
516 ISP_LOCK(isp);
517 fcp = FCPARAM(isp, chan);
518
519 for (i = 0; i < MAX_FC_TARG; i++) {
520 lp = &fcp->portdb[i];
521 if (lp->handle == nphdl) {
522 break;
523 }
524 }
525 if (i == MAX_FC_TARG) {
526 retval = ENXIO;
527 ISP_UNLOCK(isp);
528 break;
529 }
530 ISP_MEMZERO(&tmf, sizeof(tmf));
531 tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT;
532 tmf.tmf_header.rqs_entry_count = 1;
533 tmf.tmf_nphdl = lp->handle;
534 tmf.tmf_delay = 2;
535 tmf.tmf_timeout = 4;
536 tmf.tmf_tidlo = lp->portid;
537 tmf.tmf_tidhi = lp->portid >> 16;
538 tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan);
539 tmf.tmf_lun[1] = fct->lun & 0xff;
540 if (fct->lun >= 256) {
541 tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8);
542 }
543 switch (fct->action) {
544 case IPT_CLEAR_ACA:
545 tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA;
546 break;
547 case IPT_TARGET_RESET:
548 tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET;
549 needmarker = 1;
550 break;
551 case IPT_LUN_RESET:
552 tmf.tmf_flags = ISP24XX_TMF_LUN_RESET;
553 needmarker = 1;
554 break;
555 case IPT_CLEAR_TASK_SET:
556 tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET;
557 needmarker = 1;
558 break;
559 case IPT_ABORT_TASK_SET:
560 tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET;
561 needmarker = 1;
562 break;
563 default:
564 retval = EINVAL;
565 break;
566 }
567 if (retval) {
568 ISP_UNLOCK(isp);
569 break;
570 }
571
572 retval = isp_exec_entry_queue(isp, &tmf, &sp, 5);
573 if (retval != 0) {
574 isp_prt(isp, ISP_LOGERR, "%s: TMF of chan %d error %d",
575 __func__, chan, retval);
576 ISP_UNLOCK(isp);
577 break;
578 }
579
580 if (sp.req_completion_status != 0)
581 retval = EIO;
582 else if (needmarker)
583 fcp->sendmarker = 1;
584 ISP_UNLOCK(isp);
585 break;
586 }
587 default:
588 break;
589 }
590 return (retval);
591 }
592
593 /*
594 * Local Inlines
595 */
596
597 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *);
598 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *);
599
600 static ISP_INLINE int
601 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb)
602 {
603 ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free;
604 if (ISP_PCMD(ccb) == NULL) {
605 return (-1);
606 }
607 isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next;
608 return (0);
609 }
610
611 static ISP_INLINE void
612 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb)
613 {
614 if (ISP_PCMD(ccb)) {
615 #ifdef ISP_TARGET_MODE
616 PISP_PCMD(ccb)->datalen = 0;
617 #endif
618 PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free;
619 isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb);
620 ISP_PCMD(ccb) = NULL;
621 }
622 }
623
624 /*
625 * Put the target mode functions here, because some are inlines
626 */
627 #ifdef ISP_TARGET_MODE
628 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
629 static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t);
630 static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t);
631 static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *);
632 static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int);
633 static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t);
634 static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *);
635 static tstate_t *create_lun_state(ispsoftc_t *, int, struct cam_path *);
636 static void destroy_lun_state(ispsoftc_t *, int, tstate_t *);
637 static void isp_enable_lun(ispsoftc_t *, union ccb *);
638 static void isp_disable_lun(ispsoftc_t *, union ccb *);
639 static callout_func_t isp_refire_notify_ack;
640 static void isp_complete_ctio(ispsoftc_t *isp, union ccb *);
641 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE };
642 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How);
643 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
644 static void isp_handle_platform_ctio(ispsoftc_t *, ct7_entry_t *);
645 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp);
646 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *);
647 static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t);
648
649 static ISP_INLINE tstate_t *
650 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
651 {
652 struct isp_fc *fc = ISP_FC_PC(isp, bus);
653 tstate_t *tptr;
654
655 SLIST_FOREACH(tptr, &fc->lun_hash[LUN_HASH_FUNC(lun)], next) {
656 if (tptr->ts_lun == lun)
657 return (tptr);
658 }
659 return (NULL);
660 }
661
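/*
 * Re-drive ATIOs parked on the lun's restart_queue because we were out
 * of resources earlier.  Returns nonzero if the restart queue is still
 * non-empty afterwards, i.e. we are still resource starved.
 */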
662 static int
663 isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr)
664 {
665 inot_private_data_t *ntp;
666 struct ntpdlist rq;
667
668 if (STAILQ_EMPTY(&tptr->restart_queue))
669 return (0);
670 STAILQ_INIT(&rq);
671 STAILQ_CONCAT(&rq, &tptr->restart_queue);
672 while ((ntp = STAILQ_FIRST(&rq)) != NULL) {
673 STAILQ_REMOVE_HEAD(&rq, next);
674 isp_prt(isp, ISP_LOGTDEBUG0,
675 "%s: restarting resrc deprived %x", __func__,
676 ((at7_entry_t *)ntp->data)->at_rxid);
677 isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data);
678 isp_put_ntpd(isp, bus, ntp);
679 if (!STAILQ_EMPTY(&tptr->restart_queue))
680 break;
681 }
682 if (!STAILQ_EMPTY(&rq)) {
683 STAILQ_CONCAT(&rq, &tptr->restart_queue);
684 STAILQ_CONCAT(&tptr->restart_queue, &rq);
685 }
686 return (!STAILQ_EMPTY(&tptr->restart_queue));
687 }
688
689 static void
690 isp_tmcmd_restart(ispsoftc_t *isp)
691 {
692 struct isp_fc *fc;
693 tstate_t *tptr;
694 union ccb *ccb;
695 int bus, i;
696
697 for (bus = 0; bus < isp->isp_nchan; bus++) {
698 fc = ISP_FC_PC(isp, bus);
699 for (i = 0; i < LUN_HASH_SIZE; i++) {
700 SLIST_FOREACH(tptr, &fc->lun_hash[i], next)
701 isp_atio_restart(isp, bus, tptr);
702 }
703
704 /*
705 * We only need to do this once per channel.
706 */
707 ccb = (union ccb *)TAILQ_FIRST(&fc->waitq);
708 if (ccb != NULL) {
709 TAILQ_REMOVE(&fc->waitq, &ccb->ccb_h, sim_links.tqe);
710 isp_target_start_ctio(isp, ccb, FROM_TIMER);
711 }
712 }
713 isp_rq_check_above(isp);
714 isp_rq_check_below(isp);
715 }
716
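/*
 * ATIO private data (atpd) pool management: entries come off the
 * per-channel free list, get stamped with the command tag, and are kept
 * on a small hash of in-use lists so isp_find_atpd() can look them up
 * by tag later.
 */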
717 static atio_private_data_t *
718 isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
719 {
720 struct isp_fc *fc = ISP_FC_PC(isp, chan);
721 atio_private_data_t *atp;
722
723 atp = LIST_FIRST(&fc->atfree);
724 if (atp) {
725 LIST_REMOVE(atp, next);
726 atp->tag = tag;
727 LIST_INSERT_HEAD(&fc->atused[ATPDPHASH(tag)], atp, next);
728 }
729 return (atp);
730 }
731
732 static atio_private_data_t *
733 isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
734 {
735 struct isp_fc *fc = ISP_FC_PC(isp, chan);
736 atio_private_data_t *atp;
737
738 LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) {
739 if (atp->tag == tag)
740 return (atp);
741 }
742 return (NULL);
743 }
744
745 static void
746 isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
747 {
748 struct isp_fc *fc = ISP_FC_PC(isp, chan);
749
750 if (atp->ests)
751 isp_put_ecmd(isp, atp->ests);
752 LIST_REMOVE(atp, next);
753 memset(atp, 0, sizeof (*atp));
754 LIST_INSERT_HEAD(&fc->atfree, atp, next);
755 }
756
757 static void
758 isp_dump_atpd(ispsoftc_t *isp, int chan)
759 {
760 struct isp_fc *fc = ISP_FC_PC(isp, chan);
761 atio_private_data_t *atp;
762 const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" };
763
764 for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
765 if (atp->state == ATPD_STATE_FREE)
766 continue;
767 isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s",
768 chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]);
769 }
770 }
771
772 static inot_private_data_t *
773 isp_get_ntpd(ispsoftc_t *isp, int chan)
774 {
775 struct isp_fc *fc = ISP_FC_PC(isp, chan);
776 inot_private_data_t *ntp;
777
778 ntp = STAILQ_FIRST(&fc->ntfree);
779 if (ntp)
780 STAILQ_REMOVE_HEAD(&fc->ntfree, next);
781 return (ntp);
782 }
783
784 static inot_private_data_t *
785 isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id)
786 {
787 struct isp_fc *fc = ISP_FC_PC(isp, chan);
788 inot_private_data_t *ntp;
789
790 for (ntp = fc->ntpool; ntp < &fc->ntpool[ATPDPSIZE]; ntp++) {
791 if (ntp->tag_id == tag_id && ntp->seq_id == seq_id)
792 return (ntp);
793 }
794 return (NULL);
795 }
796
797 static void
798 isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp)
799 {
800 struct isp_fc *fc = ISP_FC_PC(isp, chan);
801
802 ntp->tag_id = ntp->seq_id = 0;
803 STAILQ_INSERT_HEAD(&fc->ntfree, ntp, next);
804 }
805
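/*
 * Allocate and hash a per-lun target state structure (tstate_t) holding
 * the ATIO/INOT CCB lists and the restart queue for that lun.
 */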
806 tstate_t *
807 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path)
808 {
809 struct isp_fc *fc = ISP_FC_PC(isp, bus);
810 lun_id_t lun;
811 tstate_t *tptr;
812
813 lun = xpt_path_lun_id(path);
814 tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
815 if (tptr == NULL)
816 return (NULL);
817 tptr->ts_lun = lun;
818 SLIST_INIT(&tptr->atios);
819 SLIST_INIT(&tptr->inots);
820 STAILQ_INIT(&tptr->restart_queue);
821 SLIST_INSERT_HEAD(&fc->lun_hash[LUN_HASH_FUNC(lun)], tptr, next);
822 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n");
823 return (tptr);
824 }
825
826 static void
827 destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr)
828 {
829 struct isp_fc *fc = ISP_FC_PC(isp, bus);
830 union ccb *ccb;
831 inot_private_data_t *ntp;
832
833 while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) {
834 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
835 ccb->ccb_h.status = CAM_REQ_ABORTED;
836 xpt_done(ccb);
837 };
838 while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) {
839 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
840 ccb->ccb_h.status = CAM_REQ_ABORTED;
841 xpt_done(ccb);
842 }
843 while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) {
844 isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0);
845 STAILQ_REMOVE_HEAD(&tptr->restart_queue, next);
846 isp_put_ntpd(isp, bus, ntp);
847 }
848 SLIST_REMOVE(&fc->lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], tptr, tstate, next);
849 free(tptr, M_DEVBUF);
850 }
851
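/*
 * Lun enable/disable for target mode: enable creates the per-lun tstate
 * (target and lun must be both wildcard or both explicit), disable tears
 * it down and aborts anything still queued on it.
 */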
852 static void
853 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb)
854 {
855 tstate_t *tptr;
856 int bus = XS_CHANNEL(ccb);
857 target_id_t target = ccb->ccb_h.target_id;
858 lun_id_t lun = ccb->ccb_h.target_lun;
859
860 /*
861 * We only support target and lun being either both wildcards
862 * or both non-wildcards.
863 */
864 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
865 "enabling lun %jx\n", (uintmax_t)lun);
866 if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
867 ccb->ccb_h.status = CAM_LUN_INVALID;
868 xpt_done(ccb);
869 return;
870 }
871
872 /* Create the state pointer. It should not already exist. */
873 tptr = get_lun_statep(isp, bus, lun);
874 if (tptr) {
875 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
876 xpt_done(ccb);
877 return;
878 }
879 tptr = create_lun_state(isp, bus, ccb->ccb_h.path);
880 if (tptr == NULL) {
881 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
882 xpt_done(ccb);
883 return;
884 }
885
886 ccb->ccb_h.status = CAM_REQ_CMP;
887 xpt_done(ccb);
888 }
889
890 static void
891 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb)
892 {
893 tstate_t *tptr;
894 int bus = XS_CHANNEL(ccb);
895 target_id_t target = ccb->ccb_h.target_id;
896 lun_id_t lun = ccb->ccb_h.target_lun;
897
898 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
899 "disabling lun %jx\n", (uintmax_t)lun);
900 if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
901 ccb->ccb_h.status = CAM_LUN_INVALID;
902 xpt_done(ccb);
903 return;
904 }
905
906 /* Find the state pointer. */
907 if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) {
908 ccb->ccb_h.status = CAM_PATH_INVALID;
909 xpt_done(ccb);
910 return;
911 }
912
913 destroy_lun_state(isp, bus, tptr);
914 ccb->ccb_h.status = CAM_REQ_CMP;
915 xpt_done(ccb);
916 }
917
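/*
 * Build and queue CTIO7 entries for target-mode CCBs: the new CCB is put
 * on the channel's wait queue, then each waiting CCB is turned into a
 * mode 0 (data), mode 1 (inline status/sense) or mode 2 (sense via an
 * external FCP response buffer) CTIO, DMA is set up, and the entry is
 * handed to the firmware.  CCBs that cannot be started yet (no atpd, no
 * handle, no queue space) are failed or pushed back onto the wait queue.
 */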
918 static void
919 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
920 {
921 int fctape, sendstatus, resid;
922 fcparam *fcp;
923 atio_private_data_t *atp;
924 struct ccb_scsiio *cso;
925 struct isp_ccbq *waitq;
926 uint32_t dmaresult, handle, xfrlen, sense_length, tmp;
927 ct7_entry_t local, *cto = &local;
928
929 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len,
930 (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? ccb->csio.sense_len : 0));
931
932 waitq = &ISP_FC_PC(isp, XS_CHANNEL(ccb))->waitq;
933 switch (how) {
934 case FROM_CAM:
935 /*
936 * Insert at the tail of the list of waiting CTIO CCBs, if any.
937 */
938 TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe);
939 break;
940 case FROM_TIMER:
941 case FROM_SRR:
942 case FROM_CTIO_DONE:
943 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
944 break;
945 }
946
947 while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) {
948 TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe);
949
950 cso = &ccb->csio;
951 xfrlen = cso->dxfer_len;
952 if (xfrlen == 0) {
953 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
954 ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n");
955 ccb->ccb_h.status = CAM_REQ_INVALID;
956 xpt_done(ccb);
957 continue;
958 }
959 }
960
961 atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id);
962 if (atp == NULL) {
963 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__);
964 isp_dump_atpd(isp, XS_CHANNEL(ccb));
965 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
966 xpt_done(ccb);
967 continue;
968 }
969
970 /*
971 * Is this command a dead duck?
972 */
973 if (atp->dead) {
974 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id);
975 ccb->ccb_h.status = CAM_REQ_ABORTED;
976 xpt_done(ccb);
977 continue;
978 }
979
980 /*
981 * Check to make sure we're still in target mode.
982 */
983 fcp = FCPARAM(isp, XS_CHANNEL(ccb));
984 if ((fcp->role & ISP_ROLE_TARGET) == 0) {
985 isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id);
986 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
987 xpt_done(ccb);
988 continue;
989 }
990
991 /*
992 * We only handle ATPD_CCB_OUTSTANDING outstanding CCBs at a time (each of which
993 * could be split into two CTIOs, one for data and one for status).
994 */
995 if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) {
996 isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags);
997 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
998 break;
999 }
1000
1001 /*
1002 * Does the initiator expect FC-Tape style responses?
1003 */
1004 if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) {
1005 fctape = 1;
1006 } else {
1007 fctape = 0;
1008 }
1009
1010 /*
1011 * If we already did the data xfer portion of a CTIO that sends data
1012 * and status, don't do it again and do the status portion now.
1013 */
1014 if (atp->sendst) {
1015 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u",
1016 cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit);
1017 xfrlen = 0; /* we already did the data transfer */
1018 atp->sendst = 0;
1019 }
1020 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1021 sendstatus = 1;
1022 } else {
1023 sendstatus = 0;
1024 }
1025
1026 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
1027 KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?"));
1028 /*
1029 * Sense length is not the entire sense data structure size. Periph
1030 * drivers don't seem to be setting sense_len to reflect the actual
1031 * size. We'll peek inside to get the right amount.
1032 */
1033 sense_length = cso->sense_len;
1034
1035 /*
1036 * This 'cannot' happen
1037 */
1038 if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) {
1039 sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE;
1040 }
1041 } else {
1042 sense_length = 0;
1043 }
1044
1045 /*
1046 * Check for overflow
1047 */
1048 tmp = atp->bytes_xfered + atp->bytes_in_transit;
1049 if (xfrlen > 0 && tmp > atp->orig_datalen) {
1050 isp_prt(isp, ISP_LOGERR,
1051 "%s: [0x%x] data overflow by %u bytes", __func__,
1052 cso->tag_id, tmp + xfrlen - atp->orig_datalen);
1053 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1054 xpt_done(ccb);
1055 continue;
1056 }
1057 if (xfrlen > atp->orig_datalen - tmp) {
1058 xfrlen = atp->orig_datalen - tmp;
1059 if (xfrlen == 0 && !sendstatus) {
1060 cso->resid = cso->dxfer_len;
1061 ccb->ccb_h.status = CAM_REQ_CMP;
1062 xpt_done(ccb);
1063 continue;
1064 }
1065 }
1066
1067 memset(cto, 0, QENTRY_LEN);
1068 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1069 cto->ct_header.rqs_entry_count = 1;
1070 cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM;
1071 ATPD_SET_SEQNO(cto, atp);
1072 cto->ct_nphdl = atp->nphdl;
1073 cto->ct_rxid = atp->tag;
1074 cto->ct_iid_lo = atp->sid;
1075 cto->ct_iid_hi = atp->sid >> 16;
1076 cto->ct_oxid = atp->oxid;
1077 cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb));
1078 cto->ct_timeout = XS_TIME(ccb);
1079 cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT;
1080
1081 /*
1082 * Mode 1, status, no data. Only possible when we are sending status, have
1083 * no data to transfer, and any sense data can fit into a ct7_entry_t.
1084 *
1085 * Mode 2, status, no data. We have to use this in the case that
1086 * the sense data won't fit into a ct7_entry_t.
1087 *
1088 */
1089 if (sendstatus && xfrlen == 0) {
1090 cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA;
1091 resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit;
1092 if (sense_length <= MAXRESPLEN_24XX) {
1093 cto->ct_flags |= CT7_FLAG_MODE1;
1094 cto->ct_scsi_status = cso->scsi_status;
1095 if (resid < 0) {
1096 cto->ct_resid = -resid;
1097 cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8);
1098 } else if (resid > 0) {
1099 cto->ct_resid = resid;
1100 cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8);
1101 }
1102 if (fctape) {
1103 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1104 }
1105 if (sense_length) {
1106 cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
1107 cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length;
1108 memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length);
1109 }
1110 } else {
1111 bus_addr_t addr;
1112 fcp_rsp_iu_t rp;
1113
1114 if (atp->ests == NULL) {
1115 atp->ests = isp_get_ecmd(isp);
1116 if (atp->ests == NULL) {
1117 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1118 break;
1119 }
1120 }
1121 memset(&rp, 0, sizeof(rp));
1122 if (fctape) {
1123 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1124 rp.fcp_rsp_bits |= FCP_CONF_REQ;
1125 }
1126 cto->ct_flags |= CT7_FLAG_MODE2;
1127 rp.fcp_rsp_scsi_status = cso->scsi_status;
1128 if (resid < 0) {
1129 rp.fcp_rsp_resid = -resid;
1130 rp.fcp_rsp_bits |= FCP_RESID_OVERFLOW;
1131 } else if (resid > 0) {
1132 rp.fcp_rsp_resid = resid;
1133 rp.fcp_rsp_bits |= FCP_RESID_UNDERFLOW;
1134 }
1135 if (sense_length) {
1136 rp.fcp_rsp_snslen = sense_length;
1137 cto->ct_senselen = sense_length;
1138 rp.fcp_rsp_bits |= FCP_SNSLEN_VALID;
1139 isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1140 memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length);
1141 } else {
1142 isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1143 }
1144 if (isp->isp_dblev & ISP_LOGTDEBUG1) {
1145 isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests);
1146 }
1147 bus_dmamap_sync(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map, BUS_DMASYNC_PREWRITE);
1148 addr = isp->isp_osinfo.ecmd_dma;
1149 addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE);
1150 isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests,
1151 (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length);
1152 cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length;
1153 cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr);
1154 cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr);
1155 cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
1156 }
1157 if (sense_length) {
1158 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__,
1159 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length,
1160 cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]);
1161 } else {
1162 isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__,
1163 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid);
1164 }
1165 atp->state = ATPD_STATE_LAST_CTIO;
1166 }
1167
1168 /*
1169 * Mode 0 data transfers, *possibly* with status.
1170 */
1171 if (xfrlen != 0) {
1172 cto->ct_flags |= CT7_FLAG_MODE0;
1173 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1174 cto->ct_flags |= CT7_DATA_IN;
1175 } else {
1176 cto->ct_flags |= CT7_DATA_OUT;
1177 }
1178
1179 cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit;
1180 cto->rsp.m0.ct_xfrlen = xfrlen;
1181
1182 #ifdef DEBUG
1183 if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) {
1184 isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2));
1185 ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0;
1186 cto->rsp.m0.ct_xfrlen -= xfrlen >> 2;
1187 }
1188 #endif
1189 if (sendstatus) {
1190 resid = atp->orig_datalen - atp->bytes_xfered - xfrlen;
1191 if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) {
1192 cto->ct_flags |= CT7_SENDSTATUS;
1193 atp->state = ATPD_STATE_LAST_CTIO;
1194 if (fctape) {
1195 cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1196 }
1197 } else {
1198 atp->sendst = 1; /* send status later */
1199 cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM;
1200 atp->state = ATPD_STATE_CTIO;
1201 }
1202 } else {
1203 atp->state = ATPD_STATE_CTIO;
1204 }
1205 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__,
1206 cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered);
1207 }
1208
1209 if (isp_get_pcmd(isp, ccb)) {
1210 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n");
1211 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1212 break;
1213 }
1214 handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET);
1215 if (handle == 0) {
1216 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__);
1217 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1218 isp_free_pcmd(isp, ccb);
1219 break;
1220 }
1221 atp->bytes_in_transit += xfrlen;
1222 PISP_PCMD(ccb)->datalen = xfrlen;
1223
1224 /*
1225 * Call the dma setup routines for this entry (and any subsequent
1226 * CTIOs) if there's data to move, and then tell the f/w it's got
1227 * new things to play with. As with isp_start's usage of DMA setup,
1228 * any swizzling is done in the machine dependent layer. Because
1229 * of this, we put the request onto the queue area first in native
1230 * format.
1231 */
1232 cto->ct_syshandle = handle;
1233 dmaresult = ISP_DMASETUP(isp, cso, cto);
1234 if (dmaresult != 0) {
1235 isp_destroy_handle(isp, handle);
1236 isp_free_pcmd(isp, ccb);
1237 if (dmaresult == CMD_EAGAIN) {
1238 TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1239 break;
1240 }
1241 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1242 xpt_done(ccb);
1243 continue;
1244 }
1245 ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED;
1246 if (xfrlen) {
1247 ccb->ccb_h.spriv_field0 = atp->bytes_xfered;
1248 } else {
1249 ccb->ccb_h.spriv_field0 = ~0;
1250 }
1251 atp->ctcnt++;
1252 atp->seqno++;
1253 }
1254 }
1255
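/*
 * Callout handler that keeps retrying a notify ACK which could not be
 * queued; it reschedules itself until isp_notify_ack() succeeds and then
 * frees the retry context.
 */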
1256 static void
1257 isp_refire_notify_ack(void *arg)
1258 {
1259 isp_tna_t *tp = arg;
1260 ispsoftc_t *isp = tp->isp;
1261
1262 ISP_ASSERT_LOCKED(isp);
1263 if (isp_notify_ack(isp, tp->not)) {
1264 callout_schedule(&tp->timer, 5);
1265 } else {
1266 free(tp, M_DEVBUF);
1267 }
1268 }
1269
1270
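/*
 * Common completion path for target-mode CCBs: re-check the request
 * queue watermark and hand the CCB back to CAM.
 */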
1271 static void
1272 isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb)
1273 {
1274
1275 isp_rq_check_below(isp);
1276 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1277 xpt_done(ccb);
1278 }
1279
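/*
 * Handle an inbound ATIO7 (FCP command) from the firmware: resolve the
 * channel and the initiator's port database entry, find the lun (or
 * wildcard) state, allocate an atpd to track the exchange, then fill in
 * and complete a CAM ATIO CCB.  Resource shortages park the raw ATIO on
 * the lun's restart queue or answer with SCSI BUSY.
 */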
1280 static void
1281 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
1282 {
1283 int cdbxlen;
1284 lun_id_t lun;
1285 uint16_t chan, nphdl = NIL_HANDLE;
1286 uint32_t did, sid;
1287 fcportdb_t *lp;
1288 tstate_t *tptr;
1289 struct ccb_accept_tio *atiop;
1290 atio_private_data_t *atp = NULL;
1291 atio_private_data_t *oatp;
1292 inot_private_data_t *ntp;
1293
1294 did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
1295 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1296 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun));
1297
1298 if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
1299 /* Channel has to be derived from D_ID */
1300 isp_find_chan_by_did(isp, did, &chan);
1301 if (chan == ISP_NOCHAN) {
1302 isp_prt(isp, ISP_LOGWARN,
1303 "%s: [RX_ID 0x%x] D_ID %x not found on any channel",
1304 __func__, aep->at_rxid, did);
1305 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN,
1306 ECMD_TERMINATE, 0);
1307 return;
1308 }
1309 } else {
1310 chan = 0;
1311 }
1312
1313 /*
1314 * Find the PDB entry for this initiator
1315 */
1316 if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) {
1317 /*
1318 * If we're not in the port database terminate the exchange.
1319 */
1320 isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already",
1321 __func__, aep->at_rxid, did, chan, sid);
1322 isp_dump_portdb(isp, chan);
1323 isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0);
1324 return;
1325 }
1326 nphdl = lp->handle;
1327
1328 /*
1329 * Get the tstate pointer
1330 */
1331 tptr = get_lun_statep(isp, chan, lun);
1332 if (tptr == NULL) {
1333 tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD);
1334 if (tptr == NULL) {
1335 isp_prt(isp, ISP_LOGWARN,
1336 "%s: [0x%x] no state pointer for lun %jx or wildcard",
1337 __func__, aep->at_rxid, (uintmax_t)lun);
1338 if (lun == 0) {
1339 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1340 } else {
1341 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
1342 }
1343 return;
1344 }
1345 }
1346
1347 /*
1348 * Start any commands pending resources first.
1349 */
1350 if (isp_atio_restart(isp, chan, tptr))
1351 goto noresrc;
1352
1353 /*
1354 * If the f/w is out of resources, just send a BUSY status back.
1355 */
1356 if (aep->at_rxid == AT7_NORESRC_RXID) {
1357 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1358 return;
1359 }
1360
1361 /*
1362 * If we're out of resources, just send a BUSY status back.
1363 */
1364 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1365 if (atiop == NULL) {
1366 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid);
1367 goto noresrc;
1368 }
1369
1370 oatp = isp_find_atpd(isp, chan, aep->at_rxid);
1371 if (oatp) {
1372 isp_prt(isp, oatp->state == ATPD_STATE_LAST_CTIO ? ISP_LOGTDEBUG0 :
1373 ISP_LOGWARN, "[0x%x] tag wraparound (N-Port Handle "
1374 "0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d",
1375 aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state);
1376 /*
1377 * It's not a "no resource" condition- but we can treat it like one
1378 */
1379 goto noresrc;
1380 }
1381 atp = isp_get_atpd(isp, chan, aep->at_rxid);
1382 if (atp == NULL) {
1383 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid);
1384 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1385 return;
1386 }
1387 atp->word3 = lp->prli_word3;
1388 atp->state = ATPD_STATE_ATIO;
1389 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1390 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n");
1391 atiop->init_id = FC_PORTDB_TGT(isp, chan, lp);
1392 atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1393 atiop->ccb_h.target_lun = lun;
1394 atiop->sense_len = 0;
1395 cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT;
1396 if (cdbxlen) {
1397 isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored");
1398 }
1399 cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb);
1400 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen);
1401 atiop->cdb_len = cdbxlen;
1402 atiop->ccb_h.status = CAM_CDB_RECVD;
1403 atiop->tag_id = atp->tag;
1404 switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) {
1405 case FCP_CMND_TASK_ATTR_SIMPLE:
1406 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1407 atiop->tag_action = MSG_SIMPLE_TASK;
1408 break;
1409 case FCP_CMND_TASK_ATTR_HEAD:
1410 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1411 atiop->tag_action = MSG_HEAD_OF_QUEUE_TASK;
1412 break;
1413 case FCP_CMND_TASK_ATTR_ORDERED:
1414 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1415 atiop->tag_action = MSG_ORDERED_TASK;
1416 break;
1417 case FCP_CMND_TASK_ATTR_ACA:
1418 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1419 atiop->tag_action = MSG_ACA_TASK;
1420 break;
1421 case FCP_CMND_TASK_ATTR_UNTAGGED:
1422 default:
1423 atiop->tag_action = 0;
1424 break;
1425 }
1426 atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute &
1427 FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT;
1428 atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl;
1429 atp->bytes_xfered = 0;
1430 atp->lun = lun;
1431 atp->nphdl = nphdl;
1432 atp->sid = sid;
1433 atp->did = did;
1434 atp->oxid = aep->at_hdr.ox_id;
1435 atp->rxid = aep->at_hdr.rx_id;
1436 atp->cdb0 = atiop->cdb_io.cdb_bytes[0];
1437 atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK;
1438 atp->state = ATPD_STATE_CAM;
1439 isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u",
1440 aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen);
1441 xpt_done((union ccb *)atiop);
1442 return;
1443 noresrc:
1444 KASSERT(atp == NULL, ("%s: atp is not NULL on noresrc!\n", __func__));
1445 ntp = isp_get_ntpd(isp, chan);
1446 if (ntp == NULL) {
1447 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1448 return;
1449 }
1450 memcpy(ntp->data, aep, QENTRY_LEN);
1451 STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next);
1452 }
1453
1454
1455 /*
1456 * Handle starting an SRR (sequence retransmit request)
1457 * We get here when we've gotten the immediate notify
1458 * and the return of all outstanding CTIOs for this
1459 * transaction.
1460 */
1461 static void
1462 isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp)
1463 {
1464 in_fcentry_24xx_t *inot;
1465 uint32_t srr_off, ccb_off, ccb_len, ccb_end;
1466 union ccb *ccb;
1467
1468 inot = (in_fcentry_24xx_t *)atp->srr;
1469 srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16);
1470 ccb = atp->srr_ccb;
1471 atp->srr_ccb = NULL;
1472 atp->nsrr++;
1473 if (ccb == NULL) {
1474 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag);
1475 goto fail;
1476 }
1477
1478 ccb_off = ccb->ccb_h.spriv_field0;
1479 ccb_len = ccb->csio.dxfer_len;
1480 ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len;
1481
1482 switch (inot->in_srr_iu) {
1483 case R_CTL_INFO_SOLICITED_DATA:
1484 /*
1485 * We have to restart a FCP_DATA data out transaction
1486 */
1487 atp->sendst = 0;
1488 atp->bytes_xfered = srr_off;
1489 if (ccb_len == 0) {
1490 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off);
1491 goto mdp;
1492 }
1493 if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) {
1494 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1495 goto mdp;
1496 }
1497 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1498 break;
1499 case R_CTL_INFO_COMMAND_STATUS:
1500 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag);
1501 atp->sendst = 1;
1502 /*
1503 * We have to restart a FCP_RSP IU transaction
1504 */
1505 break;
1506 case R_CTL_INFO_DATA_DESCRIPTOR:
1507 /*
1508 * We have to restart an FCP DATA in transaction
1509 */
1510 isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping");
1511 goto fail;
1512
1513 default:
1514 isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu);
1515 goto fail;
1516 }
1517
1518 /*
1519 * We can't do anything until this is acked, so we might as well start it now.
1520 * We aren't going to do the usual asynchronous ack issue because we need
1521 * to make sure this gets on the wire first.
1522 */
1523 if (isp_notify_ack(isp, inot)) {
1524 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1525 goto fail;
1526 }
1527 isp_target_start_ctio(isp, ccb, FROM_SRR);
1528 return;
1529 fail:
1530 inot->in_reserved = 1;
1531 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1532 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1533 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1534 isp_complete_ctio(isp, ccb);
1535 return;
1536 mdp:
1537 if (isp_notify_ack(isp, inot)) {
1538 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1539 goto fail;
1540 }
1541 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1542 ccb->ccb_h.status |= CAM_MESSAGE_RECV;
1543 /*
1544 * This is not a strict interpretation of MDP, but it's close
1545 */
1546 ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16];
1547 ccb->csio.msg_len = 7;
1548 ccb->csio.msg_ptr[0] = MSG_EXTENDED;
1549 ccb->csio.msg_ptr[1] = 5;
1550 ccb->csio.msg_ptr[2] = 0; /* modify data pointer */
1551 ccb->csio.msg_ptr[3] = srr_off >> 24;
1552 ccb->csio.msg_ptr[4] = srr_off >> 16;
1553 ccb->csio.msg_ptr[5] = srr_off >> 8;
1554 ccb->csio.msg_ptr[6] = srr_off;
1555 isp_complete_ctio(isp, ccb);
1556 }
1557
1558
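/*
 * Record an SRR (sequence retransmission request) notify against its
 * atpd; if the matching CTIO has already returned (srr_ccb is set), the
 * retransmission is started immediately.
 */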
1559 static void
1560 isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify)
1561 {
1562 in_fcentry_24xx_t *inot = notify->nt_lreserved;
1563 atio_private_data_t *atp;
1564 uint32_t tag = notify->nt_tagval & 0xffffffff;
1565
1566 atp = isp_find_atpd(isp, notify->nt_channel, tag);
1567 if (atp == NULL) {
1568 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify",
1569 __func__, tag);
1570 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1571 return;
1572 }
1573 atp->srr_notify_rcvd = 1;
1574 memcpy(atp->srr, inot, sizeof (atp->srr));
1575 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x",
1576 inot->in_rxid, inot->in_flags, inot->in_srr_iu,
1577 ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo);
1578 if (atp->srr_ccb)
1579 isp_handle_srr_start(isp, atp);
1580 }
1581
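/*
 * Completion handler for CTIO7 entries: reconcile transfer counts and
 * residuals in the atpd, handle the SRR and HBA-reset special cases, set
 * the final CAM status, and then either notify CAM, send a deferred
 * status CTIO, or retire the atpd once status has gone out successfully.
 */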
1582 static void
1583 isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t *ct)
1584 {
1585 union ccb *ccb;
1586 int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0;
1587 atio_private_data_t *atp = NULL;
1588 int bus;
1589 uint32_t handle, data_requested, resid;
1590
1591 handle = ct->ct_syshandle;
1592 ccb = isp_find_xs(isp, handle);
1593 if (ccb == NULL) {
1594 isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, ct);
1595 return;
1596 }
1597 isp_destroy_handle(isp, handle);
1598 resid = data_requested = PISP_PCMD(ccb)->datalen;
1599 isp_free_pcmd(isp, ccb);
1600
1601 bus = XS_CHANNEL(ccb);
1602 atp = isp_find_atpd(isp, bus, ct->ct_rxid);
1603 if (atp == NULL) {
1604 /*
1605 * XXX: isp_clear_commands() generates fake CTIO with zero
1606 * ct_rxid value, filling only ct_syshandle. Workaround
1607 * that using tag_id from the CCB, pointed by ct_syshandle.
1608 */
1609 atp = isp_find_atpd(isp, bus, ccb->csio.tag_id);
1610 }
1611 if (atp == NULL) {
1612 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id);
1613 return;
1614 }
1615 KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero"));
1616 atp->bytes_in_transit -= data_requested;
1617 atp->ctcnt -= 1;
1618 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1619
1620 if (ct->ct_nphdl == CT7_SRR) {
1621 atp->srr_ccb = ccb;
1622 if (atp->srr_notify_rcvd)
1623 isp_handle_srr_start(isp, atp);
1624 return;
1625 }
1626 if (ct->ct_nphdl == CT_HBA_RESET) {
1627 sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1628 (atp->sendst == 0);
1629 failure = CAM_UNREC_HBA_ERROR;
1630 } else {
1631 sentstatus = ct->ct_flags & CT7_SENDSTATUS;
1632 ok = (ct->ct_nphdl == CT7_OK);
1633 notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0;
1634 if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA)
1635 resid = ct->ct_resid;
1636 }
1637 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct),
1638 notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID");
1639 if (ok) {
1640 if (data_requested > 0) {
1641 atp->bytes_xfered += data_requested - resid;
1642 ccb->csio.resid = ccb->csio.dxfer_len -
1643 (data_requested - resid);
1644 }
1645 if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE))
1646 ccb->ccb_h.status |= CAM_SENT_SENSE;
1647 ccb->ccb_h.status |= CAM_REQ_CMP;
1648 } else {
1649 notify_cam = 1;
1650 if (failure == CAM_UNREC_HBA_ERROR)
1651 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
1652 else
1653 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1654 }
1655 atp->state = ATPD_STATE_PDON;
1656
1657 /*
1658 * We never *not* notify CAM when there has been any error (ok == 0),
1659 * so we never need to do an ATIO putback if we're not notifying CAM.
1660 */
1661 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)",
1662 (sentstatus)? " FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0);
1663 if (notify_cam == 0) {
1664 if (atp->sendst) {
1665 isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE);
1666 }
1667 return;
1668 }
1669
1670 /*
1671 * We are done with this ATIO if we successfully sent status.
1672 * In all other cases expect either another CTIO or XPT_ABORT.
1673 */
1674 if (ok && sentstatus)
1675 isp_put_atpd(isp, bus, atp);
1676
1677 /*
1678 * We're telling CAM we're done with this CTIO transaction.
1679 *
1680 * 24XX cards never need an ATIO put back.
1681 */
1682 isp_complete_ctio(isp, ccb);
1683 }
1684
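/*
 * Acknowledge a target notify: TMF ATIOs are answered with a mode 1
 * CTIO7 (optionally carrying a 4-byte FCP response code), received ABTS
 * frames get a terminating CTIO plus an ABTS ACK/NAK, and anything else
 * gets a plain notify ACK.
 */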
1685 static int
1686 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp)
1687 {
1688 ct7_entry_t local, *cto = &local;
1689
1690 if (isp->isp_state != ISP_RUNSTATE) {
1691 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL);
1692 return (0);
1693 }
1694
1695 /*
1696 * This case is for a Task Management Function, which shows up as an ATIO7 entry.
1697 */
1698 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) {
1699 at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved;
1700 fcportdb_t *lp;
1701 uint32_t sid;
1702 uint16_t nphdl;
1703
1704 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1705 if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) {
1706 nphdl = lp->handle;
1707 } else {
1708 nphdl = NIL_HANDLE;
1709 }
1710 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1711 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1712 cto->ct_header.rqs_entry_count = 1;
1713 cto->ct_nphdl = nphdl;
1714 cto->ct_rxid = aep->at_rxid;
1715 cto->ct_vpidx = mp->nt_channel;
1716 cto->ct_iid_lo = sid;
1717 cto->ct_iid_hi = sid >> 16;
1718 cto->ct_oxid = aep->at_hdr.ox_id;
1719 cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1;
1720 cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT;
1721 if (rsp != 0) {
1722 cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
1723 cto->rsp.m1.ct_resplen = 4;
1724 ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
1725 cto->rsp.m1.ct_resp[0] = rsp & 0xff;
1726 cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff;
1727 cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff;
1728 cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff;
1729 }
1730 return (isp_send_entry(isp, cto));
1731 }
1732
1733 /*
1734 * This case is for responding to an ABTS frame.
1735 */
1736 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1737
1738 /*
1739 * Overload nt_need_ack here to mark whether we've terminated the associated command.
1740 */
1741 if (mp->nt_need_ack) {
1742 abts_t *abts = (abts_t *)mp->nt_lreserved;
1743
1744 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1745 isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task);
1746 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1747 cto->ct_header.rqs_entry_count = 1;
1748 cto->ct_nphdl = mp->nt_nphdl;
1749 cto->ct_rxid = abts->abts_rxid_task;
1750 cto->ct_iid_lo = mp->nt_sid;
1751 cto->ct_iid_hi = mp->nt_sid >> 16;
1752 cto->ct_oxid = abts->abts_ox_id;
1753 cto->ct_vpidx = mp->nt_channel;
1754 cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
1755 if (isp_send_entry(isp, cto)) {
1756 return (ENOMEM);
1757 }
1758 mp->nt_need_ack = 0;
1759 }
1760 return (isp_acknak_abts(isp, mp->nt_lreserved, 0));
1761 }
1762
1763 /*
1764 * General purpose acknowledgement
1765 */
1766 if (mp->nt_need_ack) {
1767 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL);
1768 /*
1769 * Don't need to use the guaranteed send because the caller can retry
1770 */
1771 return (isp_notify_ack(isp, mp->nt_lreserved));
1772 }
1773 return (0);
1774 }
1775
1776 /*
1777 * Handle task management functions.
1778 *
1779 * We show up here with a notify structure filled out.
1780 *
1781 * The nt_lreserved tag points to the original queue entry
1782 */
1783 static void
1784 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify)
1785 {
1786 tstate_t *tptr;
1787 fcportdb_t *lp;
1788 struct ccb_immediate_notify *inot;
1789 inot_private_data_t *ntp = NULL;
1790 atio_private_data_t *atp;
1791 lun_id_t lun;
1792
1793 isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode,
1794 notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun);
1795 if (notify->nt_lun == LUN_ANY) {
1796 if (notify->nt_tagval == TAG_ANY) {
1797 lun = CAM_LUN_WILDCARD;
1798 } else {
1799 atp = isp_find_atpd(isp, notify->nt_channel,
1800 notify->nt_tagval & 0xffffffff);
1801 lun = atp ? atp->lun : CAM_LUN_WILDCARD;
1802 }
1803 } else {
1804 lun = notify->nt_lun;
1805 }
1806 tptr = get_lun_statep(isp, notify->nt_channel, lun);
1807 if (tptr == NULL) {
1808 tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD);
1809 if (tptr == NULL) {
1810 isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1811 goto bad;
1812 }
1813 }
1814 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots);
1815 if (inot == NULL) {
1816 isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1817 goto bad;
1818 }
1819
1820 inot->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1821 inot->ccb_h.target_lun = lun;
1822 if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 &&
1823 isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) {
1824 inot->initiator_id = CAM_TARGET_WILDCARD;
1825 } else {
1826 inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp);
1827 }
1828 inot->seq_id = notify->nt_tagval;
1829 inot->tag_id = notify->nt_tagval >> 32;
1830
1831 switch (notify->nt_ncode) {
1832 case NT_ABORT_TASK:
1833 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id);
1834 inot->arg = MSG_ABORT_TASK;
1835 break;
1836 case NT_ABORT_TASK_SET:
1837 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY);
1838 inot->arg = MSG_ABORT_TASK_SET;
1839 break;
1840 case NT_CLEAR_ACA:
1841 inot->arg = MSG_CLEAR_ACA;
1842 break;
1843 case NT_CLEAR_TASK_SET:
1844 inot->arg = MSG_CLEAR_TASK_SET;
1845 break;
1846 case NT_LUN_RESET:
1847 inot->arg = MSG_LOGICAL_UNIT_RESET;
1848 break;
1849 case NT_TARGET_RESET:
1850 inot->arg = MSG_TARGET_RESET;
1851 break;
1852 case NT_QUERY_TASK_SET:
1853 inot->arg = MSG_QUERY_TASK_SET;
1854 break;
1855 case NT_QUERY_ASYNC_EVENT:
1856 inot->arg = MSG_QUERY_ASYNC_EVENT;
1857 break;
1858 default:
1859 isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun);
1860 goto bad;
1861 }
1862
1863 ntp = isp_get_ntpd(isp, notify->nt_channel);
1864 if (ntp == NULL) {
1865 isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__);
1866 goto bad;
1867 }
1868 ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t));
1869 if (notify->nt_lreserved) {
1870 ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN);
1871 ntp->nt.nt_lreserved = &ntp->data;
1872 }
1873 ntp->seq_id = notify->nt_tagval;
1874 ntp->tag_id = notify->nt_tagval >> 32;
1875
1876 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
1877 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n");
1878 inot->ccb_h.status = CAM_MESSAGE_RECV;
1879 xpt_done((union ccb *)inot);
1880 return;
1881 bad:
1882 if (notify->nt_need_ack) {
1883 if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1884 if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) {
1885 isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK");
1886 }
1887 } else {
1888 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved);
1889 }
1890 }
1891 }
1892
1893 static void
1894 isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id)
1895 {
1896 struct isp_fc *fc = ISP_FC_PC(isp, chan);
1897 atio_private_data_t *atp;
1898 inot_private_data_t *ntp, *tmp;
1899 uint32_t this_tag_id;
1900
1901 /*
1902 * First, clean any commands pending restart
1903 */
1904 STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) {
1905 this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid;
1906 if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) {
1907 isp_endcmd(isp, ntp->data, NIL_HANDLE, chan,
1908 ECMD_TERMINATE, 0);
1909 isp_put_ntpd(isp, chan, ntp);
1910 STAILQ_REMOVE(&tptr->restart_queue, ntp,
1911 inot_private_data, next);
1912 }
1913 }
1914
1915 /*
1916 * Now mark other ones dead as well.
1917 */
1918 for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
1919 if (atp->lun != tptr->ts_lun)
1920 continue;
1921 if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id)
1922 atp->dead = 1;
1923 }
1924 }
1925 #endif
1926
1927 static void
1928 isp_poll(struct cam_sim *sim)
1929 {
1930 ispsoftc_t *isp = cam_sim_softc(sim);
1931
1932 ISP_RUN_ISR(isp);
1933 }
1934
1935
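/*
 * Per-command watchdog, armed from isp_action() when an XPT_SCSI_IO is
 * started with a finite CAM timeout.  We first hand-crank the ISR in case
 * the completion is merely sitting in the response queue; if the command
 * is still outstanding we try to abort it, and only if that fails do we
 * reclaim the handle and DMA resources and complete the CCB ourselves
 * with CAM_CMD_TIMEOUT.
 */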
1936 static void
1937 isp_watchdog(void *arg)
1938 {
1939 struct ccb_scsiio *xs = arg;
1940 ispsoftc_t *isp;
1941 uint32_t ohandle = ISP_HANDLE_FREE, handle;
1942
1943 isp = XS_ISP(xs);
1944
1945 handle = isp_find_handle(isp, xs);
1946
1947 /*
1948 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere.
1949 */
1950 if (handle != ISP_HANDLE_FREE) {
1951 ISP_RUN_ISR(isp);
1952 ohandle = handle;
1953 handle = isp_find_handle(isp, xs);
1954 }
1955 if (handle != ISP_HANDLE_FREE) {
1956 /*
1957 * Try and make sure the command is really dead before
1958 * we release the handle (and DMA resources) for reuse.
1959 *
1960 * If we are successful in aborting the command then
1961 * we're done here because we'll get the command returned
1962 * back separately.
1963 */
1964 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
1965 return;
1966 }
1967
1968 /*
1969 * Note that after calling the above, the command may in
1970 * fact have been completed.
1971 */
1972 xs = isp_find_xs(isp, handle);
1973
1974 /*
1975 * If the command no longer exists, then we won't
1976 * be able to find the xs again with this handle.
1977 */
1978 if (xs == NULL) {
1979 return;
1980 }
1981
1982 /*
1983 * After this point, the command is really dead.
1984 */
1985 ISP_DMAFREE(isp, xs);
1986 isp_destroy_handle(isp, handle);
1987 isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
1988 XS_SETERR(xs, CAM_CMD_TIMEOUT);
1989 isp_done(xs);
1990 } else {
1991 if (ohandle != ISP_HANDLE_FREE) {
1992 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle);
1993 } else {
1994 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__);
1995 }
1996 }
1997 }
1998
1999 static void
2000 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2001 {
2002 union ccb *ccb;
2003 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2004
2005 /*
2006 * Allocate a CCB, create a wildcard path for this target and schedule a rescan.
2007 */
2008 ccb = xpt_alloc_ccb_nowait();
2009 if (ccb == NULL) {
2010 isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan);
2011 return;
2012 }
2013 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim),
2014 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2015 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
2016 xpt_free_ccb(ccb);
2017 return;
2018 }
2019 xpt_rescan(ccb);
2020 }
2021
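/*
 * Tell CAM that the device previously mapped to this target has departed,
 * by posting AC_LOST_DEVICE on a wildcard-LUN path for the target.
 */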
2022 static void
2023 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2024 {
2025 struct cam_path *tp;
2026 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2027
2028 if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2029 xpt_async(AC_LOST_DEVICE, tp, NULL);
2030 xpt_free_path(tp);
2031 }
2032 }
2033
2034 /*
2035 * Gone Device Timer function: when we have decided that a device has gone
2036 * away, we wait a specific period of time before telling the OS that it
2037 * has gone away.
2038 *
2039 * This timer function fires once a second and then scans the port database
2040 * for devices that are marked dead but still have a virtual target assigned.
2041 * We decrement a counter for that port database entry, and when it hits zero,
2042 * we tell the OS the device has gone away.
2043 */
2044 static void
2045 isp_gdt(void *arg)
2046 {
2047 struct isp_fc *fc = arg;
2048 taskqueue_enqueue(taskqueue_thread, &fc->gtask);
2049 }
2050
2051 static void
2052 isp_gdt_task(void *arg, int pending)
2053 {
2054 struct isp_fc *fc = arg;
2055 ispsoftc_t *isp = fc->isp;
2056 int chan = fc - ISP_FC_PC(isp, 0);
2057 fcportdb_t *lp;
2058 struct ac_contract ac;
2059 struct ac_device_changed *adc;
2060 int dbidx, more_to_do = 0;
2061
2062 ISP_LOCK(isp);
2063 isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan);
2064 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2065 lp = &FCPARAM(isp, chan)->portdb[dbidx];
2066
2067 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2068 continue;
2069 }
2070 if (lp->gone_timer != 0) {
2071 lp->gone_timer -= 1;
2072 more_to_do++;
2073 continue;
2074 }
2075 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout");
2076 if (lp->is_target) {
2077 lp->is_target = 0;
2078 isp_make_gone(isp, lp, chan, dbidx);
2079 }
2080 if (lp->is_initiator) {
2081 lp->is_initiator = 0;
2082 ac.contract_number = AC_CONTRACT_DEV_CHG;
2083 adc = (struct ac_device_changed *) ac.contract_data;
2084 adc->wwpn = lp->port_wwn;
2085 adc->port = lp->portid;
2086 adc->target = dbidx;
2087 adc->arrived = 0;
2088 xpt_async(AC_CONTRACT, fc->path, &ac);
2089 }
2090 lp->state = FC_PORTDB_STATE_NIL;
2091 }
2092 if (fc->ready) {
2093 if (more_to_do) {
2094 callout_reset(&fc->gdt, hz, isp_gdt, fc);
2095 } else {
2096 callout_deactivate(&fc->gdt);
2097 isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime);
2098 }
2099 }
2100 ISP_UNLOCK(isp);
2101 }
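/*
 * The isp_gdt()/isp_gdt_task() split above is the usual FreeBSD idiom for
 * deferring work out of callout (softclock) context into a taskqueue thread
 * where it is safe to do more.  A minimal sketch of the pattern, with
 * hypothetical names that are not part of this driver:
 *
 *	static void
 *	foo_tick(void *arg)			// callout context: stay brief
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		taskqueue_enqueue(taskqueue_thread, &sc->task);
 *	}
 *
 *	static void
 *	foo_task(void *arg, int pending)	// taskqueue thread context
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		FOO_LOCK(sc);
 *		// scan state; re-arm the one-second tick if more work remains
 *		callout_reset(&sc->tick, hz, foo_tick, sc);
 *		FOO_UNLOCK(sc);
 *	}
 */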
2102
2103 /*
2104 * When the loop goes down we remember the time and freeze the CAM command
2105 * queue. For some period of time we keep trying to reprobe the loop; if
2106 * that fails, we tell the OS that the devices have gone away and drop the freeze.
2107 *
2108 * We don't clear the devices out of our port database because, when the loop
2109 * comes back up, we have to do some actual cleanup with the chip at that
2110 * point (an implicit PLOGO, e.g., to get the chip's port database state right).
2111 */
2112 static void
2113 isp_loop_changed(ispsoftc_t *isp, int chan)
2114 {
2115 fcparam *fcp = FCPARAM(isp, chan);
2116 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2117
2118 if (fc->loop_down_time)
2119 return;
2120 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan);
2121 if (fcp->role & ISP_ROLE_INITIATOR)
2122 isp_freeze_loopdown(isp, chan);
2123 fc->loop_down_time = time_uptime;
2124 wakeup(fc);
2125 }
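/*
 * The loop-down freeze and the later release in isp_loop_up()/isp_loop_dead()
 * rest on CAM's SIM queue freeze counting.  A rough sketch of that mechanism
 * (not the driver's actual isp_freeze_loopdown()/isp_unfreeze_loopdown()
 * helpers, which may do additional bookkeeping of their own):
 *
 *	xpt_freeze_simq(fc->sim, 1);		// hold new commands back
 *	...
 *	xpt_release_simq(fc->sim, 1);		// let queued commands flow again
 */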
2126
2127 static void
2128 isp_loop_up(ispsoftc_t *isp, int chan)
2129 {
2130 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2131
2132 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan);
2133 fc->loop_seen_once = 1;
2134 fc->loop_down_time = 0;
2135 isp_unfreeze_loopdown(isp, chan);
2136 }
2137
2138 static void
2139 isp_loop_dead(ispsoftc_t *isp, int chan)
2140 {
2141 fcparam *fcp = FCPARAM(isp, chan);
2142 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2143 fcportdb_t *lp;
2144 struct ac_contract ac;
2145 struct ac_device_changed *adc;
2146 int dbidx, i;
2147
2148 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan);
2149
2150 /*
2151 * Notify the OS of all targets that we now consider to have departed.
2152 */
2153 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2154 lp = &fcp->portdb[dbidx];
2155
2156 if (lp->state == FC_PORTDB_STATE_NIL)
2157 continue;
2158
2159 for (i = 0; i < ISP_HANDLE_NUM(isp); i++) {
2160 struct ccb_scsiio *xs;
2161
2162 if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) {
2163 continue;
2164 }
2165 if ((xs = isp->isp_xflist[i].cmd) == NULL) {
2166 continue;
2167 }
2168 if (dbidx != XS_TGT(xs)) {
2169 continue;
2170 }
2171 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout",
2172 isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2173 (uintmax_t)XS_LUN(xs));
2174
2175 /*
2176 * Just like in isp_watchdog, abort the outstanding
2177 * command, or immediately free its resources if it is
2178 * not active.
2179 */
2180 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
2181 continue;
2182 }
2183
2184 ISP_DMAFREE(isp, xs);
2185 isp_destroy_handle(isp, isp->isp_xflist[i].handle);
2186 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed",
2187 isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2188 (uintmax_t)XS_LUN(xs));
2189 XS_SETERR(xs, HBA_BUSRESET);
2190 isp_done(xs);
2191 }
2192
2193 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout");
2194 if (lp->is_target) {
2195 lp->is_target = 0;
2196 isp_make_gone(isp, lp, chan, dbidx);
2197 }
2198 if (lp->is_initiator) {
2199 lp->is_initiator = 0;
2200 ac.contract_number = AC_CONTRACT_DEV_CHG;
2201 adc = (struct ac_device_changed *) ac.contract_data;
2202 adc->wwpn = lp->port_wwn;
2203 adc->port = lp->portid;
2204 adc->target = dbidx;
2205 adc->arrived = 0;
2206 xpt_async(AC_CONTRACT, fc->path, &ac);
2207 }
2208 }
2209
2210 isp_unfreeze_loopdown(isp, chan);
2211 fc->loop_down_time = 0;
2212 }
2213
2214 static void
2215 isp_kthread(void *arg)
2216 {
2217 struct isp_fc *fc = arg;
2218 ispsoftc_t *isp = fc->isp;
2219 int chan = fc - ISP_FC_PC(isp, 0);
2220 int slp = 0, d;
2221 int lb, lim;
2222
2223 ISP_LOCK(isp);
2224 while (isp->isp_osinfo.is_exiting == 0) {
2225 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2226 "Chan %d Checking FC state", chan);
2227 lb = isp_fc_runstate(isp, chan, 250000);
2228 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2229 "Chan %d FC got to %s state", chan,
2230 isp_fc_loop_statename(lb));
2231
2232 /*
2233 * Our action differs based upon whether we're supporting
2234 * initiator mode or not. If we are, we might freeze the simq
2235 * when the loop is down and use a range of delays before
2236 * checking again.
2237 *
2238 * If not, we simply wait for the loop to come up.
2239 */
2240 if (lb == LOOP_READY || lb < 0) {
2241 slp = 0;
2242 } else {
2243 /*
2244 * If we've never seen the loop up and we've waited longer
2245 * than the quickboot time, or we've seen the loop up but
2246 * we've waited longer than the loop_down_limit, give up and
2247 * go to sleep until the loop comes up.
2248 */
2249 if (fc->loop_seen_once == 0)
2250 lim = isp_quickboot_time;
2251 else
2252 lim = fc->loop_down_limit;
2253 d = time_uptime - fc->loop_down_time;
2254 if (d >= lim)
2255 slp = 0;
2256 else if (d < 10)
2257 slp = 1;
2258 else if (d < 30)
2259 slp = 5;
2260 else if (d < 60)
2261 slp = 10;
2262 else if (d < 120)
2263 slp = 20;
2264 else
2265 slp = 30;
2266 }
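/*
 * A worked example of the back-off above: with a loop_down_limit of 60
 * seconds and the loop down for 45 seconds, lim = 60 and d = 45, so we
 * re-check in 10 seconds.  When slp ends up 0, the msleep() below gets a
 * timeout of 0 ticks, i.e. it sleeps until isp_loop_changed() does a
 * wakeup(fc).
 */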
2267
2268 if (slp == 0) {
2269 if (lb == LOOP_READY)
2270 isp_loop_up(isp, chan);
2271 else
2272 isp_loop_dead(isp, chan);
2273 }
2274
2275 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2276 "Chan %d sleep for %d seconds", chan, slp);
2277 msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz);
2278 }
2279 fc->num_threads -= 1;
2280 wakeup(&fc->num_threads);
2281 ISP_UNLOCK(isp);
2282 kthread_exit();
2283 }
2284
2285 #ifdef ISP_TARGET_MODE
2286 static void
2287 isp_abort_atio(ispsoftc_t *isp, union ccb *ccb)
2288 {
2289 atio_private_data_t *atp;
2290 union ccb *accb = ccb->cab.abort_ccb;
2291 struct ccb_hdr *sccb;
2292 tstate_t *tptr;
2293
2294 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2295 if (tptr != NULL) {
2296 /* Search for the ATIO among those queued. */
2297 SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) {
2298 if (sccb != &accb->ccb_h)
2299 continue;
2300 SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle);
2301 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2302 "Abort FREE ATIO\n");
2303 accb->ccb_h.status = CAM_REQ_ABORTED;
2304 xpt_done(accb);
2305 ccb->ccb_h.status = CAM_REQ_CMP;
2306 return;
2307 }
2308 }
2309
2310 /* Search for the ATIO among running. */
2311 atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id);
2312 if (atp != NULL) {
2313 /* Send TERMINATE to firmware. */
2314 if (!atp->dead) {
2315 uint8_t storage[QENTRY_LEN];
2316 ct7_entry_t *cto = (ct7_entry_t *) storage;
2317
2318 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
2319 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
2320 cto->ct_header.rqs_entry_count = 1;
2321 cto->ct_nphdl = atp->nphdl;
2322 cto->ct_rxid = atp->tag;
2323 cto->ct_iid_lo = atp->sid;
2324 cto->ct_iid_hi = atp->sid >> 16;
2325 cto->ct_oxid = atp->oxid;
2326 cto->ct_vpidx = XS_CHANNEL(accb);
2327 cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
2328 isp_send_entry(isp, cto);
2329 }
2330 isp_put_atpd(isp, XS_CHANNEL(accb), atp);
2331 ccb->ccb_h.status = CAM_REQ_CMP;
2332 } else {
2333 ccb->ccb_h.status = CAM_UA_ABORT;
2334 }
2335 }
2336
2337 static void
2338 isp_abort_inot(ispsoftc_t *isp, union ccb *ccb)
2339 {
2340 inot_private_data_t *ntp;
2341 union ccb *accb = ccb->cab.abort_ccb;
2342 struct ccb_hdr *sccb;
2343 tstate_t *tptr;
2344
2345 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2346 if (tptr != NULL) {
2347 /* Search for the INOT among those queued. */
2348 SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) {
2349 if (sccb != &accb->ccb_h)
2350 continue;
2351 SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle);
2352 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2353 "Abort FREE INOT\n");
2354 accb->ccb_h.status = CAM_REQ_ABORTED;
2355 xpt_done(accb);
2356 ccb->ccb_h.status = CAM_REQ_CMP;
2357 return;
2358 }
2359 }
2360
2361 /* Search for the INOT among running. */
2362 ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id);
2363 if (ntp != NULL) {
2364 if (ntp->nt.nt_need_ack) {
2365 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK,
2366 ntp->nt.nt_lreserved);
2367 }
2368 isp_put_ntpd(isp, XS_CHANNEL(accb), ntp);
2369 ccb->ccb_h.status = CAM_REQ_CMP;
2370 } else {
2371 ccb->ccb_h.status = CAM_UA_ABORT;
2372 return;
2373 }
2374 }
2375 #endif
2376
2377 static void
2378 isp_action(struct cam_sim *sim, union ccb *ccb)
2379 {
2380 int bus, tgt, error;
2381 ispsoftc_t *isp;
2382 fcparam *fcp;
2383 struct ccb_trans_settings *cts;
2384 sbintime_t ts;
2385
2386 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2387
2388 isp = (ispsoftc_t *)cam_sim_softc(sim);
2389 ISP_ASSERT_LOCKED(isp);
2390 bus = cam_sim_bus(sim);
2391 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2392 ISP_PCMD(ccb) = NULL;
2393
2394 switch (ccb->ccb_h.func_code) {
2395 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2396 /*
2397 * Do a couple of preliminary checks...
2398 */
2399 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2400 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2401 ccb->ccb_h.status = CAM_REQ_INVALID;
2402 isp_done((struct ccb_scsiio *) ccb);
2403 break;
2404 }
2405 }
2406 #ifdef DIAGNOSTIC
2407 if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) {
2408 xpt_print(ccb->ccb_h.path, "invalid target\n");
2409 ccb->ccb_h.status = CAM_PATH_INVALID;
2410 }
2411 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2412 xpt_done(ccb);
2413 break;
2414 }
2415 #endif
2416 ccb->csio.scsi_status = SCSI_STATUS_OK;
2417 if (isp_get_pcmd(isp, ccb)) {
2418 isp_prt(isp, ISP_LOGWARN, "out of PCMDs");
2419 cam_freeze_devq(ccb->ccb_h.path);
2420 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
2421 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2422 xpt_done(ccb);
2423 break;
2424 }
2425 error = isp_start((XS_T *) ccb);
2426 isp_rq_check_above(isp);
2427 switch (error) {
2428 case 0:
2429 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2430 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
2431 break;
2432 /* Give firmware extra 10s to handle timeout. */
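/* For example, a 30000 ms CAM timeout arms the watchdog about 40 seconds out. */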
2433 ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S;
2434 callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0,
2435 isp_watchdog, ccb, 0);
2436 break;
2437 case CMD_RQLATER:
2438 isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later",
2439 XS_TGT(ccb), (uintmax_t)XS_LUN(ccb));
2440 cam_freeze_devq(ccb->ccb_h.path);
2441 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2442 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2443 isp_free_pcmd(isp, ccb);
2444 xpt_done(ccb);
2445 break;
2446 case CMD_EAGAIN:
2447 isp_free_pcmd(isp, ccb);
2448 cam_freeze_devq(ccb->ccb_h.path);
2449 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2450 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2451 xpt_done(ccb);
2452 break;
2453 case CMD_COMPLETE:
2454 isp_done((struct ccb_scsiio *) ccb);
2455 break;
2456 default:
2457 isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__);
2458 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2459 isp_free_pcmd(isp, ccb);
2460 xpt_done(ccb);
2461 }
2462 break;
2463
2464 #ifdef ISP_TARGET_MODE
2465 case XPT_EN_LUN: /* Enable/Disable LUN as a target */
2466 if (ccb->cel.enable) {
2467 isp_enable_lun(isp, ccb);
2468 } else {
2469 isp_disable_lun(isp, ccb);
2470 }
2471 break;
2472 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
2473 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2474 {
2475 tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2476 if (tptr == NULL) {
2477 const char *str;
2478
2479 if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
2480 str = "XPT_IMMEDIATE_NOTIFY";
2481 else
2482 str = "XPT_ACCEPT_TARGET_IO";
2483 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path,
2484 "%s: no state pointer found for %s\n",
2485 __func__, str);
2486 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2487 xpt_done(ccb);
2488 break;
2489 }
2490
2491 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2492 ccb->atio.tag_id = 0;
2493 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle);
2494 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2495 "Put FREE ATIO\n");
2496 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
2497 ccb->cin1.seq_id = ccb->cin1.tag_id = 0;
2498 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
2499 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2500 "Put FREE INOT\n");
2501 }
2502 ccb->ccb_h.status = CAM_REQ_INPROG;
2503 break;
2504 }
2505 case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */
2506 {
2507 inot_private_data_t *ntp;
2508
2509 /*
2510 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb
2511 * XXX: matches that for the immediate notify, we have to *search* for the notify structure
2512 */
2513 /*
2514 * All the relevant path information is in the associated immediate notify
2515 */
2516 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2517 ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id);
2518 if (ntp == NULL) {
2519 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__,
2520 ccb->cna2.tag_id, ccb->cna2.seq_id);
2521 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2522 xpt_done(ccb);
2523 break;
2524 }
2525 if (isp_handle_platform_target_notify_ack(isp, &ntp->nt,
2526 (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) {
2527 cam_freeze_devq(ccb->ccb_h.path);
2528 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2529 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2530 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2531 break;
2532 }
2533 isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp);
2534 ccb->ccb_h.status = CAM_REQ_CMP;
2535 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2536 xpt_done(ccb);
2537 break;
2538 }
2539 case XPT_CONT_TARGET_IO:
2540 isp_target_start_ctio(isp, ccb, FROM_CAM);
2541 isp_rq_check_above(isp);
2542 break;
2543 #endif
2544 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2545 tgt = ccb->ccb_h.target_id;
2546 tgt |= (bus << 16);
2547
2548 error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt);
2549 if (error) {
2550 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2551 } else {
2552 /*
2553 * If we have an FC device, reset the Command
2554 * Reference Number, because the target will expect
2555 * us to restart the CRN at 1 after a reset.
2556 */
2557 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2558
2559 ccb->ccb_h.status = CAM_REQ_CMP;
2560 }
2561 xpt_done(ccb);
2562 break;
2563 case XPT_ABORT: /* Abort the specified CCB */
2564 {
2565 union ccb *accb = ccb->cab.abort_ccb;
2566 switch (accb->ccb_h.func_code) {
2567 #ifdef ISP_TARGET_MODE
2568 case XPT_ACCEPT_TARGET_IO:
2569 isp_abort_atio(isp, ccb);
2570 break;
2571 case XPT_IMMEDIATE_NOTIFY:
2572 isp_abort_inot(isp, ccb);
2573 break;
2574 #endif
2575 case XPT_SCSI_IO:
2576 error = isp_control(isp, ISPCTL_ABORT_CMD, accb);
2577 if (error) {
2578 ccb->ccb_h.status = CAM_UA_ABORT;
2579 } else {
2580 ccb->ccb_h.status = CAM_REQ_CMP;
2581 }
2582 break;
2583 default:
2584 ccb->ccb_h.status = CAM_REQ_INVALID;
2585 break;
2586 }
2587 /*
2588 * This is not a queued CCB, so the caller expects it to be
2589 * complete when control is returned.
2590 */
2591 break;
2592 }
2593 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2594 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2595 cts = &ccb->cts;
2596 if (!IS_CURRENT_SETTINGS(cts)) {
2597 ccb->ccb_h.status = CAM_REQ_INVALID;
2598 xpt_done(ccb);
2599 break;
2600 }
2601 ccb->ccb_h.status = CAM_REQ_CMP;
2602 xpt_done(ccb);
2603 break;
2604 case XPT_GET_TRAN_SETTINGS:
2605 {
2606 struct ccb_trans_settings_scsi *scsi;
2607 struct ccb_trans_settings_fc *fc;
2608
2609 cts = &ccb->cts;
2610 scsi = &cts->proto_specific.scsi;
2611 fc = &cts->xport_specific.fc;
2612 tgt = cts->ccb_h.target_id;
2613 fcp = FCPARAM(isp, bus);
2614
2615 cts->protocol = PROTO_SCSI;
2616 cts->protocol_version = SCSI_REV_2;
2617 cts->transport = XPORT_FC;
2618 cts->transport_version = 0;
2619
2620 scsi->valid = CTS_SCSI_VALID_TQ;
2621 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2622 fc->valid = CTS_FC_VALID_SPEED;
2623 fc->bitrate = fcp->isp_gbspeed * 100000;
2624 if (tgt < MAX_FC_TARG) {
2625 fcportdb_t *lp = &fcp->portdb[tgt];
2626 fc->wwnn = lp->node_wwn;
2627 fc->wwpn = lp->port_wwn;
2628 fc->port = lp->portid;
2629 fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2630 }
2631 ccb->ccb_h.status = CAM_REQ_CMP;
2632 xpt_done(ccb);
2633 break;
2634 }
2635 case XPT_CALC_GEOMETRY:
2636 cam_calc_geometry(&ccb->ccg, 1);
2637 xpt_done(ccb);
2638 break;
2639
2640 case XPT_RESET_BUS: /* Reset the specified bus */
2641 error = isp_control(isp, ISPCTL_RESET_BUS, bus);
2642 if (error) {
2643 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2644 xpt_done(ccb);
2645 break;
2646 }
2647 if (bootverbose) {
2648 xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus);
2649 }
2650 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0);
2651 ccb->ccb_h.status = CAM_REQ_CMP;
2652 xpt_done(ccb);
2653 break;
2654
2655 case XPT_TERM_IO: /* Terminate the I/O process */
2656 ccb->ccb_h.status = CAM_REQ_INVALID;
2657 xpt_done(ccb);
2658 break;
2659
2660 case XPT_SET_SIM_KNOB: /* Set SIM knobs */
2661 {
2662 struct ccb_sim_knob *kp = &ccb->knob;
2663 fcparam *fcp = FCPARAM(isp, bus);
2664
2665 if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
2666 fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn;
2667 fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn;
2668 isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn);
2669 }
2670 ccb->ccb_h.status = CAM_REQ_CMP;
2671 if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) {
2672 int rchange = 0;
2673 int newrole = 0;
2674
2675 switch (kp->xport_specific.fc.role) {
2676 case KNOB_ROLE_NONE:
2677 if (fcp->role != ISP_ROLE_NONE) {
2678 rchange = 1;
2679 newrole = ISP_ROLE_NONE;
2680 }
2681 break;
2682 case KNOB_ROLE_TARGET:
2683 if (fcp->role != ISP_ROLE_TARGET) {
2684 rchange = 1;
2685 newrole = ISP_ROLE_TARGET;
2686 }
2687 break;
2688 case KNOB_ROLE_INITIATOR:
2689 if (fcp->role != ISP_ROLE_INITIATOR) {
2690 rchange = 1;
2691 newrole = ISP_ROLE_INITIATOR;
2692 }
2693 break;
2694 case KNOB_ROLE_BOTH:
2695 if (fcp->role != ISP_ROLE_BOTH) {
2696 rchange = 1;
2697 newrole = ISP_ROLE_BOTH;
2698 }
2699 break;
2700 }
2701 if (rchange) {
2702 ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role from %d to %d\n", fcp->role, newrole);
2703 if (isp_control(isp, ISPCTL_CHANGE_ROLE,
2704 bus, newrole) != 0) {
2705 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2706 xpt_done(ccb);
2707 break;
2708 }
2709 }
2710 }
2711 xpt_done(ccb);
2712 break;
2713 }
2714 case XPT_GET_SIM_KNOB_OLD: /* Get SIM knobs -- compat value */
2715 case XPT_GET_SIM_KNOB: /* Get SIM knobs */
2716 {
2717 struct ccb_sim_knob *kp = &ccb->knob;
2718 fcparam *fcp = FCPARAM(isp, bus);
2719
2720 kp->xport_specific.fc.wwnn = fcp->isp_wwnn;
2721 kp->xport_specific.fc.wwpn = fcp->isp_wwpn;
2722 switch (fcp->role) {
2723 case ISP_ROLE_NONE:
2724 kp->xport_specific.fc.role = KNOB_ROLE_NONE;
2725 break;
2726 case ISP_ROLE_TARGET:
2727 kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
2728 break;
2729 case ISP_ROLE_INITIATOR:
2730 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
2731 break;
2732 case ISP_ROLE_BOTH:
2733 kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
2734 break;
2735 }
2736 kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
2737 ccb->ccb_h.status = CAM_REQ_CMP;
2738 xpt_done(ccb);
2739 break;
2740 }
2741 case XPT_PATH_INQ: /* Path routing inquiry */
2742 {
2743 struct ccb_pathinq *cpi = &ccb->cpi;
2744
2745 cpi->version_num = 1;
2746 #ifdef ISP_TARGET_MODE
2747 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2748 #else
2749 cpi->target_sprt = 0;
2750 #endif
2751 cpi->hba_eng_cnt = 0;
2752 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2753 cpi->max_lun = 255;
2754 cpi->bus_id = cam_sim_bus(sim);
2755 cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE;
2756
2757 fcp = FCPARAM(isp, bus);
2758
2759 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
2760 cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN;
2761
2762 /*
2763 * Because our loop ID can shift from time to time,
2764 * make our initiator ID out of range of our bus.
2765 */
2766 cpi->initiator_id = cpi->max_target + 1;
2767
2768 /*
2769 * Set base transfer capabilities for Fibre Channel for this HBA.
2770 */
2771 if (IS_25XX(isp))
2772 cpi->base_transfer_speed = 8000000;
2773 else
2774 cpi->base_transfer_speed = 4000000;
2775 cpi->hba_inquiry = PI_TAG_ABLE;
2776 cpi->transport = XPORT_FC;
2777 cpi->transport_version = 0;
2778 cpi->xport_specific.fc.wwnn = fcp->isp_wwnn;
2779 cpi->xport_specific.fc.wwpn = fcp->isp_wwpn;
2780 cpi->xport_specific.fc.port = fcp->isp_portid;
2781 cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000;
2782 cpi->protocol = PROTO_SCSI;
2783 cpi->protocol_version = SCSI_REV_2;
2784 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2785 strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2786 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2787 cpi->unit_number = cam_sim_unit(sim);
2788 cpi->ccb_h.status = CAM_REQ_CMP;
2789 xpt_done(ccb);
2790 break;
2791 }
2792 default:
2793 ccb->ccb_h.status = CAM_REQ_INVALID;
2794 xpt_done(ccb);
2795 break;
2796 }
2797 }
2798
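/*
 * Initiator-mode command completion: map the SCSI status into a CAM
 * status, freeze the device queue (setting CAM_DEV_QFRZN) for anything
 * other than a clean completion, stop the per-command watchdog, release
 * the per-command resources and hand the CCB back to CAM.
 */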
2799 void
2800 isp_done(XS_T *sccb)
2801 {
2802 ispsoftc_t *isp = XS_ISP(sccb);
2803 uint32_t status;
2804
2805 if (XS_NOERR(sccb))
2806 XS_SETERR(sccb, CAM_REQ_CMP);
2807
2808 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) {
2809 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2810 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2811 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2812 } else {
2813 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2814 }
2815 }
2816
2817 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2818 status = sccb->ccb_h.status & CAM_STATUS_MASK;
2819 if (status != CAM_REQ_CMP &&
2820 (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2821 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2822 xpt_freeze_devq(sccb->ccb_h.path, 1);
2823 }
2824
2825 if (ISP_PCMD(sccb)) {
2826 if (callout_active(&PISP_PCMD(sccb)->wdog))
2827 callout_stop(&PISP_PCMD(sccb)->wdog);
2828 isp_free_pcmd(isp, (union ccb *) sccb);
2829 }
2830 isp_rq_check_below(isp);
2831 xpt_done((union ccb *) sccb);
2832 }
2833
2834 void
2835 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
2836 {
2837 int bus;
2838 static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s";
2839 char buf[64];
2840 char *msg = NULL;
2841 target_id_t tgt = 0;
2842 fcportdb_t *lp;
2843 struct isp_fc *fc;
2844 struct ac_contract ac;
2845 struct ac_device_changed *adc;
2846 va_list ap;
2847
2848 switch (cmd) {
2849 case ISPASYNC_LOOP_RESET:
2850 {
2851 uint16_t lipp;
2852 fcparam *fcp;
2853 va_start(ap, cmd);
2854 bus = va_arg(ap, int);
2855 va_end(ap);
2856
2857 lipp = ISP_READ(isp, OUTMAILBOX1);
2858 fcp = FCPARAM(isp, bus);
2859
2860 isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp);
2861 /*
2862 * Per FCP-4, a Reset LIP should result in a CRN reset. Other
2863 * LIPs and loop up/down events should never reset the CRN. For
2864 * an as yet unknown reason, 24xx series cards (and
2865 * potentially others) can interrupt with a LIP Reset status
2866 * when no LIP reset came down the wire. Additionally, the LIP
2867 * primitive accompanying this status would not be a valid LIP
2868 * Reset primitive, but some variation of an invalid AL_PA
2869 * LIP. As a result, we have to verify that the AL_PD in the
2870 * LIP addresses our port before blindly resetting.
2871 */
2872 if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF)))
2873 isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0);
2874 isp_loop_changed(isp, bus);
2875 break;
2876 }
2877 case ISPASYNC_LIP:
2878 if (msg == NULL)
2879 msg = "LIP Received";
2880 /* FALLTHROUGH */
2881 case ISPASYNC_LOOP_DOWN:
2882 if (msg == NULL)
2883 msg = "LOOP Down";
2884 /* FALLTHROUGH */
2885 case ISPASYNC_LOOP_UP:
2886 if (msg == NULL)
2887 msg = "LOOP Up";
2888 va_start(ap, cmd);
2889 bus = va_arg(ap, int);
2890 va_end(ap);
2891 isp_loop_changed(isp, bus);
2892 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
2893 break;
2894 case ISPASYNC_DEV_ARRIVED:
2895 va_start(ap, cmd);
2896 bus = va_arg(ap, int);
2897 lp = va_arg(ap, fcportdb_t *);
2898 va_end(ap);
2899 fc = ISP_FC_PC(isp, bus);
2900 tgt = FC_PORTDB_TGT(isp, bus, lp);
2901 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2902 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived");
2903 if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2904 (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) {
2905 lp->is_target = 1;
2906 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2907 isp_make_here(isp, lp, bus, tgt);
2908 }
2909 if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2910 (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) {
2911 lp->is_initiator = 1;
2912 ac.contract_number = AC_CONTRACT_DEV_CHG;
2913 adc = (struct ac_device_changed *) ac.contract_data;
2914 adc->wwpn = lp->port_wwn;
2915 adc->port = lp->portid;
2916 adc->target = tgt;
2917 adc->arrived = 1;
2918 xpt_async(AC_CONTRACT, fc->path, &ac);
2919 }
2920 break;
2921 case ISPASYNC_DEV_CHANGED:
2922 case ISPASYNC_DEV_STAYED:
2923 {
2924 int crn_reset_done;
2925
2926 crn_reset_done = 0;
2927 va_start(ap, cmd);
2928 bus = va_arg(ap, int);
2929 lp = va_arg(ap, fcportdb_t *);
2930 va_end(ap);
2931 fc = ISP_FC_PC(isp, bus);
2932 tgt = FC_PORTDB_TGT(isp, bus, lp);
2933 isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3);
2934 if (cmd == ISPASYNC_DEV_CHANGED)
2935 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed");
2936 else
2937 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed");
2938
2939 if (lp->is_target !=
2940 ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2941 (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) {
2942 lp->is_target = !lp->is_target;
2943 if (lp->is_target) {
2944 if (cmd == ISPASYNC_DEV_CHANGED) {
2945 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2946 crn_reset_done = 1;
2947 }
2948 isp_make_here(isp, lp, bus, tgt);
2949 } else {
2950 isp_make_gone(isp, lp, bus, tgt);
2951 if (cmd == ISPASYNC_DEV_CHANGED) {
2952 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2953 crn_reset_done = 1;
2954 }
2955 }
2956 }
2957 if (lp->is_initiator !=
2958 ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2959 (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) {
2960 lp->is_initiator = !lp->is_initiator;
2961 ac.contract_number = AC_CONTRACT_DEV_CHG;
2962 adc = (struct ac_device_changed *) ac.contract_data;
2963 adc->wwpn = lp->port_wwn;
2964 adc->port = lp->portid;
2965 adc->target = tgt;
2966 adc->arrived = lp->is_initiator;
2967 xpt_async(AC_CONTRACT, fc->path, &ac);
2968 }
2969
2970 if ((cmd == ISPASYNC_DEV_CHANGED) &&
2971 (crn_reset_done == 0))
2972 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2973
2974 break;
2975 }
2976 case ISPASYNC_DEV_GONE:
2977 va_start(ap, cmd);
2978 bus = va_arg(ap, int);
2979 lp = va_arg(ap, fcportdb_t *);
2980 va_end(ap);
2981 fc = ISP_FC_PC(isp, bus);
2982 tgt = FC_PORTDB_TGT(isp, bus, lp);
2983 /*
2984 * If this has a virtual target or initiator, set the isp_gdt
2985 * timer running on it to delay its departure.
2986 */
2987 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2988 if (lp->is_target || lp->is_initiator) {
2989 lp->state = FC_PORTDB_STATE_ZOMBIE;
2990 lp->gone_timer = fc->gone_device_time;
2991 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie");
2992 if (fc->ready && !callout_active(&fc->gdt)) {
2993 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime);
2994 callout_reset(&fc->gdt, hz, isp_gdt, fc);
2995 }
2996 break;
2997 }
2998 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone");
2999 break;
3000 case ISPASYNC_CHANGE_NOTIFY:
3001 {
3002 char *msg;
3003 int evt, nphdl, nlstate, portid, reason;
3004
3005 va_start(ap, cmd);
3006 bus = va_arg(ap, int);
3007 evt = va_arg(ap, int);
3008 if (evt == ISPASYNC_CHANGE_PDB) {
3009 nphdl = va_arg(ap, int);
3010 nlstate = va_arg(ap, int);
3011 reason = va_arg(ap, int);
3012 } else if (evt == ISPASYNC_CHANGE_SNS) {
3013 portid = va_arg(ap, int);
3014 } else {
3015 nphdl = NIL_HANDLE;
3016 nlstate = reason = 0;
3017 }
3018 va_end(ap);
3019
3020 if (evt == ISPASYNC_CHANGE_PDB) {
3021 int tgt_set = 0;
3022 msg = "Port Database Changed";
3023 isp_prt(isp, ISP_LOGINFO,
3024 "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)",
3025 bus, msg, nphdl, nlstate, reason);
3026 /*
3027 * Port database syncs are not sufficient for
3028 * determining that logins or logouts are done on the
3029 * loop, but this information is directly available from
3030 * the reason code in the incoming mbox. We must reset
3031 * the FCP CRN on these events according to FCP-4.
3032 */
3033 switch (reason) {
3034 case PDB24XX_AE_IMPL_LOGO_1:
3035 case PDB24XX_AE_IMPL_LOGO_2:
3036 case PDB24XX_AE_IMPL_LOGO_3:
3037 case PDB24XX_AE_PLOGI_RCVD:
3038 case PDB24XX_AE_PRLI_RCVD:
3039 case PDB24XX_AE_PRLO_RCVD:
3040 case PDB24XX_AE_LOGO_RCVD:
3041 case PDB24XX_AE_PLOGI_DONE:
3042 case PDB24XX_AE_PRLI_DONE:
3043 /*
3044 * If the event is not global, twiddle tgt and
3045 * tgt_set to nominate only the target
3046 * associated with the nphdl.
3047 */
3048 if (nphdl != PDB24XX_AE_GLOBAL) {
3049 /* Break if we don't yet have the pdb */
3050 if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp))
3051 break;
3052 tgt = FC_PORTDB_TGT(isp, bus, lp);
3053 tgt_set = 1;
3054 }
3055 isp_fcp_reset_crn(isp, bus, tgt, tgt_set);
3056 break;
3057 default:
3058 break; /* NOP */
3059 }
3060 } else if (evt == ISPASYNC_CHANGE_SNS) {
3061 msg = "Name Server Database Changed";
3062 isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)",
3063 bus, msg, portid);
3064 } else {
3065 msg = "Other Change Notify";
3066 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
3067 }
3068 isp_loop_changed(isp, bus);
3069 break;
3070 }
3071 #ifdef ISP_TARGET_MODE
3072 case ISPASYNC_TARGET_NOTIFY:
3073 {
3074 isp_notify_t *notify;
3075 va_start(ap, cmd);
3076 notify = va_arg(ap, isp_notify_t *);
3077 va_end(ap);
3078 switch (notify->nt_ncode) {
3079 case NT_ABORT_TASK:
3080 case NT_ABORT_TASK_SET:
3081 case NT_CLEAR_ACA:
3082 case NT_CLEAR_TASK_SET:
3083 case NT_LUN_RESET:
3084 case NT_TARGET_RESET:
3085 case NT_QUERY_TASK_SET:
3086 case NT_QUERY_ASYNC_EVENT:
3087 /*
3088 * These are task management functions.
3089 */
3090 isp_handle_platform_target_tmf(isp, notify);
3091 break;
3092 case NT_LIP_RESET:
3093 case NT_LINK_UP:
3094 case NT_LINK_DOWN:
3095 case NT_HBA_RESET:
3096 /*
3097 * No action need be taken here.
3098 */
3099 break;
3100 case NT_SRR:
3101 isp_handle_platform_srr(isp, notify);
3102 break;
3103 default:
3104 isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode);
3105 isp_handle_platform_target_notify_ack(isp, notify, 0);
3106 break;
3107 }
3108 break;
3109 }
3110 case ISPASYNC_TARGET_NOTIFY_ACK:
3111 {
3112 void *inot;
3113 va_start(ap, cmd);
3114 inot = va_arg(ap, void *);
3115 va_end(ap);
3116 if (isp_notify_ack(isp, inot)) {
3117 isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT);
3118 if (tp) {
3119 tp->isp = isp;
3120 memcpy(tp->data, inot, sizeof (tp->data));
3121 tp->not = tp->data;
3122 callout_init_mtx(&tp->timer, &isp->isp_lock, 0);
3123 callout_reset(&tp->timer, 5,
3124 isp_refire_notify_ack, tp);
3125 } else {
3126 isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire");
3127 }
3128 }
3129 break;
3130 }
3131 case ISPASYNC_TARGET_ACTION:
3132 {
3133 isphdr_t *hp;
3134
3135 va_start(ap, cmd);
3136 hp = va_arg(ap, isphdr_t *);
3137 va_end(ap);
3138 switch (hp->rqs_entry_type) {
3139 case RQSTYPE_ATIO:
3140 isp_handle_platform_atio7(isp, (at7_entry_t *)hp);
3141 break;
3142 case RQSTYPE_CTIO7:
3143 isp_handle_platform_ctio(isp, (ct7_entry_t *)hp);
3144 break;
3145 default:
3146 isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x",
3147 __func__, hp->rqs_entry_type);
3148 break;
3149 }
3150 break;
3151 }
3152 #endif
3153 case ISPASYNC_FW_CRASH:
3154 {
3155 uint16_t mbox1;
3156 mbox1 = ISP_READ(isp, OUTMAILBOX1);
3157 isp_prt(isp, ISP_LOGERR, "Internal Firmware Error @ RISC Address 0x%x", mbox1);
3158 #if 0
3159 isp_reinit(isp, 1);
3160 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3161 #endif
3162 break;
3163 }
3164 default:
3165 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3166 break;
3167 }
3168 }
3169
3170 uint64_t
3171 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
3172 {
3173 uint64_t seed;
3174 struct isp_fc *fc = ISP_FC_PC(isp, chan);
3175
3176 /* First try to use explicitly configured WWNs. */
3177 seed = iswwnn ? fc->def_wwnn : fc->def_wwpn;
3178 if (seed)
3179 return (seed);
3180
3181 /* Otherwise try to use WWNs from NVRAM. */
3182 if (isactive) {
3183 seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram :
3184 FCPARAM(isp, chan)->isp_wwpn_nvram;
3185 if (seed)
3186 return (seed);
3187 }
3188
3189 /* If still no WWNs, try to steal them from the first channel. */
3190 if (chan > 0) {
3191 seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn :
3192 ISP_FC_PC(isp, 0)->def_wwpn;
3193 if (seed == 0) {
3194 seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram :
3195 FCPARAM(isp, 0)->isp_wwpn_nvram;
3196 }
3197 }
3198
3199 /* If still nothing -- improvise. */
3200 if (seed == 0) {
3201 seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev);
3202 if (!iswwnn)
3203 seed ^= 0x0100000000000000ULL;
3204 }
3205
3206 /* For additional channels we have to improvise even more. */
3207 if (!iswwnn && chan > 0) {
3208 /*
3209 * We'll stick the low nibble of our channel number plus one into
3210 * bits 56..59 and the high nibble into bits 52..55, which allows
3211 * for 8 bits of channel, enough for our maximum of 255 channels.
3212 */
3213 seed ^= 0x0100000000000000ULL;
3214 seed ^= ((uint64_t) (chan + 1) & 0xf) << 56;
3215 seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52;
3216 }
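/*
 * Worked example (assuming no configured or NVRAM WWNs and device unit 0):
 * for a WWPN on chan 1 the improvised seed above is 0x410000007F000000;
 * the block above first undoes the 0x0100000000000000 toggle and then
 * folds (chan + 1) == 2 into bits 56..59, yielding 0x420000007F000000.
 */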
3217 return (seed);
3218 }
3219
3220 void
3221 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
3222 {
3223 int loc;
3224 char lbuf[200];
3225 va_list ap;
3226
3227 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3228 return;
3229 }
3230 snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev));
3231 loc = strlen(lbuf);
3232 va_start(ap, fmt);
3233 vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap);
3234 va_end(ap);
3235 printf("%s\n", lbuf);
3236 }
3237
3238 void
3239 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
3240 {
3241 va_list ap;
3242 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3243 return;
3244 }
3245 xpt_print_path(xs->ccb_h.path);
3246 va_start(ap, fmt);
3247 vprintf(fmt, ap);
3248 va_end(ap);
3249 printf("\n");
3250 }
3251
3252 uint64_t
3253 isp_nanotime_sub(struct timespec *b, struct timespec *a)
3254 {
3255 uint64_t elapsed;
3256 struct timespec x;
3257
3258 timespecsub(b, a, &x);
3259 elapsed = GET_NANOSEC(&x);
3260 if (elapsed == 0)
3261 elapsed++;
3262 return (elapsed);
3263 }
3264
3265 int
3266 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan)
3267 {
3268 struct isp_fc *fc = ISP_FC_PC(isp, chan);
3269
3270 if (fc->fcbsy)
3271 return (-1);
3272 fc->fcbsy = 1;
3273 return (0);
3274 }
3275
3276 void
3277 isp_platform_intr(void *arg)
3278 {
3279 ispsoftc_t *isp = arg;
3280
3281 ISP_LOCK(isp);
3282 ISP_RUN_ISR(isp);
3283 ISP_UNLOCK(isp);
3284 }
3285
3286 void
3287 isp_platform_intr_resp(void *arg)
3288 {
3289 ispsoftc_t *isp = arg;
3290
3291 ISP_LOCK(isp);
3292 isp_intr_respq(isp);
3293 ISP_UNLOCK(isp);
3294
3295 /* We have handshake enabled, so explicitly complete interrupt */
3296 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3297 }
3298
3299 void
3300 isp_platform_intr_atio(void *arg)
3301 {
3302 ispsoftc_t *isp = arg;
3303
3304 ISP_LOCK(isp);
3305 #ifdef ISP_TARGET_MODE
3306 isp_intr_atioq(isp);
3307 #endif
3308 ISP_UNLOCK(isp);
3309
3310 /* We have handshake enabled, so explicitly complete interrupt */
3311 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3312 }
3313
3314 typedef struct {
3315 ispsoftc_t *isp;
3316 struct ccb_scsiio *csio;
3317 void *qe;
3318 int error;
3319 } mush_t;
3320
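/*
 * DMA mapping glue for outgoing commands: isp_dmasetup() loads the CCB's
 * data buffer with bus_dmamap_load_ccb() and, because the load is done
 * with BUS_DMA_NOWAIT, the isp_dma2() callback runs synchronously to sync
 * the map and pass the resulting S/G list to ISP_SEND_CMD().
 */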
3321 static void
3322 isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
3323 {
3324 mush_t *mp = (mush_t *) arg;
3325 ispsoftc_t *isp= mp->isp;
3326 struct ccb_scsiio *csio = mp->csio;
3327 bus_dmasync_op_t op;
3328
3329 if (error) {
3330 mp->error = error;
3331 return;
3332 }
3333 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3334 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3335 op = BUS_DMASYNC_PREREAD;
3336 else
3337 op = BUS_DMASYNC_PREWRITE;
3338 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3339
3340 mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg);
3341 if (mp->error)
3342 isp_dmafree(isp, csio);
3343 }
3344
3345 int
3346 isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe)
3347 {
3348 mush_t mp;
3349 int error;
3350
3351 if (XS_XFRLEN(csio)) {
3352 mp.isp = isp;
3353 mp.csio = csio;
3354 mp.qe = qe;
3355 mp.error = 0;
3356 error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
3357 (union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT);
3358 if (error == 0)
3359 error = mp.error;
3360 } else {
3361 error = ISP_SEND_CMD(isp, qe, NULL, 0);
3362 }
3363 switch (error) {
3364 case 0:
3365 case CMD_COMPLETE:
3366 case CMD_EAGAIN:
3367 case CMD_RQLATER:
3368 break;
3369 case ENOMEM:
3370 error = CMD_EAGAIN;
3371 break;
3372 case EINVAL:
3373 case EFBIG:
3374 csio->ccb_h.status = CAM_REQ_INVALID;
3375 error = CMD_COMPLETE;
3376 break;
3377 default:
3378 csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
3379 error = CMD_COMPLETE;
3380 break;
3381 }
3382 return (error);
3383 }
3384
3385 void
3386 isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio)
3387 {
3388 bus_dmasync_op_t op;
3389
3390 if (XS_XFRLEN(csio) == 0)
3391 return;
3392
3393 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3394 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3395 op = BUS_DMASYNC_POSTREAD;
3396 else
3397 op = BUS_DMASYNC_POSTWRITE;
3398 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3399 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
3400 }
3401
3402 /*
3403 * Reset the command reference number for all LUNs on a specific target
3404 * (needed when a target arrives again) or for all targets on a port
3405 * (needed for events like a LIP).
3406 */
3407 void
3408 isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set)
3409 {
3410 struct isp_fc *fc = ISP_FC_PC(isp, chan);
3411 struct isp_nexus *nxp;
3412 int i;
3413
3414 if (tgt_set == 0)
3415 isp_prt(isp, ISP_LOGDEBUG0,
3416 "Chan %d resetting CRN on all targets", chan);
3417 else
3418 isp_prt(isp, ISP_LOGDEBUG0,
3419 "Chan %d resetting CRN on target %u", chan, tgt);
3420
3421 for (i = 0; i < NEXUS_HASH_WIDTH; i++) {
3422 for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) {
3423 if (tgt_set == 0 || tgt == nxp->tgt)
3424 nxp->crnseed = 0;
3425 }
3426 }
3427 }
3428
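/*
 * Look up (or create) the nexus for this command's target and LUN and
 * return the next command reference number for it.  A crnseed of zero
 * means "just reset", so the sequence restarts at 1 rather than 0, as
 * FCP-4 expects after a reset.  Returns -1 if a nexus cannot be allocated.
 */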
3429 int
3430 isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd)
3431 {
3432 lun_id_t lun;
3433 uint32_t chan, tgt;
3434 struct isp_fc *fc;
3435 struct isp_nexus *nxp;
3436 int idx;
3437
3438 chan = XS_CHANNEL(cmd);
3439 tgt = XS_TGT(cmd);
3440 lun = XS_LUN(cmd);
3441 fc = ISP_FC_PC(isp, chan);
3442 idx = NEXUS_HASH(tgt, lun);
3443 nxp = fc->nexus_hash[idx];
3444
3445 while (nxp) {
3446 if (nxp->tgt == tgt && nxp->lun == lun)
3447 break;
3448 nxp = nxp->next;
3449 }
3450 if (nxp == NULL) {
3451 nxp = fc->nexus_free_list;
3452 if (nxp == NULL) {
3453 nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT);
3454 if (nxp == NULL) {
3455 return (-1);
3456 }
3457 } else {
3458 fc->nexus_free_list = nxp->next;
3459 }
3460 nxp->tgt = tgt;
3461 nxp->lun = lun;
3462 nxp->next = fc->nexus_hash[idx];
3463 fc->nexus_hash[idx] = nxp;
3464 }
3465 if (nxp->crnseed == 0)
3466 nxp->crnseed = 1;
3467 *crnp = nxp->crnseed++;
3468 return (0);
3469 }
3470
3471 /*
3472 * We enter with the lock held
3473 */
3474 void
3475 isp_timer(void *arg)
3476 {
3477 ispsoftc_t *isp = arg;
3478 #ifdef ISP_TARGET_MODE
3479 isp_tmcmd_restart(isp);
3480 #endif
3481 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
3482 }
3483
3484 #ifdef ISP_TARGET_MODE
3485 isp_ecmd_t *
3486 isp_get_ecmd(ispsoftc_t *isp)
3487 {
3488 isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free;
3489 if (ecmd) {
3490 isp->isp_osinfo.ecmd_free = ecmd->next;
3491 }
3492 return (ecmd);
3493 }
3494
3495 void
3496 isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd)
3497 {
3498 ecmd->next = isp->isp_osinfo.ecmd_free;
3499 isp->isp_osinfo.ecmd_free = ecmd;
3500 }
3501 #endif
3502