xref: /freebsd/sys/dev/tws/tws_cam.c (revision a134ebd6e63f658f2d3d04ac0c60d23bcaa86dd7)
1 /*
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010 LSI Corp.
5  * All rights reserved.
6  * Author : Manjunath Ranganathaiah <manjunath.ranganathaiah@lsi.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 #include <dev/tws/tws.h>
33 #include <dev/tws/tws_services.h>
34 #include <dev/tws/tws_hdm.h>
35 #include <dev/tws/tws_user.h>
36 #include <cam/cam.h>
37 #include <cam/cam_ccb.h>
38 #include <cam/cam_sim.h>
39 #include <cam/cam_xpt_sim.h>
40 #include <cam/cam_debug.h>
41 #include <cam/cam_periph.h>
42 
43 #include <cam/scsi/scsi_all.h>
44 #include <cam/scsi/scsi_message.h>
45 
46 static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
47 static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};
48 
49 static void  tws_action(struct cam_sim *sim, union ccb *ccb);
50 static void  tws_poll(struct cam_sim *sim);
51 static void tws_scsi_complete(struct tws_request *req);
52 
53 
54 
55 void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
56 int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
57 int tws_bus_scan(struct tws_softc *sc);
58 int tws_cam_attach(struct tws_softc *sc);
59 void tws_cam_detach(struct tws_softc *sc);
60 void tws_reset(void *arg);
61 
62 static void tws_reset_cb(void *arg);
63 static void tws_reinit(void *arg);
64 static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
65 static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req);
66 static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
67                             int nseg, int error);
68 static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
69                             void *sgl_dest, u_int16_t num_sgl_entries);
70 static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
71 static void tws_scsi_err_complete(struct tws_request *req,
72                                                struct tws_command_header *hdr);
73 static void tws_passthru_err_complete(struct tws_request *req,
74                                                struct tws_command_header *hdr);
75 
76 
77 void tws_timeout(void *arg);
78 static void tws_intr_attn_aen(struct tws_softc *sc);
79 static void tws_intr_attn_error(struct tws_softc *sc);
80 static void tws_intr_resp(struct tws_softc *sc);
81 void tws_intr(void *arg);
82 void tws_cmd_complete(struct tws_request *req);
83 void tws_aen_complete(struct tws_request *req);
84 int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
85 void tws_getset_param_complete(struct tws_request *req);
86 int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
87               u_int32_t param_size, void *data);
88 int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
89               u_int32_t param_size, void *data);
90 
91 
92 extern struct tws_request *tws_get_request(struct tws_softc *sc,
93                                             u_int16_t type);
94 extern void *tws_release_request(struct tws_request *req);
95 extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
96 extern boolean tws_get_response(struct tws_softc *sc,
97                                            u_int16_t *req_id, u_int64_t *mfa);
98 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
99                                 u_int8_t q_type );
100 extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
101                                    struct tws_request *req, u_int8_t q_type );
102 extern void tws_send_event(struct tws_softc *sc, u_int8_t event);
103 
104 extern struct tws_sense *
105 tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);
106 
107 extern void tws_fetch_aen(void *arg);
108 extern void tws_disable_db_intr(struct tws_softc *sc);
109 extern void tws_enable_db_intr(struct tws_softc *sc);
110 extern void tws_passthru_complete(struct tws_request *req);
111 extern void tws_aen_synctime_with_host(struct tws_softc *sc);
112 extern void tws_circular_aenq_insert(struct tws_softc *sc,
113                     struct tws_circular_q *cq, struct tws_event_packet *aen);
114 extern int tws_use_32bit_sgls;
115 extern boolean tws_ctlr_reset(struct tws_softc *sc);
116 extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc,
117                                                            u_int8_t q_type );
118 extern void tws_turn_off_interrupts(struct tws_softc *sc);
119 extern void tws_turn_on_interrupts(struct tws_softc *sc);
120 extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
121 extern void tws_init_obfl_q(struct tws_softc *sc);
122 extern uint8_t tws_get_state(struct tws_softc *sc);
123 extern void tws_assert_soft_reset(struct tws_softc *sc);
124 extern boolean tws_ctlr_ready(struct tws_softc *sc);
125 extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
126 extern int tws_setup_intr(struct tws_softc *sc, int irqs);
127 extern int tws_teardown_intr(struct tws_softc *sc);
128 
129 
130 
131 int
132 tws_cam_attach(struct tws_softc *sc)
133 {
134     struct cam_devq *devq;
135 
136     TWS_TRACE_DEBUG(sc, "entry", 0, sc);
137     /* Create a device queue for sim */
138 
139     /*
140      * if the user sets cam depth to less than 1
141      * cam may get confused
142      */
143     if ( tws_cam_depth < 1 )
144         tws_cam_depth = 1;
145     if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS)  )
146         tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;
147 
148     TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);
149 
150     if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
151         tws_log(sc, CAM_SIMQ_ALLOC);
152         return(ENOMEM);
153     }
154 
155    /*
156     * Create a SIM entry.  Though we can support tws_cam_depth
157     * simultaneous requests, we claim to be able to handle only
158     * (tws_cam_depth), so that we always have reserved  requests
159     * packet available to service ioctls and internal commands.
160     */
161     sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
162                       device_get_unit(sc->tws_dev),
163                       &sc->sim_lock,
164                       tws_cam_depth, 1, devq);
165                       /* 1, 1, devq); */
166     if (sc->sim == NULL) {
167         cam_simq_free(devq);
168         tws_log(sc, CAM_SIM_ALLOC);
169     }
170     /* Register the bus. */
171     mtx_lock(&sc->sim_lock);
172     if (xpt_bus_register(sc->sim,
173                          sc->tws_dev,
174                          0) != CAM_SUCCESS) {
175         cam_sim_free(sc->sim, TRUE); /* passing true will free the devq */
176         sc->sim = NULL; /* so cam_detach will not try to free it */
177         mtx_unlock(&sc->sim_lock);
178         tws_log(sc, TWS_XPT_BUS_REGISTER);
179         return(ENXIO);
180     }
181     if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
182                          CAM_TARGET_WILDCARD,
183                          CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
184         xpt_bus_deregister(cam_sim_path(sc->sim));
185         /* Passing TRUE to cam_sim_free will free the devq as well. */
186         cam_sim_free(sc->sim, TRUE);
187         tws_log(sc, TWS_XPT_CREATE_PATH);
188         mtx_unlock(&sc->sim_lock);
189         return(ENXIO);
190     }
191     mtx_unlock(&sc->sim_lock);
192 
193     return(0);
194 }
195 
196 void
197 tws_cam_detach(struct tws_softc *sc)
198 {
199     TWS_TRACE_DEBUG(sc, "entry", 0, 0);
200     mtx_lock(&sc->sim_lock);
201     if (sc->path)
202         xpt_free_path(sc->path);
203     if (sc->sim) {
204         xpt_bus_deregister(cam_sim_path(sc->sim));
205         cam_sim_free(sc->sim, TRUE);
206     }
207     mtx_unlock(&sc->sim_lock);
208 }
209 
210 int
211 tws_bus_scan(struct tws_softc *sc)
212 {
213     union ccb       *ccb;
214 
215     TWS_TRACE_DEBUG(sc, "entry", sc, 0);
216     if (!(sc->sim))
217         return(ENXIO);
218     ccb = xpt_alloc_ccb();
219     mtx_lock(&sc->sim_lock);
220     if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim),
221                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
222 	mtx_unlock(&sc->sim_lock);
223         xpt_free_ccb(ccb);
224         return(EIO);
225     }
226     xpt_rescan(ccb);
227     mtx_unlock(&sc->sim_lock);
228     return(0);
229 }
230 
/*
 * CAM action entry point: dispatch a CCB handed down by the transport.
 * SCSI I/O is forwarded to tws_execute_scsi(); informational requests
 * (transfer settings, geometry, path inquiry) are answered inline.
 */
static void
tws_action(struct cam_sim *sim, union ccb *ccb)
{
    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);

    switch( ccb->ccb_h.func_code ) {
        case XPT_SCSI_IO:
        {
            /* tws_execute_scsi() completes the ccb on its own paths. */
            if ( tws_execute_scsi(sc, ccb) )
                TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
            break;
        }
        case XPT_ABORT:
        {
            /* Abort of in-flight firmware commands is not supported. */
            TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
            ccb->ccb_h.status = CAM_UA_ABORT;
            xpt_done(ccb);
            break;
        }
        case XPT_RESET_BUS:
        {
            /*
             * NOTE(review): this case neither sets a status nor calls
             * xpt_done(), so CAM never sees a completion for the reset
             * CCB -- confirm this is intentional.
             */
            TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
            break;
        }
        case XPT_SET_TRAN_SETTINGS:
        {
            /* Transfer settings are fixed; refuse changes. */
            TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
            ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
            xpt_done(ccb);

            break;
        }
        case XPT_GET_TRAN_SETTINGS:
        {
            TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);

            /* Report a fixed SPI transport with disconnect/tagging on. */
            ccb->cts.protocol = PROTO_SCSI;
            ccb->cts.protocol_version = SCSI_REV_2;
            ccb->cts.transport = XPORT_SPI;
            ccb->cts.transport_version = 2;

            ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
            ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
            ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
            ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        case XPT_CALC_GEOMETRY:
        {
            TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
                                          ccb->ccg.block_size);
            /* Let CAM compute extended-translation geometry. */
            cam_calc_geometry(&ccb->ccg, 1/* extended */);
            xpt_done(ccb);

            break;
        }
        case XPT_PATH_INQ:
        {
            /* Describe the HBA's capabilities and identity to CAM. */
            TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
            ccb->cpi.version_num = 1;
            ccb->cpi.hba_inquiry = 0;
            ccb->cpi.target_sprt = 0;
            ccb->cpi.hba_misc = 0;
            ccb->cpi.hba_eng_cnt = 0;
            ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
            ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
            ccb->cpi.unit_number = cam_sim_unit(sim);
            ccb->cpi.bus_id = cam_sim_bus(sim);
            ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
            ccb->cpi.base_transfer_speed = 6000000;
            strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
            strlcpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
            strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
            ccb->cpi.transport = XPORT_SPI;
            ccb->cpi.transport_version = 2;
            ccb->cpi.protocol = PROTO_SCSI;
            ccb->cpi.protocol_version = SCSI_REV_2;
            ccb->cpi.maxio = TWS_MAX_IO_SIZE;
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        default:
            /* Anything we do not implement is rejected as invalid. */
            TWS_TRACE_DEBUG(sc, "default", sim, ccb);
            ccb->ccb_h.status = CAM_REQ_INVALID;
            xpt_done(ccb);
            break;
    }
}
325 
/*
 * Completion callback for a successful SCSI I/O: remove the request from
 * the busy queue, cancel its timeout, unload its DMA mapping, complete
 * the CCB back to CAM, and finally recycle the request onto the free
 * queue.  Note the strict lock ordering: q_lock and sim_lock are never
 * held together.
 */
static void
tws_scsi_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);

    callout_stop(&req->timeout);
    tws_unmap_request(req->sc, req);

    req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
    mtx_lock(&sc->sim_lock);
    xpt_done(req->ccb_ptr);
    mtx_unlock(&sc->sim_lock);

    /* Only after CAM has been notified is the request made reusable. */
    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}
348 
/*
 * Completion callback for get/set-parameter firmware commands: cancel
 * the timeout, unload DMA, free the parameter buffer allocated by the
 * submitter (tws_set_param/tws_get_param) and release the reserved
 * request.
 */
void
tws_getset_param_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);

    callout_stop(&req->timeout);
    tws_unmap_request(sc, req);

    free(req->data, M_TWS);

    /* Reserved requests are recycled via state, not the free queue. */
    req->state = TWS_REQ_STATE_FREE;
}
363 
/*
 * Completion callback for the AEN-fetch request issued by
 * tws_send_scsi_cmd().  Decodes the returned sense buffer into an event
 * packet, queues it on the circular AEN queue and, unless the firmware
 * reported an empty queue, immediately re-arms another fetch.
 */
void
tws_aen_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;
    struct tws_command_header *sense;
    struct tws_event_packet event;
    u_int16_t aen_code=0;

    TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);

    callout_stop(&req->timeout);
    tws_unmap_request(sc, req);

    /* The data buffer holds a command header with embedded sense data. */
    sense = (struct tws_command_header *)req->data;

    TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0],
                                   sense->sense_data[2]);
    TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id,
                                   sense->status_block.res__severity);
    TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum,
                                   sense->status_block.error);
    TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header,
                                   sense->header_desc.size_sense);

    aen_code = sense->status_block.error;

    switch ( aen_code ) {
        case TWS_AEN_SYNC_TIME_WITH_HOST :
            /* Firmware asked for a clock sync; handled out of band. */
            tws_aen_synctime_with_host(sc);
            break;
        case TWS_AEN_QUEUE_EMPTY :
            /* Nothing pending; do not re-arm a fetch below. */
            break;
        default :
            /* Translate the sense block into a host event record. */
            bzero(&event, sizeof(struct tws_event_packet));
            event.sequence_id = sc->seq_id;
            event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
            event.aen_code = sense->status_block.error;
            /* Low 3 bits index tws_sev_str[] (ERROR..DEBUG). */
            event.severity = sense->status_block.res__severity & 0x7;
            event.event_src = TWS_SRC_CTRL_EVENT;
            strcpy(event.severity_str, tws_sev_str[event.severity]);
            event.retrieved = TWS_AEN_NOT_RETRIEVED;

            bcopy(sense->err_specific_desc, event.parameter_data,
                                    TWS_ERROR_SPECIFIC_DESC_LEN);
            event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
            event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;

            /*
             * The descriptor may hold two consecutive NUL-terminated
             * strings (message + source); include the second if present.
             */
            if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
                event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
                                                event.parameter_len) + 1);
            }

            device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
                event.severity_str,
                event.event_src,
                event.aen_code,
                event.parameter_data +
                     (strlen(event.parameter_data) + 1),
                event.parameter_data);

            mtx_lock(&sc->gen_lock);
            tws_circular_aenq_insert(sc, &sc->aen_q, &event);
            sc->seq_id++;
            mtx_unlock(&sc->gen_lock);
            break;

    }

    /* Buffer was allocated by tws_send_scsi_cmd(); release everything. */
    free(req->data, M_TWS);

    req->state = TWS_REQ_STATE_FREE;

    if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
        /* timeout(tws_fetch_aen, sc, 1);*/
        sc->stats.num_aens++;
        /* Keep draining until the firmware reports an empty AEN queue. */
        tws_fetch_aen((void *)sc);
    }
}
442 
/*
 * Generic completion for internal commands: just cancel the timeout and
 * unload the DMA mapping; the request itself is recycled by the caller.
 */
void
tws_cmd_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    callout_stop(&req->timeout);
    tws_unmap_request(sc, req);
}
451 
/*
 * Handle a command that completed with error.  The firmware hands back
 * the message frame address (mfa) of the sense buffer; look up the
 * matching request, dispatch to the type-specific error handler, then
 * re-post the sense buffer to the controller's outbound free list.
 */
static void
tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
{
    struct tws_command_header *hdr;
    struct tws_sense *sen;
    struct tws_request *req;
    u_int16_t req_id;
    u_int32_t reg, status;

    if ( !mfa ) {
        TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
        return;
    } else {
        /* lookup the sense */
        sen = tws_find_sense_from_mfa(sc, mfa);
        if ( sen == NULL ) {
            TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
            return;
        }
        hdr = sen->hdr;
        TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
        /* The header's request id maps straight into sc->reqs[]. */
        req_id = hdr->header_desc.request_id;
        req = &sc->reqs[req_id];
        TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
        if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
            TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
    }

    /*
     * NOTE(review): no default case -- request types other than the
     * three below (e.g. AEN fetch) fall through silently; confirm that
     * is the intended behavior.
     */
    switch (req->type) {
        case TWS_REQ_TYPE_PASSTHRU :
            tws_passthru_err_complete(req, hdr);
            break;
        case TWS_REQ_TYPE_GETSET_PARAM :
            tws_getset_param_complete(req);
            break;
        case TWS_REQ_TYPE_SCSI_IO :
            tws_scsi_err_complete(req, hdr);
            break;

    }

    /* Return the sense frame to the controller's outbound free queue. */
    mtx_lock(&sc->io_lock);
    hdr->header_desc.size_header = 128;
    reg = (u_int32_t)( mfa>>32);
    tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
    reg = (u_int32_t)(mfa);
    tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);

    /* Bit 13 of the status register flags an outbound-free-list overrun. */
    status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
    if ( status & TWS_BIT13 ) {
        device_printf(sc->tws_dev,  "OBFL Overrun\n");
        sc->obfl_q_overrun = true;
    }
    mtx_unlock(&sc->io_lock);
}
507 
/*
 * Error completion for a SCSI I/O request: translate the firmware error
 * code into a CAM status, copy autosense data into the CCB, complete it
 * back to CAM and recycle the request.
 */
static void
tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
{
    u_int8_t *sense_data;
    struct tws_softc *sc = req->sc;
    union ccb *ccb = req->ccb_ptr;

    TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
                                 req->cmd_pkt->cmd.pkt_a.status);
    if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
         hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {

        /* Nonexistent lun vs. nonexistent target map to different codes. */
        if ( ccb->ccb_h.target_lun ) {
            TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
            ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
        } else {
            TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
            ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
        }

    } else {
        TWS_TRACE_DEBUG(sc, "scsi status  error",0,0);
        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
        /* 0x1A == MODE SENSE(6); firmware rejects unsupported pages. */
        if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
              (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
            ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
            TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
        }
    }

    /* if there were no error simply mark complete error */
    if (ccb->ccb_h.status == 0)
        ccb->ccb_h.status = CAM_REQ_CMP_ERR;

    /*
     * NOTE(review): sense_data is the address of an embedded struct
     * member, so this check is always true.
     */
    sense_data = (u_int8_t *)&ccb->csio.sense_data;
    if (sense_data) {
        memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
        ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
    ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;

    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    mtx_lock(&sc->sim_lock);
    xpt_done(ccb);
    mtx_unlock(&sc->sim_lock);

    callout_stop(&req->timeout);
    tws_unmap_request(req->sc, req);
    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}
562 
/*
 * Error completion for a passthru (ioctl) request: record the firmware
 * error code, copy the returned command header back into the request's
 * packet so userland can inspect it, then run the normal passthru
 * completion.
 */
static void
tws_passthru_err_complete(struct tws_request *req,
                                          struct tws_command_header *hdr)
{
    TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
    req->error_code = hdr->status_block.error;
    memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
    tws_passthru_complete(req);
}
572 
/*
 * Fail back every in-flight SCSI request after a controller reset: each
 * busy request's CCB is completed with CAM_REQUEUE_REQ | CAM_SCSI_BUS_RESET
 * so CAM retries it, and the request returns to the free queue.  Locks
 * are dropped/reacquired per iteration to preserve q_lock/sim_lock order.
 */
static void
tws_drain_busy_queue(struct tws_softc *sc)
{
    struct tws_request *req;
    union ccb          *ccb;
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->q_lock);
    req = tws_q_remove_tail(sc, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);
    while ( req ) {
        TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id);
	callout_stop(&req->timeout);

        req->error_code = TWS_REQ_RET_RESET;
        ccb = (union ccb *)(req->ccb_ptr);

        /* Ask CAM to requeue; flag that the bus was reset. */
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        ccb->ccb_h.status |=  CAM_REQUEUE_REQ;
        ccb->ccb_h.status |=  CAM_SCSI_BUS_RESET;

        tws_unmap_request(req->sc, req);

        mtx_lock(&sc->sim_lock);
        xpt_done(req->ccb_ptr);
        mtx_unlock(&sc->sim_lock);

        mtx_lock(&sc->q_lock);
        tws_q_insert_tail(sc, req, TWS_FREE_Q);
        req = tws_q_remove_tail(sc, TWS_BUSY_Q);
        mtx_unlock(&sc->q_lock);
    }
}
606 
607 
608 static void
609 tws_drain_reserved_reqs(struct tws_softc *sc)
610 {
611     struct tws_request *r;
612 
613     r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH];
614     if ( r->state != TWS_REQ_STATE_FREE ) {
615         TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0);
616 	callout_stop(&r->timeout);
617         tws_unmap_request(sc, r);
618         free(r->data, M_TWS);
619         r->state = TWS_REQ_STATE_FREE;
620         r->error_code = TWS_REQ_RET_RESET;
621     }
622 
623     r = &sc->reqs[TWS_REQ_TYPE_PASSTHRU];
624     if ( r->state == TWS_REQ_STATE_BUSY ) {
625         TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0);
626         r->error_code = TWS_REQ_RET_RESET;
627     }
628 
629     r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM];
630     if ( r->state != TWS_REQ_STATE_FREE ) {
631         TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0);
632 	callout_stop(&r->timeout);
633         tws_unmap_request(sc, r);
634         free(r->data, M_TWS);
635         r->state = TWS_REQ_STATE_FREE;
636         r->error_code = TWS_REQ_RET_RESET;
637     }
638 }
639 
640 static void
641 tws_drain_response_queue(struct tws_softc *sc)
642 {
643     u_int16_t req_id;
644     u_int64_t mfa;
645     while ( tws_get_response(sc, &req_id, &mfa) );
646 }
647 
648 
649 static int32_t
650 tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
651 {
652     struct tws_command_packet *cmd_pkt;
653     struct tws_request *req;
654     struct ccb_hdr *ccb_h = &(ccb->ccb_h);
655     struct ccb_scsiio *csio = &(ccb->csio);
656     int error;
657     u_int16_t lun;
658 
659     mtx_assert(&sc->sim_lock, MA_OWNED);
660     if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
661         TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun);
662         ccb_h->status |= CAM_TID_INVALID;
663         xpt_done(ccb);
664         return(0);
665     }
666     if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
667         TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun);
668         ccb_h->status |= CAM_LUN_INVALID;
669         xpt_done(ccb);
670         return(0);
671     }
672 
673     if(ccb_h->flags & CAM_CDB_PHYS) {
674         TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
675         ccb_h->status = CAM_REQ_INVALID;
676         xpt_done(ccb);
677         return(0);
678     }
679 
680     /*
681      * We are going to work on this request.  Mark it as enqueued (though
682      * we don't actually queue it...)
683      */
684     ccb_h->status |= CAM_SIM_QUEUED;
685 
686     req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO);
687     if ( !req ) {
688         TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
689         ccb_h->status |= CAM_REQUEUE_REQ;
690         xpt_done(ccb);
691         return(0);
692     }
693 
694     if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
695         if(ccb_h->flags & CAM_DIR_IN)
696             req->flags |= TWS_DIR_IN;
697         if(ccb_h->flags & CAM_DIR_OUT)
698             req->flags |= TWS_DIR_OUT;
699     } else {
700         req->flags = TWS_DIR_NONE; /* no data */
701     }
702 
703     req->type = TWS_REQ_TYPE_SCSI_IO;
704     req->cb = tws_scsi_complete;
705 
706     cmd_pkt = req->cmd_pkt;
707     /* cmd_pkt->hdr.header_desc.size_header = 128; */
708     cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
709     cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
710     cmd_pkt->cmd.pkt_a.status = 0;
711     cmd_pkt->cmd.pkt_a.sgl_offset = 16;
712 
713     /* lower nibble */
714     lun = ccb_h->target_lun & 0XF;
715     lun = lun << 12;
716     cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
717     /* upper nibble */
718     lun = ccb_h->target_lun & 0XF0;
719     lun = lun << 8;
720     cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;
721 
722 #ifdef TWS_DEBUG
723     if ( csio->cdb_len > 16 )
724          TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
725 #endif
726 
727     if(ccb_h->flags & CAM_CDB_POINTER)
728         bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
729     else
730         bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
731 
732     req->data = ccb;
733     req->flags |= TWS_DATA_CCB;
734     /* save ccb ptr */
735     req->ccb_ptr = ccb;
736     /*
737      * tws_map_load_data_callback will fill in the SGL,
738      * and submit the I/O.
739      */
740     sc->stats.scsi_ios++;
741     callout_reset_sbt(&req->timeout, SBT_1MS * ccb->ccb_h.timeout, 0,
742       tws_timeout, req, 0);
743     error = tws_map_request(sc, req);
744     return(error);
745 }
746 
747 
748 int
749 tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
750 {
751     struct tws_request *req;
752     struct tws_command_packet *cmd_pkt;
753     int error;
754 
755     TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
756     req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH);
757 
758     if ( req == NULL )
759         return(ENOMEM);
760 
761     req->cb = tws_aen_complete;
762 
763     cmd_pkt = req->cmd_pkt;
764     cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
765     cmd_pkt->cmd.pkt_a.status = 0;
766     cmd_pkt->cmd.pkt_a.unit = 0;
767     cmd_pkt->cmd.pkt_a.sgl_offset = 16;
768     cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
769 
770     cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
771     cmd_pkt->cmd.pkt_a.cdb[4] = 128;
772 
773     req->length = TWS_SECTOR_SIZE;
774     req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
775     if ( req->data == NULL )
776         return(ENOMEM);
777     bzero(req->data, TWS_SECTOR_SIZE);
778     req->flags = TWS_DIR_IN;
779 
780     callout_reset(&req->timeout, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
781     error = tws_map_request(sc, req);
782     return(error);
783 
784 }
785 
786 int
787 tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
788               u_int32_t param_size, void *data)
789 {
790     struct tws_request *req;
791     struct tws_command_packet *cmd_pkt;
792     union tws_command_giga *cmd;
793     struct tws_getset_param *param;
794     int error;
795 
796     req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
797     if ( req == NULL ) {
798         TWS_TRACE_DEBUG(sc, "null req", 0, 0);
799         return(ENOMEM);
800     }
801 
802     req->length = TWS_SECTOR_SIZE;
803     req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
804     if ( req->data == NULL )
805         return(ENOMEM);
806     bzero(req->data, TWS_SECTOR_SIZE);
807     param = (struct tws_getset_param *)req->data;
808 
809     req->cb = tws_getset_param_complete;
810     req->flags = TWS_DIR_OUT;
811     cmd_pkt = req->cmd_pkt;
812 
813     cmd = &cmd_pkt->cmd.pkt_g;
814     cmd->param.sgl_off__opcode =
815             BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
816     cmd->param.request_id = (u_int8_t)req->request_id;
817     cmd->param.host_id__unit = 0;
818     cmd->param.param_count = 1;
819     cmd->param.size = 2; /* map routine will add sgls */
820 
821     /* Specify which parameter we want to set. */
822     param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
823     param->parameter_id = (u_int8_t)(param_id);
824     param->parameter_size_bytes = (u_int16_t)param_size;
825     memcpy(param->data, data, param_size);
826 
827     callout_reset(&req->timeout, (TWS_IOCTL_TIMEOUT * hz), tws_timeout, req);
828     error = tws_map_request(sc, req);
829     return(error);
830 
831 }
832 
833 int
834 tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
835               u_int32_t param_size, void *data)
836 {
837     struct tws_request *req;
838     struct tws_command_packet *cmd_pkt;
839     union tws_command_giga *cmd;
840     struct tws_getset_param *param;
841     u_int16_t reqid;
842     u_int64_t mfa;
843     int error = SUCCESS;
844 
845 
846     req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
847     if ( req == NULL ) {
848         TWS_TRACE_DEBUG(sc, "null req", 0, 0);
849         return(FAILURE);
850     }
851 
852     req->length = TWS_SECTOR_SIZE;
853     req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
854     if ( req->data == NULL )
855         return(FAILURE);
856     bzero(req->data, TWS_SECTOR_SIZE);
857     param = (struct tws_getset_param *)req->data;
858 
859     req->cb = NULL;
860     req->flags = TWS_DIR_IN;
861     cmd_pkt = req->cmd_pkt;
862 
863     cmd = &cmd_pkt->cmd.pkt_g;
864     cmd->param.sgl_off__opcode =
865             BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
866     cmd->param.request_id = (u_int8_t)req->request_id;
867     cmd->param.host_id__unit = 0;
868     cmd->param.param_count = 1;
869     cmd->param.size = 2; /* map routine will add sgls */
870 
871     /* Specify which parameter we want to set. */
872     param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
873     param->parameter_id = (u_int8_t)(param_id);
874     param->parameter_size_bytes = (u_int16_t)param_size;
875 
876     error = tws_map_request(sc, req);
877     if (!error) {
878         reqid = tws_poll4_response(sc, &mfa);
879         tws_unmap_request(sc, req);
880 
881         if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) {
882             memcpy(data, param->data, param_size);
883         } else {
884             error = FAILURE;
885         }
886     }
887 
888     free(req->data, M_TWS);
889     req->state = TWS_REQ_STATE_FREE;
890     return(error);
891 
892 }
893 
894 void
895 tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
896 {
897     if (req->data != NULL) {
898         if ( req->flags & TWS_DIR_IN )
899             bus_dmamap_sync(sc->data_tag, req->dma_map,
900                                             BUS_DMASYNC_POSTREAD);
901         if ( req->flags & TWS_DIR_OUT )
902             bus_dmamap_sync(sc->data_tag, req->dma_map,
903                                             BUS_DMASYNC_POSTWRITE);
904         mtx_lock(&sc->io_lock);
905         bus_dmamap_unload(sc->data_tag, req->dma_map);
906         mtx_unlock(&sc->io_lock);
907     }
908 }
909 
910 int32_t
911 tws_map_request(struct tws_softc *sc, struct tws_request *req)
912 {
913     int32_t error = 0;
914 
915 
916     /* If the command involves data, map that too. */
917     if (req->data != NULL) {
918         int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
919 
920         /*
921          * Map the data buffer into bus space and build the SG list.
922          */
923         mtx_lock(&sc->io_lock);
924 	if (req->flags & TWS_DATA_CCB)
925 		error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map,
926 					    req->data,
927 					    tws_dmamap_data_load_cbfn, req,
928 					    my_flags);
929 	else
930 		error = bus_dmamap_load(sc->data_tag, req->dma_map,
931 					req->data, req->length,
932 					tws_dmamap_data_load_cbfn, req,
933 					my_flags);
934         mtx_unlock(&sc->io_lock);
935 
936         if (error == EINPROGRESS) {
937             TWS_TRACE(sc, "in progress", 0, error);
938             tws_freeze_simq(sc, req);
939             error = 0;  // EINPROGRESS is not a fatal error.
940         }
941     } else { /* no data involved */
942         error = tws_submit_command(sc, req);
943     }
944     return(error);
945 }
946 
947 
/*
 * bus_dma load callback: syncs the data buffer for the device, appends
 * the SG list to the command packet, and submits the command.
 * NOTE(review): errors other than EFBIG are only traced and still fall
 * through to submission — confirm that is intended.
 */
static void
tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
                            int nseg, int error)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;
    u_int16_t sgls = nseg;
    void *sgl_ptr;
    struct tws_cmd_generic *gcmd;


    if ( error ) {
        TWS_TRACE(sc, "SOMETHING BAD HAPPENED! error = %d\n", error, 0);
    }

    /* Too many segments for the map: fail the request back to CAM.
     * NOTE(review): assumes ccb_ptr is valid here — for SCSI I/O it is;
     * confirm non-CCB requests cannot reach this path with EFBIG. */
    if ( error == EFBIG ) {
        TWS_TRACE(sc, "not enough data segs", 0, nseg);
        req->error_code = error;
        req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
        return;
    }

    /* Pre-DMA sync in the direction(s) of the transfer. */
    if ( req->flags & TWS_DIR_IN )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                                            BUS_DMASYNC_PREREAD);
    if ( req->flags & TWS_DIR_OUT )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                                        BUS_DMASYNC_PREWRITE);
    if ( segs ) {
        /*
         * Non-SCSI passthru and get/set-param commands use the generic
         * packet: the SG list lands right after the current command
         * payload, and gcmd->size (in 32-bit words) grows by 4 words per
         * 64-bit SG entry or 2 words per 32-bit entry.
         */
        if ( (req->type == TWS_REQ_TYPE_PASSTHRU &&
             GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
                            TWS_FW_CMD_EXECUTE_SCSI) ||
              req->type == TWS_REQ_TYPE_GETSET_PARAM) {
            gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
            sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
            gcmd->size += sgls *
                          ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 : 2 );
            tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls);

        } else {
            /* SCSI commands: fixed sg_list slot in the apache packet,
             * with the entry count OR'd into the lun/entries field. */
            tws_fill_sg_list(req->sc, (void *)segs,
                      (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls);
            req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
        }
    }


    req->error_code = tws_submit_command(req->sc, req);

}
998 
999 
1000 static void
1001 tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
1002                           u_int16_t num_sgl_entries)
1003 {
1004     int i;
1005 
1006     if ( sc->is64bit ) {
1007         struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;
1008 
1009         if ( !tws_use_32bit_sgls ) {
1010             struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
1011             if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
1012                 TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
1013             for (i = 0; i < num_sgl_entries; i++) {
1014                 sgl_d[i].address = sgl_s->address;
1015                 sgl_d[i].length = sgl_s->length;
1016                 sgl_d[i].flag = 0;
1017                 sgl_d[i].reserved = 0;
1018                 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1019                                                sizeof(bus_dma_segment_t));
1020             }
1021         } else {
1022             struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1023             if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1024                 TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1025             for (i = 0; i < num_sgl_entries; i++) {
1026                 sgl_d[i].address = sgl_s->address;
1027                 sgl_d[i].length = sgl_s->length;
1028                 sgl_d[i].flag = 0;
1029                 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1030                                                sizeof(bus_dma_segment_t));
1031             }
1032         }
1033     } else {
1034         struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
1035         struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1036 
1037         if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1038             TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1039 
1040 
1041         for (i = 0; i < num_sgl_entries; i++) {
1042             sgl_d[i].address = sgl_s[i].address;
1043             sgl_d[i].length = sgl_s[i].length;
1044             sgl_d[i].flag = 0;
1045         }
1046     }
1047 }
1048 
1049 
1050 void
1051 tws_intr(void *arg)
1052 {
1053     struct tws_softc *sc = (struct tws_softc *)arg;
1054     u_int32_t histat=0, db=0;
1055 
1056     if (!(sc)) {
1057         device_printf(sc->tws_dev, "null softc!!!\n");
1058         return;
1059     }
1060 
1061     if ( tws_get_state(sc) == TWS_RESET ) {
1062         return;
1063     }
1064 
1065     if ( tws_get_state(sc) != TWS_ONLINE ) {
1066         return;
1067     }
1068 
1069     sc->stats.num_intrs++;
1070     histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
1071     if ( histat & TWS_BIT2 ) {
1072         TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
1073         db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1074         if ( db & TWS_BIT21 ) {
1075             tws_intr_attn_error(sc);
1076             return;
1077         }
1078         if ( db & TWS_BIT18 ) {
1079             tws_intr_attn_aen(sc);
1080         }
1081     }
1082 
1083     if ( histat & TWS_BIT3 ) {
1084         tws_intr_resp(sc);
1085     }
1086 }
1087 
1088 static void
1089 tws_intr_attn_aen(struct tws_softc *sc)
1090 {
1091     u_int32_t db=0;
1092 
1093     /* maskoff db intrs until all the aens are fetched */
1094     /* tws_disable_db_intr(sc); */
1095     tws_fetch_aen((void *)sc);
1096     tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
1097     db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1098 
1099 }
1100 
1101 static void
1102 tws_intr_attn_error(struct tws_softc *sc)
1103 {
1104     u_int32_t db=0;
1105 
1106     TWS_TRACE(sc, "attn error", 0, 0);
1107     tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
1108     db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1109     device_printf(sc->tws_dev, "Micro controller error.\n");
1110     tws_reset(sc);
1111 }
1112 
1113 static void
1114 tws_intr_resp(struct tws_softc *sc)
1115 {
1116     u_int16_t req_id;
1117     u_int64_t mfa;
1118 
1119     while ( tws_get_response(sc, &req_id, &mfa) ) {
1120         sc->stats.reqs_out++;
1121         if ( req_id == TWS_INVALID_REQID ) {
1122             TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
1123             sc->stats.reqs_errored++;
1124             tws_err_complete(sc, mfa);
1125             continue;
1126         }
1127         sc->reqs[req_id].cb(&sc->reqs[req_id]);
1128     }
1129 
1130 }
1131 
1132 
/* CAM polling entry point: run the interrupt handler synchronously. */
static void
tws_poll(struct cam_sim *sim)
{
    struct tws_softc *sc;

    sc = (struct tws_softc *)cam_sim_softc(sim);
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    tws_intr((void *)sc);
}
1140 
/*
 * Callout handler for a timed-out request: freezes the simq and resets
 * the controller.  Runs with the callout's own context; gen_lock
 * serializes against concurrent resets.
 */
void
tws_timeout(void *arg)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;


    /* Unlocked fast-path check: request already failed by a reset. */
    if ( req->error_code == TWS_REQ_RET_RESET ) {
        return;
    }

    /* Re-check under gen_lock (double-checked against a racing reset). */
    mtx_lock(&sc->gen_lock);
    if ( req->error_code == TWS_REQ_RET_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    /* Another reset is already in progress; let it handle recovery. */
    if ( tws_get_state(sc) == TWS_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    /* Quiesce CAM for the duration of the reset. */
    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
        device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n");
    } else if (req->type == TWS_REQ_TYPE_PASSTHRU) {
        device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n");
    } else {
        device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n");
    }

    /* Hard-reset sequence: soft reset, drain, then re-initialize. */
    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb( (void*) sc );
    tws_reinit( (void*) sc );

//  device_printf(sc->tws_dev,  "Controller Reset complete!\n");
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    xpt_release_simq(sc->sim, 1);
}
1186 
1187 void
1188 tws_reset(void *arg)
1189 {
1190     struct tws_softc *sc = (struct tws_softc *)arg;
1191 
1192     mtx_lock(&sc->gen_lock);
1193     if ( tws_get_state(sc) == TWS_RESET ) {
1194         mtx_unlock(&sc->gen_lock);
1195         return;
1196     }
1197 
1198     xpt_freeze_simq(sc->sim, 1);
1199 
1200     tws_send_event(sc, TWS_RESET_START);
1201 
1202     device_printf(sc->tws_dev,  "Resetting controller\n");
1203 
1204     tws_assert_soft_reset(sc);
1205     tws_turn_off_interrupts(sc);
1206     tws_reset_cb( (void*) sc );
1207     tws_reinit( (void*) sc );
1208 
1209 //  device_printf(sc->tws_dev,  "Controller Reset complete!\n");
1210     tws_send_event(sc, TWS_RESET_COMPLETE);
1211     mtx_unlock(&sc->gen_lock);
1212 
1213     xpt_release_simq(sc->sim, 1);
1214 }
1215 
1216 static void
1217 tws_reset_cb(void *arg)
1218 {
1219     struct tws_softc *sc = (struct tws_softc *)arg;
1220     time_t endt;
1221     int found = 0;
1222     u_int32_t reg;
1223 
1224     if ( tws_get_state(sc) != TWS_RESET ) {
1225         return;
1226     }
1227 
1228 //  device_printf(sc->tws_dev,  "Draining Busy Queue\n");
1229     tws_drain_busy_queue(sc);
1230 //  device_printf(sc->tws_dev,  "Draining Reserved Reqs\n");
1231     tws_drain_reserved_reqs(sc);
1232 //  device_printf(sc->tws_dev,  "Draining Response Queue\n");
1233     tws_drain_response_queue(sc);
1234 
1235 //  device_printf(sc->tws_dev,  "Looking for controller ready flag...\n");
1236     endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT;
1237     while ((TWS_LOCAL_TIME <= endt) && (!found)) {
1238         reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
1239         if ( reg & TWS_BIT13 ) {
1240             found = 1;
1241 //          device_printf(sc->tws_dev,  " ... Got it!\n");
1242         }
1243     }
1244     if ( !found )
1245             device_printf(sc->tws_dev,  " ... Controller ready flag NOT found!\n");
1246 }
1247 
/*
 * Re-initialize the controller after a reset: wait (with up to two
 * soft-reset retries) for the controller-ready state, then re-establish
 * the init connection, outbound-FIFO queue, and interrupts.  Called
 * with gen_lock held (mtx_sleep drops/reacquires it).
 */
static void
tws_reinit(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    int timeout_val=0;
    int try=2;     /* number of soft-reset attempts remaining */
    int done=0;


    /* Poll every 5 seconds; after TWS_RESET_TIMEOUT, re-assert soft
     * reset and burn one retry. */
    while ( !done && try ) {
        if ( tws_ctlr_ready(sc) ) {
            done = 1;
            break;
        } else {
            timeout_val += 5;
            if ( timeout_val >= TWS_RESET_TIMEOUT ) {
               timeout_val = 0;
               if ( try )
                   tws_assert_soft_reset(sc);
               try--;
            }
            /* Sleep 5s, releasing gen_lock while waiting. */
            mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
        }
    }

    if (!done) {
        device_printf(sc->tws_dev,  "FAILED to get Controller Ready!\n");
        return;
    }

    sc->obfl_q_overrun = false;
    /* Re-establish the firmware connection and outbound-FIFO queue. */
    if ( tws_init_connect(sc, tws_queue_depth) ) {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
    }
    tws_init_obfl_q(sc);

    tws_turn_on_interrupts(sc);

    /* Wake one thread blocked waiting for the reset to finish. */
    wakeup_one(sc);
}
1290 
1291 
1292 static void
1293 tws_freeze_simq(struct tws_softc *sc, struct tws_request *req)
1294 {
1295     /* Only for IO commands */
1296     if (req->type == TWS_REQ_TYPE_SCSI_IO) {
1297         union ccb   *ccb = (union ccb *)(req->ccb_ptr);
1298 
1299         xpt_freeze_simq(sc->sim, 1);
1300         ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1301         ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1302     }
1303 }
1304 
1305 
1306 TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);
1307