/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah <manjunath.ranganathaiah@lsi.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <dev/tws/tws.h>
#include <dev/tws/tws_services.h>
#include <dev/tws/tws_hdm.h>
#include <dev/tws/tws_user.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

static int tws_cam_depth = (TWS_MAX_REQS - TWS_RESERVED_REQS);
static char tws_sev_str[5][8] = {"", "ERROR", "WARNING", "INFO", "DEBUG"};
static void tws_action(struct cam_sim *sim, union ccb *ccb);
static void tws_poll(struct cam_sim *sim);
static void tws_scsi_complete(struct tws_request *req);

void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
int tws_bus_scan(struct tws_softc *sc);
int tws_cam_attach(struct tws_softc *sc);
void tws_cam_detach(struct tws_softc *sc);
void tws_reset(void *arg);

static void tws_reset_cb(void *arg);
static void tws_reinit(void *arg);
static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req);
static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
    void *sgl_dest, u_int16_t num_sgl_entries);
static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
static void tws_scsi_err_complete(struct tws_request *req,
    struct tws_command_header *hdr);
static void tws_passthru_err_complete(struct tws_request *req,
    struct tws_command_header *hdr);

void tws_timeout(void *arg);
static void tws_intr_attn_aen(struct tws_softc *sc);
static void tws_intr_attn_error(struct tws_softc *sc);
static void tws_intr_resp(struct tws_softc *sc);
void tws_intr(void *arg);
void tws_cmd_complete(struct tws_request *req);
void tws_aen_complete(struct tws_request *req);
int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
void tws_getset_param_complete(struct tws_request *req);
int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
    u_int32_t param_size, void *data);
int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
    u_int32_t param_size, void *data);

extern struct tws_request *tws_get_request(struct tws_softc *sc,
    u_int16_t type);
extern void *tws_release_request(struct tws_request *req);
extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
extern boolean tws_get_response(struct tws_softc *sc,
    u_int16_t *req_id, u_int64_t *mfa);
extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
    u_int8_t q_type);
extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
    struct tws_request *req, u_int8_t q_type);
extern void tws_send_event(struct tws_softc *sc, u_int8_t event);

extern struct tws_sense *
tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);

extern void tws_fetch_aen(void *arg);
extern void tws_disable_db_intr(struct tws_softc *sc);
extern void tws_enable_db_intr(struct tws_softc *sc);
extern void tws_passthru_complete(struct tws_request *req);
extern void tws_aen_synctime_with_host(struct tws_softc *sc);
extern void tws_circular_aenq_insert(struct tws_softc *sc,
    struct tws_circular_q *cq, struct tws_event_packet *aen);
extern int tws_use_32bit_sgls;
extern boolean tws_ctlr_reset(struct tws_softc *sc);
extern struct tws_request *tws_q_remove_tail(struct tws_softc *sc,
    u_int8_t q_type);
extern void tws_turn_off_interrupts(struct tws_softc *sc);
extern void tws_turn_on_interrupts(struct tws_softc *sc);
extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
extern void tws_init_obfl_q(struct tws_softc *sc);
extern uint8_t tws_get_state(struct tws_softc *sc);
extern void tws_assert_soft_reset(struct tws_softc *sc);
extern boolean tws_ctlr_ready(struct tws_softc *sc);
extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
extern int tws_setup_intr(struct tws_softc *sc, int irqs);
extern int tws_teardown_intr(struct tws_softc *sc);

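/*
 * Attach to the CAM layer: allocate a device queue and SIM, register
 * the bus with the transport, and create a wildcard path for rescans
 * and async events.
 */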
int
tws_cam_attach(struct tws_softc *sc)
{
    struct cam_devq *devq;

    TWS_TRACE_DEBUG(sc, "entry", 0, sc);
    /* Create a device queue for the sim. */

    /*
     * If the user sets the cam depth to less than 1,
     * cam may get confused.
     */
    if ( tws_cam_depth < 1 )
        tws_cam_depth = 1;
    if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) )
        tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;

    TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);

    if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
        tws_log(sc, CAM_SIMQ_ALLOC);
        return(ENOMEM);
    }

    /*
     * Create a SIM entry.  Although the controller can take
     * tws_queue_depth simultaneous requests, we advertise only
     * tws_cam_depth of them so that reserved request packets are
     * always available to service ioctls and internal commands.
     */
    sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
                      device_get_unit(sc->tws_dev),
                      &sc->sim_lock,
                      tws_cam_depth, 1, devq);
                      /* 1, 1, devq); */
    if (sc->sim == NULL) {
        cam_simq_free(devq);
        tws_log(sc, CAM_SIM_ALLOC);
        return(ENOMEM);
    }
    /* Register the bus. */
    mtx_lock(&sc->sim_lock);
    if (xpt_bus_register(sc->sim,
                         sc->tws_dev,
                         0) != CAM_SUCCESS) {
        cam_sim_free(sc->sim, TRUE); /* passing TRUE will free the devq */
        sc->sim = NULL; /* so cam_detach will not try to free it */
        mtx_unlock(&sc->sim_lock);
        tws_log(sc, TWS_XPT_BUS_REGISTER);
        return(ENXIO);
    }
    if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
                         CAM_TARGET_WILDCARD,
                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(sc->sim));
        /* Passing TRUE to cam_sim_free will free the devq as well. */
        cam_sim_free(sc->sim, TRUE);
        tws_log(sc, TWS_XPT_CREATE_PATH);
        mtx_unlock(&sc->sim_lock);
        return(ENXIO);
    }
    mtx_unlock(&sc->sim_lock);

    return(0);
}

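/*
 * Detach from CAM: free the wildcard path, deregister the bus and
 * free the SIM (which also frees the devq).
 */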
void
tws_cam_detach(struct tws_softc *sc)
{
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    mtx_lock(&sc->sim_lock);
    if (sc->path)
        xpt_free_path(sc->path);
    if (sc->sim) {
        xpt_bus_deregister(cam_sim_path(sc->sim));
        cam_sim_free(sc->sim, TRUE);
    }
    mtx_unlock(&sc->sim_lock);
}

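/*
 * Kick off a full bus rescan so CAM discovers the units exported by
 * the controller.  The CCB is consumed by xpt_rescan() on success.
 */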
int
tws_bus_scan(struct tws_softc *sc)
{
    union ccb *ccb;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    if (!(sc->sim))
        return(ENXIO);
    ccb = xpt_alloc_ccb();
    mtx_lock(&sc->sim_lock);
    if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim),
                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mtx_unlock(&sc->sim_lock);
        xpt_free_ccb(ccb);
        return(EIO);
    }
    xpt_rescan(ccb);
    mtx_unlock(&sc->sim_lock);
    return(0);
}

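/*
 * CAM action entry point: dispatch incoming CCBs by function code.
 * SCSI I/O is handed to tws_execute_scsi(); most other requests are
 * completed inline with canned settings or status.
 */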
static void
tws_action(struct cam_sim *sim, union ccb *ccb)
{
    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);

    switch( ccb->ccb_h.func_code ) {
        case XPT_SCSI_IO:
        {
            if ( tws_execute_scsi(sc, ccb) )
                TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
            break;
        }
        case XPT_ABORT:
        {
            TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
            ccb->ccb_h.status = CAM_UA_ABORT;
            xpt_done(ccb);
            break;
        }
        case XPT_RESET_BUS:
        {
            TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
            break;
        }
        case XPT_SET_TRAN_SETTINGS:
        {
            TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
            ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
            xpt_done(ccb);

            break;
        }
        case XPT_GET_TRAN_SETTINGS:
        {
            TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);

            ccb->cts.protocol = PROTO_SCSI;
            ccb->cts.protocol_version = SCSI_REV_2;
            ccb->cts.transport = XPORT_SPI;
            ccb->cts.transport_version = 2;

            ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
            ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
            ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
            ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        case XPT_CALC_GEOMETRY:
        {
            TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
                                          ccb->ccg.block_size);
            cam_calc_geometry(&ccb->ccg, 1/* extended */);
            xpt_done(ccb);

            break;
        }
        case XPT_PATH_INQ:
        {
            TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
            ccb->cpi.version_num = 1;
            ccb->cpi.hba_inquiry = 0;
            ccb->cpi.target_sprt = 0;
            ccb->cpi.hba_misc = 0;
            ccb->cpi.hba_eng_cnt = 0;
            ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
            ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
            ccb->cpi.unit_number = cam_sim_unit(sim);
            ccb->cpi.bus_id = cam_sim_bus(sim);
            ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
            ccb->cpi.base_transfer_speed = 6000000;
            strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
            strlcpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
            strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
            ccb->cpi.transport = XPORT_SPI;
            ccb->cpi.transport_version = 2;
            ccb->cpi.protocol = PROTO_SCSI;
            ccb->cpi.protocol_version = SCSI_REV_2;
            ccb->cpi.maxio = TWS_MAX_IO_SIZE;
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        default:
            TWS_TRACE_DEBUG(sc, "default", sim, ccb);
            ccb->ccb_h.status = CAM_REQ_INVALID;
            xpt_done(ccb);
            break;
    }
}

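/*
 * Completion handler for a successful SCSI I/O: pull the request off
 * the busy queue, cancel its timeout, unmap its data, complete the
 * CCB and return the request to the free queue.
 */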
static void
tws_scsi_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);

    callout_stop(&req->timeout);
    tws_unmap_request(req->sc, req);

    req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
    mtx_lock(&sc->sim_lock);
    xpt_done(req->ccb_ptr);
    mtx_unlock(&sc->sim_lock);

    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}

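/*
 * Completion handler for get/set parameter requests: release the DMA
 * mapping and the data buffer, then mark the reserved request free.
 */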
void
tws_getset_param_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);

    callout_stop(&req->timeout);
    tws_unmap_request(sc, req);

    free(req->data, M_TWS);

    req->state = TWS_REQ_STATE_FREE;
}

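/*
 * Completion handler for an AEN fetch.  The returned sense data is
 * decoded into a tws_event_packet; anything other than a sync-time or
 * queue-empty AEN is logged and placed on the circular AEN queue, and
 * the next AEN is fetched until the firmware reports the queue empty.
 */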
void
tws_aen_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;
    struct tws_command_header *sense;
    struct tws_event_packet event;
    u_int16_t aen_code = 0;

    TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);

    callout_stop(&req->timeout);
    tws_unmap_request(sc, req);

    sense = (struct tws_command_header *)req->data;

    TWS_TRACE_DEBUG(sc, "sense code, key", sense->sense_data[0],
                               sense->sense_data[2]);
    TWS_TRACE_DEBUG(sc, "sense rid, seve", sense->header_desc.request_id,
                               sense->status_block.res__severity);
    TWS_TRACE_DEBUG(sc, "sense srcnum, error", sense->status_block.srcnum,
                               sense->status_block.error);
    TWS_TRACE_DEBUG(sc, "sense shdr, ssense", sense->header_desc.size_header,
                               sense->header_desc.size_sense);

    aen_code = sense->status_block.error;

    switch ( aen_code ) {
        case TWS_AEN_SYNC_TIME_WITH_HOST :
            tws_aen_synctime_with_host(sc);
            break;
        case TWS_AEN_QUEUE_EMPTY :
            break;
        default :
            bzero(&event, sizeof(struct tws_event_packet));
            event.sequence_id = sc->seq_id;
            event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
            event.aen_code = sense->status_block.error;
            event.severity = sense->status_block.res__severity & 0x7;
            event.event_src = TWS_SRC_CTRL_EVENT;
            strcpy(event.severity_str, tws_sev_str[event.severity]);
            event.retrieved = TWS_AEN_NOT_RETRIEVED;

            bcopy(sense->err_specific_desc, event.parameter_data,
                              TWS_ERROR_SPECIFIC_DESC_LEN);
            event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
            event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;

            if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
                event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
                                          event.parameter_len) + 1);
            }

            device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
                event.severity_str,
                event.event_src,
                event.aen_code,
                event.parameter_data +
                    (strlen(event.parameter_data) + 1),
                event.parameter_data);

            mtx_lock(&sc->gen_lock);
            tws_circular_aenq_insert(sc, &sc->aen_q, &event);
            sc->seq_id++;
            mtx_unlock(&sc->gen_lock);
            break;
    }

    free(req->data, M_TWS);

    req->state = TWS_REQ_STATE_FREE;

    if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
        /* timeout(tws_fetch_aen, sc, 1); */
        sc->stats.num_aens++;
        tws_fetch_aen((void *)sc);
    }
}

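/*
 * Generic completion handler: just cancel the timeout and tear down
 * the DMA mapping.
 */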
void
tws_cmd_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    callout_stop(&req->timeout);
    tws_unmap_request(sc, req);
}

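/*
 * Handle a response that came back with an invalid request id.  The
 * mfa identifies the sense buffer the firmware filled in; from its
 * header we recover the originating request and route it to the
 * type-specific error completion, then post the sense buffer back on
 * the controller's out-bound free list.
 */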
static void
tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
{
    struct tws_command_header *hdr;
    struct tws_sense *sen;
    struct tws_request *req;
    u_int16_t req_id;
    u_int32_t reg, status;

    if ( !mfa ) {
        TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
        return;
    } else {
        /* lookup the sense */
        sen = tws_find_sense_from_mfa(sc, mfa);
        if ( sen == NULL ) {
            TWS_TRACE_DEBUG(sc, "found null sense", 0, mfa);
            return;
        }
        hdr = sen->hdr;
        TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
        req_id = hdr->header_desc.request_id;
        req = &sc->reqs[req_id];
        TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
        if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
            TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
    }

    switch (req->type) {
        case TWS_REQ_TYPE_PASSTHRU :
            tws_passthru_err_complete(req, hdr);
            break;
        case TWS_REQ_TYPE_GETSET_PARAM :
            tws_getset_param_complete(req);
            break;
        case TWS_REQ_TYPE_SCSI_IO :
            tws_scsi_err_complete(req, hdr);
            break;
    }

    mtx_lock(&sc->io_lock);
    hdr->header_desc.size_header = 128;
    reg = (u_int32_t)(mfa >> 32);
    tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
    reg = (u_int32_t)(mfa);
    tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);

    status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
    if ( status & TWS_BIT13 ) {
        device_printf(sc->tws_dev, "OBFL Overrun\n");
        sc->obfl_q_overrun = true;
    }
    mtx_unlock(&sc->io_lock);
}

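/*
 * Error completion for SCSI I/O: translate the firmware error code
 * into a CAM status, copy back sense data, complete the CCB and
 * recycle the request.
 */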
static void
tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
{
    u_int8_t *sense_data;
    struct tws_softc *sc = req->sc;
    union ccb *ccb = req->ccb_ptr;

    TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
                                 req->cmd_pkt->cmd.pkt_a.status);
    if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
         hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {
        if ( ccb->ccb_h.target_lun ) {
            TWS_TRACE_DEBUG(sc, "invalid lun error", 0, 0);
            ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
        } else {
            TWS_TRACE_DEBUG(sc, "invalid target error", 0, 0);
            ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
        }
    } else {
        TWS_TRACE_DEBUG(sc, "scsi status error", 0, 0);
        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
        if ((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
            (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED)) {
            ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
            TWS_TRACE_DEBUG(sc, "page mode not supported", 0, 0);
        }
    }

    /* If no error bits were set above, report a generic completion error. */
    if (ccb->ccb_h.status == 0)
        ccb->ccb_h.status = CAM_REQ_CMP_ERR;

    sense_data = (u_int8_t *)&ccb->csio.sense_data;
    if (sense_data) {
        memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH);
        ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
    ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;

    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    mtx_lock(&sc->sim_lock);
    xpt_done(ccb);
    mtx_unlock(&sc->sim_lock);

    callout_stop(&req->timeout);
    tws_unmap_request(req->sc, req);
    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}

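/*
 * Error completion for passthru (ioctl) requests: record the firmware
 * error, copy the returned header back to the user command packet and
 * finish through the normal passthru completion path.
 */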
static void
tws_passthru_err_complete(struct tws_request *req,
    struct tws_command_header *hdr)
{
    TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
    req->error_code = hdr->status_block.error;
    memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
    tws_passthru_complete(req);
}

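/*
 * Used during controller reset: fail back every outstanding SCSI I/O
 * on the busy queue with CAM_REQUEUE_REQ | CAM_SCSI_BUS_RESET so CAM
 * retries it after the reset completes.
 */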
static void
tws_drain_busy_queue(struct tws_softc *sc)
{
    struct tws_request *req;
    union ccb *ccb;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->q_lock);
    req = tws_q_remove_tail(sc, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);
    while ( req ) {
        TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id);
        callout_stop(&req->timeout);

        req->error_code = TWS_REQ_RET_RESET;
        ccb = (union ccb *)(req->ccb_ptr);

        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
        ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;

        tws_unmap_request(req->sc, req);

        mtx_lock(&sc->sim_lock);
        xpt_done(req->ccb_ptr);
        mtx_unlock(&sc->sim_lock);

        mtx_lock(&sc->q_lock);
        tws_q_insert_tail(sc, req, TWS_FREE_Q);
        req = tws_q_remove_tail(sc, TWS_BUSY_Q);
        mtx_unlock(&sc->q_lock);
    }
}

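/*
 * Used during controller reset: abort any in-flight reserved requests
 * (AEN fetch, passthru, get/set param) and mark them as reset.  The
 * reserved requests live at fixed slots in sc->reqs[], indexed by
 * their request type.
 */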
static void
tws_drain_reserved_reqs(struct tws_softc *sc)
{
    struct tws_request *r;

    r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0);
        callout_stop(&r->timeout);
        tws_unmap_request(sc, r);
        free(r->data, M_TWS);
        r->state = TWS_REQ_STATE_FREE;
        r->error_code = TWS_REQ_RET_RESET;
    }

    r = &sc->reqs[TWS_REQ_TYPE_PASSTHRU];
    if ( r->state == TWS_REQ_STATE_BUSY ) {
        TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0);
        r->error_code = TWS_REQ_RET_RESET;
    }

    r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0);
        callout_stop(&r->timeout);
        tws_unmap_request(sc, r);
        free(r->data, M_TWS);
        r->state = TWS_REQ_STATE_FREE;
        r->error_code = TWS_REQ_RET_RESET;
    }
}

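/*
 * Used during controller reset: discard any stale entries still
 * sitting on the response queue.
 */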
static void
tws_drain_response_queue(struct tws_softc *sc)
{
    u_int16_t req_id;
    u_int64_t mfa;

    while ( tws_get_response(sc, &req_id, &mfa) )
        ;
}

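/*
 * Build and submit a firmware EXECUTE_SCSI command for an XPT_SCSI_IO
 * CCB: validate target/lun, grab a free request, encode the 8-bit lun
 * into the two 4-bit fields of the command packet, copy in the CDB
 * and hand the request to tws_map_request() for SGL construction and
 * submission.
 */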
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    mtx_assert(&sc->sim_lock, MA_OWNED);
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "target id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    if (ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_INVALID;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request.  Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO);
    if ( !req ) {
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if (ccb_h->flags & CAM_DIR_IN)
            req->flags |= TWS_DIR_IN;
        if (ccb_h->flags & CAM_DIR_OUT)
            req->flags |= TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_REQ_TYPE_SCSI_IO;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
        TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    if (ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    req->data = ccb;
    req->flags |= TWS_DATA_CCB;
    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    callout_reset_sbt(&req->timeout, SBT_1MS * ccb->ccb_h.timeout, 0,
      tws_timeout, req, 0);
    error = tws_map_request(sc, req);
    return(error);
}

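/*
 * Issue an internal SCSI command through the reserved AEN-fetch
 * request slot (tws_fetch_aen presumably passes a REQUEST SENSE
 * opcode here); cdb[4] is the allocation length for the sector-sized
 * DMA-in buffer.
 */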
int
tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    int error;

    TWS_TRACE_DEBUG(sc, "entry", sc, cmd);
    req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH);

    if ( req == NULL )
        return(ENOMEM);

    req->cb = tws_aen_complete;

    cmd_pkt = req->cmd_pkt;
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.unit = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;

    cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
    cmd_pkt->cmd.pkt_a.cdb[4] = 128;

    req->length = TWS_SECTOR_SIZE;
    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
    if ( req->data == NULL )
        return(ENOMEM);
    bzero(req->data, TWS_SECTOR_SIZE);
    req->flags = TWS_DIR_IN;

    callout_reset(&req->timeout, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
    error = tws_map_request(sc, req);
    return(error);
}

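/*
 * Write a controller parameter: build a SET_PARAM command around a
 * tws_getset_param descriptor and submit it asynchronously; the
 * completion handler frees the buffer.
 */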
int
tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
    u_int32_t param_size, void *data)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    union tws_command_giga *cmd;
    struct tws_getset_param *param;
    int error;

    req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
    if ( req == NULL ) {
        TWS_TRACE_DEBUG(sc, "null req", 0, 0);
        return(ENOMEM);
    }

    req->length = TWS_SECTOR_SIZE;
    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
    if ( req->data == NULL )
        return(ENOMEM);
    bzero(req->data, TWS_SECTOR_SIZE);
    param = (struct tws_getset_param *)req->data;

    req->cb = tws_getset_param_complete;
    req->flags = TWS_DIR_OUT;
    cmd_pkt = req->cmd_pkt;

    cmd = &cmd_pkt->cmd.pkt_g;
    cmd->param.sgl_off__opcode =
        BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
    cmd->param.request_id = (u_int8_t)req->request_id;
    cmd->param.host_id__unit = 0;
    cmd->param.param_count = 1;
    cmd->param.size = 2; /* map routine will add sgls */

    /* Specify which parameter we want to set. */
    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
    param->parameter_id = (u_int8_t)(param_id);
    param->parameter_size_bytes = (u_int16_t)param_size;
    memcpy(param->data, data, param_size);

    callout_reset(&req->timeout, (TWS_IOCTL_TIMEOUT * hz), tws_timeout, req);
    error = tws_map_request(sc, req);
    return(error);
}

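/*
 * Read a controller parameter.  Unlike tws_set_param() this runs
 * synchronously: the command is submitted and the response queue is
 * polled until the matching request id comes back, after which the
 * parameter data is copied out to the caller.
 */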
int
tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
    u_int32_t param_size, void *data)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    union tws_command_giga *cmd;
    struct tws_getset_param *param;
    u_int16_t reqid;
    u_int64_t mfa;
    int error = SUCCESS;

    req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
    if ( req == NULL ) {
        TWS_TRACE_DEBUG(sc, "null req", 0, 0);
        return(FAILURE);
    }

    req->length = TWS_SECTOR_SIZE;
    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
    if ( req->data == NULL )
        return(FAILURE);
    bzero(req->data, TWS_SECTOR_SIZE);
    param = (struct tws_getset_param *)req->data;

    req->cb = NULL;
    req->flags = TWS_DIR_IN;
    cmd_pkt = req->cmd_pkt;

    cmd = &cmd_pkt->cmd.pkt_g;
    cmd->param.sgl_off__opcode =
        BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
    cmd->param.request_id = (u_int8_t)req->request_id;
    cmd->param.host_id__unit = 0;
    cmd->param.param_count = 1;
    cmd->param.size = 2; /* map routine will add sgls */

    /* Specify which parameter we want to fetch. */
    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
    param->parameter_id = (u_int8_t)(param_id);
    param->parameter_size_bytes = (u_int16_t)param_size;

    error = tws_map_request(sc, req);
    if (!error) {
        reqid = tws_poll4_response(sc, &mfa);
        tws_unmap_request(sc, req);

        /* Reserved requests appear to use their type as their request id. */
        if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) {
            memcpy(data, param->data, param_size);
        } else {
            error = FAILURE;
        }
    }

    free(req->data, M_TWS);
    req->state = TWS_REQ_STATE_FREE;
    return(error);
}

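/*
 * Tear down the DMA mapping for a request, syncing the map first
 * according to the transfer direction.
 */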
void
tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
{
    if (req->data != NULL) {
        if ( req->flags & TWS_DIR_IN )
            bus_dmamap_sync(sc->data_tag, req->dma_map,
                            BUS_DMASYNC_POSTREAD);
        if ( req->flags & TWS_DIR_OUT )
            bus_dmamap_sync(sc->data_tag, req->dma_map,
                            BUS_DMASYNC_POSTWRITE);
        mtx_lock(&sc->io_lock);
        bus_dmamap_unload(sc->data_tag, req->dma_map);
        mtx_unlock(&sc->io_lock);
    }
}

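/*
 * Map a request's data for DMA and submit it.  CCB-backed requests go
 * through bus_dmamap_load_ccb(); everything else uses a plain buffer
 * load.  The load callback (tws_dmamap_data_load_cbfn) fills in the
 * SGL and submits the command; EINPROGRESS just means the load was
 * deferred, so the simq is frozen until the callback runs.
 */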
int32_t
tws_map_request(struct tws_softc *sc, struct tws_request *req)
{
    int32_t error = 0;

    /* If the command involves data, map that too. */
    if (req->data != NULL) {
        int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ?
                        BUS_DMA_WAITOK : BUS_DMA_NOWAIT);

        /*
         * Map the data buffer into bus space and build the SG list.
         */
        mtx_lock(&sc->io_lock);
        if (req->flags & TWS_DATA_CCB)
            error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map,
                                        req->data,
                                        tws_dmamap_data_load_cbfn, req,
                                        my_flags);
        else
            error = bus_dmamap_load(sc->data_tag, req->dma_map,
                                    req->data, req->length,
                                    tws_dmamap_data_load_cbfn, req,
                                    my_flags);
        mtx_unlock(&sc->io_lock);

        if (error == EINPROGRESS) {
            TWS_TRACE(sc, "in progress", 0, error);
            tws_freeze_simq(sc, req);
            error = 0; /* EINPROGRESS is not a fatal error. */
        }
    } else { /* no data involved */
        error = tws_submit_command(sc, req);
    }
    return(error);
}

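/*
 * bus_dma load callback: sync the map for the transfer direction,
 * copy the segment list into the command packet's SGL (the SGL sits
 * at a different offset for "giga" get/set-param and non-SCSI
 * passthru commands than for EXECUTE_SCSI) and submit the command.
 */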
static void
tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;
    u_int16_t sgls = nseg;
    void *sgl_ptr;
    struct tws_cmd_generic *gcmd;

    if ( error ) {
        TWS_TRACE(sc, "SOMETHING BAD HAPPENED! error = %d\n", error, 0);
    }

    if ( error == EFBIG ) {
        TWS_TRACE(sc, "not enough data segs", 0, nseg);
        req->error_code = error;
        req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
        return;
    }

    if ( req->flags & TWS_DIR_IN )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                        BUS_DMASYNC_PREREAD);
    if ( req->flags & TWS_DIR_OUT )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                        BUS_DMASYNC_PREWRITE);
    if ( segs ) {
        if ( (req->type == TWS_REQ_TYPE_PASSTHRU &&
              GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
                         TWS_FW_CMD_EXECUTE_SCSI) ||
             req->type == TWS_REQ_TYPE_GETSET_PARAM) {
            gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
            sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
            gcmd->size += sgls *
                ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 : 2);
            tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls);
        } else {
            tws_fill_sg_list(req->sc, (void *)segs,
                      (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls);
            req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls;
        }
    }

    req->error_code = tws_submit_command(req->sc, req);
}

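/*
 * Copy a bus_dma_segment_t array into the firmware's SGL format.  On
 * 64-bit controllers the source pointer is advanced by
 * sizeof(bus_dma_segment_t) per entry, since the host segment layout
 * presumably differs from the packed firmware descriptor; the
 * destination uses 64-bit or 32-bit descriptors depending on
 * tws_use_32bit_sgls.
 */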
static void
tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
    u_int16_t num_sgl_entries)
{
    int i;

    if ( sc->is64bit ) {
        struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;

        if ( !tws_use_32bit_sgls ) {
            struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;

            if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_d[i].reserved = 0;
                sgl_s = (struct tws_sg_desc64 *)(((u_int8_t *)sgl_s) +
                                           sizeof(bus_dma_segment_t));
            }
        } else {
            struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;

            if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_s = (struct tws_sg_desc64 *)(((u_int8_t *)sgl_s) +
                                           sizeof(bus_dma_segment_t));
            }
        }
    } else {
        struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
        struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;

        if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
            TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);

        for (i = 0; i < num_sgl_entries; i++) {
            sgl_d[i].address = sgl_s[i].address;
            sgl_d[i].length = sgl_s[i].length;
            sgl_d[i].flag = 0;
        }
    }
}

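/*
 * Interrupt handler: check the host interrupt status register; bit 2
 * signals a doorbell (micro-controller error or AEN attention) and
 * bit 3 signals response queue entries to be reaped.
 */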
void
tws_intr(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    u_int32_t histat = 0, db = 0;

    if (!(sc)) {
        printf("tws: null softc in interrupt!\n");
        return;
    }

    if ( tws_get_state(sc) == TWS_RESET ) {
        return;
    }

    if ( tws_get_state(sc) != TWS_ONLINE ) {
        return;
    }

    sc->stats.num_intrs++;
    histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
    if ( histat & TWS_BIT2 ) {
        TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
        db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
        if ( db & TWS_BIT21 ) {
            tws_intr_attn_error(sc);
            return;
        }
        if ( db & TWS_BIT18 ) {
            tws_intr_attn_aen(sc);
        }
    }

    if ( histat & TWS_BIT3 ) {
        tws_intr_resp(sc);
    }
}

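/*
 * Doorbell AEN attention: kick off an AEN fetch and clear the
 * doorbell bit; the read back of the doorbell register presumably
 * flushes the posted write.
 */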
static void
tws_intr_attn_aen(struct tws_softc *sc)
{
    /* mask off db intrs until all the aens are fetched */
    /* tws_disable_db_intr(sc); */
    tws_fetch_aen((void *)sc);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
    (void)tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
}

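/*
 * Doorbell error attention: a micro-controller error is fatal to the
 * current firmware session, so acknowledge all doorbell bits and
 * reset the controller.
 */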
static void
tws_intr_attn_error(struct tws_softc *sc)
{
    TWS_TRACE(sc, "attn error", 0, 0);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    (void)tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
    device_printf(sc->tws_dev, "Micro controller error.\n");
    tws_reset(sc);
}

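/*
 * Drain the response queue: completions with a valid request id are
 * dispatched through the request's callback; TWS_INVALID_REQID marks
 * an errored command whose details live in the sense buffer at mfa.
 */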
static void
tws_intr_resp(struct tws_softc *sc)
{
    u_int16_t req_id;
    u_int64_t mfa;

    while ( tws_get_response(sc, &req_id, &mfa) ) {
        sc->stats.reqs_out++;
        if ( req_id == TWS_INVALID_REQID ) {
            TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
            sc->stats.reqs_errored++;
            tws_err_complete(sc, mfa);
            continue;
        }
        sc->reqs[req_id].cb(&sc->reqs[req_id]);
    }
}

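/* CAM polled-mode entry point: just run the interrupt handler. */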
static void
tws_poll(struct cam_sim *sim)
{
    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    tws_intr((void *) sc);
}

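/*
 * Request timeout handler: unless the request was already failed by a
 * reset (checked once cheaply, then again under gen_lock), freeze the
 * simq and run a full controller reset sequence.
 */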
void
tws_timeout(void *arg)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;

    if ( req->error_code == TWS_REQ_RET_RESET ) {
        return;
    }

    mtx_lock(&sc->gen_lock);
    if ( req->error_code == TWS_REQ_RET_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    if ( tws_get_state(sc) == TWS_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
        device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n");
    } else if (req->type == TWS_REQ_TYPE_PASSTHRU) {
        device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n");
    } else {
        device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n");
    }

    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb( (void*) sc );
    tws_reinit( (void*) sc );

    /* device_printf(sc->tws_dev, "Controller Reset complete!\n"); */
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    xpt_release_simq(sc->sim, 1);
}

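/*
 * Externally triggered controller reset (e.g. from the error
 * attention handler): same sequence as the timeout path, without a
 * request to check.
 */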
void
tws_reset(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;

    mtx_lock(&sc->gen_lock);
    if ( tws_get_state(sc) == TWS_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    device_printf(sc->tws_dev, "Resetting controller\n");

    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb( (void*) sc );
    tws_reinit( (void*) sc );

    /* device_printf(sc->tws_dev, "Controller Reset complete!\n"); */
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    xpt_release_simq(sc->sim, 1);
}

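/*
 * First half of the reset sequence: fail back outstanding work (busy
 * queue, reserved requests, response queue), then spin for up to
 * TWS_POLL_TIMEOUT waiting for the controller-ready flag in the
 * scratchpad register.
 */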
static void
tws_reset_cb(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    time_t endt;
    int found = 0;
    u_int32_t reg;

    if ( tws_get_state(sc) != TWS_RESET ) {
        return;
    }

    /* device_printf(sc->tws_dev, "Draining Busy Queue\n"); */
    tws_drain_busy_queue(sc);
    /* device_printf(sc->tws_dev, "Draining Reserved Reqs\n"); */
    tws_drain_reserved_reqs(sc);
    /* device_printf(sc->tws_dev, "Draining Response Queue\n"); */
    tws_drain_response_queue(sc);

    /* device_printf(sc->tws_dev, "Looking for controller ready flag...\n"); */
    endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT;
    while ((TWS_LOCAL_TIME <= endt) && (!found)) {
        reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
        if ( reg & TWS_BIT13 ) {
            found = 1;
            /* device_printf(sc->tws_dev, " ... Got it!\n"); */
        }
    }
    if ( !found )
        device_printf(sc->tws_dev, " ... Controller ready flag NOT found!\n");
}

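/*
 * Second half of the reset sequence: wait (sleeping on gen_lock) for
 * the controller to come ready, re-asserting soft reset once if it
 * takes longer than TWS_RESET_TIMEOUT, then re-establish the firmware
 * connection, rebuild the out-bound free list and re-enable
 * interrupts before waking any waiters.
 */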
static void
tws_reinit(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    int timeout_val = 0;
    int try = 2;
    int done = 0;

    /* device_printf(sc->tws_dev, "Waiting for Controller Ready\n"); */
    while ( !done && try ) {
        if ( tws_ctlr_ready(sc) ) {
            done = 1;
            break;
        } else {
            timeout_val += 5;
            if ( timeout_val >= TWS_RESET_TIMEOUT ) {
                timeout_val = 0;
                if ( try )
                    tws_assert_soft_reset(sc);
                try--;
            }
            mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
        }
    }

    if (!done) {
        device_printf(sc->tws_dev, "FAILED to get Controller Ready!\n");
        return;
    }

    sc->obfl_q_overrun = false;
    /* device_printf(sc->tws_dev, "Sending initConnect\n"); */
    if ( tws_init_connect(sc, tws_queue_depth) ) {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
    }
    tws_init_obfl_q(sc);

    tws_turn_on_interrupts(sc);

    wakeup_one(sc);
}

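/*
 * Freeze the simq when a deferred DMA load leaves an I/O pending; the
 * CCB is flagged so CAM releases the queue and requeues the request.
 */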
static void
tws_freeze_simq(struct tws_softc *sc, struct tws_request *req)
{
    /* Only for IO commands */
    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
        union ccb *ccb = (union ccb *)(req->ccb_ptr);

        xpt_freeze_simq(sc->sim, 1);
        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
    }
}

TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);