xref: /freebsd/sys/dev/tws/tws.c (revision 4e99f45480598189d49d45a825533a6c9e12f02c)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2010, LSI Corp.
5  * All rights reserved.
6  * Author : Manjunath Ranganathaiah
7  * Support: freebsdraid@lsi.com
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of the <ORGANIZATION> nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
31  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
33  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include <dev/tws/tws.h>
41 #include <dev/tws/tws_services.h>
42 #include <dev/tws/tws_hdm.h>
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 
47 MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver");
48 int tws_queue_depth = TWS_MAX_REQS;
49 int tws_enable_msi = 0;
50 int tws_enable_msix = 0;
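/*
 * tws_queue_depth and tws_enable_msi can be overridden at boot time via the
 * hw.tws.queue_depth and hw.tws.enable_msi loader tunables declared at the
 * end of this file; tws_enable_msix has no tunable wired up here.
 */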
51 
52 
53 
54 /* externs */
55 extern int tws_cam_attach(struct tws_softc *sc);
56 extern void tws_cam_detach(struct tws_softc *sc);
57 extern int tws_init_ctlr(struct tws_softc *sc);
58 extern boolean tws_ctlr_ready(struct tws_softc *sc);
59 extern void tws_turn_off_interrupts(struct tws_softc *sc);
60 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
61                                 u_int8_t q_type );
62 extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
63                                    struct tws_request *req, u_int8_t q_type );
64 extern struct tws_request *tws_q_remove_head(struct tws_softc *sc,
65                                                        u_int8_t q_type );
66 extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id);
67 extern boolean tws_ctlr_reset(struct tws_softc *sc);
68 extern void tws_intr(void *arg);
69 extern int tws_use_32bit_sgls;
70 
71 
72 struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type);
73 int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
74 void tws_send_event(struct tws_softc *sc, u_int8_t event);
75 uint8_t tws_get_state(struct tws_softc *sc);
76 void tws_release_request(struct tws_request *req);
77 
78 
79 
80 /* Function prototypes */
81 static d_open_t     tws_open;
82 static d_close_t    tws_close;
83 static d_read_t     tws_read;
84 static d_write_t    tws_write;
85 extern d_ioctl_t    tws_ioctl;
86 
87 static int tws_init(struct tws_softc *sc);
88 static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
89                            int nseg, int error);
90 
91 static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size);
92 static int tws_init_aen_q(struct tws_softc *sc);
93 static int tws_init_trace_q(struct tws_softc *sc);
94 static int tws_setup_irq(struct tws_softc *sc);
95 int tws_setup_intr(struct tws_softc *sc, int irqs);
96 int tws_teardown_intr(struct tws_softc *sc);
97 
98 
99 /* Character device entry points */
100 
101 static struct cdevsw tws_cdevsw = {
102     .d_version =    D_VERSION,
103     .d_open =   tws_open,
104     .d_close =  tws_close,
105     .d_read =   tws_read,
106     .d_write =  tws_write,
107     .d_ioctl =  tws_ioctl,
108     .d_name =   "tws",
109 };
110 
111 /*
112  * In the cdevsw routines, we find our softc by using the si_drv1 member
113  * of struct cdev.  We set this variable to point to our softc in our
114  * attach routine when we create the /dev entry.
115  */
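/*
 * The open, close, read, and write entry points below are stubs that only
 * emit trace records; all real management I/O goes through tws_ioctl(),
 * which is implemented elsewhere in the driver.
 */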
116 
117 int
118 tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
119 {
120     struct tws_softc *sc = dev->si_drv1;
121 
122     if ( sc )
123         TWS_TRACE_DEBUG(sc, "entry", dev, oflags);
124     return (0);
125 }
126 
127 int
128 tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
129 {
130     struct tws_softc *sc = dev->si_drv1;
131 
132     if ( sc )
133         TWS_TRACE_DEBUG(sc, "entry", dev, fflag);
134     return (0);
135 }
136 
137 int
138 tws_read(struct cdev *dev, struct uio *uio, int ioflag)
139 {
140     struct tws_softc *sc = dev->si_drv1;
141 
142     if ( sc )
143         TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
144     return (0);
145 }
146 
147 int
148 tws_write(struct cdev *dev, struct uio *uio, int ioflag)
149 {
150     struct tws_softc *sc = dev->si_drv1;
151 
152     if ( sc )
153         TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
154     return (0);
155 }
156 
157 /* PCI Support Functions */
158 
159 /*
160  * Compare the device ID of this device against the IDs that this driver
161  * supports.  If there is a match, set the description and return success.
162  */
163 static int
164 tws_probe(device_t dev)
165 {
166     static u_int8_t first_ctlr = 1;
167 
168     if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
169         (pci_get_device(dev) == TWS_DEVICE_ID)) {
170         device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
171         if (first_ctlr) {
172             printf("LSI 3ware device driver for SAS/SATA storage "
173                     "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
174             first_ctlr = 0;
175         }
176 
177         return(BUS_PROBE_DEFAULT);
178     }
179     return (ENXIO);
180 }
181 
182 /* Attach function is only called if the probe is successful. */
183 
184 static int
185 tws_attach(device_t dev)
186 {
187     struct tws_softc *sc = device_get_softc(dev);
188     u_int32_t bar;
189     int error=0,i;
190 
191     /* no tracing yet */
192     /* Look up our softc and initialize its fields. */
193     sc->tws_dev = dev;
194     sc->device_id = pci_get_device(dev);
195     sc->subvendor_id = pci_get_subvendor(dev);
196     sc->subdevice_id = pci_get_subdevice(dev);
197 
198     /* Initialize mutexes */
199     mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
200     mtx_init( &sc->sim_lock,  "tws_sim_lock", NULL, MTX_DEF);
201     mtx_init( &sc->gen_lock,  "tws_gen_lock", NULL, MTX_DEF);
202     mtx_init( &sc->io_lock,  "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);
203     callout_init(&sc->stats_timer, 1);
204 
205     if ( tws_init_trace_q(sc) == FAILURE )
206         printf("trace init failure\n");
207     /* send init event */
208     mtx_lock(&sc->gen_lock);
209     tws_send_event(sc, TWS_INIT_START);
210     mtx_unlock(&sc->gen_lock);
211 
212 
213 #if _BYTE_ORDER == _BIG_ENDIAN
214     TWS_TRACE(sc, "BIG endian", 0, 0);
215 #endif
216     /* sysctl context setup */
217     sysctl_ctx_init(&sc->tws_clist);
218     sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
219         SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
220 	device_get_nameunit(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
221     if ( sc->tws_oidp == NULL ) {
222         tws_log(sc, SYSCTL_TREE_NODE_ADD);
223         goto attach_fail_1;
224     }
225     SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
226                       OID_AUTO, "driver_version", CTLFLAG_RD,
227                       TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");
228 
229     pci_enable_busmaster(dev);
230 
231     bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
232     TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
233     bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
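    /* TWS_BIT2 is presumably a BAR flag bit that must be masked off to obtain the base address. */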
234     bar = bar & ~TWS_BIT2;
235     TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);
236 
237     /* The MFA base address comes from the BAR2 register and is used for
238      * push mode. Firmware will eventually move to pull mode, at which
239      * point this will need to change.
240      */
241 #ifndef TWS_PULL_MODE_ENABLE
242     sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
243     sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
244     TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
245 #endif
246 
247     /* allocate MMIO register space */
248     sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
249     if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
250                                 &(sc->reg_res_id), RF_ACTIVE))
251                                 == NULL) {
252         tws_log(sc, ALLOC_MEMORY_RES);
253         goto attach_fail_1;
254     }
255     sc->bus_tag = rman_get_bustag(sc->reg_res);
256     sc->bus_handle = rman_get_bushandle(sc->reg_res);
257 
258 #ifndef TWS_PULL_MODE_ENABLE
259     /* Allocate bus space for inbound mfa */
260     sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
261     if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
262                           &(sc->mfa_res_id), RF_ACTIVE))
263                                 == NULL) {
264         tws_log(sc, ALLOC_MEMORY_RES);
265         goto attach_fail_2;
266     }
267     sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
268     sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
269 #endif
270 
271     /* Allocate and register our interrupt. */
272     sc->intr_type = TWS_INTx; /* default */
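    /*
     * Only INTx and MSI are set up here; tws_enable_msix is declared above
     * but no MSI-X path is configured in this function.
     */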
273 
274     if ( tws_enable_msi )
275         sc->intr_type = TWS_MSI;
276     if ( tws_setup_irq(sc) == FAILURE ) {
277         tws_log(sc, ALLOC_MEMORY_RES);
278         goto attach_fail_3;
279     }
280 
281     /*
282      * Create a /dev entry for this device.  The kernel will assign us
283      * a major number automatically.  We use the unit number of this
284      * device as the minor number and name the character device
285      * "tws<unit>".
286      */
287     sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
288         UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
289         device_get_unit(dev));
290     sc->tws_cdev->si_drv1 = sc;
291 
292     if ( tws_init(sc) == FAILURE ) {
293         tws_log(sc, TWS_INIT_FAILURE);
294         goto attach_fail_4;
295     }
296     if ( tws_init_ctlr(sc) == FAILURE ) {
297         tws_log(sc, TWS_CTLR_INIT_FAILURE);
298         goto attach_fail_4;
299     }
300     if ((error = tws_cam_attach(sc))) {
301         tws_log(sc, TWS_CAM_ATTACH);
302         goto attach_fail_4;
303     }
304     /* send init complete event */
305     mtx_lock(&sc->gen_lock);
306     tws_send_event(sc, TWS_INIT_COMPLETE);
307     mtx_unlock(&sc->gen_lock);
308 
309     TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
310     return(0);
311 
312 attach_fail_4:
313     tws_teardown_intr(sc);
314     destroy_dev(sc->tws_cdev);
315     if (sc->dma_mem_phys)
316 	    bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
317     if (sc->dma_mem)
318 	    bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
319     if (sc->cmd_tag)
320 	    bus_dma_tag_destroy(sc->cmd_tag);
321 attach_fail_3:
322     for(i=0;i<sc->irqs;i++) {
323         if ( sc->irq_res[i] ){
324             if (bus_release_resource(sc->tws_dev,
325                  SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
326                 TWS_TRACE(sc, "bus irq res", 0, 0);
327         }
328     }
329 #ifndef TWS_PULL_MODE_ENABLE
330 attach_fail_2:
331 #endif
332     if ( sc->mfa_res ){
333         if (bus_release_resource(sc->tws_dev,
334                  SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
335             TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
336     }
337     if ( sc->reg_res ){
338         if (bus_release_resource(sc->tws_dev,
339                  SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
340             TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
341     }
342 attach_fail_1:
343     mtx_destroy(&sc->q_lock);
344     mtx_destroy(&sc->sim_lock);
345     mtx_destroy(&sc->gen_lock);
346     mtx_destroy(&sc->io_lock);
347     sysctl_ctx_free(&sc->tws_clist);
348     return (ENXIO);
349 }
350 
351 /* Detach device. */
352 
353 static int
354 tws_detach(device_t dev)
355 {
356     struct tws_softc *sc = device_get_softc(dev);
357     int i;
358     u_int32_t reg;
359 
360     TWS_TRACE_DEBUG(sc, "entry", 0, 0);
361 
362     mtx_lock(&sc->gen_lock);
363     tws_send_event(sc, TWS_UNINIT_START);
364     mtx_unlock(&sc->gen_lock);
365 
366     /* Interrupts need to be disabled before detaching from CAM. */
367     tws_turn_off_interrupts(sc);
368     /* clear door bell */
369     tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
370     reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
371     TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
372     sc->obfl_q_overrun = false;
373     tws_init_connect(sc, 1);
374 
375     /* Teardown the state in our softc created in our attach routine. */
376     /* Disconnect the interrupt handler. */
377     tws_teardown_intr(sc);
378 
379     /* Release irq resource */
380     for(i=0;i<sc->irqs;i++) {
381         if ( sc->irq_res[i] ){
382             if (bus_release_resource(sc->tws_dev,
383                      SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
384                 TWS_TRACE(sc, "bus release irq resource",
385                                        i, sc->irq_res_id[i]);
386         }
387     }
388     if ( sc->intr_type == TWS_MSI ) {
389         pci_release_msi(sc->tws_dev);
390     }
391 
392     tws_cam_detach(sc);
393 
394     if (sc->dma_mem_phys)
395 	    bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
396     if (sc->dma_mem)
397 	    bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
398     if (sc->cmd_tag)
399 	    bus_dma_tag_destroy(sc->cmd_tag);
400 
401     /* Release memory resource */
402     if ( sc->mfa_res ){
403         if (bus_release_resource(sc->tws_dev,
404                  SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
405             TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
406     }
407     if ( sc->reg_res ){
408         if (bus_release_resource(sc->tws_dev,
409                  SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
410             TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
411     }
412 
413     for ( i=0; i< tws_queue_depth; i++) {
414 	    if (sc->reqs[i].dma_map)
415 		    bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map);
416 	    callout_drain(&sc->reqs[i].timeout);
417     }
418 
419     callout_drain(&sc->stats_timer);
420     free(sc->reqs, M_TWS);
421     free(sc->sense_bufs, M_TWS);
422     free(sc->scan_ccb, M_TWS);
423     if (sc->ioctl_data_mem)
424             bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
425     if (sc->data_tag)
426 	    bus_dma_tag_destroy(sc->data_tag);
427     free(sc->aen_q.q, M_TWS);
428     free(sc->trace_q.q, M_TWS);
429     mtx_destroy(&sc->q_lock);
430     mtx_destroy(&sc->sim_lock);
431     mtx_destroy(&sc->gen_lock);
432     mtx_destroy(&sc->io_lock);
433     destroy_dev(sc->tws_cdev);
434     sysctl_ctx_free(&sc->tws_clist);
435     return (0);
436 }
437 
438 int
439 tws_setup_intr(struct tws_softc *sc, int irqs)
440 {
441     int i, error;
442 
443     for(i=0;i<irqs;i++) {
444         if (!(sc->intr_handle[i])) {
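            /* NULL filter: tws_intr() runs as a regular ithread handler. */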
445             if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
446                                     INTR_TYPE_CAM | INTR_MPSAFE,
447                                     NULL,
448                                     tws_intr, sc, &sc->intr_handle[i]))) {
449                 tws_log(sc, SETUP_INTR_RES);
450                 return(FAILURE);
451             }
452         }
453     }
454     return(SUCCESS);
455 
456 }
457 
458 
459 int
460 tws_teardown_intr(struct tws_softc *sc)
461 {
462     int i, error;
463 
464     for(i=0;i<sc->irqs;i++) {
465         if (sc->intr_handle[i]) {
466             error = bus_teardown_intr(sc->tws_dev,
467                                       sc->irq_res[i], sc->intr_handle[i]);
468             sc->intr_handle[i] = NULL;
469         }
470     }
471     return(SUCCESS);
472 }
473 
474 
475 static int
476 tws_setup_irq(struct tws_softc *sc)
477 {
478     int messages;
479 
480     switch(sc->intr_type) {
481         case TWS_INTx :
482             sc->irqs = 1;
483             sc->irq_res_id[0] = 0;
484             sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
485                             &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
486             if ( ! sc->irq_res[0] )
487                 return(FAILURE);
488             if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
489                 return(FAILURE);
490             device_printf(sc->tws_dev, "Using legacy INTx\n");
491             break;
492         case TWS_MSI :
493             sc->irqs = 1;
494             sc->irq_res_id[0] = 1;
495             messages = 1;
496             if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) {
497                 TWS_TRACE(sc, "pci alloc msi fail", 0, messages);
498                 return(FAILURE);
499             }
500             sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
501                               &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
502 
503             if ( !sc->irq_res[0]  )
504                 return(FAILURE);
505             if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
506                 return(FAILURE);
507             device_printf(sc->tws_dev, "Using MSI\n");
508             break;
509 
510     }
511 
512     return(SUCCESS);
513 }
514 
515 static int
516 tws_init(struct tws_softc *sc)
517 {
518 
519     u_int32_t max_sg_elements;
520     u_int32_t dma_mem_size;
521     int error;
522     u_int32_t reg;
523 
524     sc->seq_id = 0;
525     if ( tws_queue_depth > TWS_MAX_REQS )
526         tws_queue_depth = TWS_MAX_REQS;
527     if (tws_queue_depth < TWS_RESERVED_REQS+1)
528         tws_queue_depth = TWS_RESERVED_REQS+1;
529     sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
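    /*
     * Use 64-bit scatter/gather elements only on 64-bit machines and only
     * when 32-bit SGLs have not been forced via tws_use_32bit_sgls.
     */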
530     max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
531                                  TWS_MAX_64BIT_SG_ELEMENTS :
532                                  TWS_MAX_32BIT_SG_ELEMENTS;
533     dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
534                              (TWS_SECTOR_SIZE) ;
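    /* The extra TWS_SECTOR_SIZE presumably leaves room for alignment padding. */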
535     if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */
536                             TWS_ALIGNMENT,           /* alignment */
537                             0,                       /* boundary */
538                             BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
539                             BUS_SPACE_MAXADDR,       /* highaddr */
540                             NULL, NULL,              /* filter, filterarg */
541                             BUS_SPACE_MAXSIZE,       /* maxsize */
542                             max_sg_elements,         /* numsegs */
543                             BUS_SPACE_MAXSIZE,       /* maxsegsize */
544                             0,                       /* flags */
545                             NULL, NULL,              /* lockfunc, lockfuncarg */
546                             &sc->parent_tag          /* tag */
547                            )) {
548         TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
549                                                     sc->is64bit);
550         return(ENOMEM);
551     }
552     /* Inbound message frames require 16-byte alignment.
553      * Outbound MFs can live with 4-byte alignment - for now just
554      * use 16 for both.
555      */
556     if ( bus_dma_tag_create(sc->parent_tag,       /* parent */
557                             TWS_IN_MF_ALIGNMENT,  /* alignment */
558                             0,                    /* boundary */
559                             BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
560                             BUS_SPACE_MAXADDR,    /* highaddr */
561                             NULL, NULL,           /* filter, filterarg */
562                             dma_mem_size,         /* maxsize */
563                             1,                    /* numsegs */
564                             BUS_SPACE_MAXSIZE,    /* maxsegsize */
565                             0,                    /* flags */
566                             NULL, NULL,           /* lockfunc, lockfuncarg */
567                             &sc->cmd_tag          /* tag */
568                            )) {
569         TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
570         return(ENOMEM);
571     }
572 
573     if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
574                     BUS_DMA_NOWAIT, &sc->cmd_map)) {
575         TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
576         return(ENOMEM);
577     }
578 
579     /* If bus_dmamem_alloc() succeeds, then bus_dmamap_load() will succeed. */
580     sc->dma_mem_phys=0;
581     error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
582                     dma_mem_size, tws_dmamap_cmds_load_cbfn,
583                     &sc->dma_mem_phys, 0);
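    /*
     * The load callback (tws_dmamap_cmds_load_cbfn below) records the single
     * segment's bus address in sc->dma_mem_phys.
     */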
584 
585    /*
586     * Create a dma tag for data buffers; size will be the maximum
587     * possible I/O size (128kB).
588     */
589     if (bus_dma_tag_create(sc->parent_tag,         /* parent */
590                            TWS_ALIGNMENT,          /* alignment */
591                            0,                      /* boundary */
592                            BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
593                            BUS_SPACE_MAXADDR,      /* highaddr */
594                            NULL, NULL,             /* filter, filterarg */
595                            TWS_MAX_IO_SIZE,        /* maxsize */
596                            max_sg_elements,        /* nsegments */
597                            TWS_MAX_IO_SIZE,        /* maxsegsize */
598                            BUS_DMA_ALLOCNOW,       /* flags */
599                            busdma_lock_mutex,      /* lockfunc */
600                            &sc->io_lock,           /* lockfuncarg */
601                            &sc->data_tag           /* tag */)) {
602         TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
603         return(ENOMEM);
604     }
605 
606     sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
607                       M_WAITOK | M_ZERO);
608     sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
609                       M_WAITOK | M_ZERO);
610     sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
611     if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
612             (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
613         device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
614         return(ENOMEM);
615     }
616 
617     if ( !tws_ctlr_ready(sc) )
618         if( !tws_ctlr_reset(sc) )
619             return(FAILURE);
620 
621     bzero(&sc->stats, sizeof(struct tws_stats));
622     tws_init_qs(sc);
623     tws_turn_off_interrupts(sc);
624 
625     /*
626      * Enable pull mode by setting bit 1.
627      * Setting bit 0 would enable interrupt coalescing;
628      * we will revisit that.
629      */
630 
631 #ifdef TWS_PULL_MODE_ENABLE
632 
633     reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
634     TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
635     tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);
636 
637 #endif
638 
639     TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
640     if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
641         return(FAILURE);
642     if ( tws_init_aen_q(sc) == FAILURE )
643         return(FAILURE);
644 
645     return(SUCCESS);
646 
647 }
648 
649 static int
650 tws_init_aen_q(struct tws_softc *sc)
651 {
652     sc->aen_q.head=0;
653     sc->aen_q.tail=0;
654     sc->aen_q.depth=256;
655     sc->aen_q.overflow=0;
656     sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
657                               M_TWS, M_WAITOK | M_ZERO);
658     return(SUCCESS);
659 }
660 
661 static int
662 tws_init_trace_q(struct tws_softc *sc)
663 {
664     sc->trace_q.head=0;
665     sc->trace_q.tail=0;
666     sc->trace_q.depth=256;
667     sc->trace_q.overflow=0;
668     sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
669                               M_TWS, M_WAITOK | M_ZERO);
670     return(SUCCESS);
671 }
672 
673 static int
674 tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
675 {
676 
677     struct tws_command_packet *cmd_buf;
678     int i;
679 
680     cmd_buf = (struct tws_command_packet *)sc->dma_mem;
681     bzero(cmd_buf, dma_mem_size);
682     TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
683     mtx_lock(&sc->q_lock);
684     for ( i=0; i< tws_queue_depth; i++)
685     {
686         if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
687             /* log an ENOMEM failure msg here */
688             mtx_unlock(&sc->q_lock);
689             return(FAILURE);
690         }
691         sc->reqs[i].cmd_pkt =  &cmd_buf[i];
692 
693         sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
694         sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
695                               (i * sizeof(struct tws_command_packet));
696 
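        /*
         * Each sense buffer shares the command packet's header, so
         * hdr_pkt_phy points at the start of packet i while cmd_pkt_phy
         * points just past the header at the command body.
         */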
697         sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
698                               sizeof(struct tws_command_header) +
699                               (i * sizeof(struct tws_command_packet));
700         sc->reqs[i].request_id = i;
701         sc->reqs[i].sc = sc;
702 
703         sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;
704 
705 	callout_init(&sc->reqs[i].timeout, 1);
706         sc->reqs[i].state = TWS_REQ_STATE_FREE;
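        /*
         * The first TWS_RESERVED_REQS entries appear to be reserved for
         * internal (non-SCSI) requests and are never placed on the free
         * queue; tws_get_request() indexes them directly by request type.
         */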
707         if ( i >= TWS_RESERVED_REQS )
708             tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
709     }
710     mtx_unlock(&sc->q_lock);
711     return(SUCCESS);
712 }
713 
714 static void
715 tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
716                            int nseg, int error)
717 {
718 
719     /* printf("command load done \n"); */
720 
721     *((bus_addr_t *)arg) = segs[0].ds_addr;
722 }
723 
724 void
725 tws_send_event(struct tws_softc *sc, u_int8_t event)
726 {
727     mtx_assert(&sc->gen_lock, MA_OWNED);
728     TWS_TRACE_DEBUG(sc, "received event ", 0, event);
729     switch (event) {
730 
731         case TWS_INIT_START:
732             sc->tws_state = TWS_INIT;
733             break;
734 
735         case TWS_INIT_COMPLETE:
736             if (sc->tws_state != TWS_INIT) {
737                 device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state);
738             } else {
739                 sc->tws_state = TWS_ONLINE;
740             }
741             break;
742 
743         case TWS_RESET_START:
744             /* We can transition to the reset state from any state except reset. */
745             if (sc->tws_state != TWS_RESET) {
746                 sc->tws_prev_state = sc->tws_state;
747                 sc->tws_state = TWS_RESET;
748             }
749             break;
750 
751         case TWS_RESET_COMPLETE:
752             if (sc->tws_state != TWS_RESET) {
753                 device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state);
754             } else {
755                 sc->tws_state = sc->tws_prev_state;
756             }
757             break;
758 
759         case TWS_SCAN_FAILURE:
760             if (sc->tws_state != TWS_ONLINE) {
761                 device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state);
762             } else {
763                 sc->tws_state = TWS_OFFLINE;
764             }
765             break;
766 
767         case TWS_UNINIT_START:
768             if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) {
769                 device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state);
770             } else {
771                 sc->tws_state = TWS_UNINIT;
772             }
773             break;
774     }
775 
776 }
777 
778 uint8_t
779 tws_get_state(struct tws_softc *sc)
780 {
781 
782     return((u_int8_t)sc->tws_state);
783 
784 }
785 
786 /* Called during system shutdown after sync. */
787 
788 static int
789 tws_shutdown(device_t dev)
790 {
791 
792     struct tws_softc *sc = device_get_softc(dev);
793 
794     TWS_TRACE_DEBUG(sc, "entry", 0, 0);
795 
796     tws_turn_off_interrupts(sc);
797     tws_init_connect(sc, 1);
798 
799     return (0);
800 }
801 
802 /*
803  * Device suspend routine.
804  */
805 static int
806 tws_suspend(device_t dev)
807 {
808     struct tws_softc *sc = device_get_softc(dev);
809 
810     if ( sc )
811         TWS_TRACE_DEBUG(sc, "entry", 0, 0);
812     return (0);
813 }
814 
815 /*
816  * Device resume routine.
817  */
818 static int
819 tws_resume(device_t dev)
820 {
821 
822     struct tws_softc *sc = device_get_softc(dev);
823 
824     if ( sc )
825         TWS_TRACE_DEBUG(sc, "entry", 0, 0);
826     return (0);
827 }
828 
829 
830 struct tws_request *
831 tws_get_request(struct tws_softc *sc, u_int16_t type)
832 {
833     struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock);
834     struct tws_request *r = NULL;
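    /*
     * SCSI I/O requests are taken from the free queue under q_lock; all
     * other request types index a dedicated reserved slot (sc->reqs[type])
     * under gen_lock.
     */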
835 
836     mtx_lock(my_mutex);
837 
838     if (type == TWS_REQ_TYPE_SCSI_IO) {
839         r = tws_q_remove_head(sc, TWS_FREE_Q);
840     } else {
841         if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) {
842             r = &sc->reqs[type];
843         }
844     }
845 
846     if ( r ) {
847         bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache));
848         r->data = NULL;
849         r->length = 0;
850         r->type = type;
851         r->flags = TWS_DIR_UNKNOWN;
852         r->error_code = TWS_REQ_RET_INVALID;
853         r->cb = NULL;
854         r->ccb_ptr = NULL;
855 	callout_stop(&r->timeout);
856         r->next = r->prev = NULL;
857 
858         r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY);
859     }
860 
861     mtx_unlock(my_mutex);
862 
863     return(r);
864 }
865 
866 void
867 tws_release_request(struct tws_request *req)
868 {
869 
870     struct tws_softc *sc = req->sc;
871 
872     TWS_TRACE_DEBUG(sc, "entry", sc, 0);
873     mtx_lock(&sc->q_lock);
874     tws_q_insert_tail(sc, req, TWS_FREE_Q);
875     mtx_unlock(&sc->q_lock);
876 }
877 
878 static device_method_t tws_methods[] = {
879     /* Device interface */
880     DEVMETHOD(device_probe,     tws_probe),
881     DEVMETHOD(device_attach,    tws_attach),
882     DEVMETHOD(device_detach,    tws_detach),
883     DEVMETHOD(device_shutdown,  tws_shutdown),
884     DEVMETHOD(device_suspend,   tws_suspend),
885     DEVMETHOD(device_resume,    tws_resume),
886 
887     DEVMETHOD_END
888 };
889 
890 static driver_t tws_driver = {
891         "tws",
892         tws_methods,
893         sizeof(struct tws_softc)
894 };
895 
896 
897 static devclass_t tws_devclass;
898 
899 /* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */
900 DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0);
901 MODULE_DEPEND(tws, cam, 1, 1, 1);
902 MODULE_DEPEND(tws, pci, 1, 1, 1);
903 
904 TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth);
905 TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi);
906