/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010, LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah
 * Support: freebsdraid@lsi.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/tws/tws.h>
#include <dev/tws/tws_services.h>
#include <dev/tws/tws_hdm.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver");
int tws_queue_depth = TWS_MAX_REQS;
int tws_enable_msi = 0;
int tws_enable_msix = 0;
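
/*
 * tws_queue_depth and tws_enable_msi can be overridden at boot via the
 * hw.tws.queue_depth and hw.tws.enable_msi loader tunables registered at
 * the bottom of this file; tws_enable_msix is declared but has no tunable
 * here.
 */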



/* externs */
extern int tws_cam_attach(struct tws_softc *sc);
extern void tws_cam_detach(struct tws_softc *sc);
extern int tws_init_ctlr(struct tws_softc *sc);
extern boolean tws_ctlr_ready(struct tws_softc *sc);
extern void tws_turn_off_interrupts(struct tws_softc *sc);
extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
                                u_int8_t q_type );
extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
                                   struct tws_request *req, u_int8_t q_type );
extern struct tws_request *tws_q_remove_head(struct tws_softc *sc,
                                                       u_int8_t q_type );
extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id);
extern boolean tws_ctlr_reset(struct tws_softc *sc);
extern void tws_intr(void *arg);
extern int tws_use_32bit_sgls;


struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type);
int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
void tws_send_event(struct tws_softc *sc, u_int8_t event);
uint8_t tws_get_state(struct tws_softc *sc);
void tws_release_request(struct tws_request *req);



/* Function prototypes */
static d_open_t     tws_open;
static d_close_t    tws_close;
static d_read_t     tws_read;
static d_write_t    tws_write;
extern d_ioctl_t    tws_ioctl;

static int tws_init(struct tws_softc *sc);
static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error);

static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size);
static int tws_init_aen_q(struct tws_softc *sc);
static int tws_init_trace_q(struct tws_softc *sc);
static int tws_setup_irq(struct tws_softc *sc);
int tws_setup_intr(struct tws_softc *sc, int irqs);
int tws_teardown_intr(struct tws_softc *sc);


/* Character device entry points */

static struct cdevsw tws_cdevsw = {
    .d_version =    D_VERSION,
    .d_open =   tws_open,
    .d_close =  tws_close,
    .d_read =   tws_read,
    .d_write =  tws_write,
    .d_ioctl =  tws_ioctl,
    .d_name =   "tws",
};

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member
 * of struct cdev.  We set this variable to point to our softc in our
 * attach routine when we create the /dev entry.
 */

int
tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, oflags);
    return (0);
}

int
tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, fflag);
    return (0);
}

int
tws_read(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

int
tws_write(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}
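
/*
 * Note: the open/close/read/write entry points above are stubs that only
 * emit a trace record; management requests from userland are handled by
 * tws_ioctl(), which is implemented elsewhere in the driver.
 */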

/* PCI Support Functions */

/*
 * Compare the device ID of this device against the IDs that this driver
 * supports.  If there is a match, set the description and return success.
 */
static int
tws_probe(device_t dev)
{
    static u_int8_t first_ctlr = 1;

    if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
        (pci_get_device(dev) == TWS_DEVICE_ID)) {
        device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
        if (first_ctlr) {
            printf("LSI 3ware device driver for SAS/SATA storage "
                    "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
            first_ctlr = 0;
        }

        return(BUS_PROBE_DEFAULT);
    }
    return (ENXIO);
}

/* Attach function is only called if the probe is successful. */

static int
tws_attach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    u_int32_t bar;
    int error=0,i;

    /* no tracing yet */
    /* Look up our softc and initialize its fields. */
    sc->tws_dev = dev;
    sc->device_id = pci_get_device(dev);
    sc->subvendor_id = pci_get_subvendor(dev);
    sc->subdevice_id = pci_get_subdevice(dev);

    /* Initialize mutexes */
    mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
    mtx_init( &sc->sim_lock,  "tws_sim_lock", NULL, MTX_DEF);
    mtx_init( &sc->gen_lock,  "tws_gen_lock", NULL, MTX_DEF);
    mtx_init( &sc->io_lock,  "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);
    callout_init(&sc->stats_timer, 1);

    if ( tws_init_trace_q(sc) == FAILURE )
        printf("trace init failure\n");
    /* send init event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_START);
    mtx_unlock(&sc->gen_lock);


#if _BYTE_ORDER == _BIG_ENDIAN
    TWS_TRACE(sc, "BIG endian", 0, 0);
#endif
    /* sysctl context setup */
    sysctl_ctx_init(&sc->tws_clist);
    sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
                                   SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
                                   device_get_nameunit(dev),
                                   CTLFLAG_RD, 0, "");
    if ( sc->tws_oidp == NULL ) {
        tws_log(sc, SYSCTL_TREE_NODE_ADD);
        goto attach_fail_1;
    }
    SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
                      OID_AUTO, "driver_version", CTLFLAG_RD,
                      TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");

    pci_enable_busmaster(dev);

    bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
    TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
    bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
    bar = bar & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);

    /*
     * The MFA base address lives in the BAR2 register and is used for
     * push mode.  Firmware will eventually move to pull mode, at which
     * point this will need to change.
     */
#ifndef TWS_PULL_MODE_ENABLE
    sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
    sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

    /* allocate MMIO register space */
    sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
    if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                &(sc->reg_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_1;
    }
    sc->bus_tag = rman_get_bustag(sc->reg_res);
    sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
    /* Allocate bus space for inbound mfa */
    sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
    if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                          &(sc->mfa_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_2;
    }
    sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
    sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

    /* Allocate and register our interrupt. */
    sc->intr_type = TWS_INTx; /* default */

    if ( tws_enable_msi )
        sc->intr_type = TWS_MSI;
    if ( tws_setup_irq(sc) == FAILURE ) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_3;
    }

    /*
     * Create a /dev entry for this device.  The kernel will assign us
     * a major number automatically.  We use the unit number of this
     * device as the minor number and name the character device
     * "tws<unit>".
     */
    sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
        UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
        device_get_unit(dev));
    sc->tws_cdev->si_drv1 = sc;

    if ( tws_init(sc) == FAILURE ) {
        tws_log(sc, TWS_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ( tws_init_ctlr(sc) == FAILURE ) {
        tws_log(sc, TWS_CTLR_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ((error = tws_cam_attach(sc))) {
        tws_log(sc, TWS_CAM_ATTACH);
        goto attach_fail_4;
    }
    /* send init complete event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
    return(0);

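/*
 * Error unwinding: each attach_fail_N label below releases, in reverse order,
 * the resources acquired before the corresponding failure point (DMA memory
 * and the control device, IRQ resources, memory-mapped BARs, and finally the
 * mutexes and sysctl context).
 */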
attach_fail_4:
    tws_teardown_intr(sc);
    destroy_dev(sc->tws_cdev);
    if (sc->dma_mem_phys)
        bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
        bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
        bus_dma_tag_destroy(sc->cmd_tag);
attach_fail_3:
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                 SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus irq res", 0, 0);
        }
    }
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2:
#endif
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
    }
attach_fail_1:
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    sysctl_ctx_free(&sc->tws_clist);
    return (ENXIO);
}

/* Detach device. */

static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int i;
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_UNINIT_START);
    mtx_unlock(&sc->gen_lock);

    /* Interrupts must be disabled before detaching from CAM. */
    tws_turn_off_interrupts(sc);
    /* clear door bell */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
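    /*
     * Re-issue an init-connect request; presumably this returns the
     * controller to a quiescent state before teardown (tws_shutdown()
     * performs the same sequence).
     */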
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    tws_teardown_intr(sc);

    /* Release irq resource */
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus release irq resource",
                                       i, sc->irq_res_id[i]);
        }
    }
    if ( sc->intr_type == TWS_MSI ) {
        pci_release_msi(sc->tws_dev);
    }

    tws_cam_detach(sc);

    if (sc->dma_mem_phys)
        bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
        bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
        bus_dma_tag_destroy(sc->cmd_tag);

    /* Release memory resource */
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    for ( i=0; i< tws_queue_depth; i++) {
        if (sc->reqs[i].dma_map)
            bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map);
        callout_drain(&sc->reqs[i].timeout);
    }

    callout_drain(&sc->stats_timer);
    free(sc->reqs, M_TWS);
    free(sc->sense_bufs, M_TWS);
    free(sc->scan_ccb, M_TWS);
    if (sc->ioctl_data_mem)
        bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
    if (sc->data_tag)
        bus_dma_tag_destroy(sc->data_tag);
    free(sc->aen_q.q, M_TWS);
    free(sc->trace_q.q, M_TWS);
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}

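/*
 * Hook tws_intr() up to each allocated IRQ resource.  The __FreeBSD_version
 * check passes the extra NULL filter argument required by the newer
 * bus_setup_intr() signature.
 */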
int
tws_setup_intr(struct tws_softc *sc, int irqs)
{
    int i, error;

    for(i=0;i<irqs;i++) {
        if (!(sc->intr_handle[i])) {
            if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
                                    INTR_TYPE_CAM | INTR_MPSAFE,
#if (__FreeBSD_version >= 700000)
                                    NULL,
#endif
                                    tws_intr, sc, &sc->intr_handle[i]))) {
                tws_log(sc, SETUP_INTR_RES);
                return(FAILURE);
            }
        }
    }
    return(SUCCESS);

}


int
tws_teardown_intr(struct tws_softc *sc)
{
    int i, error;

    for(i=0;i<sc->irqs;i++) {
        if (sc->intr_handle[i]) {
            error = bus_teardown_intr(sc->tws_dev,
                                      sc->irq_res[i], sc->intr_handle[i]);
            sc->intr_handle[i] = NULL;
        }
    }
    return(SUCCESS);
}


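/*
 * Allocate the controller interrupt.  Legacy INTx is the default; MSI is
 * used only when the hw.tws.enable_msi tunable is set.  tws_enable_msix is
 * never consulted here, so MSI-X is not configured by this routine.
 */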
static int
tws_setup_irq(struct tws_softc *sc)
{
    int messages;

    switch(sc->intr_type) {
        case TWS_INTx :
            sc->irqs = 1;
            sc->irq_res_id[0] = 0;
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                            &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
            if ( ! sc->irq_res[0] )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using legacy INTx\n");
            break;
        case TWS_MSI :
            sc->irqs = 1;
            sc->irq_res_id[0] = 1;
            messages = 1;
            if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) {
                TWS_TRACE(sc, "pci alloc msi fail", 0, messages);
                return(FAILURE);
            }
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                              &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);

            if ( !sc->irq_res[0]  )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using MSI\n");
            break;

    }

    return(SUCCESS);
}

static int
tws_init(struct tws_softc *sc)
{

    u_int32_t max_sg_elements;
    u_int32_t dma_mem_size;
    int error;
    u_int32_t reg;

    sc->seq_id = 0;
    if ( tws_queue_depth > TWS_MAX_REQS )
        tws_queue_depth = TWS_MAX_REQS;
    if (tws_queue_depth < TWS_RESERVED_REQS+1)
        tws_queue_depth = TWS_RESERVED_REQS+1;
    sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
    max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
                                 TWS_MAX_64BIT_SG_ELEMENTS :
                                 TWS_MAX_32BIT_SG_ELEMENTS;
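    /*
     * Command DMA area layout: one tws_command_packet per request, plus one
     * extra sector of slack (the purpose of the extra TWS_SECTOR_SIZE is not
     * spelled out here; it presumably absorbs alignment padding).
     */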
    dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
                             (TWS_SECTOR_SIZE) ;
    if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */
                            TWS_ALIGNMENT,           /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            BUS_SPACE_MAXSIZE,       /* maxsize */
                            max_sg_elements,         /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->parent_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
                                                    sc->is64bit);
        return(ENOMEM);
    }
    /*
     * Inbound message frames require 16-byte alignment.
     * Outbound MFs can live with 4-byte alignment; for now just
     * use 16 for both.
     */
    if ( bus_dma_tag_create(sc->parent_tag,       /* parent */
                            TWS_IN_MF_ALIGNMENT,  /* alignment */
                            0,                    /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,    /* highaddr */
                            NULL, NULL,           /* filter, filterarg */
                            dma_mem_size,         /* maxsize */
                            1,                    /* numsegs */
                            BUS_SPACE_MAXSIZE,    /* maxsegsize */
                            0,                    /* flags */
                            NULL, NULL,           /* lockfunc, lockfuncarg */
                            &sc->cmd_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
                    BUS_DMA_NOWAIT, &sc->cmd_map)) {
        TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
    sc->dma_mem_phys=0;
    error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
                    dma_mem_size, tws_dmamap_cmds_load_cbfn,
                    &sc->dma_mem_phys, 0);

    /*
     * Create a dma tag for data buffers; size will be the maximum
     * possible I/O size (128kB).
     */
    if (bus_dma_tag_create(sc->parent_tag,         /* parent */
                           TWS_ALIGNMENT,          /* alignment */
                           0,                      /* boundary */
                           BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                           BUS_SPACE_MAXADDR,      /* highaddr */
                           NULL, NULL,             /* filter, filterarg */
                           TWS_MAX_IO_SIZE,        /* maxsize */
                           max_sg_elements,        /* nsegments */
                           TWS_MAX_IO_SIZE,        /* maxsegsize */
                           BUS_DMA_ALLOCNOW,       /* flags */
                           busdma_lock_mutex,      /* lockfunc */
                           &sc->io_lock,           /* lockfuncarg */
                           &sc->data_tag           /* tag */)) {
        TWS_TRACE_DEBUG(sc, "DMA data tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
    if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
        device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
        return(ENOMEM);
    }

    if ( !tws_ctlr_ready(sc) )
        if( !tws_ctlr_reset(sc) )
            return(FAILURE);

    bzero(&sc->stats, sizeof(struct tws_stats));
    tws_init_qs(sc);
    tws_turn_off_interrupts(sc);

    /*
     * Enable pull mode by setting bit 1.  Setting bit 0 would enable
     * interrupt coalescing; will revisit.
     */

#ifdef TWS_PULL_MODE_ENABLE

    reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
    TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
    tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);

#endif

    TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
    if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
        return(FAILURE);
    if ( tws_init_aen_q(sc) == FAILURE )
        return(FAILURE);

    return(SUCCESS);

}

static int
tws_init_aen_q(struct tws_softc *sc)
{
    sc->aen_q.head=0;
    sc->aen_q.tail=0;
    sc->aen_q.depth=256;
    sc->aen_q.overflow=0;
    sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

static int
tws_init_trace_q(struct tws_softc *sc)
{
    sc->trace_q.head=0;
    sc->trace_q.tail=0;
    sc->trace_q.depth=256;
    sc->trace_q.overflow=0;
    sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

static int
tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
{
    struct tws_command_packet *cmd_buf;
    int i;

    cmd_buf = (struct tws_command_packet *)sc->dma_mem;

    bzero(cmd_buf, dma_mem_size);
    TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
    mtx_lock(&sc->q_lock);
    for ( i=0; i< tws_queue_depth; i++)
    {
        if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
            /* Log an ENOMEM failure message here. */
            mtx_unlock(&sc->q_lock);
            return(FAILURE);
        }
        sc->reqs[i].cmd_pkt =  &cmd_buf[i];

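        /*
         * Each request's sense buffer aliases the command header of its
         * command packet: hdr_pkt_phy is the physical address of packet i,
         * while cmd_pkt_phy points just past the header within that packet.
         */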
        sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
        sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
                              (i * sizeof(struct tws_command_packet));

        sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
                              sizeof(struct tws_command_header) +
                              (i * sizeof(struct tws_command_packet));
        sc->reqs[i].request_id = i;
        sc->reqs[i].sc = sc;

        sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;

        callout_init(&sc->reqs[i].timeout, 1);
        sc->reqs[i].state = TWS_REQ_STATE_FREE;
        if ( i >= TWS_RESERVED_REQS )
            tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
    }
    mtx_unlock(&sc->q_lock);
    return(SUCCESS);
}

static void
tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error)
{

    /* printf("command load done \n"); */

    *((bus_addr_t *)arg) = segs[0].ds_addr;
}

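/*
 * Driver state machine: TWS_INIT -> TWS_ONLINE on successful attach; any
 * non-reset state -> TWS_RESET and back to the previous state on
 * TWS_RESET_COMPLETE; TWS_ONLINE -> TWS_OFFLINE on a scan failure; and
 * TWS_ONLINE/TWS_OFFLINE -> TWS_UNINIT on detach.  Invalid transitions are
 * logged and ignored.
 */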
void
tws_send_event(struct tws_softc *sc, u_int8_t event)
{
    mtx_assert(&sc->gen_lock, MA_OWNED);
    TWS_TRACE_DEBUG(sc, "received event ", 0, event);
    switch (event) {

        case TWS_INIT_START:
            sc->tws_state = TWS_INIT;
            break;

        case TWS_INIT_COMPLETE:
            if (sc->tws_state != TWS_INIT) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_ONLINE;
            }
            break;

        case TWS_RESET_START:
            /* We can transition to reset state from any state except reset. */
            if (sc->tws_state != TWS_RESET) {
                sc->tws_prev_state = sc->tws_state;
                sc->tws_state = TWS_RESET;
            }
            break;

        case TWS_RESET_COMPLETE:
            if (sc->tws_state != TWS_RESET) {
                device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state);
            } else {
                sc->tws_state = sc->tws_prev_state;
            }
            break;

        case TWS_SCAN_FAILURE:
            if (sc->tws_state != TWS_ONLINE) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_OFFLINE;
            }
            break;

        case TWS_UNINIT_START:
            if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_UNINIT;
            }
            break;
    }

}

uint8_t
tws_get_state(struct tws_softc *sc)
{

    return((u_int8_t)sc->tws_state);

}

/* Called during system shutdown after sync. */

static int
tws_shutdown(device_t dev)
{

    struct tws_softc *sc = device_get_softc(dev);

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    tws_turn_off_interrupts(sc);
    tws_init_connect(sc, 1);

    return (0);
}

/*
 * Device suspend routine.
 */
static int
tws_suspend(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}

/*
 * Device resume routine.
 */
static int
tws_resume(device_t dev)
{

    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}


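/*
 * Obtain a request descriptor.  SCSI I/O requests come from the free queue
 * (protected by q_lock); every other request type doubles as an index into
 * the reserved slots at the front of sc->reqs (protected by gen_lock), which
 * tws_init_reqs() kept out of the free queue.
 */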
struct tws_request *
tws_get_request(struct tws_softc *sc, u_int16_t type)
{
    struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock);
    struct tws_request *r = NULL;

    mtx_lock(my_mutex);

    if (type == TWS_REQ_TYPE_SCSI_IO) {
        r = tws_q_remove_head(sc, TWS_FREE_Q);
    } else {
        if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) {
            r = &sc->reqs[type];
        }
    }

    if ( r ) {
        bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache));
        r->data = NULL;
        r->length = 0;
        r->type = type;
        r->flags = TWS_DIR_UNKNOWN;
        r->error_code = TWS_REQ_RET_INVALID;
        r->cb = NULL;
        r->ccb_ptr = NULL;
        callout_stop(&r->timeout);
        r->next = r->prev = NULL;

        r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY);
    }

    mtx_unlock(my_mutex);

    return(r);
}

void
tws_release_request(struct tws_request *req)
{

    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}

static device_method_t tws_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     tws_probe),
    DEVMETHOD(device_attach,    tws_attach),
    DEVMETHOD(device_detach,    tws_detach),
    DEVMETHOD(device_shutdown,  tws_shutdown),
    DEVMETHOD(device_suspend,   tws_suspend),
    DEVMETHOD(device_resume,    tws_resume),

    DEVMETHOD_END
};

static driver_t tws_driver = {
        "tws",
        tws_methods,
        sizeof(struct tws_softc)
};


static devclass_t tws_devclass;

/* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */
DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0);
MODULE_DEPEND(tws, cam, 1, 1, 1);
MODULE_DEPEND(tws, pci, 1, 1, 1);

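/* Loader tunables; these can be set at boot time, e.g. in /boot/loader.conf. */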
TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth);
TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi);
909