/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010, LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah
 * Support: freebsdraid@lsi.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <dev/tws/tws.h>
#include <dev/tws/tws_services.h>
#include <dev/tws/tws_hdm.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>

MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver");
int tws_queue_depth = TWS_MAX_REQS;
int tws_enable_msi = 0;
int tws_enable_msix = 0;
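/*
 * tws_queue_depth and tws_enable_msi can be overridden from the loader via
 * the hw.tws.queue_depth and hw.tws.enable_msi tunables registered at the
 * bottom of this file; tws_enable_msix is defined but not consulted here.
 */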

/* externs */
extern int tws_cam_attach(struct tws_softc *sc);
extern void tws_cam_detach(struct tws_softc *sc);
extern int tws_init_ctlr(struct tws_softc *sc);
extern boolean tws_ctlr_ready(struct tws_softc *sc);
extern void tws_turn_off_interrupts(struct tws_softc *sc);
extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
                                u_int8_t q_type );
extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
                                   struct tws_request *req, u_int8_t q_type );
extern struct tws_request *tws_q_remove_head(struct tws_softc *sc,
                                                       u_int8_t q_type );
extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id);
extern boolean tws_ctlr_reset(struct tws_softc *sc);
extern void tws_intr(void *arg);
extern int tws_use_32bit_sgls;

struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type);
int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
void tws_send_event(struct tws_softc *sc, u_int8_t event);
uint8_t tws_get_state(struct tws_softc *sc);
void tws_release_request(struct tws_request *req);

/* Function prototypes */
static d_open_t     tws_open;
static d_close_t    tws_close;
static d_read_t     tws_read;
static d_write_t    tws_write;
extern d_ioctl_t    tws_ioctl;

static int tws_init(struct tws_softc *sc);
static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error);

static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size);
static int tws_init_aen_q(struct tws_softc *sc);
static int tws_init_trace_q(struct tws_softc *sc);
static int tws_setup_irq(struct tws_softc *sc);
int tws_setup_intr(struct tws_softc *sc, int irqs);
int tws_teardown_intr(struct tws_softc *sc);

/* Character device entry points */

static struct cdevsw tws_cdevsw = {
    .d_version =    D_VERSION,
    .d_open =   tws_open,
    .d_close =  tws_close,
    .d_read =   tws_read,
    .d_write =  tws_write,
    .d_ioctl =  tws_ioctl,
    .d_name =   "tws",
};

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member
 * of struct cdev.  We set this variable to point to our softc in our
 * attach routine when we create the /dev entry.
 */

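/*
 * The open/close/read/write entry points below only log a trace entry and
 * succeed; the real management traffic goes through the ioctl entry point
 * (tws_ioctl), which is implemented elsewhere in the driver.
 */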
int
tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, oflags);
    return (0);
}

int
tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, fflag);
    return (0);
}

int
tws_read(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

int
tws_write(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

/* PCI Support Functions */

/*
 * Compare the device ID of this device against the IDs that this driver
 * supports.  If there is a match, set the description and return success.
 */
static int
tws_probe(device_t dev)
{
    static u_int8_t first_ctlr = 1;

    if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
        (pci_get_device(dev) == TWS_DEVICE_ID)) {
        device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
        if (first_ctlr) {
            printf("LSI 3ware device driver for SAS/SATA storage "
                    "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
            first_ctlr = 0;
        }

        return(BUS_PROBE_DEFAULT);
    }
    return (ENXIO);
}

/* Attach function is only called if the probe is successful. */

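/*
 * Attach order: set up locks and the trace queue, create the sysctl tree,
 * map the register window (BAR1) and, when pull mode is not compiled in,
 * the inbound MFA window (BAR2), allocate the interrupt, create the
 * /dev/tws<unit> node, initialize DMA resources and the controller, and
 * finally attach to CAM.  The failure labels unwind in reverse order.
 */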
static int
tws_attach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    u_int32_t bar;
    int error=0,i;

    /* no tracing yet */
    /* Look up our softc and initialize its fields. */
    sc->tws_dev = dev;
    sc->device_id = pci_get_device(dev);
    sc->subvendor_id = pci_get_subvendor(dev);
    sc->subdevice_id = pci_get_subdevice(dev);

    /* Initialize mutexes */
    mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
    mtx_init( &sc->sim_lock,  "tws_sim_lock", NULL, MTX_DEF);
    mtx_init( &sc->gen_lock,  "tws_gen_lock", NULL, MTX_DEF);
    mtx_init( &sc->io_lock,  "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);
    callout_init(&sc->stats_timer, 1);

    if ( tws_init_trace_q(sc) == FAILURE )
        printf("trace init failure\n");
    /* send init event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_START);
    mtx_unlock(&sc->gen_lock);

#if _BYTE_ORDER == _BIG_ENDIAN
    TWS_TRACE(sc, "BIG endian", 0, 0);
#endif
    /* sysctl context setup */
    sysctl_ctx_init(&sc->tws_clist);
    sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
        device_get_nameunit(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
    if ( sc->tws_oidp == NULL ) {
        tws_log(sc, SYSCTL_TREE_NODE_ADD);
        goto attach_fail_1;
    }
    SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
                      OID_AUTO, "driver_version", CTLFLAG_RD,
                      TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");

    pci_enable_busmaster(dev);

    bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
    TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
    bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
    bar = bar & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);

    /* The MFA base address comes from the BAR2 register and is used for
     * push mode. Firmware will eventually move to pull mode, at which
     * point this needs to change.
     */
#ifndef TWS_PULL_MODE_ENABLE
    sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
    sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

    /* allocate MMIO register space */
    sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
    if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                &(sc->reg_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_1;
    }
    sc->bus_tag = rman_get_bustag(sc->reg_res);
    sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
    /* Allocate bus space for inbound mfa */
    sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
    if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                          &(sc->mfa_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_2;
    }
    sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
    sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

    /* Allocate and register our interrupt. */
    sc->intr_type = TWS_INTx; /* default */

    if ( tws_enable_msi )
        sc->intr_type = TWS_MSI;
    if ( tws_setup_irq(sc) == FAILURE ) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_3;
    }

    /*
     * Create a /dev entry for this device.  The kernel will assign us
     * a major number automatically.  We use the unit number of this
     * device as the minor number and name the character device
     * "tws<unit>".
     */
    sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
        UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
        device_get_unit(dev));
    sc->tws_cdev->si_drv1 = sc;

    if ( tws_init(sc) == FAILURE ) {
        tws_log(sc, TWS_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ( tws_init_ctlr(sc) == FAILURE ) {
        tws_log(sc, TWS_CTLR_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ((error = tws_cam_attach(sc))) {
        tws_log(sc, TWS_CAM_ATTACH);
        goto attach_fail_4;
    }
    /* send init complete event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
    return(0);

attach_fail_4:
    tws_teardown_intr(sc);
    destroy_dev(sc->tws_cdev);
    if (sc->dma_mem_phys)
	    bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
	    bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
	    bus_dma_tag_destroy(sc->cmd_tag);
attach_fail_3:
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                 SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus irq res", 0, 0);
        }
    }
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2:
#endif
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
    }
attach_fail_1:
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    sysctl_ctx_free(&sc->tws_clist);
    return (ENXIO);
}

/* Detach device. */

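/*
 * Tear down the state created in tws_attach: quiesce the controller
 * (mask interrupts, clear the doorbell and re-issue the init-connect),
 * release the interrupt handlers and IRQ resources, detach from CAM, and
 * then free the DMA memory, per-request maps, queues, locks and the
 * character device.
 */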
static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int i;
    u_int32_t reg __tws_debug;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_UNINIT_START);
    mtx_unlock(&sc->gen_lock);

    /* Interrupts need to be disabled before detaching from CAM. */
    tws_turn_off_interrupts(sc);
    /* clear door bell */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    tws_teardown_intr(sc);

    /* Release irq resource */
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus release irq resource",
                                       i, sc->irq_res_id[i]);
        }
    }
    if ( sc->intr_type == TWS_MSI ) {
        pci_release_msi(sc->tws_dev);
    }

    tws_cam_detach(sc);

    if (sc->dma_mem_phys)
	    bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
	    bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
	    bus_dma_tag_destroy(sc->cmd_tag);

    /* Release memory resource */
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    for ( i=0; i< tws_queue_depth; i++) {
	    if (sc->reqs[i].dma_map)
		    bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map);
	    callout_drain(&sc->reqs[i].timeout);
    }

    callout_drain(&sc->stats_timer);
    free(sc->reqs, M_TWS);
    free(sc->sense_bufs, M_TWS);
    xpt_free_ccb(sc->scan_ccb);
    if (sc->ioctl_data_mem)
            bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
    if (sc->data_tag)
	    bus_dma_tag_destroy(sc->data_tag);
    free(sc->aen_q.q, M_TWS);
    free(sc->trace_q.q, M_TWS);
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}

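/*
 * Hook tws_intr up as the handler for each allocated IRQ resource;
 * tws_teardown_intr below undoes this.
 */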
int
tws_setup_intr(struct tws_softc *sc, int irqs)
{
    int i, error;

    for(i=0;i<irqs;i++) {
        if (!(sc->intr_handle[i])) {
            if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
                                    INTR_TYPE_CAM | INTR_MPSAFE,
                                    NULL,
                                    tws_intr, sc, &sc->intr_handle[i]))) {
                tws_log(sc, SETUP_INTR_RES);
                return(FAILURE);
            }
        }
    }
    return(SUCCESS);

}

int
tws_teardown_intr(struct tws_softc *sc)
{
    int i;

    for(i=0;i<sc->irqs;i++) {
        if (sc->intr_handle[i]) {
            bus_teardown_intr(sc->tws_dev,
                                      sc->irq_res[i], sc->intr_handle[i]);
            sc->intr_handle[i] = NULL;
        }
    }
    return(SUCCESS);
}

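/*
 * Allocate the controller interrupt.  Legacy INTx is the default; a single
 * MSI vector is used instead when the hw.tws.enable_msi tunable is set.
 */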
static int
tws_setup_irq(struct tws_softc *sc)
{
    int messages;

    switch(sc->intr_type) {
        case TWS_INTx :
            sc->irqs = 1;
            sc->irq_res_id[0] = 0;
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                            &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
            if ( ! sc->irq_res[0] )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using legacy INTx\n");
            break;
        case TWS_MSI :
            sc->irqs = 1;
            sc->irq_res_id[0] = 1;
            messages = 1;
            if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) {
                TWS_TRACE(sc, "pci alloc msi fail", 0, messages);
                return(FAILURE);
            }
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                              &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);

            if ( !sc->irq_res[0]  )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using MSI\n");
            break;
    }

    return(SUCCESS);
}

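/*
 * One-time DMA and software setup: create the parent, command and data
 * busdma tags, allocate and map the command packet area, allocate the
 * request and sense arrays plus the ioctl buffer, reset the controller if
 * it is not ready, and initialize the request queues and AEN queue.
 */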
static int
tws_init(struct tws_softc *sc)
{

    u_int32_t max_sg_elements;
    u_int32_t dma_mem_size;
    u_int32_t reg;

    sc->seq_id = 0;
    if ( tws_queue_depth > TWS_MAX_REQS )
        tws_queue_depth = TWS_MAX_REQS;
    if (tws_queue_depth < TWS_RESERVED_REQS+1)
        tws_queue_depth = TWS_RESERVED_REQS+1;
    sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
    max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
                                 TWS_MAX_64BIT_SG_ELEMENTS :
                                 TWS_MAX_32BIT_SG_ELEMENTS;
    dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
                             (TWS_SECTOR_SIZE) ;
    if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */
                            TWS_ALIGNMENT,           /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            BUS_SPACE_MAXSIZE,       /* maxsize */
                            max_sg_elements,         /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->parent_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
                                                    sc->is64bit);
        return(ENOMEM);
    }
    /* Inbound message frames require 16-byte alignment.
     * Outbound MFs can live with 4-byte alignment - for now just
     * use 16 for both.
     */
    if ( bus_dma_tag_create(sc->parent_tag,       /* parent */
                            TWS_IN_MF_ALIGNMENT,  /* alignment */
                            0,                    /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,    /* highaddr */
                            NULL, NULL,           /* filter, filterarg */
                            dma_mem_size,         /* maxsize */
                            1,                    /* numsegs */
                            BUS_SPACE_MAXSIZE,    /* maxsegsize */
                            0,                    /* flags */
                            NULL, NULL,           /* lockfunc, lockfuncarg */
                            &sc->cmd_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
                    BUS_DMA_NOWAIT, &sc->cmd_map)) {
        TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
    sc->dma_mem_phys=0;
    bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
                    dma_mem_size, tws_dmamap_cmds_load_cbfn,
                    &sc->dma_mem_phys, 0);

   /*
    * Create a dma tag for data buffers; size will be the maximum
    * possible I/O size (128kB).
    */
    if (bus_dma_tag_create(sc->parent_tag,         /* parent */
                           TWS_ALIGNMENT,          /* alignment */
                           0,                      /* boundary */
                           BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                           BUS_SPACE_MAXADDR,      /* highaddr */
                           NULL, NULL,             /* filter, filterarg */
                           TWS_MAX_IO_SIZE,        /* maxsize */
                           max_sg_elements,        /* nsegments */
                           TWS_MAX_IO_SIZE,        /* maxsegsize */
                           BUS_DMA_ALLOCNOW,       /* flags */
                           busdma_lock_mutex,      /* lockfunc */
                           &sc->io_lock,           /* lockfuncarg */
                           &sc->data_tag           /* tag */)) {
        TWS_TRACE_DEBUG(sc, "DMA data tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    sc->scan_ccb = xpt_alloc_ccb();
    if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
        device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
        return(ENOMEM);
    }

    if ( !tws_ctlr_ready(sc) )
        if( !tws_ctlr_reset(sc) )
            return(FAILURE);

    bzero(&sc->stats, sizeof(struct tws_stats));
    tws_init_qs(sc);
    tws_turn_off_interrupts(sc);

    /*
     * Enable pull mode by setting bit 1.
     * Setting bit 0 to 1 would enable interrupt coalescing;
     * will revisit.
     */

#ifdef TWS_PULL_MODE_ENABLE

    reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
    TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
    tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);

#endif

    TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
    if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
        return(FAILURE);
    if ( tws_init_aen_q(sc) == FAILURE )
        return(FAILURE);

    return(SUCCESS);

}

static int
tws_init_aen_q(struct tws_softc *sc)
{
    sc->aen_q.head=0;
    sc->aen_q.tail=0;
    sc->aen_q.depth=256;
    sc->aen_q.overflow=0;
    sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

static int
tws_init_trace_q(struct tws_softc *sc)
{
    sc->trace_q.head=0;
    sc->trace_q.tail=0;
    sc->trace_q.depth=256;
    sc->trace_q.overflow=0;
    sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    return(SUCCESS);
}

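/*
 * Carve the contiguous command DMA area into one tws_command_packet per
 * request.  Each request records the bus address of its command portion
 * (skipping the command header), the matching sense buffer aliases that
 * header, and every request past the reserved slots goes onto the free
 * queue.
 */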
static int
tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
{

    struct tws_command_packet *cmd_buf;
    int i;

    cmd_buf = (struct tws_command_packet *)sc->dma_mem;
    bzero(cmd_buf, dma_mem_size);
    TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
    mtx_lock(&sc->q_lock);
    for ( i=0; i< tws_queue_depth; i++)
    {
        if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
            /* log an ENOMEM failure msg here */
            mtx_unlock(&sc->q_lock);
            return(FAILURE);
        }
        sc->reqs[i].cmd_pkt =  &cmd_buf[i];

        sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
        sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
                              (i * sizeof(struct tws_command_packet));

        sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
                              sizeof(struct tws_command_header) +
                              (i * sizeof(struct tws_command_packet));
        sc->reqs[i].request_id = i;
        sc->reqs[i].sc = sc;

        sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;

        callout_init(&sc->reqs[i].timeout, 1);
        sc->reqs[i].state = TWS_REQ_STATE_FREE;
        if ( i >= TWS_RESERVED_REQS )
            tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
    }
    mtx_unlock(&sc->q_lock);
    return(SUCCESS);
}

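/*
 * busdma load callback for the command packet area: the region maps as a
 * single segment and its bus address is stored through the callback
 * argument (sc->dma_mem_phys).
 */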
static void
tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error)
{

    /* printf("command load done \n"); */

    *((bus_addr_t *)arg) = segs[0].ds_addr;
}

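/*
 * Controller state machine, driven with gen_lock held: INIT_START and
 * INIT_COMPLETE move the controller to TWS_INIT and then TWS_ONLINE,
 * RESET_START/RESET_COMPLETE save and restore the previous state around
 * TWS_RESET, SCAN_FAILURE drops an online controller to TWS_OFFLINE, and
 * UNINIT_START moves it to TWS_UNINIT; invalid transitions are logged.
 */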
void
tws_send_event(struct tws_softc *sc, u_int8_t event)
{
    mtx_assert(&sc->gen_lock, MA_OWNED);
    TWS_TRACE_DEBUG(sc, "received event ", 0, event);
    switch (event) {
        case TWS_INIT_START:
            sc->tws_state = TWS_INIT;
            break;

        case TWS_INIT_COMPLETE:
            if (sc->tws_state != TWS_INIT) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_ONLINE;
            }
            break;

        case TWS_RESET_START:
            /* We can transition to reset state from any state except reset */
            if (sc->tws_state != TWS_RESET) {
                sc->tws_prev_state = sc->tws_state;
                sc->tws_state = TWS_RESET;
            }
            break;

        case TWS_RESET_COMPLETE:
            if (sc->tws_state != TWS_RESET) {
                device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state);
            } else {
                sc->tws_state = sc->tws_prev_state;
            }
            break;

        case TWS_SCAN_FAILURE:
            if (sc->tws_state != TWS_ONLINE) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_OFFLINE;
            }
            break;

        case TWS_UNINIT_START:
            if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_UNINIT;
            }
            break;
    }

}

uint8_t
tws_get_state(struct tws_softc *sc)
{

    return((u_int8_t)sc->tws_state);

}

/* Called during system shutdown after sync. */

static int
tws_shutdown(device_t dev)
{

    struct tws_softc *sc = device_get_softc(dev);

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    tws_turn_off_interrupts(sc);
    tws_init_connect(sc, 1);

    return (0);
}

/*
 * Device suspend routine.
 */
static int
tws_suspend(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}

/*
 * Device resume routine.
 */
static int
tws_resume(device_t dev)
{

    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}

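/*
 * Hand out a request.  SCSI I/O requests come from the free queue under
 * q_lock; other request types use the reserved slot indexed by the type
 * value under gen_lock.  On success the command body and bookkeeping
 * fields are reset and the request is marked TWS_REQ_STATE_TRAN (SCSI I/O)
 * or TWS_REQ_STATE_BUSY (reserved); NULL is returned if nothing is free.
 */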
struct tws_request *
tws_get_request(struct tws_softc *sc, u_int16_t type)
{
    struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock);
    struct tws_request *r = NULL;

    mtx_lock(my_mutex);

    if (type == TWS_REQ_TYPE_SCSI_IO) {
        r = tws_q_remove_head(sc, TWS_FREE_Q);
    } else {
        if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) {
            r = &sc->reqs[type];
        }
    }

    if ( r ) {
        bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache));
        r->data = NULL;
        r->length = 0;
        r->type = type;
        r->flags = TWS_DIR_UNKNOWN;
        r->error_code = TWS_REQ_RET_INVALID;
        r->cb = NULL;
        r->ccb_ptr = NULL;
        callout_stop(&r->timeout);
        r->next = r->prev = NULL;

        r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY);
    }

    mtx_unlock(my_mutex);

    return(r);
}

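/* Return a request to the free queue under q_lock. */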
void
tws_release_request(struct tws_request *req)
{

    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}

static device_method_t tws_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     tws_probe),
    DEVMETHOD(device_attach,    tws_attach),
    DEVMETHOD(device_detach,    tws_detach),
    DEVMETHOD(device_shutdown,  tws_shutdown),
    DEVMETHOD(device_suspend,   tws_suspend),
    DEVMETHOD(device_resume,    tws_resume),

    DEVMETHOD_END
};

static driver_t tws_driver = {
        "tws",
        tws_methods,
        sizeof(struct tws_softc)
};

/* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */
DRIVER_MODULE(tws, pci, tws_driver, 0, 0);
MODULE_DEPEND(tws, cam, 1, 1, 1);
MODULE_DEPEND(tws, pci, 1, 1, 1);

TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth);
TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi);
889