/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define OCS_COPYRIGHT "Copyright (C) 2017 Broadcom. All rights reserved."

/**
 * @file
 * Implementation of required FreeBSD PCI interface functions
 */

#include "ocs.h"
#include "version.h"
#include <sys/sysctl.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_OCS, "OCS", "OneCore Storage data");

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

/**
 * Tunable parameters for transport
 */
int logmask = 0;
int ctrlmask = 2;
int logdest = 1;
int loglevel = LOG_INFO;
int ramlog_size = 1*1024*1024;
int ddump_saved_size = 0;
static const char *queue_topology = "eq cq rq cq mq $nulp($nwq(cq wq:ulp=$rpt1)) cq wq:len=256:class=1";
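
/*
 * Note: the string above is handed to the hw layer through
 * hw_global.queue_topology_string in ocs_setup_params() and describes the
 * default EQ/CQ/WQ/RQ/MQ queue arrangement; the "$nulp", "$nwq" and "$rpt1"
 * tokens appear to be placeholders expanded by the hw queue-topology parser.
 * This is an inferred summary of how the string is consumed, not a formal
 * description of the grammar.
 */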

static void ocs_release_bus(struct ocs_softc *);
static int32_t ocs_intr_alloc(struct ocs_softc *);
static int32_t ocs_intr_setup(struct ocs_softc *);
static int32_t ocs_intr_teardown(struct ocs_softc *);
static int ocs_pci_intx_filter(void *);
static void ocs_pci_intr(void *);
static int32_t ocs_init_dma_tag(struct ocs_softc *ocs);

static int32_t ocs_setup_fcports(ocs_t *ocs);

ocs_t *ocs_devices[MAX_OCS_DEVICES];
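/*
 * Per-unit softc pointers, indexed by the device unit number; entries are
 * filled in by ocs_pci_attach() and looked up via ocs_get_instance().
 */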

/**
 * @brief Check support for the given device
 *
 * Determine support for a given device by examining the PCI vendor and
 * device IDs
 *
 * @param dev device abstraction
 *
 * @return BUS_PROBE_DEFAULT if the device is supported, ENXIO otherwise
 */
static int
ocs_pci_probe(device_t dev)
{
	char	*desc = NULL;

	if (pci_get_vendor(dev) != PCI_VENDOR_EMULEX) {
		return ENXIO;
	}

	switch (pci_get_device(dev)) {
	case PCI_PRODUCT_EMULEX_OCE16001:
		desc = "Emulex LightPulse FC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_LPE31004:
		desc = "Emulex LightPulse FC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_OCE50102:
		desc = "Emulex LightPulse 10GbE FCoE/NIC Adapter";
		break;
	case PCI_PRODUCT_EMULEX_LANCER_G7:
		desc = "Emulex LightPulse G7 FC Adapter";
		break;
	default:
		return ENXIO;
	}

	device_set_desc(dev, desc);

	return BUS_PROBE_DEFAULT;
}

static int
ocs_map_g7_bars(device_t dev, struct ocs_softc *ocs)
{
	int i, r;
	uint32_t val = 0;

	for (i = 0, r = 0; i < PCI_MAX_BAR; i++) {
		val = pci_read_config(dev, PCIR_BAR(i), 4);
		if (!PCI_BAR_MEM(val)) {
			continue;
		}
		if (!(val & PCIM_BAR_MEM_BASE)) {
			/* no address */
			continue;
		}
		ocs->reg[r].rid = PCIR_BAR(i);
		ocs->reg[r].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
				&ocs->reg[r].rid, RF_ACTIVE);
		if (ocs->reg[r].res) {
			ocs->reg[r].btag = rman_get_bustag(ocs->reg[r].res);
			ocs->reg[r].bhandle = rman_get_bushandle(ocs->reg[r].res);
			r++;
		} else {
			device_printf(dev, "bus_alloc_resource failed rid=%#x\n",
					ocs->reg[r].rid);
			ocs_release_bus(ocs);
			return ENXIO;
		}

		/*
		 * If the 64-bit attribute is set, both this BAR and the
		 * next form the complete address. Skip processing the
		 * next BAR.
		 */
		if (val & PCIM_BAR_MEM_64) {
			i++;
		}
	}

	return 0;
}

static int
ocs_map_bars(device_t dev, struct ocs_softc *ocs)
{
	/*
	 * Map PCI BAR0 register into the CPU's space.
	 */

	ocs->reg[0].rid = PCIR_BAR(PCI_64BIT_BAR0);
	ocs->reg[0].res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ocs->reg[0].rid, RF_ACTIVE);

	if (ocs->reg[0].res == NULL) {
		device_printf(dev, "bus_alloc_resource failed rid=%#x\n",
				ocs->reg[0].rid);
		return ENXIO;
	}

	ocs->reg[0].btag = rman_get_bustag(ocs->reg[0].res);
	ocs->reg[0].bhandle = rman_get_bushandle(ocs->reg[0].res);
	return 0;
}

static int
ocs_setup_params(struct ocs_softc *ocs)
{
	int32_t	i = 0;
	const char	*hw_war_version;
	/* Setup tunable parameters */
	ocs->ctrlmask = ctrlmask;
	ocs->speed = 0;
	ocs->topology = 0;
	ocs->ethernet_license = 0;
	ocs->num_scsi_ios = 8192;
	ocs->enable_hlm = 0;
	ocs->hlm_group_size = 8;
	ocs->logmask = logmask;

	ocs->config_tgt = FALSE;
	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"target", &i)) {
		if (1 == i) {
			ocs->config_tgt = TRUE;
			device_printf(ocs->dev, "Enabling target\n");
		}
	}
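
	/*
	 * The knobs read in this function come from the kernel environment /
	 * device hints.  As an illustrative sketch (not part of the original
	 * source), they could be set in /boot/device.hints or loader.conf;
	 * the hint names simply mirror the resource_*_value() lookups below:
	 *
	 *   hint.ocs_fc.0.target="1"      # enable target mode on unit 0
	 *   hint.ocs_fc.0.initiator="0"   # disable initiator mode on unit 0
	 *   hint.ocs_fc.0.logmask="0x10"  # driver logging bitmask
	 */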

	ocs->config_ini = TRUE;
	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"initiator", &i)) {
		if (0 == i) {
			ocs->config_ini = FALSE;
			device_printf(ocs->dev, "Disabling initiator\n");
		}
	}
	ocs->enable_ini = ocs->config_ini;

	if (!ocs->config_ini && !ocs->config_tgt) {
		device_printf(ocs->dev, "Unsupported, both initiator and target mode disabled.\n");
		return 1;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"logmask", &logmask)) {
		device_printf(ocs->dev, "logmask = %#x\n", logmask);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"logdest", &logdest)) {
		device_printf(ocs->dev, "logdest = %#x\n", logdest);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"loglevel", &loglevel)) {
		device_printf(ocs->dev, "loglevel = %#x\n", loglevel);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"ramlog_size", &ramlog_size)) {
		device_printf(ocs->dev, "ramlog_size = %#x\n", ramlog_size);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"ddump_saved_size", &ddump_saved_size)) {
		device_printf(ocs->dev, "ddump_saved_size = %#x\n", ddump_saved_size);
	}

	/* If enabled, initialize a RAM logging buffer */
	if (logdest & 2) {
		ocs->ramlog = ocs_ramlog_init(ocs, ramlog_size/OCS_RAMLOG_DEFAULT_BUFFERS,
			OCS_RAMLOG_DEFAULT_BUFFERS);
		/* If NULL was returned, then we'll simply skip using the ramlog but */
		/* set logdest to 1 to ensure that we at least get default logging.  */
		if (ocs->ramlog == NULL) {
			logdest = 1;
		}
	}

	/* Initialize a saved ddump */
	if (ddump_saved_size) {
		if (ocs_textbuf_alloc(ocs, &ocs->ddump_saved, ddump_saved_size)) {
			ocs_log_err(ocs, "failed to allocate memory for saved ddump\n");
		}
	}

	if (0 == resource_string_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"hw_war_version", &hw_war_version)) {
		device_printf(ocs->dev, "hw_war_version = %s\n", hw_war_version);
		ocs->hw_war_version = strdup(hw_war_version, M_OCS);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "explicit_buffer_list", &i)) {
		ocs->explicit_buffer_list = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"ethernet_license", &i)) {
		ocs->ethernet_license = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"speed", &i)) {
		device_printf(ocs->dev, "speed = %d Mbps\n", i);
		ocs->speed = i;
	}
	ocs->desc = device_get_desc(ocs->dev);

	ocs_device_lock_init(ocs);
	ocs->driver_version = STR_BE_MAJOR "." STR_BE_MINOR "." STR_BE_BUILD "." STR_BE_BRANCH;
	ocs->model = ocs_pci_model(ocs->pci_vendor, ocs->pci_device);

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "enable_hlm", &i)) {
		device_printf(ocs->dev, "enable_hlm = %d\n", i);
		ocs->enable_hlm = i;
		if (ocs->enable_hlm) {
			ocs->hlm_group_size = 8;

			if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
						    "hlm_group_size", &i)) {
				ocs->hlm_group_size = i;
			}
			device_printf(ocs->dev, "hlm_group_size = %d\n", ocs->hlm_group_size);
		}
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"num_scsi_ios", &i)) {
		ocs->num_scsi_ios = i;
		device_printf(ocs->dev, "num_scsi_ios = %d\n", ocs->num_scsi_ios);
	} else {
		ocs->num_scsi_ios = 8192;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
					"topology", &i)) {
		ocs->topology = i;
		device_printf(ocs->dev, "Setting topology=%#x\n", i);
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "num_vports", &i)) {
		if (i >= 0 && i <= 254) {
			device_printf(ocs->dev, "num_vports = %d\n", i);
			ocs->num_vports = i;
		} else {
			device_printf(ocs->dev, "num_vports: %d not supported\n", i);
		}
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "external_loopback", &i)) {
		device_printf(ocs->dev, "external_loopback = %d\n", i);
		ocs->external_loopback = i;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "tgt_rscn_delay", &i)) {
		device_printf(ocs->dev, "tgt_rscn_delay = %d\n", i);
		ocs->tgt_rscn_delay_msec = i * 1000;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "tgt_rscn_period", &i)) {
		device_printf(ocs->dev, "tgt_rscn_period = %d\n", i);
		ocs->tgt_rscn_period_msec = i * 1000;
	}

	if (0 == resource_int_value(device_get_name(ocs->dev), device_get_unit(ocs->dev),
				    "target_io_timer", &i)) {
		device_printf(ocs->dev, "target_io_timer = %d\n", i);
		ocs->target_io_timer_sec = i;
	}

	hw_global.queue_topology_string = queue_topology;
	ocs->rq_selection_policy = 0;
	ocs->rr_quanta = 1;
	ocs->filter_def = "0,0,0,0";

	return 0;
}

static int32_t
ocs_setup_fcports(ocs_t *ocs)
{
	uint32_t i = 0, role = 0;
	uint64_t sli_wwpn, sli_wwnn;
	size_t size;
	ocs_xport_t *xport = ocs->xport;
	ocs_vport_spec_t *vport;
	ocs_fcport *fcp = NULL;

	size = sizeof(ocs_fcport) * (ocs->num_vports + 1);

	ocs->fcports = ocs_malloc(ocs, size, M_ZERO|M_NOWAIT);
	if (ocs->fcports == NULL) {
		device_printf(ocs->dev, "Can't allocate fcport\n");
		return 1;
	}

	/* Parenthesized so the initiator and target role bits OR together as intended. */
	role = ((ocs->enable_ini) ? KNOB_ROLE_INITIATOR : 0) |
		((ocs->enable_tgt) ? KNOB_ROLE_TARGET : 0);

	fcp = FCPORT(ocs, i);
	fcp->role = role;
	i++;

	ocs_list_foreach(&xport->vport_list, vport) {
		fcp = FCPORT(ocs, i);
		vport->tgt_data = fcp;
		fcp->vport = vport;
		fcp->role = role;

		if (ocs_hw_get_def_wwn(ocs, i, &sli_wwpn, &sli_wwnn)) {
			ocs_log_err(ocs, "Get default wwn failed\n");
			i++;
			continue;
		}

		vport->wwpn = ocs_be64toh(sli_wwpn);
		vport->wwnn = ocs_be64toh(sli_wwnn);
		i++;
		ocs_log_debug(ocs, "VPort wwpn: %lx wwnn: %lx\n", vport->wwpn, vport->wwnn);
	}

	return 0;
}

int32_t
ocs_device_attach(ocs_t *ocs)
{
	int32_t i;
	ocs_io_t *io = NULL;

	if (ocs->attached) {
		ocs_log_warn(ocs, "%s: Device is already attached\n", __func__);
		return -1;
	}

	/* Allocate transport object and bring online */
	ocs->xport = ocs_xport_alloc(ocs);
	if (ocs->xport == NULL) {
		device_printf(ocs->dev, "failed to allocate transport object\n");
		return ENOMEM;
	} else if (ocs_xport_attach(ocs->xport) != 0) {
		device_printf(ocs->dev, "%s: failed to attach transport object\n", __func__);
		goto fail_xport_attach;
	} else if (ocs_xport_initialize(ocs->xport) != 0) {
		device_printf(ocs->dev, "%s: failed to initialize transport object\n", __func__);
		goto fail_xport_init;
	}

	if (ocs_init_dma_tag(ocs)) {
		goto fail_intr_setup;
	}

	for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
		if (bus_dmamap_create(ocs->buf_dmat, 0, &io->tgt_io.dmap)) {
			device_printf(ocs->dev, "%s: bad dma map create\n", __func__);
		}

		io->tgt_io.state = OCS_CAM_IO_FREE;
	}

	if (ocs_setup_fcports(ocs)) {
		device_printf(ocs->dev, "FCports creation failed\n");
		goto fail_intr_setup;
	}

	if (ocs_cam_attach(ocs)) {
		device_printf(ocs->dev, "cam attach failed\n");
		goto fail_intr_setup;
	}

	if (ocs_intr_setup(ocs)) {
		device_printf(ocs->dev, "Interrupt setup failed\n");
		goto fail_intr_setup;
	}

	if (ocs->enable_ini || ocs->enable_tgt) {
		if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE)) {
			device_printf(ocs->dev, "Can't init port\n");
			goto fail_xport_online;
		}
	}

	ocs->attached = true;

	return 0;

fail_xport_online:
	if (ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN)) {
		device_printf(ocs->dev, "Transport Shutdown timed out\n");
	}
	ocs_intr_teardown(ocs);
fail_intr_setup:
fail_xport_init:
	ocs_xport_detach(ocs->xport);
	if (ocs->config_tgt)
		ocs_scsi_tgt_del_device(ocs);

	ocs_xport_free(ocs->xport);
	ocs->xport = NULL;
fail_xport_attach:
	if (ocs->xport)
		ocs_free(ocs, ocs->xport, sizeof(*(ocs->xport)));
	ocs->xport = NULL;
	return ENXIO;
}

/**
 * @brief Connect the driver to the given device
 *
 * If the probe routine is successful, the OS will give the driver
 * the opportunity to connect itself to the device. This routine
 * maps PCI resources (memory BARs and interrupts) and initializes a
 * hardware object.
 *
 * @param dev device abstraction
 *
 * @return 0 if the driver attaches to the device, ENXIO otherwise
 */

static int
ocs_pci_attach(device_t dev)
{
	struct ocs_softc	*ocs;
	int			instance;

	instance = device_get_unit(dev);

	ocs = (struct ocs_softc *)device_get_softc(dev);
	if (NULL == ocs) {
		device_printf(dev, "cannot allocate softc\n");
		return ENOMEM;
	}
	memset(ocs, 0, sizeof(struct ocs_softc));

	if (instance < ARRAY_SIZE(ocs_devices)) {
		ocs_devices[instance] = ocs;
	} else {
		device_printf(dev, "got unexpected ocs instance number %d\n", instance);
	}

	ocs->instance_index = instance;

	ocs->dev = dev;

	pci_enable_io(dev, SYS_RES_MEMORY);
	pci_enable_busmaster(dev);

	ocs->pci_vendor = pci_get_vendor(dev);
	ocs->pci_device = pci_get_device(dev);
	ocs->pci_subsystem_vendor = pci_get_subvendor(dev);
	ocs->pci_subsystem_device = pci_get_subdevice(dev);

	snprintf(ocs->businfo, sizeof(ocs->businfo), "%02X:%02X:%02X",
		pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));

	/* Map all memory BARs */
	if (ocs->pci_device == PCI_PRODUCT_EMULEX_LANCER_G7) {
		if (ocs_map_g7_bars(dev, ocs)) {
			device_printf(dev, "Failed to map pci bars\n");
			goto release_bus;
		}
	} else {
		if (ocs_map_bars(dev, ocs)) {
			device_printf(dev, "Failed to map pci bars\n");
			goto release_bus;
		}
	}

	/* create a root DMA tag for the device */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),
				1,		/* byte alignment */
				0,		/* no boundary restrictions */
				BUS_SPACE_MAXADDR, /* no minimum low address */
				BUS_SPACE_MAXADDR, /* no maximum high address */
				NULL,		/* no filter function */
				NULL,		/* or arguments */
				BUS_SPACE_MAXSIZE, /* max size covered by tag */
				BUS_SPACE_UNRESTRICTED, /* no segment count restrictions */
				BUS_SPACE_MAXSIZE, /* no segment length restrictions */
				0,		/* flags */
				NULL,		/* no lock manipulation function */
				NULL,		/* or arguments */
				&ocs->dmat)) {
		device_printf(dev, "parent DMA tag allocation failed\n");
		goto release_bus;
	}

	if (ocs_intr_alloc(ocs)) {
		device_printf(dev, "Interrupt allocation failed\n");
		goto release_bus;
	}

	if (PCIC_SERIALBUS == pci_get_class(dev) &&
			PCIS_SERIALBUS_FC == pci_get_subclass(dev))
		ocs->ocs_xport = OCS_XPORT_FC;
	else {
		device_printf(dev, "unsupported class (%#x : %#x)\n",
				pci_get_class(dev),
				pci_get_subclass(dev));
		goto release_bus;
	}

	/* Setup tunable parameters */
	if (ocs_setup_params(ocs)) {
		device_printf(ocs->dev, "failed to setup params\n");
		goto release_bus;
	}

	if (ocs_device_attach(ocs)) {
		device_printf(ocs->dev, "failed to attach device\n");
		goto release_params;
	}

	ocs->fc_type = FC_TYPE_FCP;

	ocs_debug_attach(ocs);

	return 0;

release_params:
	ocs_ramlog_free(ocs, ocs->ramlog);
	ocs_device_lock_free(ocs);
	free(ocs->hw_war_version, M_OCS);
release_bus:
	ocs_release_bus(ocs);
	return ENXIO;
}

/**
 * @brief Free resources when the PCI device detaches
 *
 * @param ocs pointer to ocs structure
 *
 * @return 0 for success, a negative error code value for failure.
 */

int32_t
ocs_device_detach(ocs_t *ocs)
{
	int32_t rc = 0, i;
	ocs_io_t *io = NULL;

	if (ocs != NULL) {
		if (!ocs->attached) {
			ocs_log_warn(ocs, "%s: Device is not attached\n", __func__);
			return -1;
		}

		ocs->attached = FALSE;

		rc = ocs_xport_control(ocs->xport, OCS_XPORT_SHUTDOWN);
		if (rc) {
			ocs_log_err(ocs, "%s: Transport Shutdown timed out\n", __func__);
		}

		ocs_intr_teardown(ocs);

		if (ocs_xport_detach(ocs->xport) != 0) {
			ocs_log_err(ocs, "%s: Transport detach failed\n", __func__);
		}

		ocs_cam_detach(ocs);
		ocs_free(ocs, ocs->fcports, sizeof(*(ocs->fcports)));

		for (i = 0; (io = ocs_io_get_instance(ocs, i)); i++) {
			if (bus_dmamap_destroy(ocs->buf_dmat, io->tgt_io.dmap)) {
				device_printf(ocs->dev, "%s: bad dma map destroy\n", __func__);
			}
		}
		bus_dma_tag_destroy(ocs->dmat);
		ocs_xport_free(ocs->xport);
		ocs->xport = NULL;
	}

	return 0;
}

/**
 * @brief Detach the driver from the given device
 *
 * If the driver is a loadable module, this routine gets called at unload
 * time. This routine will stop the device and free any allocated resources.
 *
 * @param dev device abstraction
 *
 * @return 0 if the driver detaches from the device, EBUSY or -1 otherwise
 */
static int
ocs_pci_detach(device_t dev)
{
	struct ocs_softc	*ocs;

	ocs = (struct ocs_softc *)device_get_softc(dev);
	if (!ocs) {
		device_printf(dev, "no driver context?!?\n");
		return -1;
	}

	if (ocs->config_tgt && ocs->enable_tgt) {
		device_printf(dev, "can't detach with target mode enabled\n");
		return EBUSY;
	}

	ocs_device_detach(ocs);

	/*
	 * Workaround for OCS SCSI Transport quirk.
	 *
	 * CTL requires that target mode is disabled prior to unloading the
	 * driver (i.e. ocs->enable_tgt = FALSE), but once the target is disabled,
	 * the transport will not call ocs_scsi_tgt_del_device(), which deallocates
	 * CAM resources. The workaround is to explicitly make the call here.
	 */
	if (ocs->config_tgt)
		ocs_scsi_tgt_del_device(ocs);

	/* Free the buffer created by strdup(). */
	free(ocs->hw_war_version, M_OCS);

	ocs_device_lock_free(ocs);

	ocs_debug_detach(ocs);

	ocs_ramlog_free(ocs, ocs->ramlog);

	ocs_release_bus(ocs);

	return 0;
}

/**
 * @brief Notify driver of system shutdown
 *
 * @param dev device abstraction
 *
 * @return 0 on success, non-zero otherwise
 */
static int
ocs_pci_shutdown(device_t dev)
{
	device_printf(dev, "%s\n", __func__);
	return 0;
}

/**
 * @brief Release bus resources allocated within the soft context
 *
 * @param ocs Pointer to the driver's context
 *
 * @return none
 */
static void
ocs_release_bus(struct ocs_softc *ocs)
{

	if (NULL != ocs) {
		uint32_t	i;

		ocs_intr_teardown(ocs);

		if (ocs->irq) {
			bus_release_resource(ocs->dev, SYS_RES_IRQ,
					rman_get_rid(ocs->irq), ocs->irq);

			if (ocs->n_vec) {
				pci_release_msi(ocs->dev);
				ocs->n_vec = 0;
			}

			ocs->irq = NULL;
		}

		bus_dma_tag_destroy(ocs->dmat);

		for (i = 0; i < PCI_MAX_BAR; i++) {
			if (ocs->reg[i].res) {
				bus_release_resource(ocs->dev, SYS_RES_MEMORY,
						ocs->reg[i].rid,
						ocs->reg[i].res);
			}
		}
	}
}

/**
 * @brief Allocate and initialize interrupts
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_alloc(struct ocs_softc *ocs)
{

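	/*
	 * Prefer a single MSI-X vector; fall back to MSI, and finally to the
	 * legacy INTx line (irqid 0 with n_vec left at 0), which is what
	 * ocs_intr_setup() keys off when deciding whether to install the
	 * INTx filter.
	 */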
	ocs->n_vec = 1;
	if (pci_alloc_msix(ocs->dev, &ocs->n_vec)) {
		device_printf(ocs->dev, "MSI-X allocation failed\n");
		if (pci_alloc_msi(ocs->dev, &ocs->n_vec)) {
			device_printf(ocs->dev, "MSI allocation failed\n");
			ocs->irqid = 0;
			ocs->n_vec = 0;
		} else
			ocs->irqid = 1;
	} else {
		ocs->irqid = 1;
	}

	ocs->irq = bus_alloc_resource_any(ocs->dev, SYS_RES_IRQ, &ocs->irqid,
			RF_ACTIVE | RF_SHAREABLE);
	if (NULL == ocs->irq) {
		device_printf(ocs->dev, "could not allocate interrupt\n");
		return -1;
	}

	ocs->intr_ctx.vec = 0;
	ocs->intr_ctx.softc = ocs;
	snprintf(ocs->intr_ctx.name, sizeof(ocs->intr_ctx.name),
			"%s_intr_%d",
			device_get_nameunit(ocs->dev),
			ocs->intr_ctx.vec);

	return 0;
}

/**
 * @brief Create and attach an interrupt handler
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_setup(struct ocs_softc *ocs)
{
	driver_filter_t	*filter = NULL;

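	/*
	 * Only the legacy INTx path installs a fast filter: ocs_pci_intx_filter()
	 * checks ownership of the (possibly shared) line and masks it before the
	 * threaded handler runs.  With MSI or MSI-X (n_vec != 0) no filter is
	 * needed.
	 */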
	if (0 == ocs->n_vec) {
		filter = ocs_pci_intx_filter;
	}

	if (bus_setup_intr(ocs->dev, ocs->irq, INTR_MPSAFE | INTR_TYPE_CAM,
				filter, ocs_pci_intr, &ocs->intr_ctx,
				&ocs->tag)) {
		device_printf(ocs->dev, "could not initialize interrupt\n");
		return -1;
	}

	return 0;
}

/**
 * @brief Detach an interrupt handler
 *
 * @param ocs Pointer to the driver's context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_intr_teardown(struct ocs_softc *ocs)
{

	if (!ocs) {
		printf("%s: bad driver context?!?\n", __func__);
		return -1;
	}

	if (ocs->tag) {
		bus_teardown_intr(ocs->dev, ocs->irq, ocs->tag);
		ocs->tag = NULL;
	}

	return 0;
}

/**
 * @brief Legacy INTx interrupt filter
 *
 * @param arg pointer to the driver's software context
 *
 * @return FILTER_SCHEDULE_THREAD if this device raised the interrupt, FILTER_STRAY otherwise
 */
static int
ocs_pci_intx_filter(void *arg)
{
	ocs_intr_ctx_t	*intr = arg;
	struct ocs_softc *ocs = NULL;
	uint16_t	val = 0;

	if (NULL == intr) {
		return FILTER_STRAY;
	}

	ocs = intr->softc;
#ifndef PCIM_STATUS_INTR
#define PCIM_STATUS_INTR	0x0008
#endif
	val = pci_read_config(ocs->dev, PCIR_STATUS, 2);
	if (0xffff == val) {
		device_printf(ocs->dev, "%s: pci_read_config(PCIR_STATUS) failed\n", __func__);
		return FILTER_STRAY;
	}
	if (0 == (val & PCIM_STATUS_INTR)) {
		return FILTER_STRAY;
	}

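	/*
	 * This device asserted INTx: mask it at the device so the line
	 * deasserts, then hand off to the threaded handler (ocs_pci_intr).
	 */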
	val = pci_read_config(ocs->dev, PCIR_COMMAND, 2);
	val |= PCIM_CMD_INTxDIS;
	pci_write_config(ocs->dev, PCIR_COMMAND, val, 2);

	return FILTER_SCHEDULE_THREAD;
}

/**
 * @brief interrupt handler
 *
 * @param context pointer to the interrupt context
 */
static void
ocs_pci_intr(void *context)
{
	ocs_intr_ctx_t	*intr = context;
	struct ocs_softc *ocs = intr->softc;

	mtx_lock(&ocs->sim_lock);
	ocs_hw_process(&ocs->hw, intr->vec, OCS_OS_MAX_ISR_TIME_MSEC);
	mtx_unlock(&ocs->sim_lock);
}

/**
 * @brief Initialize DMA tag
 *
 * @param ocs the driver instance's software context
 *
 * @return 0 on success, non-zero otherwise
 */
static int32_t
ocs_init_dma_tag(struct ocs_softc *ocs)
{
	uint32_t	max_sgl = 0;
	uint32_t	max_sge = 0;

	/*
	 * IOs can't use the parent DMA tag and must create their
	 * own, based primarily on a restricted number of DMA segments.
	 * This is more of a BSD requirement than a SLI Port requirement
	 */
	ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &max_sgl);
	ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge);

	if (bus_dma_tag_create(ocs->dmat,
				1,		/* byte alignment */
				0,		/* no boundary restrictions */
				BUS_SPACE_MAXADDR, /* no minimum low address */
				BUS_SPACE_MAXADDR, /* no maximum high address */
				NULL,		/* no filter function */
				NULL,		/* or arguments */
				BUS_SPACE_MAXSIZE, /* max size covered by tag */
				max_sgl,	/* segment count restrictions */
				max_sge,	/* segment length restrictions */
				0,		/* flags */
				NULL,		/* no lock manipulation function */
				NULL,		/* or arguments */
				&ocs->buf_dmat)) {
		device_printf(ocs->dev, "%s: bad bus_dma_tag_create(buf_dmat)\n", __func__);
		return -1;
	}
	return 0;
}

int32_t
ocs_get_property(const char *prop_name, char *buffer, uint32_t buffer_len)
{
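	/* Not implemented on FreeBSD; callers always see failure. */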
	return -1;
}

/**
 * @brief Return a pointer to the ocs structure for a given instance index
 *
 * A pointer to an ocs structure is returned given an instance index.
 *
 * @param index index into the ocs_devices array
 *
 * @return ocs pointer, or NULL if the index is out of range
 */

ocs_t *ocs_get_instance(uint32_t index)
{
	if (index < ARRAY_SIZE(ocs_devices)) {
		return ocs_devices[index];
	}
	return NULL;
}

/**
 * @brief Return the instance index of an opaque ocs structure
 *
 * Returns the ocs instance index
 *
 * @param os pointer to ocs instance
 *
 * @return ocs instance index
 */
uint32_t
ocs_instance(void *os)
{
	ocs_t *ocs = os;
	return ocs->instance_index;
}

static device_method_t ocs_methods[] = {
	DEVMETHOD(device_probe,		ocs_pci_probe),
	DEVMETHOD(device_attach,	ocs_pci_attach),
	DEVMETHOD(device_detach,	ocs_pci_detach),
	DEVMETHOD(device_shutdown,	ocs_pci_shutdown),
	{0, 0}
};

static driver_t ocs_driver = {
	"ocs_fc",
	ocs_methods,
	sizeof(struct ocs_softc)
};

DRIVER_MODULE(ocs_fc, pci, ocs_driver, 0, 0);
MODULE_VERSION(ocs_fc, 1);
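
/*
 * With this glue in place the driver attaches automatically when a supported
 * Emulex HBA is probed on the pci bus.  When built as a module it can also be
 * loaded by hand, e.g. "kldload ocs_fc" or via ocs_fc_load="YES" in
 * loader.conf (illustrative usage following standard FreeBSD conventions,
 * not part of the original source).
 */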