/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Nexenta Systems, Inc.
 * Copyright 2021 RackTop Systems, Inc.
 */

/*
 * Driver attach/detach routines are found here.
 */

/* ---- Private header files ---- */
#include <smartpqi.h>

void *pqi_state;

/* ---- Autoconfigure forward declarations ---- */
static int smartpqi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int smartpqi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int smartpqi_quiesce(dev_info_t *dip);

static struct cb_ops smartpqi_cb_ops = {
        .cb_open = scsi_hba_open,
        .cb_close = scsi_hba_close,
        .cb_strategy = nodev,
        .cb_print = nodev,
        .cb_dump = nodev,
        .cb_read = nodev,
        .cb_write = nodev,
        .cb_ioctl = scsi_hba_ioctl,
        .cb_devmap = nodev,
        .cb_mmap = nodev,
        .cb_segmap = nodev,
        .cb_chpoll = nochpoll,
        .cb_prop_op = ddi_prop_op,
        .cb_str = NULL,
        .cb_flag = D_MP,
        .cb_rev = CB_REV,
        .cb_aread = nodev,
        .cb_awrite = nodev
};

static struct dev_ops smartpqi_ops = {
        .devo_rev = DEVO_REV,
        .devo_refcnt = 0,
        .devo_getinfo = nodev,
        .devo_identify = nulldev,
        .devo_probe = nulldev,
        .devo_attach = smartpqi_attach,
        .devo_detach = smartpqi_detach,
        .devo_reset = nodev,
        .devo_cb_ops = &smartpqi_cb_ops,
        .devo_bus_ops = NULL,
        .devo_power = nodev,
        .devo_quiesce = smartpqi_quiesce
};

static struct modldrv smartpqi_modldrv = {
        .drv_modops = &mod_driverops,
        .drv_linkinfo = SMARTPQI_MOD_STRING,
        .drv_dev_ops = &smartpqi_ops
};

static struct modlinkage smartpqi_modlinkage = {
        .ml_rev = MODREV_1,
        .ml_linkage = { &smartpqi_modldrv, NULL }
};

/*
 * This is used for data I/O DMA memory allocation. (Full 64-bit DMA
 * physical addresses are supported.)
 *
 * We believe that the device probably doesn't have any limitations,
 * but previous generations of this hardware used a 32-bit DMA counter.
 * Absent better guidance, we choose the same. (Note that the Linux
 * driver from the vendor imposes no DMA limitations.)
 */
ddi_dma_attr_t smartpqi_dma_attrs = {
        DMA_ATTR_V0,            /* attribute layout version */
        0x0ull,                 /* address low - should be 0 (longlong) */
        0xffffffffffffffffull,  /* address high - 64-bit max */
        0xffffffffull,          /* count max - max DMA object size */
        4096,                   /* allocation alignment requirements */
        0x78,                   /* burstsizes - binary encoded values */
        1,                      /* minxfer - gran. of DMA engine */
        0xffffffffull,          /* maxxfer - max transfer size */
        0xffffffffull,          /* max segment size (DMA boundary) */
        PQI_MAX_SCATTER_GATHER, /* scatter/gather list length */
        512,                    /* granularity - device transfer size */
        0                       /* flags, set to 0 */
};

ddi_device_acc_attr_t smartpqi_dev_attr = {
        DDI_DEVICE_ATTR_V1,
        DDI_STRUCTURE_LE_ACC,
        DDI_STRICTORDER_ACC,
        DDI_DEFAULT_ACC
};

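/*
 * Loadable module entry points. _init() registers the soft state
 * area and the SCSI HBA framework hooks before installing the module;
 * any failure unwinds the earlier steps.
 */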
int
_init(void)
{
        int ret;

        if ((ret = ddi_soft_state_init(&pqi_state,
            sizeof (struct pqi_state), SMARTPQI_INITIAL_SOFT_SPACE)) != 0) {
                return (ret);
        }

        if ((ret = scsi_hba_init(&smartpqi_modlinkage)) != 0) {
                ddi_soft_state_fini(&pqi_state);
                return (ret);
        }

        if ((ret = mod_install(&smartpqi_modlinkage)) != 0) {
                scsi_hba_fini(&smartpqi_modlinkage);
                ddi_soft_state_fini(&pqi_state);
        }

        return (ret);
}

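/*
 * _fini() tears down in the reverse order of _init(); mod_remove()
 * fails, and nothing is released, if the driver is still in use.
 */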
int
_fini(void)
{
        int ret;

        if ((ret = mod_remove(&smartpqi_modlinkage)) == 0) {
                scsi_hba_fini(&smartpqi_modlinkage);
                ddi_soft_state_fini(&pqi_state);
        }
        return (ret);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&smartpqi_modlinkage, modinfop));
}

static int
smartpqi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        int instance;
        pqi_state_t s = NULL;
        int mem_bar = IO_SPACE;
        char name[32];

        switch (cmd) {
        case DDI_ATTACH:
                break;

        case DDI_RESUME:
        default:
                return (DDI_FAILURE);
        }

        instance = ddi_get_instance(dip);

        /* ---- allocate softc structure ---- */
        if (ddi_soft_state_zalloc(pqi_state, instance) != DDI_SUCCESS)
                return (DDI_FAILURE);

        if ((s = ddi_get_soft_state(pqi_state, instance)) == NULL)
                goto fail;

        scsi_size_clean(dip);

        s->s_dip = dip;
        s->s_instance = instance;
        s->s_intr_ready = 0;
        s->s_offline = 0;
        list_create(&s->s_devnodes, sizeof (struct pqi_device),
            offsetof(struct pqi_device, pd_list));

        /* ---- Initialize mutexes used in the interrupt handler ---- */
        mutex_init(&s->s_mutex, NULL, MUTEX_DRIVER,
            DDI_INTR_PRI(s->s_intr_pri));
        mutex_init(&s->s_io_mutex, NULL, MUTEX_DRIVER, NULL);
        mutex_init(&s->s_intr_mutex, NULL, MUTEX_DRIVER, NULL);
        cv_init(&s->s_quiescedvar, NULL, CV_DRIVER, NULL);
        cv_init(&s->s_io_condvar, NULL, CV_DRIVER, NULL);
        sema_init(&s->s_sync_rqst, 1, NULL, SEMA_DRIVER, NULL);

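        /* ---- Per-instance command cache and event/completion taskqs ---- */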
        (void) snprintf(name, sizeof (name), "smartpqi_cache%d", instance);
        s->s_cmd_cache = kmem_cache_create(name, sizeof (struct pqi_cmd), 0,
            pqi_cache_constructor, pqi_cache_destructor, NULL, s, NULL, 0);

        s->s_events_taskq = ddi_taskq_create(s->s_dip, "pqi_events_tq", 1,
            TASKQ_DEFAULTPRI, 0);
        s->s_complete_taskq = ddi_taskq_create(s->s_dip, "pqi_complete_tq", 4,
            TASKQ_DEFAULTPRI, 0);

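        /* ---- Optional properties, typically set in smartpqi.conf ---- */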
        s->s_debug_level = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
            DDI_PROP_DONTPASS, "debug", 0);

        if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "disable-mpxio", 0) != 0) {
                s->s_disable_mpxio = 1;
        }
        if (smartpqi_register_intrs(s) == FALSE) {
                dev_err(s->s_dip, CE_WARN, "unable to register interrupts");
                goto fail;
        }

        s->s_msg_dma_attr = smartpqi_dma_attrs;
        s->s_reg_acc_attr = smartpqi_dev_attr;

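        /* ---- Map the controller's PQI register space ---- */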
        if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&s->s_reg, 0,
            /* sizeof (pqi_ctrl_regs_t) */ 0x8000, &s->s_reg_acc_attr,
            &s->s_datap) != DDI_SUCCESS) {
                dev_err(s->s_dip, CE_WARN, "map setup failed");
                goto fail;
        }

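        /* ---- Check firmware, finish controller setup, register the HBA ---- */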
        if (pqi_check_firmware(s) == B_FALSE) {
                dev_err(s->s_dip, CE_WARN, "firmware issue");
                goto fail;
        }
        if (pqi_prep_full(s) == B_FALSE) {
                goto fail;
        }
        if (smartpqi_register_hba(s) == FALSE) {
                dev_err(s->s_dip, CE_WARN, "unable to register SCSI interface");
                goto fail;
        }
        ddi_report_dev(s->s_dip);

        return (DDI_SUCCESS);

fail:
        /* Use dip directly; s may be NULL if the soft state lookup failed. */
        (void) smartpqi_detach(dip, 0);
        return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
smartpqi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        int instance;
        pqi_state_t s;
        pqi_device_t devp;

        instance = ddi_get_instance(dip);
        if ((s = ddi_get_soft_state(pqi_state, instance)) != NULL) {

                if (s->s_watchdog != 0) {
                        (void) untimeout(s->s_watchdog);
                        s->s_watchdog = 0;
                }

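                /* ---- Release controller DMA regions ---- */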
                if (s->s_error_dma != NULL) {
                        pqi_free_single(s, s->s_error_dma);
                        s->s_error_dma = NULL;
                }
                if (s->s_adminq_dma != NULL) {
                        pqi_free_single(s, s->s_adminq_dma);
                        s->s_adminq_dma = NULL;
                }
                if (s->s_queue_dma != NULL) {
                        pqi_free_single(s, s->s_queue_dma);
                        s->s_queue_dma = NULL;
                }

                /* ---- Safe to always call ---- */
                pqi_free_io_resource(s);

                if (s->s_cmd_cache != NULL) {
                        kmem_cache_destroy(s->s_cmd_cache);
                        s->s_cmd_cache = NULL;
                }

                if (s->s_events_taskq != NULL) {
                        ddi_taskq_destroy(s->s_events_taskq);
                        s->s_events_taskq = NULL;
                }
                if (s->s_complete_taskq != NULL) {
                        ddi_taskq_destroy(s->s_complete_taskq);
                        s->s_complete_taskq = NULL;
                }

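                /* ---- Release any remaining target device nodes ---- */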
                while ((devp = list_head(&s->s_devnodes)) != NULL) {
                        /* ---- Better not be any active commands ---- */
                        ASSERT(list_is_empty(&devp->pd_cmd_list));

                        ddi_devid_free_guid(devp->pd_guid);
                        if (devp->pd_pip != NULL)
                                (void) mdi_pi_free(devp->pd_pip, 0);
                        if (devp->pd_pip_offlined)
                                (void) mdi_pi_free(devp->pd_pip_offlined, 0);
                        list_destroy(&devp->pd_cmd_list);
                        mutex_destroy(&devp->pd_mutex);
                        list_remove(&s->s_devnodes, devp);
                        kmem_free(devp, sizeof (*devp));
                }
                list_destroy(&s->s_devnodes);
                mutex_destroy(&s->s_mutex);
                mutex_destroy(&s->s_io_mutex);
                mutex_destroy(&s->s_intr_mutex);

                cv_destroy(&s->s_quiescedvar);
                smartpqi_unregister_hba(s);
                smartpqi_unregister_intrs(s);

                if (s->s_time_of_day != 0) {
                        (void) untimeout(s->s_time_of_day);
                        s->s_time_of_day = 0;
                }

                ddi_soft_state_free(pqi_state, instance);
                ddi_prop_remove_all(dip);
        }

        return (DDI_SUCCESS);
}

static int
smartpqi_quiesce(dev_info_t *dip)
{
        pqi_state_t s;
        int instance;

        /*
         * ddi_get_soft_state() is lock-free, so it is safe to call from
         * quiesce. Furthermore, pqi_hba_reset() uses only the safe
         * drv_usecwait() and register accesses.
         */
        instance = ddi_get_instance(dip);
        if ((s = ddi_get_soft_state(pqi_state, instance)) != NULL) {
                if (pqi_hba_reset(s)) {
                        return (DDI_SUCCESS);
                }
        }
        /* If we couldn't quiesce for any reason, play it safe and reboot. */
        return (DDI_FAILURE);
}