1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2023 Tintri by DDN, Inc. All rights reserved.
14 * Copyright 2021 RackTop Systems, Inc.
15 */
16
17 /*
18 * Driver attach/detach routines are found here.
19 */
20
21 /* ---- Private header files ---- */
22 #include <smartpqi.h>
23
/*
 * Opaque soft-state anchor, managed by ddi_soft_state_init/fini and
 * indexed by driver instance number in attach/detach below.
 */
void *pqi_state;

/* ---- Autoconfigure forward declarations ---- */
static int smartpqi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int smartpqi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int smartpqi_quiesce(dev_info_t *dip);
30
/*
 * Character/block entry points.  As an HBA driver we delegate open/close
 * and ioctl to the SCSA framework; all other entry points are unsupported.
 */
static struct cb_ops smartpqi_cb_ops = {
	.cb_open = scsi_hba_open,	/* SCSA handles device open */
	.cb_close = scsi_hba_close,	/* SCSA handles device close */
	.cb_strategy = nodev,
	.cb_print = nodev,
	.cb_dump = nodev,
	.cb_read = nodev,
	.cb_write = nodev,
	.cb_ioctl = scsi_hba_ioctl,	/* SCSA handles HBA ioctls */
	.cb_devmap = nodev,
	.cb_mmap = nodev,
	.cb_segmap = nodev,
	.cb_chpoll = nochpoll,
	.cb_prop_op = ddi_prop_op,
	.cb_str = NULL,
	.cb_flag = D_MP,		/* driver is MT-safe */
	.cb_rev = CB_REV,
	.cb_aread = nodev,
	.cb_awrite = nodev
};
51
/*
 * Device operations.  Only attach, detach, and quiesce are implemented;
 * the controller needs no probe/identify beyond the framework defaults.
 */
static struct dev_ops smartpqi_ops = {
	.devo_rev = DEVO_REV,
	.devo_refcnt = 0,
	.devo_getinfo = nodev,
	.devo_identify = nulldev,
	.devo_probe = nulldev,
	.devo_attach = smartpqi_attach,
	.devo_detach = smartpqi_detach,
	.devo_reset = nodev,
	.devo_cb_ops = &smartpqi_cb_ops,
	.devo_bus_ops = NULL,		/* not a nexus driver */
	.devo_power = nodev,
	.devo_quiesce = smartpqi_quiesce	/* for fast reboot */
};
66
/* Loadable-module linkage: a single driver module. */
static struct modldrv smartpqi_modldrv = {
	.drv_modops = &mod_driverops,
	.drv_linkinfo = SMARTPQI_MOD_STRING,
	.drv_dev_ops = &smartpqi_ops
};

static struct modlinkage smartpqi_modlinkage = {
	.ml_rev = MODREV_1,
	.ml_linkage = { &smartpqi_modldrv, NULL }
};
77
78 /*
79 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
80 * physical addresses are supported.)
81 *
82 * We believe that the device probably doesn't have any limitations,
83 * but previous generations of this hardware used a 32-bit DMA counter.
84 * Absent better guidance, we choose the same. (Note that the Linux
85 * driver from the vendor imposes no DMA limitations.)
86 */
ddi_dma_attr_t smartpqi_dma_attrs = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x0ull,		/* no low address limit */
	.dma_attr_addr_hi = 0xffffffffffffffffull, /* full 64-bit addressing */
	.dma_attr_count_max = 0x00ffffffull,	/* 24-bit transfer counter */
	.dma_attr_align = 4096,			/* page-aligned buffers */
	.dma_attr_burstsizes = 0x78,		/* 8/16/32/64-byte bursts */
	.dma_attr_minxfer = 1,
	.dma_attr_maxxfer = 0x00ffffffull,	/* matches counter limit */
	.dma_attr_seg = 0xffffffffull,
	.dma_attr_sgllen = PQI_MAX_SCATTER_GATHER,
	.dma_attr_granular = 512,		/* sector granularity */
	.dma_attr_flags = 0,
};
101
102 ddi_device_acc_attr_t smartpqi_dev_attr = {
103 DDI_DEVICE_ATTR_V1,
104 DDI_STRUCTURE_LE_ACC,
105 DDI_STRICTORDER_ACC,
106 DDI_DEFAULT_ACC
107 };
108
109 int
_init(void)110 _init(void)
111 {
112 int ret;
113
114 if ((ret = ddi_soft_state_init(&pqi_state,
115 sizeof (struct pqi_state), SMARTPQI_INITIAL_SOFT_SPACE)) !=
116 0) {
117 return (ret);
118 }
119
120 if ((ret = scsi_hba_init(&smartpqi_modlinkage)) != 0) {
121 ddi_soft_state_fini(&pqi_state);
122 return (ret);
123 }
124
125 if ((ret = mod_install(&smartpqi_modlinkage)) != 0) {
126 scsi_hba_fini(&smartpqi_modlinkage);
127 ddi_soft_state_fini(&pqi_state);
128 }
129
130 return (ret);
131 }
132
133 int
_fini(void)134 _fini(void)
135 {
136 int ret;
137
138 if ((ret = mod_remove(&smartpqi_modlinkage)) == 0) {
139 scsi_hba_fini(&smartpqi_modlinkage);
140 ddi_soft_state_fini(&pqi_state);
141 }
142 return (ret);
143 }
144
/* Module information entry point, reported via modinfo(8). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&smartpqi_modlinkage, modinfop));
}
150
151 static int
smartpqi_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)152 smartpqi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
153 {
154 int instance;
155 pqi_state_t *s = NULL;
156 int mem_bar = IO_SPACE;
157 mem_len_pair_t m;
158
159 switch (cmd) {
160 case DDI_ATTACH:
161 break;
162
163 case DDI_RESUME:
164 default:
165 return (DDI_FAILURE);
166 }
167
168 instance = ddi_get_instance(dip);
169
170 /* ---- allocate softc structure ---- */
171 if (ddi_soft_state_zalloc(pqi_state, instance) != DDI_SUCCESS)
172 return (DDI_FAILURE);
173
174 if ((s = ddi_get_soft_state(pqi_state, instance)) == NULL)
175 goto fail;
176
177 scsi_size_clean(dip);
178
179 s->s_dip = dip;
180 s->s_instance = instance;
181 s->s_intr_ready = B_FALSE;
182 s->s_offline = B_FALSE;
183 list_create(&s->s_devnodes, sizeof (struct pqi_device),
184 offsetof(struct pqi_device, pd_list));
185 list_create(&s->s_special_device.pd_cmd_list, sizeof (struct pqi_cmd),
186 offsetof(struct pqi_cmd, pc_list));
187
188 /* ---- Initialize mutex used in interrupt handler ---- */
189 mutex_init(&s->s_mutex, NULL, MUTEX_DRIVER,
190 DDI_INTR_PRI(s->s_intr_pri));
191 mutex_init(&s->s_io_mutex, NULL, MUTEX_DRIVER, NULL);
192 mutex_init(&s->s_intr_mutex, NULL, MUTEX_DRIVER, NULL);
193 mutex_init(&s->s_special_device.pd_mutex, NULL, MUTEX_DRIVER, NULL);
194 cv_init(&s->s_quiescedvar, NULL, CV_DRIVER, NULL);
195 cv_init(&s->s_io_condvar, NULL, CV_DRIVER, NULL);
196
197 m = pqi_alloc_mem_len(256);
198 (void) snprintf(m.mem, m.len, "smartpqi_cache%d", instance);
199 s->s_cmd_cache = kmem_cache_create(m.mem, sizeof (struct pqi_cmd), 0,
200 pqi_cache_constructor, pqi_cache_destructor, NULL, s, NULL, 0);
201
202 (void) snprintf(m.mem, m.len, "pqi_events_taskq%d", instance);
203 s->s_events_taskq = ddi_taskq_create(s->s_dip, m.mem, 1,
204 TASKQ_DEFAULTPRI, 0);
205 (void) snprintf(m.mem, m.len, "pqi_complete_taskq%d", instance);
206 s->s_complete_taskq = ddi_taskq_create(s->s_dip, m.mem, 4,
207 TASKQ_DEFAULTPRI, 0);
208 pqi_free_mem_len(&m);
209
210 s->s_debug_level = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
211 DDI_PROP_DONTPASS, "debug", 0);
212
213 if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
214 "disable-mpxio", 0) != 0) {
215 s->s_disable_mpxio = B_TRUE;
216 }
217 if (smartpqi_register_intrs(s) == FALSE) {
218 dev_err(s->s_dip, CE_WARN, "unable to register interrupts");
219 goto fail;
220 }
221
222 s->s_msg_dma_attr = smartpqi_dma_attrs;
223 s->s_reg_acc_attr = smartpqi_dev_attr;
224
225 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&s->s_reg, 0,
226 /* sizeof (pqi_ctrl_regs_t) */ 0x8000, &s->s_reg_acc_attr,
227 &s->s_datap) != DDI_SUCCESS) {
228 dev_err(s->s_dip, CE_WARN, "map setup failed");
229 goto fail;
230 }
231
232 if (pqi_check_firmware(s) == B_FALSE) {
233 dev_err(s->s_dip, CE_WARN, "firmware issue");
234 goto fail;
235 }
236 if (pqi_prep_full(s) == B_FALSE) {
237 goto fail;
238 }
239 if (smartpqi_register_hba(s) == FALSE) {
240 dev_err(s->s_dip, CE_WARN, "unable to register SCSI interface");
241 goto fail;
242 }
243 ddi_report_dev(s->s_dip);
244
245 return (DDI_SUCCESS);
246
247 fail:
248 (void) smartpqi_detach(s->s_dip, 0);
249 return (DDI_FAILURE);
250 }
251
252 static int
smartpqi_detach(dev_info_t * dip,ddi_detach_cmd_t cmd __unused)253 smartpqi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd __unused)
254 {
255 int instance;
256 pqi_state_t *s;
257 pqi_device_t *devp;
258
259 instance = ddi_get_instance(dip);
260 if ((s = ddi_get_soft_state(pqi_state, instance)) != NULL) {
261
262 if (s->s_watchdog != 0) {
263 (void) untimeout(s->s_watchdog);
264 s->s_watchdog = 0;
265 }
266
267 if (s->s_error_dma != NULL) {
268 pqi_free_single(s, s->s_error_dma);
269 s->s_error_dma = NULL;
270 }
271 if (s->s_adminq_dma != NULL) {
272 pqi_free_single(s, s->s_adminq_dma);
273 s->s_adminq_dma = NULL;
274 }
275 if (s->s_queue_dma != NULL) {
276 pqi_free_single(s, s->s_queue_dma);
277 s->s_queue_dma = NULL;
278 }
279
280 /* ---- Safe to always call ---- */
281 pqi_free_io_resource(s);
282
283 if (s->s_cmd_cache != NULL) {
284 kmem_cache_destroy(s->s_cmd_cache);
285 s->s_cmd_cache = NULL;
286 }
287
288 if (s->s_events_taskq != NULL) {
289 ddi_taskq_destroy(s->s_events_taskq);
290 s->s_events_taskq = NULL;
291 }
292 if (s->s_complete_taskq != NULL) {
293 ddi_taskq_destroy(s->s_complete_taskq);
294 s->s_complete_taskq = NULL;
295 }
296
297 while ((devp = list_head(&s->s_devnodes)) != NULL) {
298 /* ---- Better not be any active commands ---- */
299 ASSERT(list_is_empty(&devp->pd_cmd_list));
300
301 ddi_devid_free_guid(devp->pd_guid);
302 if (devp->pd_pip != NULL)
303 (void) mdi_pi_free(devp->pd_pip, 0);
304 if (devp->pd_pip_offlined)
305 (void) mdi_pi_free(devp->pd_pip_offlined, 0);
306 list_destroy(&devp->pd_cmd_list);
307 mutex_destroy(&devp->pd_mutex);
308 list_remove(&s->s_devnodes, devp);
309 kmem_free(devp, sizeof (*devp));
310 }
311 list_destroy(&s->s_devnodes);
312 mutex_destroy(&s->s_mutex);
313 mutex_destroy(&s->s_io_mutex);
314 mutex_destroy(&s->s_intr_mutex);
315
316 cv_destroy(&s->s_quiescedvar);
317 smartpqi_unregister_hba(s);
318 smartpqi_unregister_intrs(s);
319
320 if (s->s_time_of_day != 0) {
321 (void) untimeout(s->s_time_of_day);
322 s->s_time_of_day = 0;
323 }
324
325 ddi_soft_state_free(pqi_state, instance);
326 ddi_prop_remove_all(dip);
327 }
328
329 return (DDI_SUCCESS);
330 }
331
332 static int
smartpqi_quiesce(dev_info_t * dip)333 smartpqi_quiesce(dev_info_t *dip)
334 {
335 pqi_state_t *s;
336 int instance;
337
338 /*
339 * ddi_get_soft_state is lock-free, so is safe to call from
340 * quiesce. Furthermore, pqi_hba_reset uses only the safe
341 * drv_usecwait() and register accesses.
342 */
343 instance = ddi_get_instance(dip);
344 if ((s = ddi_get_soft_state(pqi_state, instance)) != NULL) {
345 if (pqi_hba_reset(s)) {
346 return (DDI_SUCCESS);
347 }
348 }
349 /* If we couldn't quiesce for any reason, play it safe and reboot. */
350 return (DDI_FAILURE);
351 }
352