/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/errno.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/sysmacros.h>

#include <sys/ioat.h>

static int ioat_open(dev_t *devp, int flag, int otyp, cred_t *cred);
static int ioat_close(dev_t devp, int flag, int otyp, cred_t *cred);
static int ioat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int ioat_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int ioat_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result);
static int ioat_quiesce(dev_info_t *dip);

static struct cb_ops ioat_cb_ops = {
	ioat_open,		/* cb_open */
	ioat_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	ioat_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_NEW | D_MP | D_64BIT | D_DEVMAP,	/* cb_flag */
	CB_REV
};

static struct dev_ops ioat_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	ioat_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ioat_attach,		/* devo_attach */
	ioat_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ioat_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* devo_power */
	ioat_quiesce,		/* devo_quiesce */
};

static struct modldrv ioat_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"ioat driver",		/* Name of the module. */
	&ioat_dev_ops,		/* driver ops */
};

static struct modlinkage ioat_modlinkage = {
	MODREV_1,
	(void *) &ioat_modldrv,
	NULL
};

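/*
 * Soft state anchor; ddi_soft_state(9F) keeps one ioat_state_t per device
 * instance, indexed by the instance number.
 */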
void *ioat_statep;

static int ioat_chip_init(ioat_state_t *state);
static void ioat_chip_fini(ioat_state_t *state);
static int ioat_drv_init(ioat_state_t *state);
static void ioat_drv_fini(ioat_state_t *state);
static uint_t ioat_isr(caddr_t parm);
static void ioat_intr_enable(ioat_state_t *state);
static void ioat_intr_disable(ioat_state_t *state);
void ioat_detach_finish(ioat_state_t *state);


ddi_device_acc_attr_t ioat_acc_attr = {
	DDI_DEVICE_ATTR_V0,		/* devacc_attr_version */
	DDI_NEVERSWAP_ACC,		/* devacc_attr_endian_flags */
	DDI_STORECACHING_OK_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC			/* devacc_attr_access */
};

/* dcopy callback interface */
dcopy_device_cb_t ioat_cb = {
	DCOPY_DEVICECB_V0,
	0,		/* reserved */
	ioat_channel_alloc,
	ioat_channel_free,
	ioat_cmd_alloc,
	ioat_cmd_free,
	ioat_cmd_post,
	ioat_cmd_poll,
	ioat_unregister_complete
};
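/*
 * The dcopy framework calls back through this table to allocate and free
 * channels and commands, to post and poll commands, and (via
 * ioat_unregister_complete()) to finish an asynchronous unregister; see
 * ioat_detach() below.
 */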

/*
 * _init()
 */
int
_init(void)
{
	int e;

	e = ddi_soft_state_init(&ioat_statep, sizeof (ioat_state_t), 1);
	if (e != 0) {
		return (e);
	}

	e = mod_install(&ioat_modlinkage);
	if (e != 0) {
		ddi_soft_state_fini(&ioat_statep);
		return (e);
	}

	return (0);
}

/*
 * _info()
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&ioat_modlinkage, modinfop));
}

/*
 * _fini()
 */
int
_fini(void)
{
	int e;

	e = mod_remove(&ioat_modlinkage);
	if (e != 0) {
		return (e);
	}

	ddi_soft_state_fini(&ioat_statep);

	return (0);
}

/*
 * ioat_attach()
 */
static int
ioat_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ioat_state_t *state;
	int instance;
	int e;


	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		instance = ddi_get_instance(dip);
		state = ddi_get_soft_state(ioat_statep, instance);
		if (state == NULL) {
			return (DDI_FAILURE);
		}
		e = ioat_channel_resume(state);
		if (e != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		ioat_intr_enable(state);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	e = ddi_soft_state_zalloc(ioat_statep, instance);
	if (e != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	state = ddi_get_soft_state(ioat_statep, instance);
	if (state == NULL) {
		goto attachfail_get_soft_state;
	}

	state->is_dip = dip;
	state->is_instance = instance;

	/* setup the registers, save away some device info */
	e = ioat_chip_init(state);
	if (e != DDI_SUCCESS) {
		goto attachfail_chip_init;
	}

	/* initialize driver state; must be done after chip init */
	e = ioat_drv_init(state);
	if (e != DDI_SUCCESS) {
		goto attachfail_drv_init;
	}

	/* create the minor node (for the ioctl) */
	e = ddi_create_minor_node(dip, "ioat", S_IFCHR, instance, DDI_PSEUDO,
	    0);
	if (e != DDI_SUCCESS) {
		goto attachfail_minor_node;
	}

	/* Enable device interrupts */
	ioat_intr_enable(state);

	/* Report that the driver was loaded */
	ddi_report_dev(dip);

	/* register with dcopy */
	e = dcopy_device_register(state, &state->is_deviceinfo,
	    &state->is_device_handle);
	if (e != DCOPY_SUCCESS) {
		goto attachfail_register;
	}

	return (DDI_SUCCESS);

attachfail_register:
	ioat_intr_disable(state);
	ddi_remove_minor_node(dip, NULL);
attachfail_minor_node:
	ioat_drv_fini(state);
attachfail_drv_init:
	ioat_chip_fini(state);
attachfail_chip_init:
attachfail_get_soft_state:
	(void) ddi_soft_state_free(ioat_statep, instance);

	return (DDI_FAILURE);
}

/*
 * ioat_detach()
 */
static int
ioat_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	ioat_state_t *state;
	int instance;
	int e;


	instance = ddi_get_instance(dip);
	state = ddi_get_soft_state(ioat_statep, instance);
	if (state == NULL) {
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		ioat_channel_suspend(state);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Try to unregister from dcopy.  Since this driver doesn't follow the
	 * traditional parent/child model, we may still be in use, so we can't
	 * detach yet.
	 */
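	/*
	 * If DCOPY_PENDING comes back, dcopy is expected to call
	 * ioat_unregister_complete() once the device is no longer in use;
	 * that callback finishes teardown via ioat_detach_finish().
	 */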
	e = dcopy_device_unregister(&state->is_device_handle);
	if (e != DCOPY_SUCCESS) {
		if (e == DCOPY_PENDING) {
			cmn_err(CE_NOTE, "device busy, performing asynchronous"
			    " detach\n");
		}
		return (DDI_FAILURE);
	}

	ioat_detach_finish(state);

	return (DDI_SUCCESS);
}

/*
 * ioat_getinfo()
 */
/*ARGSUSED*/
static int
ioat_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	ioat_state_t *state;
	int instance;
	dev_t dev;
	int e;


	dev = (dev_t)arg;
	instance = getminor(dev);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		state = ddi_get_soft_state(ioat_statep, instance);
		if (state == NULL) {
			return (DDI_FAILURE);
		}
		*result = (void *)state->is_dip;
		e = DDI_SUCCESS;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		e = DDI_SUCCESS;
		break;

	default:
		e = DDI_FAILURE;
		break;
	}

	return (e);
}

/*
 * ioat_open()
 */
/*ARGSUSED*/
static int
ioat_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
	ioat_state_t *state;
	int instance;

	instance = getminor(*devp);
	state = ddi_get_soft_state(ioat_statep, instance);
	if (state == NULL) {
		return (ENXIO);
	}

	return (0);
}


/*
 * ioat_close()
 */
/*ARGSUSED*/
static int
ioat_close(dev_t devp, int flag, int otyp, cred_t *cred)
{
	return (0);
}


/*
 * ioat_chip_init()
 */
static int
ioat_chip_init(ioat_state_t *state)
{
	ddi_device_acc_attr_t attr;
	int e;


	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	e = ddi_regs_map_setup(state->is_dip, 1, (caddr_t *)&state->is_genregs,
	    0, 0, &attr, &state->is_reg_handle);
	if (e != DDI_SUCCESS) {
		goto chipinitfail_regsmap;
	}

	/* save away ioat chip info */
	state->is_num_channels = (uint_t)ddi_get8(state->is_reg_handle,
	    &state->is_genregs[IOAT_CHANCNT]);

	/*
	 * If we get a bogus value, something is wrong with the H/W; fail to
	 * attach.
	 */
	if (state->is_num_channels == 0) {
		goto chipinitfail_numchan;
	}

	state->is_maxxfer = (uint_t)ddi_get8(state->is_reg_handle,
	    &state->is_genregs[IOAT_XFERCAP]);
	state->is_chanoff = (uintptr_t)ddi_get16(state->is_reg_handle,
	    (uint16_t *)&state->is_genregs[IOAT_PERPORT_OFF]);
	state->is_cbver = (uint_t)ddi_get8(state->is_reg_handle,
	    &state->is_genregs[IOAT_CBVER]);
	state->is_intrdelay = (uint_t)ddi_get16(state->is_reg_handle,
	    (uint16_t *)&state->is_genregs[IOAT_INTRDELAY]);
	state->is_status = (uint_t)ddi_get16(state->is_reg_handle,
	    (uint16_t *)&state->is_genregs[IOAT_CSSTATUS]);
	state->is_capabilities = (uint_t)ddi_get32(state->is_reg_handle,
	    (uint32_t *)&state->is_genregs[IOAT_DMACAPABILITY]);

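	/*
	 * The CBVER register's upper nibble gives the CB (DMA engine) major
	 * version; 0x1x is treated as v1 and 0x2x as v2 hardware below.
	 */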
	if (state->is_cbver & 0x10) {
		state->is_ver = IOAT_CBv1;
	} else if (state->is_cbver & 0x20) {
		state->is_ver = IOAT_CBv2;
	} else {
		goto chipinitfail_version;
	}

	return (DDI_SUCCESS);

chipinitfail_version:
chipinitfail_numchan:
	ddi_regs_map_free(&state->is_reg_handle);
chipinitfail_regsmap:
	return (DDI_FAILURE);
}


/*
 * ioat_chip_fini()
 */
static void
ioat_chip_fini(ioat_state_t *state)
{
	ddi_regs_map_free(&state->is_reg_handle);
}


/*
 * ioat_drv_init()
 */
static int
ioat_drv_init(ioat_state_t *state)
{
	ddi_acc_handle_t handle;
	int e;


	mutex_init(&state->is_mutex, NULL, MUTEX_DRIVER, NULL);

	state->is_deviceinfo.di_dip = state->is_dip;
	state->is_deviceinfo.di_num_dma = state->is_num_channels;
	state->is_deviceinfo.di_maxxfer = state->is_maxxfer;
	state->is_deviceinfo.di_capabilities = state->is_capabilities;
	state->is_deviceinfo.di_cb = &ioat_cb;

	e = pci_config_setup(state->is_dip, &handle);
	if (e != DDI_SUCCESS) {
		goto drvinitfail_config_setup;
	}

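	/*
	 * di_id ends up as: PCI vendor ID in bits 63:48, device ID in bits
	 * 47:32, and the chipset (CB) version in the low bits.
	 */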
	/* read in Vendor ID */
	state->is_deviceinfo.di_id = (uint64_t)pci_config_get16(handle, 0);
	state->is_deviceinfo.di_id = state->is_deviceinfo.di_id << 16;

	/* read in Device ID */
	state->is_deviceinfo.di_id |= (uint64_t)pci_config_get16(handle, 2);
	state->is_deviceinfo.di_id = state->is_deviceinfo.di_id << 32;

	/* Add in chipset version */
	state->is_deviceinfo.di_id |= (uint64_t)state->is_cbver;
	pci_config_teardown(&handle);

	e = ddi_intr_hilevel(state->is_dip, 0);
	if (e != 0) {
		cmn_err(CE_WARN, "hilevel interrupt not supported\n");
		goto drvinitfail_hilevel;
	}

	/* we don't support MSIs for v2 yet */
	e = ddi_add_intr(state->is_dip, 0, NULL, NULL, ioat_isr,
	    (caddr_t)state);
	if (e != DDI_SUCCESS) {
		goto drvinitfail_add_intr;
	}

	e = ddi_get_iblock_cookie(state->is_dip, 0, &state->is_iblock_cookie);
	if (e != DDI_SUCCESS) {
		goto drvinitfail_iblock_cookie;
	}

	e = ioat_channel_init(state);
	if (e != DDI_SUCCESS) {
		goto drvinitfail_channel_init;
	}

	return (DDI_SUCCESS);

drvinitfail_channel_init:
drvinitfail_iblock_cookie:
	ddi_remove_intr(state->is_dip, 0, state->is_iblock_cookie);
drvinitfail_add_intr:
drvinitfail_hilevel:
drvinitfail_config_setup:
	mutex_destroy(&state->is_mutex);

	return (DDI_FAILURE);
}


/*
 * ioat_drv_fini()
 */
static void
ioat_drv_fini(ioat_state_t *state)
{
	ioat_channel_fini(state);
	ddi_remove_intr(state->is_dip, 0, state->is_iblock_cookie);
	mutex_destroy(&state->is_mutex);
}


/*
 * ioat_unregister_complete()
 */
void
ioat_unregister_complete(void *device_private, int status)
{
	ioat_state_t *state;


	state = device_private;

	if (status != DCOPY_SUCCESS) {
		cmn_err(CE_WARN, "asynchronous detach aborted\n");
		return;
	}

	cmn_err(CE_CONT, "detach completing\n");
	ioat_detach_finish(state);
}


/*
 * ioat_detach_finish()
 */
void
ioat_detach_finish(ioat_state_t *state)
{
	ioat_intr_disable(state);
	ddi_remove_minor_node(state->is_dip, NULL);
	ioat_drv_fini(state);
	ioat_chip_fini(state);
	(void) ddi_soft_state_free(ioat_statep, state->is_instance);
}


/*
 * ioat_intr_enable()
 */
static void
ioat_intr_enable(ioat_state_t *state)
{
	uint32_t intr_status;


	/* Clear any pending interrupts */
	intr_status = ddi_get32(state->is_reg_handle,
	    (uint32_t *)&state->is_genregs[IOAT_ATTNSTATUS]);
	if (intr_status != 0) {
		ddi_put32(state->is_reg_handle,
		    (uint32_t *)&state->is_genregs[IOAT_ATTNSTATUS],
		    intr_status);
	}

	/* Enable interrupts on the device */
	ddi_put8(state->is_reg_handle, &state->is_genregs[IOAT_INTRCTL],
	    IOAT_INTRCTL_MASTER_EN);
}


/*
 * ioat_intr_disable()
 */
static void
ioat_intr_disable(ioat_state_t *state)
{
	/*
	 * disable interrupts on the device. A read of the interrupt control
	 * register clears the enable bit.
	 */
	(void) ddi_get8(state->is_reg_handle,
	    &state->is_genregs[IOAT_INTRCTL]);
}


/*
 * ioat_isr()
 */
static uint_t
ioat_isr(caddr_t parm)
{
	uint32_t intr_status;
	ioat_state_t *state;
	uint8_t intrctrl;
	uint32_t chan;
	uint_t r;
	int i;

	state = (ioat_state_t *)parm;

	intrctrl = ddi_get8(state->is_reg_handle,
	    &state->is_genregs[IOAT_INTRCTL]);
	/* master interrupt enable should always be set */
	ASSERT(intrctrl & IOAT_INTRCTL_MASTER_EN);

	/* If the interrupt status bit isn't set, it's not ours */
	if (!(intrctrl & IOAT_INTRCTL_INTR_STAT)) {
		/* re-set master interrupt enable (since it clears on read) */
		ddi_put8(state->is_reg_handle,
		    &state->is_genregs[IOAT_INTRCTL], intrctrl);
		return (DDI_INTR_UNCLAIMED);
	}

	/* see which channels generated the interrupt */
	intr_status = ddi_get32(state->is_reg_handle,
	    (uint32_t *)&state->is_genregs[IOAT_ATTNSTATUS]);

	/* call the intr handler for the channels */
	r = DDI_INTR_UNCLAIMED;
	chan = 1;
	for (i = 0; i < state->is_num_channels; i++) {
		if (intr_status & chan) {
			ioat_channel_intr(&state->is_channel[i]);
			r = DDI_INTR_CLAIMED;
		}
		chan = chan << 1;
	}

	/*
	 * if the interrupt status bit was set, there should have been an
	 * attention status bit set too.
	 */
	ASSERT(r == DDI_INTR_CLAIMED);

	/* re-set master interrupt enable (since it clears on read) */
	ddi_put8(state->is_reg_handle, &state->is_genregs[IOAT_INTRCTL],
	    intrctrl);

	return (r);
}

static int
ioat_quiesce(dev_info_t *dip)
{
	ioat_state_t *state;
	int instance;

	instance = ddi_get_instance(dip);
	state = ddi_get_soft_state(ioat_statep, instance);
	if (state == NULL) {
		return (DDI_FAILURE);
	}

	ioat_intr_disable(state);
	ioat_channel_quiesce(state);

	return (DDI_SUCCESS);
}
694