xref: /illumos-gate/usr/src/uts/common/io/ecpp.c (revision 88f8b78a88cbdc6d8c1af5c3e54bc49d25095c98)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *
31  * IEEE 1284 Parallel Port Device Driver
32  *
33  */
34 
35 #include <sys/param.h>
36 #include <sys/errno.h>
37 #include <sys/file.h>
38 #include <sys/cmn_err.h>
39 #include <sys/stropts.h>
40 #include <sys/debug.h>
41 #include <sys/stream.h>
42 #include <sys/strsun.h>
43 #include <sys/kmem.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/conf.h>		/* req. by dev_ops flags MTSAFE etc. */
47 #include <sys/modctl.h>		/* for modldrv */
48 #include <sys/stat.h>		/* ddi_create_minor_node S_IFCHR */
49 #include <sys/open.h>
50 #include <sys/ddi_impldefs.h>
51 #include <sys/kstat.h>
52 
53 #include <sys/prnio.h>
54 #include <sys/ecppreg.h>	/* hw description */
55 #include <sys/ecppio.h>		/* ioctl description */
56 #include <sys/ecppvar.h>	/* driver description */
57 #include <sys/dma_engine.h>
58 #include <sys/dma_i8237A.h>
59 
60 /*
61  * Background
62  * ==========
63  * The IEEE 1284-1994 standard defines "a signalling method for asynchronous,
64  * fully interlocked, bidirectional parallel communications between hosts
65  * and printers or other peripherals." (1.1) The standard defines 5 modes
66  * of operation - Compatibility, Nibble, Byte, ECP and EPP - which differ
67  * in direction, bandwidth, pin assignment, DMA capability, etc.
68  *
69  * Negotiation is a mechanism for moving between modes. Compatibility mode
70  * is the default mode, from which negotiations to other modes occur and
71  * to which both host and peripheral break in case of interface errors.
72  * Compatibility mode provides a unidirectional (forward) channel for
73  * communicating with old pre-1284 peripherals.
74  *
75  * Each mode has a number of phases. The [mode, phase] pair represents the
76  * interface state. The host initiates all transfers, though the peripheral
77  * can request a backchannel transfer by asserting the nErr pin.
78  *
79  * The ecpp driver implements an IEEE 1284-compliant host using a combination
80  * of hardware and software. The hardware part is represented by a controller,
81  * which is a part of the SuperIO chip. Ecpp supports the following SuperIOs:
82  * PC82332/PC82336 (U5/U10/U60), PC97317 (U100), M1553 (Grover).
83  * Struct ecpp_hw describes each SuperIO and is determined in ecpp_attach().
84  *
85  * Negotiation is performed in software. Transfer may be performed either
86  * in software by driving output pins for each byte (PIO method), or with
87  * hardware assistance - SuperIO has a 16-byte FIFO, which is filled by
88  * the driver (normally using DMA), while the chip performs the actual xfer.
89  * PIO is used for Nibble and Compat, DMA is used for ECP and Compat modes.
90  *
91  * Driver currently supports the following modes:
92  *
93  * - Compatibility mode: byte-wide forward channel ~50KB/sec;
94  *   pp->io_mode defines PIO or DMA method of transfer;
95  * - Nibble mode: nibble-wide (4-bit) reverse channel ~30KB/sec;
96  * - ECP mode: byte-wide bidirectional channel (~1MB/sec);
97  *
98  * Theory of operation
99  * ===================
100  * The manner in which ecpp drives the 1284 interface is that of a state machine.
101  * State is a combination of 1284 mode {ECPP_*_MODE}, 1284 phase {ECPP_PHASE_*}
102  * and transfer method {PIO, DMA}. State is a function of application actions
103  * {write(2), ioctl(2)} and peripheral reaction.
104  *
105  * 1284 interface state is described by the following variables:
106  *   pp->current_mode  -- 1284 mode used for forward transfers;
107  *   pp->backchannel   -- 1284 mode used for backward transfers;
108  *   pp->current_phase -- 1284 phase;
109  *
110  * Bidirectional operation in Compatibility mode is provided by a combination:
111  * pp->current_mode == ECPP_COMPAT_MODE && pp->backchannel == ECPP_NIBBLE_MODE
112  * ECPP_CENTRONICS means no backchannel
113  *
114  * Driver internal state is defined by pp->e_busy as follows:
115  *   ECPP_IDLE	-- idle, no active transfers;
116  *   ECPP_BUSY	-- transfer is in progress;
117  *   ECPP_ERR	-- have data to transfer, but peripheral can't receive data;
118  *   ECPP_FLUSH	-- flushing the queues;
119  *
120  * When opened, the driver is in ECPP_IDLE state, current mode ECPP_CENTRONICS.
121  * Default negotiation tries to negotiate to the best mode supported by the
122  * printer and sets pp->current_mode and pp->backchannel accordingly.
123  *
124  * When output data arrives in M_DATA mblks, ecpp_wput() puts them on the queue
125  * to let ecpp_wsrv() concatenate small blocks into one big transfer
126  * by copying them into pp->ioblock. If the first mblk's data is bigger than
127  * pp->ioblock, the mblk itself is used instead (pointed to by pp->msg).
128  *
129  * Before starting a transfer the driver checks if the peripheral is ready
130  * by calling ecpp_check_status(); if not, the driver enters the ECPP_ERR state
131  * and schedules ecpp_wsrv_timer(), which would qenable() the wq, effectively
132  * rechecking the peripheral readiness and restarting itself until it is ready.
133  * The transfer is then started via ecpp_start() and the driver goes ECPP_BUSY.
134  *
135  * While a transfer is in progress all arriving messages will be queued up.
136  * A transfer can end in either of two ways:
137  * - interrupt occurs, ecpp_isr() checks if all the data was transferred, if so
138  *   cleanup and go ECPP_IDLE, otherwise putback untransferred and qenable();
139  * - ecpp_xfer_timeout() cancels the transfer and puts back untransferred data;
140  *
141  * PIO transfer method is very CPU intensive: for each sent byte the peripheral
142  * state is checked, then the byte is transferred and the driver waits for an
143  * nAck interrupt; ecpp_isr() will then check if there is more data and if so
144  * triggers the soft interrupt, which transfers the next byte. The PIO method
145  * is needed only for legacy printers which are sensitive to the strobe problem
146  * (Bugid 4192788).
147  *
148  * ecpp_wsrv() is responsible for both starting transfers (ecpp_start()) and
149  * going idle (ecpp_idle_phase()). Many routines qenable() the write queue,
150  * meaning "check if there are pending requests, process them and go idle".
151  *
152  * In its idle state the driver will always try to listen to the backchannel
153  * (as advised by 1284).
154  *
155  * The mechanism for handling backchannel requests is as follows:
156  * - when the peripheral has data to send it asserts nErr pin
157  *   (and also nAck in Nibble Mode) which results in an interrupt on the host;
158  * - ISR creates M_CTL message containing an ECPP_BACKCHANNEL byte and
159  *   puts it back on the write queue;
160  * - ecpp_wsrv() gets M_CTL and calls ecpp_peripheral2host(), which kicks off
161  *   the transfer;
162  *
163  * This is how the Nibble and ECP mode backchannels are implemented.
164  * If the read queue gets full, the backchannel request is rejected.
165  * As the application reads data and queue size falls below the low watermark,
166  * ecpp_rsrv() gets called and enables the backchannel again.
167  *
168  * Future enhancements
169  * ===================
170  *
171  * Support new modes: Byte and EPP.
172  */
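
/*
 * Illustrative example (informational only, an assumption about typical
 * behaviour rather than a normative statement): with an ECP-capable printer,
 * default negotiation would typically leave
 * pp->current_mode == ECPP_ECP_MODE and pp->backchannel == ECPP_ECP_MODE,
 * and a write(2) drives pp->e_busy ECPP_IDLE -> ECPP_BUSY -> ECPP_IDLE;
 * with a legacy printer the driver would instead settle on
 * pp->current_mode == ECPP_COMPAT_MODE with pp->backchannel set to
 * ECPP_NIBBLE_MODE or ECPP_CENTRONICS (no backchannel).
 */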
173 
174 #ifndef ECPP_DEBUG
175 #define	ECPP_DEBUG 0
176 #endif	/* ECPP_DEBUG */
177 int ecpp_debug = ECPP_DEBUG;
178 
179 int noecp = 0;	/* flag not to use ECP mode */
180 
181 /* driver entry point fn definitions */
182 static int 	ecpp_open(queue_t *, dev_t *, int, int, cred_t *);
183 static int	ecpp_close(queue_t *, int, cred_t *);
184 static uint_t 	ecpp_isr(caddr_t);
185 static uint_t	ecpp_softintr(caddr_t);
186 
187 /* configuration entry point fn definitions */
188 static int 	ecpp_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
189 static int	ecpp_attach(dev_info_t *, ddi_attach_cmd_t);
190 static int	ecpp_detach(dev_info_t *, ddi_detach_cmd_t);
191 static struct ecpp_hw_bind *ecpp_determine_sio_type(struct ecppunit *);
192 
193 /* isr support routines */
194 static uint_t 	ecpp_nErr_ihdlr(struct ecppunit *);
195 static uint_t	ecpp_pio_ihdlr(struct ecppunit *);
196 static uint_t	ecpp_dma_ihdlr(struct ecppunit *);
197 static uint_t	ecpp_M1553_intr(struct ecppunit *);
198 
199 /* configuration support routines */
200 static void	ecpp_get_props(struct ecppunit *);
201 
202 /* Streams Routines */
203 static int	ecpp_wput(queue_t *, mblk_t *);
204 static int	ecpp_wsrv(queue_t *);
205 static int	ecpp_rsrv(queue_t *);
206 static void	ecpp_flush(struct ecppunit *, int);
207 static void	ecpp_start(struct ecppunit *, caddr_t, size_t);
208 
209 /* ioctl handling */
210 static void	ecpp_putioc(queue_t *, mblk_t *);
211 static void	ecpp_srvioc(queue_t *, mblk_t *);
212 static void	ecpp_wput_iocdata_devid(queue_t *, mblk_t *, uintptr_t);
213 static void	ecpp_putioc_copyout(queue_t *, mblk_t *, void *, int);
214 static void	ecpp_putioc_stateful_copyin(queue_t *, mblk_t *, size_t);
215 static void	ecpp_srvioc_devid(queue_t *, mblk_t *,
216 				struct ecpp_device_id *, int *);
217 static void	ecpp_srvioc_prnif(queue_t *, mblk_t *);
218 static void 	ecpp_ack_ioctl(queue_t *, mblk_t *);
219 static void 	ecpp_nack_ioctl(queue_t *, mblk_t *, int);
220 
221 /* kstat routines */
222 static void	ecpp_kstat_init(struct ecppunit *);
223 static int	ecpp_kstat_update(kstat_t *, int);
224 static int	ecpp_kstatintr_update(kstat_t *, int);
225 
226 /* dma routines */
227 static void	ecpp_putback_untransfered(struct ecppunit *, void *, uint_t);
228 static uint8_t	ecpp_setup_dma_resources(struct ecppunit *, caddr_t, size_t);
229 static uint8_t	ecpp_init_dma_xfer(struct ecppunit *, caddr_t, size_t);
230 
231 /* pio routines */
232 static void	ecpp_pio_writeb(struct ecppunit *);
233 static void	ecpp_xfer_cleanup(struct ecppunit *);
234 static uint8_t	ecpp_prep_pio_xfer(struct ecppunit *, caddr_t, size_t);
235 
236 /* misc */
237 static uchar_t	ecpp_reset_port_regs(struct ecppunit *);
238 static void	ecpp_xfer_timeout(void *);
239 static void	ecpp_fifo_timer(void *);
240 static void	ecpp_wsrv_timer(void *);
241 static uchar_t	dcr_write(struct ecppunit *, uint8_t);
242 static uchar_t	ecr_write(struct ecppunit *, uint8_t);
243 static uchar_t	ecpp_check_status(struct ecppunit *);
244 static int	ecpp_backchan_req(struct ecppunit *);
245 static void	ecpp_untimeout_unblock(struct ecppunit *, timeout_id_t *);
246 static uint_t	ecpp_get_prn_ifcap(struct ecppunit *);
247 
248 /* stubs */
249 static void	empty_config_mode(struct ecppunit *);
250 static void	empty_mask_intr(struct ecppunit *);
251 
252 /* PC87332 support */
253 static int	pc87332_map_regs(struct ecppunit *);
254 static void	pc87332_unmap_regs(struct ecppunit *);
255 static int	pc87332_config_chip(struct ecppunit *);
256 static void	pc87332_config_mode(struct ecppunit *);
257 static uint8_t	pc87332_read_config_reg(struct ecppunit *, uint8_t);
258 static void	pc87332_write_config_reg(struct ecppunit *, uint8_t, uint8_t);
259 static void	cheerio_mask_intr(struct ecppunit *);
260 static void	cheerio_unmask_intr(struct ecppunit *);
261 static int	cheerio_dma_start(struct ecppunit *);
262 static int	cheerio_dma_stop(struct ecppunit *, size_t *);
263 static size_t	cheerio_getcnt(struct ecppunit *);
264 static void	cheerio_reset_dcsr(struct ecppunit *);
265 
266 /* PC97317 support */
267 static int	pc97317_map_regs(struct ecppunit *);
268 static void	pc97317_unmap_regs(struct ecppunit *);
269 static int	pc97317_config_chip(struct ecppunit *);
270 static void	pc97317_config_mode(struct ecppunit *);
271 
272 /* M1553 Southbridge support */
273 static int	m1553_map_regs(struct ecppunit *pp);
274 static void	m1553_unmap_regs(struct ecppunit *pp);
275 static int	m1553_config_chip(struct ecppunit *);
276 static uint8_t	m1553_read_config_reg(struct ecppunit *, uint8_t);
277 static void	m1553_write_config_reg(struct ecppunit *, uint8_t, uint8_t);
278 
279 /* M1553 Southbridge DMAC 8237 support routines */
280 static int 	dma8237_dma_start(struct ecppunit *);
281 static int	dma8237_dma_stop(struct ecppunit *, size_t *);
282 static size_t	dma8237_getcnt(struct ecppunit *);
283 static void 	dma8237_write_addr(struct ecppunit *, uint32_t);
284 static void	dma8237_write_count(struct ecppunit *, uint32_t);
285 static uint32_t	dma8237_read_count(struct ecppunit *);
286 static void	dma8237_write(struct ecppunit *, int, uint8_t);
287 static uint8_t	dma8237_read(struct ecppunit *, int);
288 #ifdef INCLUDE_DMA8237_READ_ADDR
289 static uint32_t	dma8237_read_addr(struct ecppunit *);
290 #endif
291 
292 /* i86 PC support routines */
293 
294 #if defined(__x86)
295 static int	x86_dma_start(struct ecppunit *);
296 static int	x86_dma_stop(struct ecppunit *, size_t *);
297 static int	x86_map_regs(struct ecppunit *);
298 static void	x86_unmap_regs(struct ecppunit *);
299 static int	x86_config_chip(struct ecppunit *);
300 static size_t	x86_getcnt(struct ecppunit *);
301 #endif
302 
303 /* IEEE 1284 phase transitions */
304 static void	ecpp_1284_init_interface(struct ecppunit *);
305 static int	ecpp_1284_termination(struct ecppunit *);
306 static uchar_t 	ecpp_idle_phase(struct ecppunit *);
307 static int	ecp_forward2reverse(struct ecppunit *);
308 static int	ecp_reverse2forward(struct ecppunit *);
309 static int	read_nibble_backchan(struct ecppunit *);
310 
311 /* reverse transfers */
312 static uint_t	ecpp_peripheral2host(struct ecppunit *);
313 static uchar_t	ecp_peripheral2host(struct ecppunit *);
314 static uchar_t	nibble_peripheral2host(struct ecppunit *pp, uint8_t *);
315 static int	ecpp_getdevid(struct ecppunit *, uint8_t *, int *, int);
316 static void	ecpp_ecp_read_timeout(void *);
317 static void	ecpp_ecp_read_completion(struct ecppunit *);
318 
319 /* IEEE 1284 mode transitions */
320 static void 	ecpp_default_negotiation(struct ecppunit *);
321 static int 	ecpp_mode_negotiation(struct ecppunit *, uchar_t);
322 static int	ecpp_1284_negotiation(struct ecppunit *, uint8_t, uint8_t *);
323 static int	ecp_negotiation(struct ecppunit *);
324 static int	nibble_negotiation(struct ecppunit *);
325 static int	devidnib_negotiation(struct ecppunit *);
326 
327 /* IEEE 1284 utility routines */
328 static int	wait_dsr(struct ecppunit *, uint8_t, uint8_t, int);
329 
330 /* debugging functions */
331 static void	ecpp_error(dev_info_t *, char *, ...);
332 static uchar_t	ecpp_get_error_status(uchar_t);
333 
334 /*
335  * Chip-dependent structures
336  */
337 static ddi_dma_attr_t cheerio_dma_attr = {
338 	DMA_ATTR_VERSION,	/* version */
339 	0x00000000ull,		/* dlim_addr_lo */
340 	0xfffffffeull,		/* dlim_addr_hi */
341 	0xffffff,		/* DMA counter register */
342 	1,			/* DMA address alignment */
343 	0x74,			/* burst sizes */
344 	0x0001,			/* min effective DMA size */
345 	0xffff,			/* maximum transfer size */
346 	0xffff,			/* segment boundary */
347 	1,			/* s/g list length */
348 	1,			/* granularity of device */
349 	0			/* DMA flags */
350 };
351 
352 static struct ecpp_hw pc87332 = {
353 	pc87332_map_regs,
354 	pc87332_unmap_regs,
355 	pc87332_config_chip,
356 	pc87332_config_mode,
357 	cheerio_mask_intr,
358 	cheerio_unmask_intr,
359 	cheerio_dma_start,
360 	cheerio_dma_stop,
361 	cheerio_getcnt,
362 	&cheerio_dma_attr
363 };
364 
365 static struct ecpp_hw pc97317 = {
366 	pc97317_map_regs,
367 	pc97317_unmap_regs,
368 	pc97317_config_chip,
369 	pc97317_config_mode,
370 	cheerio_mask_intr,
371 	cheerio_unmask_intr,
372 	cheerio_dma_start,
373 	cheerio_dma_stop,
374 	cheerio_getcnt,
375 	&cheerio_dma_attr
376 };
377 
378 static ddi_dma_attr_t i8237_dma_attr = {
379 	DMA_ATTR_VERSION,	/* version */
380 	0x00000000ull,		/* dlim_addr_lo */
381 	0xfffffffeull,		/* dlim_addr_hi */
382 	0xffff,			/* DMA counter register */
383 	1,			/* DMA address alignment */
384 	0x01,			/* burst sizes */
385 	0x0001,			/* min effective DMA size */
386 	0xffff,			/* maximum transfer size */
387 	0x7fff,			/* segment boundary */
388 	1,			/* s/g list length */
389 	1,			/* granularity of device */
390 	0			/* DMA flags */
391 };
392 
393 static struct ecpp_hw m1553 = {
394 	m1553_map_regs,
395 	m1553_unmap_regs,
396 	m1553_config_chip,
397 	empty_config_mode,	/* no config_mode */
398 	empty_mask_intr,	/* no mask_intr */
399 	empty_mask_intr,	/* no unmask_intr */
400 	dma8237_dma_start,
401 	dma8237_dma_stop,
402 	dma8237_getcnt,
403 	&i8237_dma_attr
404 };
405 
406 #if defined(__x86)
407 static ddi_dma_attr_t sb_dma_attr = {
408 	DMA_ATTR_VERSION,	/* version */
409 	0x00000000ull,		/* dlim_addr_lo */
410 	0xffffff,		/* dlim_addr_hi */
411 	0xffff,			/* DMA counter register */
412 	1,			/* DMA address alignment */
413 	0x01,			/* burst sizes */
414 	0x0001,			/* min effective DMA size */
415 	0xffffffff,		/* maximum transfer size */
416 	0xffff,			/* segment boundary */
417 	1,			/* s/g list length */
418 	1,			/* granularity of device */
419 	0			/* DMA flags */
420 };
421 
422 static struct ecpp_hw x86 = {
423 	x86_map_regs,
424 	x86_unmap_regs,
425 	x86_config_chip,
426 	empty_config_mode,	/* no config_mode */
427 	empty_mask_intr,	/* no mask_intr */
428 	empty_mask_intr,	/* no unmask_intr */
429 	x86_dma_start,
430 	x86_dma_stop,
431 	x86_getcnt,
432 	&sb_dma_attr
433 };
434 #endif
435 
436 /*
437  * list of supported devices
438  */
439 struct ecpp_hw_bind ecpp_hw_bind[] = {
440 	{ "ns87317-ecpp",	&pc97317,	"PC97317" },
441 	{ "pnpALI,1533,3",	&m1553,		"M1553" },
442 	{ "ecpp",		&pc87332,	"PC87332" },
443 #if defined(__x86)
444 	{ "lp",			&x86,		"i86pc"},
445 #endif
446 };
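
/*
 * Each entry above binds a device node name to its ecpp_hw vector;
 * ecpp_determine_sio_type() is expected to match the attaching node
 * against the "name" field and return the corresponding entry, whose
 * hw pointer ecpp_attach() then stores in pp->hw.
 */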
447 
448 static ddi_device_acc_attr_t acc_attr = {
449 	DDI_DEVICE_ATTR_V0,
450 	DDI_STRUCTURE_LE_ACC,
451 	DDI_STRICTORDER_ACC
452 };
453 
454 static struct ecpp_transfer_parms default_xfer_parms = {
455 	FWD_TIMEOUT_DEFAULT,	/* write timeout in seconds */
456 	ECPP_CENTRONICS		/* supported mode */
457 };
458 
459 /* prnio interface info string */
460 static const char prn_ifinfo[] = PRN_PARALLEL;
461 
462 /* prnio timeouts */
463 static const struct prn_timeouts prn_timeouts_default = {
464 	FWD_TIMEOUT_DEFAULT,	/* forward timeout */
465 	REV_TIMEOUT_DEFAULT	/* reverse timeout */
466 };
467 
468 static int ecpp_isr_max_delay = ECPP_ISR_MAX_DELAY;
469 static int ecpp_def_timeout = 90;  /* left in for 2.7 compatibility */
470 
471 static void    *ecppsoft_statep;
472 
473 /*
474  * STREAMS framework manages locks for these structures
475  */
476 _NOTE(SCHEME_PROTECTS_DATA("unique per call", iocblk))
477 _NOTE(SCHEME_PROTECTS_DATA("unique per call", datab))
478 _NOTE(SCHEME_PROTECTS_DATA("unique per call", msgb))
479 _NOTE(SCHEME_PROTECTS_DATA("unique per call", queue))
480 _NOTE(SCHEME_PROTECTS_DATA("unique per call", copyreq))
481 _NOTE(SCHEME_PROTECTS_DATA("unique per call", stroptions))
482 
483 struct module_info ecppinfo = {
484 	/* id, name, min pkt siz, max pkt siz, hi water, low water */
485 	42, "ecpp", 0, IO_BLOCK_SZ, ECPPHIWAT, ECPPLOWAT
486 };
487 
488 static struct qinit ecpp_rinit = {
489 	putq, ecpp_rsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
490 };
491 
492 static struct qinit ecpp_wint = {
493 	ecpp_wput, ecpp_wsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
494 };
495 
496 struct streamtab ecpp_str_info = {
497 	&ecpp_rinit, &ecpp_wint, NULL, NULL
498 };
499 
500 static struct cb_ops ecpp_cb_ops = {
501 	nodev,			/* cb_open */
502 	nodev,			/* cb_close */
503 	nodev,			/* cb_strategy */
504 	nodev,			/* cb_print */
505 	nodev,			/* cb_dump */
506 	nodev,			/* cb_read */
507 	nodev,			/* cb_write */
508 	nodev,			/* cb_ioctl */
509 	nodev,			/* cb_devmap */
510 	nodev,			/* cb_mmap */
511 	nodev,			/* cb_segmap */
512 	nochpoll,		/* cb_chpoll */
513 	ddi_prop_op,		/* cb_prop_op */
514 	&ecpp_str_info,		/* cb_stream */
515 	(D_NEW | D_MP | D_MTPERQ)	/* cb_flag */
516 };
517 
518 /*
519  * Declare ops vectors for auto configuration.
520  */
521 struct dev_ops  ecpp_ops = {
522 	DEVO_REV,		/* devo_rev */
523 	0,			/* devo_refcnt */
524 	ecpp_getinfo,		/* devo_getinfo */
525 	nulldev,		/* devo_identify */
526 	nulldev,		/* devo_probe */
527 	ecpp_attach,		/* devo_attach */
528 	ecpp_detach,		/* devo_detach */
529 	nodev,			/* devo_reset */
530 	&ecpp_cb_ops,		/* devo_cb_ops */
531 	(struct bus_ops *)NULL,	/* devo_bus_ops */
532 	nulldev			/* devo_power */
533 };
534 
535 extern struct mod_ops mod_driverops;
536 
537 static struct modldrv ecppmodldrv = {
538 	&mod_driverops,		/* type of module - driver */
539 	"parallel port driver %I%",
540 	&ecpp_ops,
541 };
542 
543 static struct modlinkage ecppmodlinkage = {
544 	MODREV_1,
545 	&ecppmodldrv,
546 	0
547 };
548 
549 
550 /*
551  *
552  * DDI/DKI entry points and supplementary routines
553  *
554  */
555 
556 
557 int
558 _init(void)
559 {
560 	int    error;
561 
562 	if ((error = mod_install(&ecppmodlinkage)) == 0) {
563 		(void) ddi_soft_state_init(&ecppsoft_statep,
564 		    sizeof (struct ecppunit), 1);
565 	}
566 
567 	return (error);
568 }
569 
570 int
571 _fini(void)
572 {
573 	int    error;
574 
575 	if ((error = mod_remove(&ecppmodlinkage)) == 0) {
576 		ddi_soft_state_fini(&ecppsoft_statep);
577 	}
578 
579 	return (error);
580 }
581 
582 int
583 _info(struct modinfo *modinfop)
584 {
585 	return (mod_info(&ecppmodlinkage, modinfop));
586 }
587 
588 static int
589 ecpp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
590 {
591 	int			instance;
592 	char			name[16];
593 	struct ecppunit		*pp;
594 	struct ecpp_hw_bind	*hw_bind;
595 
596 	instance = ddi_get_instance(dip);
597 
598 	switch (cmd) {
599 	case DDI_ATTACH:
600 		break;
601 
602 	case DDI_RESUME:
603 		if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
604 			return (DDI_FAILURE);
605 		}
606 
607 		mutex_enter(&pp->umutex);
608 
609 		pp->suspended = FALSE;
610 
611 		/*
612 		 * Initialize the chip and restore current mode if needed
613 		 */
614 		(void) ECPP_CONFIG_CHIP(pp);
615 		(void) ecpp_reset_port_regs(pp);
616 
617 		if (pp->oflag == TRUE) {
618 			int current_mode = pp->current_mode;
619 
620 			(void) ecpp_1284_termination(pp);
621 			(void) ecpp_mode_negotiation(pp, current_mode);
622 		}
623 
624 		mutex_exit(&pp->umutex);
625 
626 		return (DDI_SUCCESS);
627 
628 	default:
629 		return (DDI_FAILURE);
630 	}
631 
632 	if (ddi_soft_state_zalloc(ecppsoft_statep, instance) != 0) {
633 		ecpp_error(dip, "ddi_soft_state_zalloc failed\n");
634 		goto fail;
635 	}
636 
637 	pp = ddi_get_soft_state(ecppsoft_statep, instance);
638 
639 	pp->dip = dip;
640 	pp->suspended = FALSE;
641 
642 	/*
643 	 * Determine SuperIO type and set chip-dependent variables
644 	 */
645 	hw_bind = ecpp_determine_sio_type(pp);
646 
647 	if (hw_bind == NULL) {
648 		cmn_err(CE_NOTE, "parallel port controller not supported");
649 		goto fail_sio;
650 	} else {
651 		pp->hw = hw_bind->hw;
652 		ecpp_error(pp->dip, "SuperIO type: %s\n", hw_bind->info);
653 	}
654 
655 	/*
656 	 * Map registers
657 	 */
658 	if (ECPP_MAP_REGS(pp) != SUCCESS) {
659 		goto fail_map;
660 	}
661 
662 	if (ddi_dma_alloc_handle(dip, pp->hw->attr, DDI_DMA_DONTWAIT,
663 	    NULL, &pp->dma_handle) != DDI_SUCCESS) {
664 		ecpp_error(dip, "ecpp_attach: failed ddi_dma_alloc_handle\n");
665 		goto fail_dma;
666 	}
667 
668 	if (ddi_get_iblock_cookie(dip, 0,
669 	    &pp->ecpp_trap_cookie) != DDI_SUCCESS) {
670 		ecpp_error(dip, "ecpp_attach: failed ddi_get_iblock_cookie\n");
671 		goto fail_ibc;
672 	}
673 
674 	mutex_init(&pp->umutex, NULL, MUTEX_DRIVER,
675 						(void *)pp->ecpp_trap_cookie);
676 
677 	cv_init(&pp->pport_cv, NULL, CV_DRIVER, NULL);
678 
679 	if (ddi_add_intr(dip, 0, &pp->ecpp_trap_cookie, NULL, ecpp_isr,
680 	    (caddr_t)pp) != DDI_SUCCESS) {
681 		ecpp_error(dip, "ecpp_attach: failed to add hard intr\n");
682 		goto fail_intr;
683 	}
684 
685 	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW,
686 	    &pp->softintr_id, 0, 0, ecpp_softintr,
687 	    (caddr_t)pp) != DDI_SUCCESS) {
688 		ecpp_error(dip, "ecpp_attach: failed to add soft intr\n");
689 		goto fail_softintr;
690 	}
691 
692 	(void) sprintf(name, "ecpp%d", instance);
693 
694 	if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
695 	    DDI_NT_PRINTER, NULL) == DDI_FAILURE) {
696 		ecpp_error(dip, "ecpp_attach: create_minor_node failed\n");
697 		goto fail_minor;
698 	}
699 
700 	pp->ioblock = (caddr_t)kmem_alloc(IO_BLOCK_SZ, KM_SLEEP);
701 	if (pp->ioblock == NULL) {
702 		ecpp_error(dip, "ecpp_attach: kmem_alloc failed\n");
703 		goto fail_iob;
704 	} else {
705 		ecpp_error(pp->dip, "ecpp_attach: ioblock=0x%x\n", pp->ioblock);
706 	}
707 
708 	ecpp_get_props(pp);
709 #if defined(__x86)
710 	if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) {
711 		if (ddi_dmae_alloc(dip, pp->uh.x86.chn,
712 		    DDI_DMA_DONTWAIT, NULL) == DDI_SUCCESS)
713 			ecpp_error(pp->dip, "dmae_alloc success!\n");
714 	}
715 #endif
716 	if (ECPP_CONFIG_CHIP(pp) == FAILURE) {
717 		ecpp_error(pp->dip, "config_chip failed.\n");
718 		goto fail_config;
719 	}
720 
721 	ecpp_kstat_init(pp);
722 
723 	ddi_report_dev(dip);
724 
725 	return (DDI_SUCCESS);
726 
727 fail_config:
728 	ddi_prop_remove_all(dip);
729 	kmem_free(pp->ioblock, IO_BLOCK_SZ);
730 fail_iob:
731 	ddi_remove_minor_node(dip, NULL);
732 fail_minor:
733 	ddi_remove_softintr(pp->softintr_id);
734 fail_softintr:
735 	ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);
736 fail_intr:
737 	mutex_destroy(&pp->umutex);
738 	cv_destroy(&pp->pport_cv);
739 fail_ibc:
740 	ddi_dma_free_handle(&pp->dma_handle);
741 fail_dma:
742 	ECPP_UNMAP_REGS(pp);
743 fail_map:
744 fail_sio:
745 	ddi_soft_state_free(ecppsoft_statep, instance);
746 fail:
747 	ecpp_error(dip, "ecpp_attach: failed.\n");
748 
749 	return (DDI_FAILURE);
750 }
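
/*
 * Note on the fail_* labels above: each label undoes only the resources
 * allocated before the failing step, so the teardown runs in reverse
 * order of setup; ecpp_detach() releases the same resources for a fully
 * attached instance.
 */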
751 
752 static int
753 ecpp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
754 {
755 	int		instance;
756 	struct ecppunit *pp;
757 
758 	instance = ddi_get_instance(dip);
759 
760 	switch (cmd) {
761 	case DDI_DETACH:
762 		break;
763 
764 	case DDI_SUSPEND:
765 		if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
766 			return (DDI_FAILURE);
767 		}
768 
769 		mutex_enter(&pp->umutex);
770 		ASSERT(pp->suspended == FALSE);
771 
772 		pp->suspended = TRUE;	/* prevent new transfers */
773 
774 		/*
775 		 * Wait if there's any activity on the port
776 		 */
777 		if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) {
778 			(void) cv_timedwait(&pp->pport_cv, &pp->umutex,
779 			    ddi_get_lbolt() +
780 			    SUSPEND_TOUT * drv_usectohz(1000000));
781 			if ((pp->e_busy == ECPP_BUSY) ||
782 			    (pp->e_busy == ECPP_FLUSH)) {
783 				pp->suspended = FALSE;
784 				mutex_exit(&pp->umutex);
785 				ecpp_error(pp->dip,
786 					"ecpp_detach: suspend timeout\n");
787 				return (DDI_FAILURE);
788 			}
789 		}
790 
791 		mutex_exit(&pp->umutex);
792 		return (DDI_SUCCESS);
793 
794 	default:
795 		return (DDI_FAILURE);
796 	}
797 
798 	pp = ddi_get_soft_state(ecppsoft_statep, instance);
799 #if defined(__x86)
800 	if (pp->hw == &x86 && pp->uh.x86.chn != 0xff)
801 		(void) ddi_dmae_release(pp->dip, pp->uh.x86.chn);
802 #endif
803 	if (pp->dma_handle != NULL)
804 		ddi_dma_free_handle(&pp->dma_handle);
805 
806 	ddi_remove_minor_node(dip, NULL);
807 
808 	ddi_remove_softintr(pp->softintr_id);
809 
810 	ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);
811 
812 	if (pp->ksp) {
813 		kstat_delete(pp->ksp);
814 	}
815 	if (pp->intrstats) {
816 		kstat_delete(pp->intrstats);
817 	}
818 
819 	cv_destroy(&pp->pport_cv);
820 
821 	mutex_destroy(&pp->umutex);
822 
823 	ECPP_UNMAP_REGS(pp);
824 
825 	kmem_free(pp->ioblock, IO_BLOCK_SZ);
826 
827 	ddi_prop_remove_all(dip);
828 
829 	ddi_soft_state_free(ecppsoft_statep, instance);
830 
831 	return (DDI_SUCCESS);
832 
833 }
834 
835 /*
836  * ecpp_get_props() reads ecpp.conf for user-definable tunables.
837  * If the file or a particular variable is not there, a default value
838  * is assigned.
839  */
840 
841 static void
842 ecpp_get_props(struct ecppunit *pp)
843 {
844 	char	*prop;
845 #if defined(__x86)
846 	int	len;
847 	int	value;
848 #endif
849 	/*
850 	 * If fast_centronics is TRUE, non-compliant IEEE 1284
851 	 * peripherals (Centronics peripherals) will operate in DMA mode.
852 	 * Transfers between main memory and the device will be via DMA;
853 	 * peripheral handshaking will be conducted by superio logic.
854 	 * If ecpp cannot read the variable correctly, fast_centronics will
855 	 * be set to FALSE.  In this case, transfers and handshaking
856 	 * will be conducted by PIO for Centronics devices.
857 	 */
858 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
859 		"fast-centronics", &prop) == DDI_PROP_SUCCESS) {
860 		pp->fast_centronics =
861 				(strcmp(prop, "true") == 0) ? TRUE : FALSE;
862 		ddi_prop_free(prop);
863 	} else {
864 		pp->fast_centronics = FALSE;
865 	}
866 
867 	/*
868 	 * If fast-1284-compatible is set to TRUE, when ecpp communicates
869 	 * with IEEE 1284 compliant peripherals, data transfers between
870 	 * main memory and the parallel port will be conducted by DMA.
871 	 * Handshaking between the port and peripheral will be conducted
872 	 * by superio logic.  This is the default characteristic.  If
873 	 * fast-1284-compatible is set to FALSE, transfers and handshaking
874 	 * will be conducted by PIO.
875 	 */
876 
877 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
878 		"fast-1284-compatible", &prop) == DDI_PROP_SUCCESS) {
879 		pp->fast_compat = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
880 		ddi_prop_free(prop);
881 	} else {
882 		pp->fast_compat = TRUE;
883 	}
884 
885 	/*
886 	 * Some centronics peripherals require the nInit signal to be
887 	 * toggled to reset the device.  If centronics_init_seq is set
888 	 * to TRUE, ecpp will toggle the nInit signal upon every ecpp_open().
889 	 * Applications have the opportunity to toggle the nInit signal
890 	 * with ioctl(2) calls as well.  The default is to set it to FALSE.
891 	 */
892 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
893 		"centronics-init-seq", &prop) == DDI_PROP_SUCCESS) {
894 		pp->init_seq = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
895 		ddi_prop_free(prop);
896 	} else {
897 		pp->init_seq = FALSE;
898 	}
899 
900 	/*
901 	 * If one of the centronics status signals is in an erroneous
902 	 * state, ecpp_wsrv() will be reinvoked every centronics-retry ms to
903 	 * check if the status is ok to transfer.  If the property is not
904 	 * found, wsrv_retry will be set to CENTRONICS_RETRY ms.
905 	 */
906 	pp->wsrv_retry = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
907 			"centronics-retry", CENTRONICS_RETRY);
908 
909 	/*
910 	 * In PIO mode, ecpp_isr() will loop waiting for the busy signal
911 	 * to be deasserted before transferring the next byte. wait_for_busy
912 	 * is specified in microseconds.  If the property is not found,
913 	 * ecpp_isr() will wait for a maximum of WAIT_FOR_BUSY us.
914 	 */
915 	pp->wait_for_busy = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
916 			"centronics-wait-for-busy", WAIT_FOR_BUSY);
917 
918 	/*
919 	 * In PIO mode, centronics transfers must hold the data signals
920 	 * for data_setup_time milliseconds before the strobe is asserted.
921 	 */
922 	pp->data_setup_time = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
923 			"centronics-data-setup-time", DATA_SETUP_TIME);
924 
925 	/*
926 	 * In PIO mode, centronics transfers assert the strobe signal
927 	 * for a period of strobe_pulse_width milliseconds.
928 	 */
929 	pp->strobe_pulse_width = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
930 			"centronics-strobe-pulse-width", STROBE_PULSE_WIDTH);
931 
932 	/*
933 	 * Upon a transfer to the peripheral, ecpp waits write_timeout seconds
934 	 * for the transmission to complete.
935 	 */
936 	default_xfer_parms.write_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
937 		pp->dip, 0, "ecpp-transfer-timeout", ecpp_def_timeout);
938 
939 	pp->xfer_parms = default_xfer_parms;
940 
941 	/*
942 	 * Get dma channel for M1553
943 	 */
944 	if (pp->hw == &m1553) {
945 		pp->uh.m1553.chn = ddi_prop_get_int(DDI_DEV_T_ANY,
946 			pp->dip, 0, "dma-channel", 0x1);
947 		ecpp_error(pp->dip, "ecpp_get_prop:chn=%x\n", pp->uh.m1553.chn);
948 	}
949 #if defined(__x86)
950 	len = sizeof (value);
951 	/* Get dma channel for i86 pc */
952 	if (pp->hw == &x86) {
953 		if (ddi_prop_op(DDI_DEV_T_ANY, pp->dip, PROP_LEN_AND_VAL_BUF,
954 		    DDI_PROP_DONTPASS, "dma-channels", (caddr_t)&value, &len)
955 		    != DDI_PROP_SUCCESS) {
956 			ecpp_error(pp->dip, "No dma channel found\n");
957 			pp->uh.x86.chn = 0xff;
958 			pp->fast_compat = FALSE;
959 			pp->noecpregs = TRUE;
960 		} else
961 			pp->uh.x86.chn = (uint8_t)value;
962 	}
963 #endif
964 	/*
965 	 * these properties are not yet public
966 	 */
967 	pp->ecp_rev_speed = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
968 			"ecp-rev-speed", ECP_REV_SPEED);
969 
970 	pp->rev_watchdog = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
971 			"rev-watchdog", REV_WATCHDOG);
972 
973 	ecpp_error(pp->dip,
974 		"ecpp_get_prop: fast_centronics=%x, fast-1284=%x\n"
975 		"ecpp_get_prop: wsrv_retry=%d, wait_for_busy=%d\n"
976 		"ecpp_get_prop: data_setup=%d, strobe_pulse=%d\n"
977 		"ecpp_get_prop: transfer-timeout=%d\n",
978 		pp->fast_centronics, pp->fast_compat,
979 		pp->wsrv_retry, pp->wait_for_busy,
980 		pp->data_setup_time, pp->strobe_pulse_width,
981 		pp->xfer_parms.write_timeout);
982 }
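
/*
 * For reference, a hypothetical ecpp.conf fragment illustrating the
 * properties read above (values are examples only, not recommendations):
 *
 *	fast-centronics="false";
 *	fast-1284-compatible="true";
 *	centronics-init-seq="false";
 *	centronics-retry=750;
 *	centronics-wait-for-busy=1000;
 *	ecpp-transfer-timeout=90;
 */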
983 
984 /*ARGSUSED*/
985 int
986 ecpp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
987 {
988 	dev_t	dev = (dev_t)arg;
989 	struct ecppunit *pp;
990 	int	instance, ret;
991 
992 	instance = getminor(dev);
993 
994 	switch (infocmd) {
995 	case DDI_INFO_DEVT2DEVINFO:
996 		pp = ddi_get_soft_state(ecppsoft_statep, instance);
997 		if (pp != NULL) {
998 			*result = pp->dip;
999 			ret = DDI_SUCCESS;
1000 		} else {
1001 			ret = DDI_FAILURE;
1002 		}
1003 		break;
1004 
1005 	case DDI_INFO_DEVT2INSTANCE:
1006 		*result = (void *)(uintptr_t)instance;
1007 		ret = DDI_SUCCESS;
1008 		break;
1009 
1010 	default:
1011 		ret = DDI_FAILURE;
1012 		break;
1013 	}
1014 
1015 	return (ret);
1016 }
1017 
1018 /*ARGSUSED2*/
1019 static int
1020 ecpp_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *credp)
1021 {
1022 	struct ecppunit *pp;
1023 	int		instance;
1024 	struct stroptions *sop;
1025 	mblk_t		*mop;
1026 
1027 	instance = getminor(*dev);
1028 
1029 	if (instance < 0) {
1030 		return (ENXIO);
1031 	}
1032 
1033 	pp = (struct ecppunit *)ddi_get_soft_state(ecppsoft_statep, instance);
1034 
1035 	if (pp == NULL) {
1036 		return (ENXIO);
1037 	}
1038 
1039 	mutex_enter(&pp->umutex);
1040 
1041 	/*
1042 	 * Parallel port is an exclusive-use device
1043 	 * thus providing print job integrity
1044 	 */
1045 	if (pp->oflag == TRUE) {
1046 		ecpp_error(pp->dip, "ecpp open failed");
1047 		mutex_exit(&pp->umutex);
1048 		return (EBUSY);
1049 	}
1050 
1051 	pp->oflag = TRUE;
1052 
1053 	/* initialize state variables */
1054 	pp->prn_timeouts = prn_timeouts_default;
1055 	pp->xfer_parms = default_xfer_parms;
1056 	pp->current_mode = ECPP_CENTRONICS;
1057 	pp->backchannel = ECPP_CENTRONICS;
1058 	pp->current_phase = ECPP_PHASE_PO;
1059 	pp->port = ECPP_PORT_DMA;
1060 	pp->instance = instance;
1061 	pp->timeout_error = 0;
1062 	pp->saved_dsr = DSR_READ(pp);
1063 	pp->ecpp_drain_counter = 0;
1064 	pp->dma_cancelled = FALSE;
1065 	pp->io_mode = ECPP_DMA;
1066 	pp->joblen = 0;
1067 	pp->tfifo_intr = 0;
1068 	pp->softintr_pending = 0;
1069 	pp->nread = 0;
1070 
1071 	/* clear the state flag */
1072 	pp->e_busy = ECPP_IDLE;
1073 
1074 	pp->readq = RD(q);
1075 	pp->writeq = WR(q);
1076 	pp->msg = NULL;
1077 
1078 	RD(q)->q_ptr = WR(q)->q_ptr = (caddr_t)pp;
1079 
1080 	/*
1081 	 * Get ready: check host/peripheral, negotiate into default mode
1082 	 */
1083 	if (ecpp_reset_port_regs(pp) == FAILURE) {
1084 		mutex_exit(&pp->umutex);
1085 		return (EIO);
1086 	}
1087 
1088 	mutex_exit(&pp->umutex);
1089 
1090 	/*
1091 	 * Configure the Stream head and enable the Stream
1092 	 */
1093 	if (!(mop = allocb(sizeof (struct stroptions), BPRI_MED))) {
1094 		return (EAGAIN);
1095 	}
1096 
1097 	mop->b_datap->db_type = M_SETOPTS;
1098 	mop->b_wptr += sizeof (struct stroptions);
1099 
1100 	/*
1101 	 * if device is open with O_NONBLOCK flag set, let read(2) return 0
1102 	 * if no data waiting to be read.  Writes will block on flow control.
1103 	 */
1104 	sop = (struct stroptions *)mop->b_rptr;
1105 	sop->so_flags = SO_HIWAT | SO_LOWAT | SO_NDELON | SO_MREADON;
1106 	sop->so_hiwat = ECPPHIWAT;
1107 	sop->so_lowat = ECPPLOWAT;
1108 
1109 	/* enable the stream */
1110 	qprocson(q);
1111 
1112 	putnext(q, mop);
1113 
1114 	mutex_enter(&pp->umutex);
1115 
1116 	ecpp_default_negotiation(pp);
1117 
1118 	/* go revidle */
1119 	(void) ecpp_idle_phase(pp);
1120 
1121 	ecpp_error(pp->dip,
1122 		"ecpp_open: mode=%x, phase=%x ecr=%x, dsr=%x, dcr=%x\n",
1123 		pp->current_mode, pp->current_phase,
1124 		ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
1125 
1126 	mutex_exit(&pp->umutex);
1127 
1128 	return (0);
1129 }
1130 
1131 /*ARGSUSED1*/
1132 static int
1133 ecpp_close(queue_t *q, int flag, cred_t *cred_p)
1134 {
1135 	struct ecppunit *pp;
1136 	timeout_id_t	timeout_id, fifo_timer_id, wsrv_timer_id;
1137 
1138 	pp = (struct ecppunit *)q->q_ptr;
1139 
1140 	ecpp_error(pp->dip, "ecpp_close: entering ...\n");
1141 
1142 	mutex_enter(&pp->umutex);
1143 
1144 	/*
1145 	 * ecpp_close() will continue to loop until the
1146 	 * queue has been drained or the thread
1147 	 * has received a SIG.  Typically, when the queue
1148 	 * has data, the port will be ECPP_BUSY.  However,
1149 	 * after a dma completes and before the wsrv
1150 	 * starts the next transfer, the port may be IDLE.
1151 	 * In this case, ecpp_close() will loop within this
1152 	 * while (qsize) segment.  Since ecpp_wsrv() runs
1153 	 * at software interrupt level, this shouldn't loop
1154 	 * very long.
1155 	 */
1156 	while (pp->e_busy != ECPP_IDLE || qsize(WR(q))) {
1157 		if (!cv_wait_sig(&pp->pport_cv, &pp->umutex)) {
1158 			ecpp_error(pp->dip, "ecpp_close:B: received SIG\n");
1159 			/*
1160 			 * Returning from a signal such as
1161 			 * SIGTERM or SIGKILL
1162 			 */
1163 			ecpp_flush(pp, FWRITE);
1164 			break;
1165 		} else {
1166 			ecpp_error(pp->dip, "ecpp_close:rcvd cv-sig\n");
1167 		}
1168 	}
1169 
1170 	ecpp_error(pp->dip, "ecpp_close: joblen=%d, ctx_cf=%d, "
1171 			"qsize(WR(q))=%d, qsize(RD(q))=%d\n",
1172 			pp->joblen, pp->ctx_cf, qsize(pp->writeq), qsize(q));
1173 
1174 	/*
1175 	 * Cancel all timeouts, disable interrupts
1176 	 *
1177 	 * Note that we can't call untimeout(9F) with the mutex held:
1178 	 * callout may be blocked on the same mutex, and untimeout() will
1179 	 * cv_wait() while callout is executing, thus creating a deadlock
1180 	 * So we zero the timeout id's inside mutex and call untimeout later
1181 	 */
1182 	timeout_id = pp->timeout_id;
1183 	fifo_timer_id = pp->fifo_timer_id;
1184 	wsrv_timer_id = pp->wsrv_timer_id;
1185 
1186 	pp->timeout_id = pp->fifo_timer_id = pp->wsrv_timer_id = 0;
1187 
1188 	pp->softintr_pending = 0;
1189 	pp->dma_cancelled = TRUE;
1190 	ECPP_MASK_INTR(pp);
1191 
1192 	mutex_exit(&pp->umutex);
1193 
1194 	qprocsoff(q);
1195 
1196 	if (timeout_id) {
1197 		(void) untimeout(timeout_id);
1198 	}
1199 	if (fifo_timer_id) {
1200 		(void) untimeout(fifo_timer_id);
1201 	}
1202 	if (wsrv_timer_id) {
1203 		(void) untimeout(wsrv_timer_id);
1204 	}
1205 
1206 	mutex_enter(&pp->umutex);
1207 
1208 	/* set link to Compatible mode */
1209 	if ((pp->current_mode == ECPP_ECP_MODE) &&
1210 	    (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
1211 		(void) ecp_reverse2forward(pp);
1212 	}
1213 
1214 	(void) ecpp_1284_termination(pp);
1215 
1216 	pp->oflag = FALSE;
1217 	q->q_ptr = WR(q)->q_ptr = NULL;
1218 	pp->readq = pp->writeq = NULL;
1219 	pp->msg = NULL;
1220 
1221 	ecpp_error(pp->dip, "ecpp_close: ecr=%x, dsr=%x, dcr=%x\n",
1222 		ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
1223 
1224 	mutex_exit(&pp->umutex);
1225 
1226 	return (0);
1227 }
1228 
1229 /*
1230  * standard put procedure for ecpp
1231  */
1232 static int
1233 ecpp_wput(queue_t *q, mblk_t *mp)
1234 {
1235 	struct msgb *nmp;
1236 	struct ecppunit *pp;
1237 
1238 	pp = (struct ecppunit *)q->q_ptr;
1239 
1240 	if (!mp) {
1241 		return (0);
1242 	}
1243 
1244 	if ((mp->b_wptr - mp->b_rptr) <= 0) {
1245 		ecpp_error(pp->dip,
1246 			"ecpp_wput:bogus packet received mp=%x\n", mp);
1247 		freemsg(mp);
1248 		return (0);
1249 	}
1250 
1251 	switch (DB_TYPE(mp)) {
1252 	case M_DATA:
1253 		/*
1254 		 * This is a quick fix for multiple message block problem,
1255 		 * it will be changed later with better performance code.
1256 		 */
1257 		if (mp->b_cont) {
1258 			/*
1259 			 * mblk has scattered data ... do msgpullup
1260 			 * if it fails, continue with the current mblk
1261 			 */
1262 			if ((nmp = msgpullup(mp, -1)) != NULL) {
1263 				freemsg(mp);
1264 				mp = nmp;
1265 				ecpp_error(pp->dip,
1266 				    "ecpp_wput:msgpullup: mp=%p len=%d\n",
1267 				    mp, mp->b_wptr - mp->b_rptr);
1268 			}
1269 		}
1270 
1271 		/* let ecpp_wsrv() concatenate small blocks */
1272 		(void) putq(q, mp);
1273 
1274 		break;
1275 
1276 	case M_CTL:
1277 		(void) putq(q, mp);
1278 
1279 		break;
1280 
1281 	case M_IOCTL: {
1282 		struct iocblk *iocbp;
1283 
1284 		iocbp = (struct iocblk *)mp->b_rptr;
1285 
1286 		ecpp_error(pp->dip, "ecpp_wput:M_IOCTL %x\n", iocbp->ioc_cmd);
1287 
1288 		mutex_enter(&pp->umutex);
1289 
1290 		/* TESTIO and GET_STATUS can be used during transfer */
1291 		if ((pp->e_busy == ECPP_BUSY) &&
1292 		    (iocbp->ioc_cmd != BPPIOC_TESTIO) &&
1293 		    (iocbp->ioc_cmd != PRNIOC_GET_STATUS)) {
1294 			mutex_exit(&pp->umutex);
1295 			(void) putq(q, mp);
1296 		} else {
1297 			mutex_exit(&pp->umutex);
1298 			ecpp_putioc(q, mp);
1299 		}
1300 
1301 		break;
1302 	}
1303 
1304 	case M_IOCDATA: {
1305 		struct copyresp *csp;
1306 
1307 		ecpp_error(pp->dip, "ecpp_wput:M_IOCDATA\n");
1308 
1309 		csp = (struct copyresp *)mp->b_rptr;
1310 
1311 		/*
1312 		 * If copy request failed, quit now
1313 		 */
1314 		if (csp->cp_rval != 0) {
1315 			freemsg(mp);
1316 			return (0);
1317 		}
1318 
1319 		switch (csp->cp_cmd) {
1320 		case ECPPIOC_SETPARMS:
1321 		case ECPPIOC_SETREGS:
1322 		case ECPPIOC_SETPORT:
1323 		case ECPPIOC_SETDATA:
1324 		case PRNIOC_SET_IFCAP:
1325 		case PRNIOC_SET_TIMEOUTS:
1326 			/*
1327 			 * need to retrieve and use the data, but if the
1328 			 * device is busy, wait.
1329 			 */
1330 			(void) putq(q, mp);
1331 			break;
1332 
1333 		case ECPPIOC_GETPARMS:
1334 		case ECPPIOC_GETREGS:
1335 		case ECPPIOC_GETPORT:
1336 		case ECPPIOC_GETDATA:
1337 		case BPPIOC_GETERR:
1338 		case BPPIOC_TESTIO:
1339 		case PRNIOC_GET_IFCAP:
1340 		case PRNIOC_GET_STATUS:
1341 		case PRNIOC_GET_1284_STATUS:
1342 		case PRNIOC_GET_TIMEOUTS:
1343 			/* data transferred to user space okay */
1344 			ecpp_ack_ioctl(q, mp);
1345 			break;
1346 
1347 		case ECPPIOC_GETDEVID:
1348 			ecpp_wput_iocdata_devid(q, mp,
1349 				offsetof(struct ecpp_device_id, rlen));
1350 			break;
1351 
1352 		case PRNIOC_GET_1284_DEVID:
1353 			ecpp_wput_iocdata_devid(q, mp,
1354 				offsetof(struct prn_1284_device_id, id_rlen));
1355 			break;
1356 
1357 		case PRNIOC_GET_IFINFO:
1358 			ecpp_wput_iocdata_devid(q, mp,
1359 				offsetof(struct prn_interface_info, if_rlen));
1360 			break;
1361 
1362 		default:
1363 			ecpp_nack_ioctl(q, mp, EINVAL);
1364 			break;
1365 		}
1366 
1367 		break;
1368 	}
1369 
1370 	case M_FLUSH:
1371 		ecpp_error(pp->dip, "ecpp_wput:M_FLUSH\n");
1372 
1373 		if (*mp->b_rptr & FLUSHW) {
1374 			mutex_enter(&pp->umutex);
1375 			ecpp_flush(pp, FWRITE);
1376 			mutex_exit(&pp->umutex);
1377 		}
1378 
1379 		if (*mp->b_rptr & FLUSHR) {
1380 			mutex_enter(&pp->umutex);
1381 			ecpp_flush(pp, FREAD);
1382 			mutex_exit(&pp->umutex);
1383 			qreply(q, mp);
1384 		} else {
1385 			freemsg(mp);
1386 		}
1387 
1388 		break;
1389 
1390 	case M_READ:
1391 		/*
1392 		 * When the user calls read(2), M_READ message is sent to us,
1393 		 * first byte of which is the number of requested bytes
1394 		 * We add up user requests and use resulting number
1395 		 * to calculate the reverse transfer block size
1396 		 */
1397 		mutex_enter(&pp->umutex);
1398 		if (pp->e_busy == ECPP_IDLE) {
1399 			pp->nread += *(size_t *)mp->b_rptr;
1400 			ecpp_error(pp->dip, "ecpp_wput: M_READ %d", pp->nread);
1401 			freemsg(mp);
1402 		} else {
1403 			ecpp_error(pp->dip, "ecpp_wput: M_READ queueing");
1404 			(void) putq(q, mp);
1405 		}
1406 		mutex_exit(&pp->umutex);
1407 		break;
1408 
1409 	default:
1410 		ecpp_error(pp->dip, "ecpp_wput: bad messagetype 0x%x\n",
1411 		    DB_TYPE(mp));
1412 		freemsg(mp);
1413 		break;
1414 	}
1415 
1416 	return (0);
1417 }
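
/*
 * In short: M_DATA and M_CTL are always queued for ecpp_wsrv(); an
 * M_IOCTL is queued while a transfer is active (BPPIOC_TESTIO and
 * PRNIOC_GET_STATUS excepted) and otherwise goes straight to
 * ecpp_putioc(); for M_IOCDATA the "set" ioctls are queued until the
 * port is free, the "get" acknowledgements complete inline, and the
 * device-id style ioctls go through ecpp_wput_iocdata_devid();
 * M_FLUSH is handled in place, and M_READ is either counted in
 * pp->nread (idle) or queued (busy).
 */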
1418 
1419 /*
1420  * Process ECPPIOC_GETDEVID-like ioctls
1421  */
1422 static void
1423 ecpp_wput_iocdata_devid(queue_t *q, mblk_t *mp, uintptr_t rlen_offset)
1424 {
1425 	struct copyresp		*csp;
1426 	struct ecpp_copystate	*stp;
1427 	mblk_t			*datamp;
1428 
1429 	csp = (struct copyresp *)mp->b_rptr;
1430 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
1431 
1432 	/* determine the state of copyin/copyout process */
1433 	switch (stp->state) {
1434 	case ECPP_STRUCTIN:
1435 		/* user structure has arrived */
1436 		(void) putq(q, mp);
1437 		break;
1438 
1439 	case ECPP_ADDROUT:
1440 		/*
1441 		 * data transferred to user space okay
1442 		 * now update user structure
1443 		 */
1444 		datamp = allocb(sizeof (int), BPRI_MED);
1445 		if (datamp == NULL) {
1446 			ecpp_nack_ioctl(q, mp, ENOSR);
1447 			break;
1448 		}
1449 
1450 		*(int *)datamp->b_rptr =
1451 				*(int *)((char *)&stp->un + rlen_offset);
1452 		stp->state = ECPP_STRUCTOUT;
1453 
1454 		mcopyout(mp, csp->cp_private, sizeof (int),
1455 			(char *)stp->uaddr + rlen_offset, datamp);
1456 		qreply(q, mp);
1457 		break;
1458 
1459 	case ECPP_STRUCTOUT:
1460 		/* user structure was updated okay */
1461 		freemsg(csp->cp_private);
1462 		ecpp_ack_ioctl(q, mp);
1463 		break;
1464 
1465 	default:
1466 		ecpp_nack_ioctl(q, mp, EINVAL);
1467 		break;
1468 	}
1469 }
1470 
1471 static uchar_t
1472 ecpp_get_error_status(uchar_t status)
1473 {
1474 	uchar_t pin_status = 0;
1475 
1476 	if (!(status & ECPP_nERR)) {
1477 		pin_status |= BPP_ERR_ERR;
1478 	}
1479 
1480 	if (status & ECPP_PE) {
1481 		pin_status |= BPP_PE_ERR;
1482 	}
1483 
1484 	if (!(status & ECPP_SLCT)) {
1485 		pin_status |= BPP_SLCT_ERR;
1486 	}
1487 
1488 	if (!(status & ECPP_nBUSY)) {
1489 		pin_status |= BPP_SLCT_ERR;
1490 	}
1491 
1492 	return (pin_status);
1493 }
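
/*
 * The mask built above becomes bpp_error_status.pin_status, which the
 * BPPIOC_GETERR handler in ecpp_putioc() copies out to the application.
 */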
1494 
1495 /*
1496  * ioctl handler for output PUT procedure.
1497  */
1498 static void
1499 ecpp_putioc(queue_t *q, mblk_t *mp)
1500 {
1501 	struct iocblk	*iocbp;
1502 	struct ecppunit *pp;
1503 
1504 	pp = (struct ecppunit *)q->q_ptr;
1505 
1506 	iocbp = (struct iocblk *)mp->b_rptr;
1507 
1508 	/* I_STR ioctls are invalid */
1509 	if (iocbp->ioc_count != TRANSPARENT) {
1510 		ecpp_nack_ioctl(q, mp, EINVAL);
1511 		return;
1512 	}
1513 
1514 	switch (iocbp->ioc_cmd) {
1515 	case ECPPIOC_SETPARMS: {
1516 		mcopyin(mp, NULL, sizeof (struct ecpp_transfer_parms), NULL);
1517 		qreply(q, mp);
1518 		break;
1519 	}
1520 
1521 	case ECPPIOC_GETPARMS: {
1522 		struct ecpp_transfer_parms xfer_parms;
1523 
1524 		mutex_enter(&pp->umutex);
1525 
1526 		pp->xfer_parms.mode = pp->current_mode;
1527 		xfer_parms = pp->xfer_parms;
1528 
1529 		mutex_exit(&pp->umutex);
1530 
1531 		ecpp_putioc_copyout(q, mp, &xfer_parms, sizeof (xfer_parms));
1532 		break;
1533 	}
1534 
1535 	case ECPPIOC_SETREGS: {
1536 		mutex_enter(&pp->umutex);
1537 		if (pp->current_mode != ECPP_DIAG_MODE) {
1538 			mutex_exit(&pp->umutex);
1539 			ecpp_nack_ioctl(q, mp, EINVAL);
1540 			break;
1541 		}
1542 		mutex_exit(&pp->umutex);
1543 
1544 		mcopyin(mp, NULL, sizeof (struct ecpp_regs), NULL);
1545 		qreply(q, mp);
1546 		break;
1547 	}
1548 
1549 	case ECPPIOC_GETREGS: {
1550 		struct ecpp_regs rg;
1551 
1552 		mutex_enter(&pp->umutex);
1553 
1554 		if (pp->current_mode != ECPP_DIAG_MODE) {
1555 			mutex_exit(&pp->umutex);
1556 			ecpp_nack_ioctl(q, mp, EINVAL);
1557 			break;
1558 		}
1559 
1560 		rg.dsr = DSR_READ(pp);
1561 		rg.dcr = DCR_READ(pp);
1562 
1563 		mutex_exit(&pp->umutex);
1564 
1565 		ecpp_error(pp->dip, "ECPPIOC_GETREGS: dsr=%x,dcr=%x\n",
1566 							rg.dsr, rg.dcr);
1567 
1568 		/* these bits must be 1 */
1569 		rg.dsr |= ECPP_SETREGS_DSR_MASK;
1570 		rg.dcr |= ECPP_SETREGS_DCR_MASK;
1571 
1572 		ecpp_putioc_copyout(q, mp, &rg, sizeof (rg));
1573 		break;
1574 	}
1575 
1576 	case ECPPIOC_SETPORT:
1577 	case ECPPIOC_SETDATA: {
1578 		mutex_enter(&pp->umutex);
1579 		if (pp->current_mode != ECPP_DIAG_MODE) {
1580 			mutex_exit(&pp->umutex);
1581 			ecpp_nack_ioctl(q, mp, EINVAL);
1582 			break;
1583 		}
1584 		mutex_exit(&pp->umutex);
1585 
1586 		/*
1587 		 * each of the commands fetches a byte quantity.
1588 		 */
1589 		mcopyin(mp, NULL, sizeof (uchar_t), NULL);
1590 		qreply(q, mp);
1591 		break;
1592 	}
1593 
1594 	case ECPPIOC_GETDATA:
1595 	case ECPPIOC_GETPORT: {
1596 		uchar_t	byte;
1597 
1598 		mutex_enter(&pp->umutex);
1599 
1600 		/* must be in diagnostic mode for these commands to work */
1601 		if (pp->current_mode != ECPP_DIAG_MODE) {
1602 			mutex_exit(&pp->umutex);
1603 			ecpp_nack_ioctl(q, mp, EINVAL);
1604 			break;
1605 		}
1606 
1607 		if (iocbp->ioc_cmd == ECPPIOC_GETPORT) {
1608 			byte = pp->port;
1609 		} else if (iocbp->ioc_cmd == ECPPIOC_GETDATA) {
1610 			switch (pp->port) {
1611 			case ECPP_PORT_PIO:
1612 				byte = DATAR_READ(pp);
1613 				break;
1614 			case ECPP_PORT_TDMA:
1615 				byte = TFIFO_READ(pp);
1616 				ecpp_error(pp->dip, "GETDATA=0x%x\n", byte);
1617 				break;
1618 			default:
1619 				ecpp_nack_ioctl(q, mp, EINVAL);
1620 				break;
1621 			}
1622 		} else {
1623 			mutex_exit(&pp->umutex);
1624 			ecpp_error(pp->dip, "weird command");
1625 			ecpp_nack_ioctl(q, mp, EINVAL);
1626 			break;
1627 		}
1628 
1629 		mutex_exit(&pp->umutex);
1630 
1631 		ecpp_putioc_copyout(q, mp, &byte, sizeof (byte));
1632 
1633 		break;
1634 	}
1635 
1636 	case BPPIOC_GETERR: {
1637 		struct bpp_error_status bpp_status;
1638 
1639 		mutex_enter(&pp->umutex);
1640 
1641 		bpp_status.timeout_occurred = pp->timeout_error;
1642 		bpp_status.bus_error = 0;	/* not used */
1643 		bpp_status.pin_status = ecpp_get_error_status(pp->saved_dsr);
1644 
1645 		mutex_exit(&pp->umutex);
1646 
1647 		ecpp_putioc_copyout(q, mp, &bpp_status, sizeof (bpp_status));
1648 
1649 		break;
1650 	}
1651 
1652 	case BPPIOC_TESTIO: {
1653 		mutex_enter(&pp->umutex);
1654 
1655 		if (!((pp->current_mode == ECPP_CENTRONICS) ||
1656 				(pp->current_mode == ECPP_COMPAT_MODE))) {
1657 			ecpp_nack_ioctl(q, mp, EINVAL);
1658 		} else {
1659 			pp->saved_dsr = DSR_READ(pp);
1660 
1661 			if ((pp->saved_dsr & ECPP_PE) ||
1662 			    !(pp->saved_dsr & ECPP_SLCT) ||
1663 			    !(pp->saved_dsr & ECPP_nERR)) {
1664 				ecpp_nack_ioctl(q, mp, EIO);
1665 			} else {
1666 				ecpp_ack_ioctl(q, mp);
1667 			}
1668 		}
1669 
1670 		mutex_exit(&pp->umutex);
1671 
1672 		break;
1673 	}
1674 
1675 	case PRNIOC_RESET:
1676 		/*
1677 		 * Initialize interface only if no transfer is in progress
1678 		 */
1679 		mutex_enter(&pp->umutex);
1680 		if (pp->e_busy == ECPP_BUSY) {
1681 			mutex_exit(&pp->umutex);
1682 			ecpp_nack_ioctl(q, mp, EIO);
1683 		} else {
1684 			(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
1685 
1686 			DCR_WRITE(pp, ECPP_SLCTIN);
1687 			drv_usecwait(2);
1688 			DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
1689 
1690 			ecpp_default_negotiation(pp);
1691 
1692 			mutex_exit(&pp->umutex);
1693 			ecpp_ack_ioctl(q, mp);
1694 		}
1695 		break;
1696 
1697 	case PRNIOC_GET_IFCAP: {
1698 		uint_t		ifcap;
1699 
1700 		mutex_enter(&pp->umutex);
1701 
1702 		ifcap = ecpp_get_prn_ifcap(pp);
1703 
1704 		mutex_exit(&pp->umutex);
1705 
1706 		ecpp_putioc_copyout(q, mp, &ifcap, sizeof (ifcap));
1707 		break;
1708 	}
1709 
1710 	case PRNIOC_SET_IFCAP: {
1711 		mcopyin(mp, NULL, sizeof (uint_t), NULL);
1712 		qreply(q, mp);
1713 		break;
1714 	}
1715 
1716 	case PRNIOC_GET_TIMEOUTS: {
1717 		struct prn_timeouts timeouts;
1718 
1719 		mutex_enter(&pp->umutex);
1720 		timeouts = pp->prn_timeouts;
1721 		mutex_exit(&pp->umutex);
1722 
1723 		ecpp_putioc_copyout(q, mp, &timeouts, sizeof (timeouts));
1724 
1725 		break;
1726 	}
1727 
1728 	case PRNIOC_SET_TIMEOUTS:
1729 		mcopyin(mp, NULL, sizeof (struct prn_timeouts),
1730 				*(caddr_t *)(void *)mp->b_cont->b_rptr);
1731 		qreply(q, mp);
1732 		break;
1733 
1734 	case PRNIOC_GET_STATUS: {
1735 		uint8_t	dsr;
1736 		uint_t	status;
1737 
1738 		mutex_enter(&pp->umutex);
1739 
1740 		/* DSR only makes sense in Centronics & Compat mode */
1741 		if (pp->current_mode == ECPP_CENTRONICS ||
1742 		    pp->current_mode == ECPP_COMPAT_MODE) {
1743 			dsr = DSR_READ(pp);
1744 			if ((dsr & ECPP_PE) ||
1745 			    !(dsr & ECPP_SLCT) || !(dsr & ECPP_nERR)) {
1746 				status = PRN_ONLINE;
1747 			} else {
1748 				status = PRN_ONLINE | PRN_READY;
1749 			}
1750 		} else {
1751 			status = PRN_ONLINE | PRN_READY;
1752 		}
1753 
1754 		mutex_exit(&pp->umutex);
1755 
1756 		ecpp_putioc_copyout(q, mp, &status, sizeof (status));
1757 		break;
1758 	}
1759 
1760 	case PRNIOC_GET_1284_STATUS: {
1761 		uint8_t	dsr;
1762 		uchar_t	status;
1763 
1764 		mutex_enter(&pp->umutex);
1765 
1766 		/* status only makes sense in Centronics & Compat mode */
1767 		if (pp->current_mode != ECPP_COMPAT_MODE &&
1768 		    pp->current_mode != ECPP_CENTRONICS) {
1769 			mutex_exit(&pp->umutex);
1770 			ecpp_nack_ioctl(q, mp, EINVAL);
1771 			break;
1772 		}
1773 
1774 		dsr = DSR_READ(pp);		/* read status */
1775 
1776 		mutex_exit(&pp->umutex);
1777 
1778 		ecpp_error(pp->dip, "PRNIOC_GET_1284_STATUS: %x\n", dsr);
1779 
1780 		status = (dsr & (ECPP_SLCT | ECPP_PE | ECPP_nERR)) |
1781 			(~dsr & ECPP_nBUSY);
1782 
1783 		ecpp_putioc_copyout(q, mp, &status, sizeof (status));
1784 		break;
1785 	}
1786 
1787 	case ECPPIOC_GETDEVID:
1788 		ecpp_putioc_stateful_copyin(q, mp,
1789 					sizeof (struct ecpp_device_id));
1790 		break;
1791 
1792 	case PRNIOC_GET_1284_DEVID:
1793 		ecpp_putioc_stateful_copyin(q, mp,
1794 					sizeof (struct prn_1284_device_id));
1795 		break;
1796 
1797 	case PRNIOC_GET_IFINFO:
1798 		ecpp_putioc_stateful_copyin(q, mp,
1799 					sizeof (struct prn_interface_info));
1800 		break;
1801 
1802 	default:
1803 		ecpp_error(pp->dip, "putioc: unknown IOCTL: %x\n",
1804 			iocbp->ioc_cmd);
1805 		ecpp_nack_ioctl(q, mp, EINVAL);
1806 		break;
1807 	}
1808 }
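
/*
 * Hypothetical userland sketch (not part of the driver) showing how an
 * application might use one of the transparent ioctls handled above;
 * the device path is an assumption and error handling is omitted:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ecppio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct ecpp_transfer_parms xp;
 *		int fd = open("/dev/ecpp0", O_RDWR);
 *
 *		if (fd != -1 && ioctl(fd, ECPPIOC_GETPARMS, &xp) == 0)
 *			(void) printf("mode=%d timeout=%d\n",
 *			    xp.mode, xp.write_timeout);
 *		return (0);
 *	}
 */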
1809 
1810 /*
1811  * allocate mblk and copyout the requested number of bytes
1812  */
1813 static void
1814 ecpp_putioc_copyout(queue_t *q, mblk_t *mp, void *buf, int len)
1815 {
1816 	mblk_t	*tmp;
1817 
1818 	if ((tmp = allocb(len, BPRI_MED)) == NULL) {
1819 		ecpp_nack_ioctl(q, mp, ENOSR);
1820 		return;
1821 	}
1822 
1823 	bcopy(buf, tmp->b_wptr, len);
1824 
1825 	mcopyout(mp, NULL, len, NULL, tmp);
1826 	qreply(q, mp);
1827 }
1828 
1829 /*
1830  * copyin the structure using struct ecpp_copystate
1831  */
1832 static void
1833 ecpp_putioc_stateful_copyin(queue_t *q, mblk_t *mp, size_t size)
1834 {
1835 	mblk_t *tmp;
1836 	struct ecpp_copystate *stp;
1837 
1838 	if ((tmp = allocb(sizeof (struct ecpp_copystate), BPRI_MED)) == NULL) {
1839 		ecpp_nack_ioctl(q, mp, EAGAIN);
1840 		return;
1841 	}
1842 
1843 	stp = (struct ecpp_copystate *)tmp->b_rptr;
1844 	stp->state = ECPP_STRUCTIN;
1845 	stp->uaddr = *(caddr_t *)mp->b_cont->b_rptr;
1846 
1847 	tmp->b_wptr += sizeof (struct ecpp_copystate);
1848 
1849 	mcopyin(mp, tmp, size, stp->uaddr);
1850 	qreply(q, mp);
1851 }
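
/*
 * Together with ecpp_wput_iocdata_devid() above, this implements the
 * three-step copy sequence used by the device-id style ioctls:
 * ECPP_STRUCTIN (the user structure has arrived and the request is
 * queued for the service routine), ECPP_ADDROUT (the id data has been
 * copied out; the rlen field is copied out next), and ECPP_STRUCTOUT
 * (the updated structure reached user space and the ioctl is acked).
 */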
1852 
1853 /*
1854  * read queue is only used when the peripheral sends data faster
1855  * than the application consumes it;
1856  * once the low water mark is reached, this routine will be scheduled
1857  */
1858 static int
1859 ecpp_rsrv(queue_t *q)
1860 {
1861 	struct msgb	*mp;
1862 
1863 	/*
1864 	 * send data upstream until next queue is full or the queue is empty
1865 	 */
1866 	while (canputnext(q) && (mp = getq(q))) {
1867 		putnext(q, mp);
1868 	}
1869 
1870 	/*
1871 	 * if there is still space on the queue, enable backchannel
1872 	 */
1873 	if (canputnext(q)) {
1874 		struct ecppunit	*pp = (struct ecppunit *)q->q_ptr;
1875 
1876 		mutex_enter(&pp->umutex);
1877 
1878 		if (pp->e_busy == ECPP_IDLE) {
1879 			(void) ecpp_idle_phase(pp);
1880 			cv_signal(&pp->pport_cv);  /* signal ecpp_close() */
1881 		}
1882 
1883 		mutex_exit(&pp->umutex);
1884 	}
1885 
1886 	return (0);
1887 }
1888 
1889 static int
1890 ecpp_wsrv(queue_t *q)
1891 {
1892 	struct ecppunit	*pp = (struct ecppunit *)q->q_ptr;
1893 	struct msgb	*mp;
1894 	size_t		len, total_len;
1895 	size_t		my_ioblock_sz;
1896 	caddr_t		my_ioblock;
1897 	caddr_t		start_addr;
1898 
1899 	mutex_enter(&pp->umutex);
1900 
1901 	ecpp_error(pp->dip, "ecpp_wsrv: e_busy=%x\n", pp->e_busy);
1902 
1903 	/* if channel is actively doing work, wait till completed */
1904 	if (pp->e_busy == ECPP_BUSY || pp->e_busy == ECPP_FLUSH) {
1905 		mutex_exit(&pp->umutex);
1906 		return (0);
1907 	} else if (pp->suspended == TRUE) {
1908 		/*
1909 		 * if the system is about to suspend and ecpp_detach()
1910 		 * is blocked due to active transfers, wake it up and exit
1911 		 */
1912 		cv_signal(&pp->pport_cv);
1913 		mutex_exit(&pp->umutex);
1914 		return (0);
1915 	}
1916 
1917 	/* peripheral status should be okay before starting transfer */
1918 	if (pp->e_busy == ECPP_ERR) {
1919 		if (ecpp_check_status(pp) == FAILURE) {
1920 			if (pp->wsrv_timer_id == 0) {
1921 				ecpp_error(pp->dip, "wsrv: start wsrv_timer\n");
1922 				pp->wsrv_timer_id = timeout(ecpp_wsrv_timer,
1923 					(caddr_t)pp,
1924 					drv_usectohz(pp->wsrv_retry * 1000));
1925 			} else {
1926 				ecpp_error(pp->dip,
1927 					"ecpp_wsrv: wsrv_timer is active\n");
1928 			}
1929 
1930 			mutex_exit(&pp->umutex);
1931 			return (0);
1932 		} else {
1933 			pp->e_busy = ECPP_IDLE;
1934 		}
1935 	}
1936 
1937 	my_ioblock = pp->ioblock;
1938 	my_ioblock_sz = IO_BLOCK_SZ;
1939 
1940 	/*
1941 	 * it's important to null pp->msg here,
1942 	 * cleaning up from the previous transfer attempts
1943 	 */
1944 	pp->msg = NULL;
1945 
1946 	start_addr = NULL;
1947 	len = total_len = 0;
1948 	/*
1949 	 * The following loop is implemented to gather the
1950 	 * many small writes that the lp subsystem makes and
1951 	 * compile them into one large dma transfer. The len and
1952 	 * total_len variables are a running count of the number of
1953 	 * bytes that have been gathered. They are bcopied to the
1954 	 * ioblock buffer. The pp->e_busy is set to E_BUSY as soon as
1955 	 * we start gathering packets to indicate the following transfer.
1956 	 */
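	/*
	 * The loop below exits when the queue is empty, or breaks out early
	 * when the gathered data would overflow the ioblock, when an M_IOCTL
	 * or M_CTL message has to be postponed, or when a backchannel
	 * transfer has been started.
	 */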
1957 	while (mp = getq(q)) {
1958 		switch (DB_TYPE(mp)) {
1959 		case M_DATA:
1960 			pp->e_busy = ECPP_BUSY;
1961 			len = mp->b_wptr - mp->b_rptr;
1962 
1963 			if ((total_len == 0) && (len >= my_ioblock_sz)) {
1964 				/*
1965 				 * if the first M_DATA is bigger than ioblock,
1966 				 * just use this mblk and start the transfer
1967 				 */
1968 				total_len = len;
1969 				start_addr = (caddr_t)mp->b_rptr;
1970 				pp->msg = mp;
1971 				goto breakout;
1972 			} else if (total_len + len > my_ioblock_sz) {
1973 				/*
1974 				 * current M_DATA does not fit in ioblock,
1975 				 * put it back and start the transfer
1976 				 */
1977 				(void) putbq(q, mp);
1978 				goto breakout;
1979 			} else {
1980 				/*
1981 				 * otherwise add data to ioblock and free mblk
1982 				 */
1983 				bcopy(mp->b_rptr, my_ioblock, len);
1984 				my_ioblock += len;
1985 				total_len += len;
1986 				start_addr = (caddr_t)pp->ioblock;
1987 				freemsg(mp);
1988 			}
1989 			break;
1990 
1991 		case M_IOCTL:
1992 			/*
1993 			 * Assume a simple loopback test: an application
1994 			 * writes data into the TFIFO, reads it using
1995 			 * ECPPIOC_GETDATA and compares. If the transfer
1996 			 * times out (which is only possible on Grover),
1997 			 * the ioctl might be processed before the data
1998 			 * got to the TFIFO, which leads to miscompare.
1999 			 * So if we met ioctl, postpone it until after xfer.
2000 			 */
2001 			if (total_len > 0) {
2002 				(void) putbq(q, mp);
2003 				goto breakout;
2004 			}
2005 
2006 			ecpp_error(pp->dip, "M_IOCTL.\n");
2007 
2008 			mutex_exit(&pp->umutex);
2009 
2010 			ecpp_putioc(q, mp);
2011 
2012 			mutex_enter(&pp->umutex);
2013 
2014 			break;
2015 
2016 		case M_IOCDATA: {
2017 			struct copyresp *csp = (struct copyresp *)mp->b_rptr;
2018 
2019 			ecpp_error(pp->dip, "M_IOCDATA\n");
2020 
2021 			/*
2022 			 * If copy request failed, quit now
2023 			 */
2024 			if (csp->cp_rval != 0) {
2025 				freemsg(mp);
2026 				break;
2027 			}
2028 
2029 			switch (csp->cp_cmd) {
2030 			case ECPPIOC_SETPARMS:
2031 			case ECPPIOC_SETREGS:
2032 			case ECPPIOC_SETPORT:
2033 			case ECPPIOC_SETDATA:
2034 			case ECPPIOC_GETDEVID:
2035 			case PRNIOC_SET_IFCAP:
2036 			case PRNIOC_GET_1284_DEVID:
2037 			case PRNIOC_SET_TIMEOUTS:
2038 			case PRNIOC_GET_IFINFO:
2039 				ecpp_srvioc(q, mp);
2040 				break;
2041 
2042 			default:
2043 				ecpp_nack_ioctl(q, mp, EINVAL);
2044 				break;
2045 			}
2046 
2047 			break;
2048 		}
2049 
2050 		case M_CTL:
2051 			if (pp->e_busy != ECPP_IDLE) {
2052 				ecpp_error(pp->dip, "wsrv: M_CTL postponed\n");
2053 				(void) putbq(q, mp);
2054 				goto breakout;
2055 			} else {
2056 				ecpp_error(pp->dip, "wsrv: M_CTL\n");
2057 			}
2058 
2059 			/* sanity check */
2060 			if ((mp->b_wptr - mp->b_rptr != sizeof (int)) ||
2061 			    (*(int *)mp->b_rptr != ECPP_BACKCHANNEL)) {
2062 				ecpp_error(pp->dip, "wsrv: bogus M_CTL");
2063 				freemsg(mp);
2064 				break;
2065 			} else {
2066 				freemsg(mp);
2067 			}
2068 
2069 			/* This was a backchannel request */
2070 			(void) ecpp_peripheral2host(pp);
2071 
2072 			/* exit if a transfer has been initiated */
2073 			if (pp->e_busy == ECPP_BUSY) {
2074 				goto breakout;
2075 			}
2076 			break;
2077 
2078 		case M_READ:
2079 			pp->nread += *(size_t *)mp->b_rptr;
2080 			freemsg(mp);
2081 			ecpp_error(pp->dip, "wsrv: M_READ %d", pp->nread);
2082 			break;
2083 
2084 		default:
2085 			ecpp_error(pp->dip, "wsrv: should never get here\n");
2086 			freemsg(mp);
2087 			break;
2088 		}
2089 	}
2090 breakout:
2091 	/*
2092 	 * If total_len > 0 then start the transfer, otherwise goto idle state
2093 	 */
2094 	if (total_len > 0) {
2095 		ecpp_error(pp->dip, "wsrv:starting: total_len=%d\n", total_len);
2096 		pp->e_busy = ECPP_BUSY;
2097 		ecpp_start(pp, start_addr, total_len);
2098 	} else {
2099 		ecpp_error(pp->dip, "wsrv:finishing: ebusy=%x\n", pp->e_busy);
2100 
2101 		/* IDLE if xfer_timeout, or FIFO_EMPTY */
2102 		if (pp->e_busy == ECPP_IDLE) {
2103 			(void) ecpp_idle_phase(pp);
2104 			cv_signal(&pp->pport_cv);  /* signal ecpp_close() */
2105 		}
2106 	}
2107 
2108 	mutex_exit(&pp->umutex);
2109 	return (1);
2110 }
2111 
2112 /*
2113  * Ioctl processor for queued ioctl data transfer messages.
2114  */
2115 static void
2116 ecpp_srvioc(queue_t *q, mblk_t *mp)
2117 {
2118 	struct iocblk	*iocbp;
2119 	struct ecppunit *pp;
2120 
2121 	iocbp = (struct iocblk *)mp->b_rptr;
2122 	pp = (struct ecppunit *)q->q_ptr;
2123 
2124 	switch (iocbp->ioc_cmd) {
2125 	case ECPPIOC_SETPARMS: {
2126 		struct ecpp_transfer_parms *xferp;
2127 
2128 		xferp = (struct ecpp_transfer_parms *)mp->b_cont->b_rptr;
2129 
2130 		if (xferp->write_timeout <= 0 ||
2131 				xferp->write_timeout >= ECPP_MAX_TIMEOUT) {
2132 			ecpp_nack_ioctl(q, mp, EINVAL);
2133 			break;
2134 		}
2135 
2136 		if (!((xferp->mode == ECPP_CENTRONICS) ||
2137 			(xferp->mode == ECPP_COMPAT_MODE) ||
2138 			(xferp->mode == ECPP_NIBBLE_MODE) ||
2139 			(xferp->mode == ECPP_ECP_MODE) ||
2140 			(xferp->mode == ECPP_DIAG_MODE))) {
2141 			ecpp_nack_ioctl(q, mp, EINVAL);
2142 			break;
2143 		}
2144 
2145 		pp->xfer_parms = *xferp;
2146 		pp->prn_timeouts.tmo_forward = pp->xfer_parms.write_timeout;
2147 
2148 		ecpp_error(pp->dip, "srvioc: current_mode =%x new mode=%x\n",
2149 			pp->current_mode, pp->xfer_parms.mode);
2150 
2151 		if (ecpp_mode_negotiation(pp, pp->xfer_parms.mode) == FAILURE) {
2152 			ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2153 		} else {
2154 			/*
2155 			 * mode negotiation was a success.  If in Nibble mode,
2156 			 * check the backchannel and set the phase to REVIDLE.
2157 			 */
2158 			if ((pp->current_mode == ECPP_NIBBLE_MODE) &&
2159 			    (read_nibble_backchan(pp) == FAILURE)) {
2160 				/*
2161 				 * problems reading the backchannel;
2162 				 * we have returned to Centronics,
2163 				 * so the ioctl fails.
2164 				 */
2165 				ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2166 				break;
2167 			}
2168 
2169 			ecpp_ack_ioctl(q, mp);
2170 		}
2171 		if (pp->current_mode != ECPP_DIAG_MODE) {
2172 			pp->port = ECPP_PORT_DMA;
2173 		} else {
2174 			pp->port = ECPP_PORT_PIO;
2175 		}
2176 
2177 		pp->xfer_parms.mode = pp->current_mode;
2178 
2179 		break;
2180 	}
2181 
2182 	case ECPPIOC_SETREGS: {
2183 		struct ecpp_regs *rg;
2184 		uint8_t dcr;
2185 
2186 		rg = (struct ecpp_regs *)mp->b_cont->b_rptr;
2187 
2188 		/* must be in diagnostic mode for these commands to work */
2189 		if (pp->current_mode != ECPP_DIAG_MODE) {
2190 			ecpp_nack_ioctl(q, mp, EINVAL);
2191 			break;
2192 		}
2193 
2194 		/* bits 4-7 must be 1 or return EINVAL */
2195 		if ((rg->dcr & ECPP_SETREGS_DCR_MASK) !=
2196 					ECPP_SETREGS_DCR_MASK) {
2197 			ecpp_nack_ioctl(q, mp, EINVAL);
2198 			break;
2199 		}
2200 
2201 		/* get the old dcr, without the direction bit */
2202 		dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
2203 		/* merge: keep the old upper bits, take the caller's lower bits */
2204 		dcr = (dcr & ECPP_SETREGS_DCR_MASK) |
2205 			(rg->dcr & ~ECPP_SETREGS_DCR_MASK);
2206 		DCR_WRITE(pp, dcr);
2207 		ecpp_error(pp->dip, "ECPPIOC_SETREGS:dcr=%x\n", dcr);
2208 		ecpp_ack_ioctl(q, mp);
2209 		break;
2210 	}
2211 
2212 	case ECPPIOC_SETPORT: {
2213 		uchar_t *port;
2214 
2215 		port = (uchar_t *)mp->b_cont->b_rptr;
2216 
2217 		/* must be in diagnostic mode for these commands to work */
2218 		if (pp->current_mode != ECPP_DIAG_MODE) {
2219 			ecpp_nack_ioctl(q, mp, EINVAL);
2220 			break;
2221 		}
2222 
2223 		switch (*port) {
2224 		case ECPP_PORT_PIO:
2225 			/* put superio into PIO mode */
2226 			ECR_WRITE(pp,
2227 				ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
2228 			pp->port = *port;
2229 			ecpp_ack_ioctl(q, mp);
2230 			break;
2231 
2232 		case ECPP_PORT_TDMA:
2233 			ecpp_error(pp->dip, "SETPORT: to TDMA\n");
2234 			pp->tfifo_intr = 1;
2235 			/* change to mode 110 */
2236 			ECR_WRITE(pp,
2237 				ECR_mode_110 | ECPP_INTR_MASK | ECPP_INTR_SRV);
2238 			pp->port = *port;
2239 			ecpp_ack_ioctl(q, mp);
2240 			break;
2241 
2242 		default:
2243 			ecpp_nack_ioctl(q, mp, EINVAL);
2244 		}
2245 
2246 		break;
2247 	}
2248 
2249 	case ECPPIOC_SETDATA: {
2250 		uchar_t *data;
2251 
2252 		data = (uchar_t *)mp->b_cont->b_rptr;
2253 
2254 		/* must be in diagnostic mode for these commands to work */
2255 		if (pp->current_mode != ECPP_DIAG_MODE) {
2256 			ecpp_nack_ioctl(q, mp, EINVAL);
2257 			break;
2258 		}
2259 
2260 		switch (pp->port) {
2261 		case ECPP_PORT_PIO:
2262 			DATAR_WRITE(pp, *data);
2263 			ecpp_ack_ioctl(q, mp);
2264 			break;
2265 
2266 		case ECPP_PORT_TDMA:
2267 			TFIFO_WRITE(pp, *data);
2268 			ecpp_ack_ioctl(q, mp);
2269 			break;
2270 
2271 		default:
2272 			ecpp_nack_ioctl(q, mp, EINVAL);
2273 		}
2274 
2275 		break;
2276 	}
2277 
2278 	case ECPPIOC_GETDEVID: {
2279 		struct copyresp		*csp;
2280 		struct ecpp_copystate	*stp;
2281 		struct ecpp_device_id	*dp;
2282 		struct ecpp_device_id	id;
2283 
2284 		csp = (struct copyresp *)mp->b_rptr;
2285 		stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2286 		dp = (struct ecpp_device_id *)mp->b_cont->b_rptr;
2287 
2288 #ifdef _MULTI_DATAMODEL
2289 		if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2290 			struct ecpp_device_id32 *dp32;
2291 
2292 			dp32 = (struct ecpp_device_id32 *)dp;
2293 			id.mode = dp32->mode;
2294 			id.len = dp32->len;
2295 			id.addr = (char *)(uintptr_t)dp32->addr;
2296 		} else {
2297 #endif /* _MULTI_DATAMODEL */
2298 			id = *dp;
2299 #ifdef _MULTI_DATAMODEL
2300 		}
2301 #endif /* _MULTI_DATAMODEL */
2302 
2303 		ecpp_srvioc_devid(q, mp, &id, &stp->un.devid.rlen);
2304 		break;
2305 	}
2306 
2307 	case PRNIOC_GET_1284_DEVID: {
2308 		struct copyresp			*csp;
2309 		struct ecpp_copystate		*stp;
2310 		struct prn_1284_device_id	*dp;
2311 		struct ecpp_device_id		id;
2312 
2313 		csp = (struct copyresp *)mp->b_rptr;
2314 		stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2315 		dp = (struct prn_1284_device_id *)mp->b_cont->b_rptr;
2316 
2317 		/* imitate struct ecpp_device_id */
2318 		id.mode = ECPP_NIBBLE_MODE;
2319 
2320 #ifdef _MULTI_DATAMODEL
2321 		if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2322 			struct prn_1284_device_id32 *dp32;
2323 
2324 			dp32 = (struct prn_1284_device_id32 *)dp;
2325 			id.len = dp32->id_len;
2326 			id.addr = (char *)(uintptr_t)dp32->id_data;
2327 		} else {
2328 #endif /* _MULTI_DATAMODEL */
2329 			id.len = dp->id_len;
2330 			id.addr = (char *)dp->id_data;
2331 #ifdef _MULTI_DATAMODEL
2332 		}
2333 #endif /* _MULTI_DATAMODEL */
2334 
2335 		ecpp_srvioc_devid(q, mp, &id,
2336 				(int *)&stp->un.prn_devid.id_rlen);
2337 		break;
2338 	}
2339 
2340 	case PRNIOC_SET_IFCAP: {
2341 		uint_t	ifcap, new_ifcap;
2342 
2343 		ifcap = ecpp_get_prn_ifcap(pp);
2344 		new_ifcap = *(uint_t *)mp->b_cont->b_rptr;
2345 
2346 		if (ifcap == new_ifcap) {
2347 			ecpp_ack_ioctl(q, mp);
2348 			break;
2349 		}
2350 
2351 		/* only changing PRN_BIDI is supported */
2352 		if ((ifcap ^ new_ifcap) & ~PRN_BIDI) {
2353 			ecpp_nack_ioctl(q, mp, EINVAL);
2354 			break;
2355 		}
2356 
2357 		if (new_ifcap & PRN_BIDI) { 	/* go bidirectional */
2358 			ecpp_default_negotiation(pp);
2359 		} else {			/* go unidirectional */
2360 			(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
2361 		}
2362 
2363 		ecpp_ack_ioctl(q, mp);
2364 		break;
2365 	}
2366 
2367 	case PRNIOC_SET_TIMEOUTS: {
2368 		struct prn_timeouts	*prn_timeouts;
2369 
2370 		prn_timeouts = (struct prn_timeouts *)mp->b_cont->b_rptr;
2371 
2372 		if (prn_timeouts->tmo_forward > ECPP_MAX_TIMEOUT) {
2373 			ecpp_nack_ioctl(q, mp, EINVAL);
2374 			break;
2375 		}
2376 
2377 		pp->prn_timeouts = *prn_timeouts;
2378 		pp->xfer_parms.write_timeout = (int)prn_timeouts->tmo_forward;
2379 
2380 		ecpp_ack_ioctl(q, mp);
2381 		break;
2382 	}
2383 
2384 	case PRNIOC_GET_IFINFO:
2385 		ecpp_srvioc_prnif(q, mp);
2386 		break;
2387 
2388 	default:		/* unexpected ioctl type */
2389 		ecpp_nack_ioctl(q, mp, EINVAL);
2390 		break;
2391 	}
2392 }
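
/*
 * For illustration only: a minimal userland sketch of an ECPPIOC_SETPARMS
 * request as served above.  The device path and the chosen values are
 * assumptions; write_timeout is in seconds and must be below
 * ECPP_MAX_TIMEOUT (see the checks above).  This snippet is not part of
 * the driver.
 *
 *	#include <sys/types.h>
 *	#include <sys/ecppio.h>
 *	#include <fcntl.h>
 *	#include <stropts.h>
 *	#include <unistd.h>
 *
 *	int
 *	set_nibble_mode(void)
 *	{
 *		struct ecpp_transfer_parms xp;
 *		int	fd = open("/dev/ecpp0", O_RDWR);
 *
 *		if (fd < 0)
 *			return (-1);
 *		xp.write_timeout = 60;
 *		xp.mode = ECPP_NIBBLE_MODE;
 *		if (ioctl(fd, ECPPIOC_SETPARMS, &xp) < 0) {
 *			(void) close(fd);
 *			return (-1);
 *		}
 *		(void) close(fd);
 *		return (0);
 *	}
 */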
2393 
2394 static void
2395 ecpp_srvioc_devid(queue_t *q, mblk_t *mp, struct ecpp_device_id *id, int *rlen)
2396 {
2397 	struct ecppunit 	*pp;
2398 	struct copyresp		*csp;
2399 	struct ecpp_copystate	*stp;
2400 	int			error;
2401 	int			len;
2402 	int			mode;
2403 	mblk_t			*datamp;
2404 
2405 	pp = (struct ecppunit *)q->q_ptr;
2406 	csp = (struct copyresp *)mp->b_rptr;
2407 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2408 	mode = id->mode;
2409 
2410 	/* check arguments */
2411 	if ((mode < ECPP_CENTRONICS) || (mode > ECPP_ECP_MODE)) {
2412 		ecpp_error(pp->dip, "ecpp_srvioc_devid: mode=%x, len=%x\n",
2413 			mode, id->len);
2414 		ecpp_nack_ioctl(q, mp, EINVAL);
2415 		return;
2416 	}
2417 
2418 	/* Currently only Nibble mode is supported */
2419 	if (mode != ECPP_NIBBLE_MODE) {
2420 		ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2421 		return;
2422 	}
2423 
2424 	if ((id->addr == NULL) && (id->len != 0)) {
2425 		ecpp_nack_ioctl(q, mp, EFAULT);
2426 		return;
2427 	}
2428 
2429 	/* read device ID length */
2430 	if (error = ecpp_getdevid(pp, NULL, &len, mode)) {
2431 		ecpp_nack_ioctl(q, mp, error);
2432 		goto breakout;
2433 	}
2434 
2435 	/* exclude the two length bytes that prefix the IEEE 1284 device ID */
2436 	len -= 2;
2437 	*rlen = len;
2438 
2439 	/* limit transfer to user buffer length */
2440 	if (id->len < len) {
2441 		len = id->len;
2442 	}
2443 
2444 	if (len == 0) {
2445 		/* just return rlen */
2446 		stp->state = ECPP_ADDROUT;
2447 		ecpp_wput_iocdata_devid(q, mp,
2448 				(uintptr_t)rlen - (uintptr_t)&stp->un);
2449 		goto breakout;
2450 	}
2451 
2452 	if ((datamp = allocb(len, BPRI_MED)) == NULL) {
2453 		ecpp_nack_ioctl(q, mp, ENOSR);
2454 		goto breakout;
2455 	}
2456 
2457 	/* read ID string */
2458 	error = ecpp_getdevid(pp, datamp->b_rptr, &len, mode);
2459 	if (error) {
2460 		freemsg(datamp);
2461 		ecpp_nack_ioctl(q, mp, error);
2462 		goto breakout;
2463 	} else {
2464 		datamp->b_wptr += len;
2465 
2466 		stp->state = ECPP_ADDROUT;
2467 		mcopyout(mp, csp->cp_private, len, id->addr, datamp);
2468 		qreply(q, mp);
2469 	}
2470 
2471 	return;
2472 
2473 breakout:
2474 	(void) ecpp_1284_termination(pp);
2475 }
2476 
2477 /*
2478  * PRNIOC_GET_IFINFO: return prnio interface info string
2479  */
2480 static void
2481 ecpp_srvioc_prnif(queue_t *q, mblk_t *mp)
2482 {
2483 	struct copyresp			*csp;
2484 	struct ecpp_copystate		*stp;
2485 	uint_t				len;
2486 	struct prn_interface_info	*ip;
2487 	struct prn_interface_info	info;
2488 	mblk_t				*datamp;
2489 #ifdef _MULTI_DATAMODEL
2490 	struct iocblk		*iocbp = (struct iocblk *)mp->b_rptr;
2491 #endif
2492 
2493 	csp = (struct copyresp *)mp->b_rptr;
2494 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2495 	ip = (struct prn_interface_info *)mp->b_cont->b_rptr;
2496 
2497 #ifdef _MULTI_DATAMODEL
2498 	if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2499 		struct prn_interface_info32 *ip32;
2500 
2501 		ip32 = (struct prn_interface_info32 *)ip;
2502 		info.if_len = ip32->if_len;
2503 		info.if_data = (char *)(uintptr_t)ip32->if_data;
2504 	} else {
2505 #endif /* _MULTI_DATAMODEL */
2506 		info = *ip;
2507 #ifdef _MULTI_DATAMODEL
2508 	}
2509 #endif /* _MULTI_DATAMODEL */
2510 
2511 	len = strlen(prn_ifinfo);
2512 	stp->un.prn_if.if_rlen = len;
2513 	stp->state = ECPP_ADDROUT;
2514 
2515 	/* check arguments */
2516 	if ((info.if_data == NULL) && (info.if_len != 0)) {
2517 		ecpp_nack_ioctl(q, mp, EFAULT);
2518 		return;
2519 	}
2520 
2521 	if (info.if_len == 0) {
2522 		/* just copyout rlen */
2523 		ecpp_wput_iocdata_devid(q, mp,
2524 			offsetof(struct prn_interface_info, if_rlen));
2525 		return;
2526 	}
2527 
2528 	/* if needed, trim to the buffer size */
2529 	if (len > info.if_len) {
2530 		len = info.if_len;
2531 	}
2532 
2533 	if ((datamp = allocb(len, BPRI_MED)) == NULL) {
2534 		ecpp_nack_ioctl(q, mp, ENOSR);
2535 		return;
2536 	}
2537 
2538 	bcopy(&prn_ifinfo[0], datamp->b_wptr, len);
2539 	datamp->b_wptr += len;
2540 
2541 	mcopyout(mp, csp->cp_private, len, info.if_data, datamp);
2542 	qreply(q, mp);
2543 }
2544 
2545 static void
2546 ecpp_flush(struct ecppunit *pp, int cmd)
2547 {
2548 	queue_t		*q;
2549 	uint8_t		ecr, dcr;
2550 	timeout_id_t	timeout_id, fifo_timer_id, wsrv_timer_id;
2551 
2552 	ASSERT(mutex_owned(&pp->umutex));
2553 
2554 	if (!(cmd & FWRITE)) {
2555 		return;
2556 	}
2557 
2558 	q = pp->writeq;
2559 	timeout_id = fifo_timer_id = wsrv_timer_id = 0;
2560 
2561 	ecpp_error(pp->dip, "ecpp_flush e_busy=%x\n", pp->e_busy);
2562 
2563 	/* if there is an ongoing DMA, it needs to be turned off. */
2564 	switch (pp->e_busy) {
2565 	case ECPP_BUSY:
2566 		/*
2567 		 * Change the port status to ECPP_FLUSH to
2568 		 * indicate to ecpp_wsrv that the wq is being flushed.
2569 		 */
2570 		pp->e_busy = ECPP_FLUSH;
2571 
2572 		/*
2573 		 * dma_cancelled indicates to ecpp_isr() that we have
2574 		 * turned off the DMA.  Since the mutex is held, ecpp_isr()
2575 		 * may be blocked.  Once ecpp_flush() finishes and ecpp_isr()
2576 		 * gains the mutex, ecpp_isr() will have a _reset_ DMAC.  Most
2577 		 * significantly, the DMAC will be reset after ecpp_isr() was
2578 		 * invoked.  Therefore we need to have a flag "dma_cancelled"
2579 		 * to signify when the described condition has occured.  If
2580 		 * ecpp_isr() notes a dma_cancelled, it will ignore the DMAC csr
2581 		 * and simply claim the interupt.
2582 		 */
2583 
2584 		pp->dma_cancelled = TRUE;
2585 
2586 		/* either DMA or PIO transfer */
2587 		if (COMPAT_DMA(pp) ||
2588 		    (pp->current_mode == ECPP_ECP_MODE) ||
2589 		    (pp->current_mode == ECPP_DIAG_MODE)) {
2590 			/*
2591 			 * if the bcr is zero, then DMA is complete and
2592 			 * we are waiting for the fifo to drain.  Therefore,
2593 			 * turn off dma.
2594 			 */
2595 			if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
2596 				ecpp_error(pp->dip,
2597 					"ecpp_flush: dma_stop failed.\n");
2598 			}
2599 
2600 			/*
2601 			 * If the status of the port is ECPP_BUSY,
2602 			 * the DMA was stopped either explicitly above or by
2603 			 * ecpp_isr(), but the FIFO hasn't drained yet. In either
2604 			 * case, we need to unbind the dma mappings.
2605 			 */
2606 			if (ddi_dma_unbind_handle(
2607 						pp->dma_handle) != DDI_SUCCESS)
2608 				ecpp_error(pp->dip,
2609 						"ecpp_flush: unbind failed.\n");
2610 
2611 			if (pp->msg != NULL) {
2612 				freemsg(pp->msg);
2613 				pp->msg = NULL;
2614 			}
2615 		} else {
2616 			/*
2617 			 * PIO transfer: disable nAck interrups
2618 			 */
2619 			dcr = DCR_READ(pp);
2620 			dcr &= ~(ECPP_REV_DIR | ECPP_INTR_EN);
2621 			DCR_WRITE(pp, dcr);
2622 			ECPP_MASK_INTR(pp);
2623 		}
2624 
2625 		/*
2626 		 * The transfer is cleaned up.  There may or may not be data
2627 		 * in the fifo.  We don't care at this point.  Ie. SuperIO may
2628 		 * transfer the remaining bytes in the fifo or not. it doesn't
2629 		 * matter.  All that is important at this stage is that no more
2630 		 * fifo timers are started.
2631 		 */
2632 
2633 		timeout_id = pp->timeout_id;
2634 		fifo_timer_id = pp->fifo_timer_id;
2635 		pp->timeout_id = pp->fifo_timer_id = 0;
2636 		pp->softintr_pending = 0;
2637 
2638 		break;
2639 
2640 	case ECPP_ERR:
2641 		/*
2642 		 * Change the port status to ECPP_FLUSH to
2643 		 * indicate to ecpp_wsrv that the wq is being flushed.
2644 		 */
2645 		pp->e_busy = ECPP_FLUSH;
2646 
2647 		/*
2648 		 *  Most likely there are mblks in the queue,
2649 		 *  but the driver can not transmit because
2650 		 *  of the bad port status.  In this case,
2651 		 *  ecpp_flush() should make sure ecpp_wsrv_timer()
2652 		 *  is turned off.
2653 		 */
2654 		wsrv_timer_id = pp->wsrv_timer_id;
2655 		pp->wsrv_timer_id = 0;
2656 
2657 		break;
2658 
2659 	case ECPP_IDLE:
2660 		/* No work to do. Ready to flush */
2661 		break;
2662 
2663 	default:
2664 		ecpp_error(pp->dip,
2665 			"ecpp_flush: illegal state %x\n", pp->e_busy);
2666 	}
2667 
2668 	/* in DIAG mode clear TFIFO if needed */
2669 	if (pp->current_mode == ECPP_DIAG_MODE) {
2670 		ecr = ECR_READ(pp);
2671 		if (!(ecr & ECPP_FIFO_EMPTY)) {
2672 			ECR_WRITE(pp,
2673 				ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
2674 			ECR_WRITE(pp, ecr);
2675 		}
2676 	}
2677 
2678 	/* Discard all messages on the output queue. */
2679 	flushq(q, FLUSHDATA);
2680 
2681 	/* The port is no longer flushing or dma'ing for that matter. */
2682 	pp->e_busy = ECPP_IDLE;
2683 
2684 	/* Set the right phase */
2685 	if (pp->current_mode == ECPP_ECP_MODE) {
2686 		if (pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
2687 			pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
2688 		} else {
2689 			pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
2690 		}
2691 	}
2692 
2693 	/* cancel timeouts, if any; drop the mutex so untimeout() cannot deadlock */
2694 	mutex_exit(&pp->umutex);
2695 
2696 	if (timeout_id) {
2697 		(void) untimeout(timeout_id);
2698 	}
2699 	if (fifo_timer_id) {
2700 		(void) untimeout(fifo_timer_id);
2701 	}
2702 	if (wsrv_timer_id) {
2703 		(void) untimeout(wsrv_timer_id);
2704 	}
2705 
2706 	mutex_enter(&pp->umutex);
2707 
2708 	cv_signal(&pp->pport_cv);	/* wake up ecpp_close() */
2709 }
2710 
2711 static void
2712 ecpp_start(struct ecppunit *pp, caddr_t addr, size_t len)
2713 {
2714 	ASSERT(mutex_owned(&pp->umutex));
2715 	ASSERT(pp->e_busy == ECPP_BUSY);
2716 
2717 	ecpp_error(pp->dip,
2718 		"ecpp_start:current_mode=%x,current_phase=%x,ecr=%x,len=%d\n",
2719 		pp->current_mode, pp->current_phase, ECR_READ(pp), len);
2720 
2721 	pp->dma_dir = DDI_DMA_WRITE;	/* this is a forward transfer */
2722 
2723 	switch (pp->current_mode) {
2724 	case ECPP_NIBBLE_MODE:
2725 		(void) ecpp_1284_termination(pp);
2726 
2727 		/* After termination we are either Compatible or Centronics */
2728 
2729 		/* FALLTHRU */
2730 
2731 	case ECPP_CENTRONICS:
2732 	case ECPP_COMPAT_MODE:
2733 		if (pp->io_mode == ECPP_DMA) {
2734 			if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2735 				return;
2736 			}
2737 		} else {
2738 			/* PIO mode */
2739 			if (ecpp_prep_pio_xfer(pp, addr, len) == FAILURE) {
2740 				return;
2741 			}
2742 			(void) ecpp_pio_writeb(pp);
2743 		}
2744 		break;
2745 
2746 	case ECPP_DIAG_MODE: {
2747 		int	oldlen;
2748 
2749 		/* put superio into TFIFO mode, if not already */
2750 		ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
2751 		/*
2752 		 * DMA would block if the TFIFO is not empty
2753 		 * if by this moment nobody has read these bytes, they're gone
2754 		 */
2755 		drv_usecwait(1);
2756 		if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
2757 			ecpp_error(pp->dip,
2758 				"ecpp_start: TFIFO not empty, clearing\n");
2759 			ECR_WRITE(pp,
2760 				ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
2761 			ECR_WRITE(pp,
2762 				ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
2763 		}
2764 
2765 		/* we can DMA at most 16 bytes into TFIFO */
2766 		oldlen = len;
2767 		if (len > ECPP_FIFO_SZ) {
2768 			len = ECPP_FIFO_SZ;
2769 		}
2770 
2771 		if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2772 			return;
2773 		}
2774 
2775 		/* put the rest of data back on the queue */
2776 		if (oldlen > len) {
2777 			ecpp_putback_untransfered(pp, addr + len, oldlen - len);
2778 		}
2779 
2780 		break;
2781 	}
2782 
2783 	case ECPP_ECP_MODE:
2784 		ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
2785 			pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
2786 
2787 		/* if in Reverse Phase negotiate to Forward */
2788 		if (pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) {
2789 			if (ecp_reverse2forward(pp) == FAILURE) {
2790 				if (pp->msg) {
2791 					(void) putbq(pp->writeq, pp->msg);
2792 				} else {
2793 					ecpp_putback_untransfered(pp,
2794 								addr, len);
2795 				}
2796 			}
2797 		}
2798 
2799 		if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2800 			return;
2801 		}
2802 
2803 		break;
2804 	}
2805 
2806 	/* schedule transfer timeout */
2807 	pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp,
2808 		pp->xfer_parms.write_timeout * drv_usectohz(1000000));
2809 }
2810 
2811 /*
2812  * Transfer a PIO "block" a byte at a time.
2813  * The block starts at addr and ends at pp->last_byte
2814  */
2815 static uint8_t
2816 ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
2817 {
2818 	pp->next_byte = addr;
2819 	pp->last_byte = (caddr_t)((ulong_t)addr + len);
2820 
2821 	if (ecpp_check_status(pp) == FAILURE) {
2822 		/*
2823 		 * if status signals are bad, do not start PIO,
2824 		 * put everything back on the queue.
2825 		 */
2826 		ecpp_error(pp->dip,
2827 			"ecpp_prep_pio_xfer:suspend PIO len=%d\n", len);
2828 
2829 		if (pp->msg != NULL) {
2830 			/*
2831 			 * in this circumstance we want to copy the
2832 			 * untransferred section of msg to a new mblk,
2833 			 * then free the original one.
2834 			 */
2835 			ecpp_putback_untransfered(pp,
2836 				(void *)pp->msg->b_rptr, len);
2837 			ecpp_error(pp->dip,
2838 				"ecpp_prep_pio_xfer: len1=%d\n", len);
2839 
2840 			freemsg(pp->msg);
2841 			pp->msg = NULL;
2842 		} else {
2843 			ecpp_putback_untransfered(pp, pp->ioblock, len);
2844 			ecpp_error(pp->dip,
2845 				"ecpp_prep_pio_xfer: len2=%d\n", len);
2846 		}
2847 		qenable(pp->writeq);
2848 
2849 		return (FAILURE);
2850 	}
2851 
2852 	pp->dma_cancelled = FALSE;
2853 
2854 	/* pport must be in PIO mode */
2855 	if (ecr_write(pp, ECR_mode_001 |
2856 				ECPP_INTR_MASK | ECPP_INTR_SRV) != SUCCESS) {
2857 		ecpp_error(pp->dip, "ecpp_prep_pio_xfer: failed w/ECR.\n");
2858 	}
2859 
2860 	ecpp_error(pp->dip, "ecpp_prep_pio_xfer: dcr=%x ecr=%x\n",
2861 			DCR_READ(pp), ECR_READ(pp));
2862 
2863 	return (SUCCESS);
2864 }
2865 
2866 static uint8_t
2867 ecpp_init_dma_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
2868 {
2869 	uint8_t ecr_mode[] = {
2870 		0,
2871 		ECR_mode_010,	/* Centronics */
2872 		ECR_mode_010,	/* Compat */
2873 		0,		/* Byte */
2874 		0,		/* Nibble */
2875 		ECR_mode_011,	/* ECP */
2876 		0,		/* Failure */
2877 		ECR_mode_110,	/* Diag */
2878 	};
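	/*
	 * ecr_mode[] maps current_mode to the SuperIO ECR mode bits used for
	 * the DMA transfer; zero entries mark modes that never reach here.
	 */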
2879 	uint8_t	ecr;
2880 
2881 	ASSERT((pp->current_mode <= ECPP_DIAG_MODE) &&
2882 		(ecr_mode[pp->current_mode] != 0));
2883 
2884 	if (ecpp_setup_dma_resources(pp, addr, len) == FAILURE) {
2885 		qenable(pp->writeq);
2886 		return (FAILURE);
2887 	}
2888 
2889 	if (ecpp_check_status(pp) == FAILURE) {
2890 		/*
2891 		 * if status signals are bad, do not start DMA, but
2892 		 * rather put everything back on the queue.
2893 		 */
2894 		ecpp_error(pp->dip,
2895 			"ecpp_init_dma_xfer: suspending DMA len=%d\n",
2896 			pp->dma_cookie.dmac_size);
2897 
2898 		if (pp->msg != NULL) {
2899 			/*
2900 			 * in this circumstance we want to copy the
2901 			 * untransferred section of msg to a new mblk,
2902 			 * then free the original one.
2903 			 */
2904 			ecpp_putback_untransfered(pp,
2905 				(void *)pp->msg->b_rptr, len);
2906 			ecpp_error(pp->dip,
2907 				"ecpp_init_dma_xfer:a:len=%d\n", len);
2908 
2909 			freemsg(pp->msg);
2910 			pp->msg = NULL;
2911 		} else {
2912 			ecpp_putback_untransfered(pp, pp->ioblock, len);
2913 			ecpp_error(pp->dip,
2914 				"ecpp_init_dma_xfer:b:len=%d\n", len);
2915 		}
2916 
2917 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
2918 			ecpp_error(pp->dip,
2919 				"ecpp_init_dma_xfer: unbind FAILURE.\n");
2920 		}
2921 		qenable(pp->writeq);
2922 		return (FAILURE);
2923 	}
2924 
2925 	pp->xfercnt = pp->resid = len;
2926 	pp->dma_cancelled = FALSE;
2927 	pp->tfifo_intr = 0;
2928 
2929 	/* set the right ECR mode and disable DMA */
2930 	ecr = ecr_mode[pp->current_mode];
2931 	(void) ecr_write(pp, ecr | ECPP_INTR_SRV | ECPP_INTR_MASK);
2932 
2933 	/* prepare DMAC for a transfer */
2934 	if (ECPP_DMA_START(pp) == FAILURE) {
2935 		ecpp_error(pp->dip, "ecpp_init_dma_xfer: dma_start FAILED.\n");
2936 		return (FAILURE);
2937 	}
2938 
2939 	/* GO! */
2940 	(void) ecr_write(pp, ecr | ECPP_DMA_ENABLE | ECPP_INTR_MASK);
2941 
2942 	return (SUCCESS);
2943 }
2944 
2945 static uint8_t
2946 ecpp_setup_dma_resources(struct ecppunit *pp, caddr_t addr, size_t len)
2947 {
2948 	int	err;
2949 	off_t	woff;
2950 	size_t	wlen;
2951 
2952 	ASSERT(pp->dma_dir == DDI_DMA_READ || pp->dma_dir == DDI_DMA_WRITE);
2953 
2954 	err = ddi_dma_addr_bind_handle(pp->dma_handle, NULL,
2955 		addr, len, pp->dma_dir | DDI_DMA_PARTIAL,
2956 		DDI_DMA_DONTWAIT, NULL,
2957 		&pp->dma_cookie, &pp->dma_cookie_count);
2958 
2959 	switch (err) {
2960 	case DDI_DMA_MAPPED:
2961 		ecpp_error(pp->dip, "ecpp_setup_dma: DMA_MAPPED\n");
2962 
2963 		pp->dma_nwin = 1;
2964 		pp->dma_curwin = 1;
2965 		break;
2966 
2967 	case DDI_DMA_PARTIAL_MAP: {
2968 		ecpp_error(pp->dip, "ecpp_setup_dma: DMA_PARTIAL_MAP\n");
2969 
2970 		if (ddi_dma_numwin(pp->dma_handle,
2971 				&pp->dma_nwin) != DDI_SUCCESS) {
2972 			(void) ddi_dma_unbind_handle(pp->dma_handle);
2973 			return (FAILURE);
2974 		}
2975 		pp->dma_curwin = 1;
2976 
2977 		/*
2978 		 * The very first window is returned by bind_handle,
2979 		 * but we must do this explicitly here, otherwise
2980 		 * the next getwin would return the wrong cookie dmac_size
2981 		 */
2982 		if (ddi_dma_getwin(pp->dma_handle, 0, &woff, &wlen,
2983 		    &pp->dma_cookie, &pp->dma_cookie_count) != DDI_SUCCESS) {
2984 			ecpp_error(pp->dip,
2985 				"ecpp_setup_dma: ddi_dma_getwin failed!");
2986 			(void) ddi_dma_unbind_handle(pp->dma_handle);
2987 			return (FAILURE);
2988 		}
2989 
2990 		ecpp_error(pp->dip,
2991 			"ecpp_setup_dma: cookies=%d, windows=%d"
2992 			" addr=%lx len=%d\n",
2993 			pp->dma_cookie_count, pp->dma_nwin,
2994 			pp->dma_cookie.dmac_address, pp->dma_cookie.dmac_size);
2995 
2996 		break;
2997 	}
2998 
2999 	default:
3000 		ecpp_error(pp->dip, "ecpp_setup_dma: err=%x\n", err);
3001 		return (FAILURE);
3002 	}
3003 
3004 	return (SUCCESS);
3005 }
3006 
3007 static void
3008 ecpp_ack_ioctl(queue_t *q, mblk_t *mp)
3009 {
3010 	struct iocblk  *iocbp;
3011 
3012 	mp->b_datap->db_type = M_IOCACK;
3013 	mp->b_wptr = mp->b_rptr + sizeof (struct iocblk);
3014 
3015 	if (mp->b_cont) {
3016 		freemsg(mp->b_cont);
3017 		mp->b_cont = NULL;
3018 	}
3019 
3020 	iocbp = (struct iocblk *)mp->b_rptr;
3021 	iocbp->ioc_error = 0;
3022 	iocbp->ioc_count = 0;
3023 	iocbp->ioc_rval = 0;
3024 
3025 	qreply(q, mp);
3026 }
3027 
3028 static void
3029 ecpp_nack_ioctl(queue_t *q, mblk_t *mp, int err)
3030 {
3031 	struct iocblk  *iocbp;
3032 
3033 	mp->b_datap->db_type = M_IOCNAK;
3034 	mp->b_wptr = mp->b_rptr + sizeof (struct iocblk);
3035 	iocbp = (struct iocblk *)mp->b_rptr;
3036 	iocbp->ioc_error = err;
3037 
3038 	if (mp->b_cont) {
3039 		freemsg(mp->b_cont);
3040 		mp->b_cont = NULL;
3041 	}
3042 
3043 	qreply(q, mp);
3044 }
3045 
3046 uint_t
3047 ecpp_isr(caddr_t arg)
3048 {
3049 	struct ecppunit *pp = (struct ecppunit *)(void *)arg;
3050 	uint32_t	dcsr;
3051 	uint8_t		dsr;
3052 	int		cheerio_pend_counter;
3053 	int		retval = DDI_INTR_UNCLAIMED;
3054 	hrtime_t	now;
3055 
3056 	mutex_enter(&pp->umutex);
3057 	/*
3058 	 * interrupt may occur while other thread is holding the lock
3059 	 * and cancels DMA transfer (e.g. ecpp_flush())
3060 	 * since it cannot cancel the interrupt thread,
3061 	 * it just sets dma_cancelled to TRUE,
3062 	 * telling interrupt handler to exit immediately
3063 	 */
3064 	if (pp->dma_cancelled == TRUE) {
3065 		ecpp_error(pp->dip, "dma-cancel isr\n");
3066 
3067 		pp->intr_hard++;
3068 		pp->dma_cancelled = FALSE;
3069 
3070 		mutex_exit(&pp->umutex);
3071 		return (DDI_INTR_CLAIMED);
3072 	}
3073 
3074 	/* Southbridge interrupts are handled separately */
3075 #if defined(__x86)
3076 	if (pp->hw == &x86)
3077 #else
3078 	if (pp->hw == &m1553)
3079 #endif
3080 	{
3081 		retval = ecpp_M1553_intr(pp);
3082 		if (retval == DDI_INTR_UNCLAIMED) {
3083 			goto unexpected;
3084 		}
3085 		mutex_exit(&pp->umutex);
3086 		return (DDI_INTR_CLAIMED);
3087 	}
3088 
3089 	/*
3090 	 * the intr is through the motherboard. It is faster than the PCI route.
3091 	 * sometimes ecpp_isr() is invoked before cheerio csr is updated.
3092 	 */
3093 	cheerio_pend_counter = ecpp_isr_max_delay;
3094 	dcsr = GET_DMAC_CSR(pp);
3095 
3096 	while (!(dcsr & DCSR_INT_PEND) && cheerio_pend_counter-- > 0) {
3097 		drv_usecwait(1);
3098 		dcsr = GET_DMAC_CSR(pp);
3099 	}
3100 
3101 	/*
3102 	 * This is a workaround for what seems to be a timing problem
3103 	 * with the delivery of interrupts and CSR updating with the
3104 	 * ebus2 csr, superio and the n_ERR pin from the peripheral.
3105 	 *
3106 	 * delay is not needed for PIO mode
3107 	 */
3108 	if (!COMPAT_PIO(pp)) {
3109 		drv_usecwait(100);
3110 		dcsr = GET_DMAC_CSR(pp);
3111 	}
3112 
3113 	/* on 97317 in Extended mode IRQ_ST of DSR is deasserted when read */
3114 	dsr = DSR_READ(pp);
3115 
3116 	/*
3117 	 * check if interrupt is for this device:
3118 	 * it should be reflected either in cheerio DCSR register
3119 	 * or in IRQ_ST bit of DSR on 97317
3120 	 */
3121 	if ((dcsr & DCSR_INT_PEND) == 0) {
3122 		if (pp->hw != &pc97317) {
3123 			goto unclaimed;
3124 		}
3125 		/*
3126 		 * on Excalibur, reading DSR will deassert SuperIO IRQx line
3127 		 * RIO's DCSR_INT_PEND seems to follow IRQx transitions,
3128 		 * so if DSR is read after interrupt occured, but before
3129 		 * we get here, IRQx and hence INT_PEND will be deasserted
3130 		 * as a result, we can miss a service interrupt in PIO mode
3131 		 *
3132 		 * malicious DSR reader is BPPIOC_TESTIO, which is called
3133 		 * by LP in between data blocks to check printer status
3134 		 * this workaround lets us not to miss an interrupt
3135 		 *
3136 		 * also, nErr interrupt (ECP mode) not always reflected in DCSR
3137 		 */
3138 		if (((dsr & ECPP_IRQ_ST) == 0) ||
3139 		    ((COMPAT_PIO(pp)) && (pp->e_busy == ECPP_BUSY)) ||
3140 		    (((dsr & ECPP_nERR) == 0) &&
3141 		    (pp->current_mode == ECPP_ECP_MODE))) {
3142 			dcsr = 0;
3143 		} else {
3144 			goto unclaimed;
3145 		}
3146 	}
3147 
3148 	pp->intr_hard++;
3149 
3150 	/* the intr is for us - check all possible interrupt sources */
3151 	if (dcsr & DCSR_ERR_PEND) {
3152 		size_t	bcr;
3153 
3154 		/* we are expecting a data transfer interrupt */
3155 		ASSERT(pp->e_busy == ECPP_BUSY);
3156 
3157 		/*
3158 		 * some kind of DMA error
3159 		 */
3160 		if (ECPP_DMA_STOP(pp, &bcr) == FAILURE) {
3161 			ecpp_error(pp->dip, "ecpp_isr: dma_stop failed\n");
3162 		}
3163 
3164 		ecpp_error(pp->dip, "ecpp_isr: DMAC ERROR bcr=%d\n", bcr);
3165 
3166 		ecpp_xfer_cleanup(pp);
3167 
3168 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
3169 			ecpp_error(pp->dip, "ecpp_isr(e): unbind failed\n");
3170 		}
3171 
3172 		mutex_exit(&pp->umutex);
3173 		return (DDI_INTR_CLAIMED);
3174 	}
3175 
3176 	if (dcsr & DCSR_TC) {
3177 		retval = ecpp_dma_ihdlr(pp);
3178 		mutex_exit(&pp->umutex);
3179 		return (DDI_INTR_CLAIMED);
3180 	}
3181 
3182 	if (COMPAT_PIO(pp)) {
3183 		retval = ecpp_pio_ihdlr(pp);
3184 		mutex_exit(&pp->umutex);
3185 		return (DDI_INTR_CLAIMED);
3186 	}
3187 
3188 	/* does peripheral need attention? */
3189 	if ((dsr & ECPP_nERR) == 0) {
3190 		retval = ecpp_nErr_ihdlr(pp);
3191 		mutex_exit(&pp->umutex);
3192 		return (DDI_INTR_CLAIMED);
3193 	}
3194 
3195 	pp->intr_hard--;
3196 
3197 unexpected:
3198 
3199 	pp->intr_spurious++;
3200 
3201 	/*
3202 	 * The following procedure tries to prevent soft hangs
3203 	 * in event of peripheral/superio misbehaviour:
3204 	 * if number of unexpected interrupts in the last SPUR_PERIOD ns
3205 	 * exceeded SPUR_CRITICAL, then shut up interrupts
3206 	 */
3207 	now = gethrtime();
3208 	if (pp->lastspur == 0 || now - pp->lastspur > SPUR_PERIOD) {
3209 		/* last unexpected interrupt was long ago */
3210 		pp->lastspur = now;
3211 		pp->nspur = 1;
3212 	} else {
3213 		/* last unexpected interrupt was recently */
3214 		pp->nspur++;
3215 	}
3216 
3217 	if (pp->nspur >= SPUR_CRITICAL) {
3218 		ECPP_MASK_INTR(pp);
3219 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK | ECPP_INTR_SRV);
3220 		pp->nspur = 0;
3221 		cmn_err(CE_NOTE, "%s%d: too many interrupt requests",
3222 			ddi_get_name(pp->dip), ddi_get_instance(pp->dip));
3223 	} else {
3224 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_SRV | ECPP_INTR_MASK);
3225 	}
3226 
3227 	ecpp_error(pp->dip,
3228 		"isr:unknown: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n",
3229 		dcsr, ECR_READ(pp), dsr, DCR_READ(pp),
3230 		pp->current_mode, pp->current_phase);
3231 
3232 	mutex_exit(&pp->umutex);
3233 	return (DDI_INTR_CLAIMED);
3234 
3235 unclaimed:
3236 
3237 	pp->intr_spurious++;
3238 
3239 	ecpp_error(pp->dip,
3240 		"isr:UNCL: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n",
3241 		dcsr, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp),
3242 		pp->current_mode, pp->current_phase);
3243 
3244 	mutex_exit(&pp->umutex);
3245 	return (DDI_INTR_UNCLAIMED);
3246 }
3247 
3248 /*
3249  * M1553 intr handler
3250  */
3251 static uint_t
3252 ecpp_M1553_intr(struct ecppunit *pp)
3253 {
3254 	int retval = DDI_INTR_UNCLAIMED;
3255 
3256 	pp->intr_hard++;
3257 
3258 	if (pp->e_busy == ECPP_BUSY) {
3259 		/* Centronics or Compat PIO transfer */
3260 		if (COMPAT_PIO(pp)) {
3261 			return (ecpp_pio_ihdlr(pp));
3262 		}
3263 
3264 		/* Centronics or Compat DMA transfer */
3265 		if (COMPAT_DMA(pp) ||
3266 		    (pp->current_mode == ECPP_ECP_MODE) ||
3267 		    (pp->current_mode == ECPP_DIAG_MODE)) {
3268 			return (ecpp_dma_ihdlr(pp));
3269 		}
3270 	}
3271 
3272 	/* Nibble or ECP backchannel request? */
3273 	if ((DSR_READ(pp) & ECPP_nERR) == 0) {
3274 		return (ecpp_nErr_ihdlr(pp));
3275 	}
3276 
3277 	return (retval);
3278 }
3279 
3280 /*
3281  * DMA completion interrupt handler
3282  */
3283 static uint_t
3284 ecpp_dma_ihdlr(struct ecppunit *pp)
3285 {
3286 	clock_t	tm;
3287 
3288 	ecpp_error(pp->dip, "ecpp_dma_ihdlr(%x): ecr=%x, dsr=%x, dcr=%x\n",
3289 		pp->current_mode, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
3290 
3291 	/* we are expecting a data transfer interrupt */
3292 	ASSERT(pp->e_busy == ECPP_BUSY);
3293 
3294 	/* Intr generated while invoking TFIFO mode. Exit */
3295 	if (pp->tfifo_intr == 1) {
3296 		pp->tfifo_intr = 0;
3297 		ecpp_error(pp->dip, "ecpp_dma_ihdlr: tfifo_intr is 1\n");
3298 		return (DDI_INTR_CLAIMED);
3299 	}
3300 
3301 	if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
3302 		ecpp_error(pp->dip, "ecpp_dma_ihdlr: dma_stop failed\n");
3303 	}
3304 
3305 	if (pp->current_mode == ECPP_ECP_MODE &&
3306 	    pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
3307 		ecpp_ecp_read_completion(pp);
3308 	} else {
3309 		/*
3310 		 * fifo_timer() will do the cleanup when the FIFO drains
3311 		 */
3312 		if ((ECR_READ(pp) & ECPP_FIFO_EMPTY) ||
3313 		    (pp->current_mode == ECPP_DIAG_MODE)) {
3314 			tm = 0;	/* no use in waiting if FIFO is already empty */
3315 		} else {
3316 			tm = drv_usectohz(FIFO_DRAIN_PERIOD);
3317 		}
3318 		pp->fifo_timer_id = timeout(ecpp_fifo_timer, (caddr_t)pp, tm);
3319 	}
3320 
3321 	/*
3322 	 * Stop the DMA transfer timeout timer
3323 	 * this operation will temporarily give up the mutex,
3324 	 * so we do it in the end of the handler to avoid races
3325 	 */
3326 	ecpp_untimeout_unblock(pp, &pp->timeout_id);
3327 
3328 	return (DDI_INTR_CLAIMED);
3329 }
3330 
3331 /*
3332  * ecpp_pio_ihdlr() is a PIO interrupt processing routine
3333  * It masks interrupts, updates statistics and initiates next byte transfer
3334  */
3335 static uint_t
3336 ecpp_pio_ihdlr(struct ecppunit *pp)
3337 {
3338 	ASSERT(mutex_owned(&pp->umutex));
3339 	ASSERT(pp->e_busy == ECPP_BUSY);
3340 
3341 	/* update statistics */
3342 	pp->joblen++;
3343 	pp->ctxpio_obytes++;
3344 
3345 	/* disable nAck interrupts */
3346 	ECPP_MASK_INTR(pp);
3347 	DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_REV_DIR | ECPP_INTR_EN));
3348 
3349 	/*
3350 	 * If it was the last byte of the data block, clean up;
3351 	 * otherwise trigger a soft interrupt to send the next byte
3352 	 */
3353 	if (pp->next_byte >= pp->last_byte) {
3354 		ecpp_xfer_cleanup(pp);
3355 		ecpp_error(pp->dip,
3356 			"ecpp_pio_ihdlr: pp->joblen=%d,pp->ctx_cf=%d,\n",
3357 			pp->joblen, pp->ctx_cf);
3358 	} else {
3359 		if (pp->softintr_pending) {
3360 			ecpp_error(pp->dip,
3361 				"ecpp_pio_ihdlr:E: next byte in progress\n");
3362 		} else {
3363 			pp->softintr_flags = ECPP_SOFTINTR_PIONEXT;
3364 			pp->softintr_pending = 1;
3365 			ddi_trigger_softintr(pp->softintr_id);
3366 		}
3367 	}
3368 
3369 	return (DDI_INTR_CLAIMED);
3370 }
3371 
3372 /*
3373  * ecpp_pio_writeb() sends a byte using Centronics handshake
3374  */
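/*
 * The sequence is: present the data byte, wait data_setup_time, assert
 * nStrobe (negative logic) for strobe_pulse_width, then deassert it;
 * the peripheral's nAck response arrives as an interrupt and is handled
 * in ecpp_pio_ihdlr().
 */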
3375 static void
3376 ecpp_pio_writeb(struct ecppunit *pp)
3377 {
3378 	uint8_t	dcr;
3379 
3380 	dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
3381 	dcr |= ECPP_INTR_EN;
3382 
3383 	/* send the next byte */
3384 	DATAR_WRITE(pp, *(pp->next_byte++));
3385 
3386 	drv_usecwait(pp->data_setup_time);
3387 
3388 	/* Now Assert (neg logic) nStrobe */
3389 	if (dcr_write(pp, dcr | ECPP_STB) == FAILURE) {
3390 		ecpp_error(pp->dip, "ecpp_pio_writeb:1: failed w/DCR\n");
3391 	}
3392 
3393 	/* Enable nAck interrupts */
3394 	(void) DSR_READ(pp);	/* ensure IRQ_ST is armed */
3395 	ECPP_UNMASK_INTR(pp);
3396 
3397 	drv_usecwait(pp->strobe_pulse_width);
3398 
3399 	if (dcr_write(pp, dcr & ~ECPP_STB) == FAILURE) {
3400 		ecpp_error(pp->dip, "ecpp_pio_writeb:2: failed w/DCR\n");
3401 	}
3402 }
3403 
3404 /*
3405  * Backchannel request interrupt handler
3406  */
3407 static uint_t
3408 ecpp_nErr_ihdlr(struct ecppunit *pp)
3409 {
3410 	ecpp_error(pp->dip, "ecpp_nErr_ihdlr: mode=%x, phase=%x\n",
3411 				pp->current_mode, pp->current_phase);
3412 
3413 	if (pp->oflag != TRUE) {
3414 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: not open!\n");
3415 		return (DDI_INTR_UNCLAIMED);
3416 	}
3417 
3418 	if (pp->e_busy == ECPP_BUSY) {
3419 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: busy\n");
3420 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK);
3421 		return (DDI_INTR_CLAIMED);
3422 	}
3423 
3424 	/* mask nErr & nAck interrupts */
3425 	ECPP_MASK_INTR(pp);
3426 	DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_INTR_EN | ECPP_REV_DIR));
3427 	ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK);
3428 
3429 	/* going reverse */
3430 	switch (pp->current_mode) {
3431 	case ECPP_ECP_MODE:
3432 		/*
3433 		 * Peripheral asserts nPeriphRequest (nFault)
3434 		 */
3435 		break;
3436 	case ECPP_NIBBLE_MODE:
3437 		/*
3438 		 * Event 18: Periph asserts nErr to indicate data avail
3439 		 * Event 19: After waiting minimum pulse width,
3440 		 *   periph sets nAck high to generate an interrupt
3441 		 *
3442 		 * Interface is in Interrupt Phase
3443 		 */
3444 		pp->current_phase = ECPP_PHASE_NIBT_REVINTR;
3445 
3446 		break;
3447 	default:
3448 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: wrong mode!\n");
3449 		return (DDI_INTR_UNCLAIMED);
3450 	}
3451 
3452 	(void) ecpp_backchan_req(pp);	/* put backchannel request on the wq */
3453 
3454 	return (DDI_INTR_CLAIMED);
3455 }
3456 
3457 /*
3458  * Softintr handler does work according to softintr_flags:
3459  * in case of ECPP_SOFTINTR_PIONEXT it sends next byte of PIO transfer
3460  */
3461 static uint_t
3462 ecpp_softintr(caddr_t arg)
3463 {
3464 	struct ecppunit *pp = (struct ecppunit *)arg;
3465 	uint32_t unx_len, ecpp_reattempts = 0;
3466 
3467 	mutex_enter(&pp->umutex);
3468 
3469 	pp->intr_soft++;
3470 
3471 	if (!pp->softintr_pending) {
3472 		mutex_exit(&pp->umutex);
3473 		return (DDI_INTR_CLAIMED);
3474 	} else {
3475 		pp->softintr_pending = 0;
3476 	}
3477 
3478 	if (pp->softintr_flags & ECPP_SOFTINTR_PIONEXT) {
3479 		pp->softintr_flags &= ~ECPP_SOFTINTR_PIONEXT;
3480 		/*
3481 		 * Send the next byte in PIO mode
3482 		 */
3483 		ecpp_reattempts = 0;
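		/*
		 * Poll the status register for up to wait_for_busy
		 * microseconds (one microsecond per iteration), waiting for
		 * the peripheral to report itself ready.
		 */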
3484 		do {
3485 			if (ecpp_check_status(pp) == SUCCESS) {
3486 				pp->e_busy = ECPP_BUSY;
3487 				break;
3488 			}
3489 			drv_usecwait(1);
3490 			if (pp->isr_reattempt_high < ecpp_reattempts) {
3491 				pp->isr_reattempt_high = ecpp_reattempts;
3492 			}
3493 		} while (++ecpp_reattempts < pp->wait_for_busy);
3494 
3495 		/* if the peripheral still has not recovered, suspend the transfer */
3496 		if (pp->e_busy == ECPP_ERR) {
3497 			++pp->ctx_cf; /* check status fail */
3498 			ecpp_error(pp->dip, "ecpp_softintr:check_status:F: "
3499 				"dsr=%x jl=%d cf_isr=%d\n",
3500 				DSR_READ(pp), pp->joblen, pp->ctx_cf);
3501 
3502 			/*
3503 			 * if status signals are bad,
3504 			 * put everything back on the wq.
3505 			 */
3506 			unx_len = pp->last_byte - pp->next_byte;
3507 			if (pp->msg != NULL) {
3508 				ecpp_putback_untransfered(pp,
3509 					(void *)pp->msg->b_rptr, unx_len);
3510 				ecpp_error(pp->dip,
3511 				    "ecpp_softintr:e1:unx_len=%d\n", unx_len);
3512 
3513 				freemsg(pp->msg);
3514 				pp->msg = NULL;
3515 			} else {
3516 				ecpp_putback_untransfered(pp,
3517 					pp->next_byte, unx_len);
3518 				ecpp_error(pp->dip,
3519 				    "ecpp_softintr:e2:unx_len=%d\n", unx_len);
3520 			}
3521 
3522 			ecpp_xfer_cleanup(pp);
3523 			pp->e_busy = ECPP_ERR;
3524 			qenable(pp->writeq);
3525 		} else {
3526 			/* send the next one */
3527 			pp->e_busy = ECPP_BUSY;
3528 			(void) ecpp_pio_writeb(pp);
3529 		}
3530 	}
3531 
3532 	mutex_exit(&pp->umutex);
3533 	return (DDI_INTR_CLAIMED);
3534 }
3535 
3536 
3537 /*
3538  * Transfer clean-up:
3539  * 	shut down the DMAC
3540  *	stop the transfer timer
3541  *	enable write queue
3542  */
3543 static void
3544 ecpp_xfer_cleanup(struct ecppunit *pp)
3545 {
3546 	ASSERT(mutex_owned(&pp->umutex));
3547 
3548 	/*
3549 	 * if we did not use the ioblock, the mblk that
3550 	 * was used should be freed.
3551 	 */
3552 	if (pp->msg != NULL) {
3553 		freemsg(pp->msg);
3554 		pp->msg = NULL;
3555 	}
3556 
3557 	/* The port is no longer active */
3558 	pp->e_busy = ECPP_IDLE;
3559 
3560 	/* Stop the transfer timeout timer */
3561 	ecpp_untimeout_unblock(pp, &pp->timeout_id);
3562 
3563 	qenable(pp->writeq);
3564 }
3565 
3566 /*VARARGS*/
3567 static void
3568 ecpp_error(dev_info_t *dip, char *fmt, ...)
3569 {
3570 	static	long	last;
3571 	static	char	*lastfmt;
3572 	char		msg_buffer[255];
3573 	va_list	ap;
3574 	time_t	now;
3575 
3576 	if (!ecpp_debug) {
3577 		return;
3578 	}
3579 
3580 	/*
3581 	 * This function is supposed to be a quick non-blockable
3582 	 * wrapper for cmn_err(9F), which provides a sensible degree
3583 	 * of debug message throttling.  Not using any type of lock
3584 	 * is a requirement, but this also leaves two static variables
3585 	 * - last and lastfmt - unprotected. However, this will not do
3586 	 * any harm to driver functionality, it can only weaken throttling.
3587 	 * The following directive asks warlock to not worry about these
3588 	 * variables.
3589 	 */
3590 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(last, lastfmt))
3591 
3592 	/*
3593 	 * Don't print same error message too often.
3594 	 */
3595 	now = gethrestime_sec();
3596 	if ((last == (now & ~1)) && (lastfmt == fmt))
3597 		return;
3598 
3599 	last = now & ~1;
3600 	lastfmt = fmt;
3601 
3602 	va_start(ap, fmt);
3603 	(void) vsprintf(msg_buffer, fmt, ap);
3604 	cmn_err(CE_CONT, "%s%d: %s", ddi_get_name(dip),
3605 			ddi_get_instance(dip), msg_buffer);
3606 	va_end(ap);
3607 }
3608 
3609 /*
3610  * Forward transfer timeout
3611  */
3612 static void
3613 ecpp_xfer_timeout(void *arg)
3614 {
3615 	struct ecppunit	*pp = arg;
3616 	void		*unx_addr;
3617 	size_t		unx_len, xferd;
3618 	uint8_t		dcr;
3619 	timeout_id_t	fifo_timer_id;
3620 
3621 	mutex_enter(&pp->umutex);
3622 
3623 	if (pp->timeout_id == 0) {
3624 		mutex_exit(&pp->umutex);
3625 		return;
3626 	} else {
3627 		pp->timeout_id = 0;
3628 	}
3629 
3630 	pp->xfer_tout++;
3631 
3632 	pp->dma_cancelled = TRUE;	/* prevent race with isr() */
3633 
3634 	if (COMPAT_PIO(pp)) {
3635 		/*
3636 		 * PIO mode timeout
3637 		 */
3638 
3639 		/* turn off nAck interrupts */
3640 		dcr = DCR_READ(pp);
3641 		(void) dcr_write(pp, dcr & ~(ECPP_REV_DIR | ECPP_INTR_EN));
3642 		ECPP_MASK_INTR(pp);
3643 
3644 		pp->softintr_pending = 0;
3645 		unx_len = pp->last_byte - pp->next_byte;
3646 		ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len);
3647 
3648 		if (unx_len > 0) {
3649 			unx_addr = pp->next_byte;
3650 		} else {
3651 			ecpp_xfer_cleanup(pp);
3652 			qenable(pp->writeq);
3653 			mutex_exit(&pp->umutex);
3654 			return;
3655 		}
3656 	} else {
3657 		/*
3658 		 * DMA mode timeout
3659 		 *
3660 		 * If the DMAC fails to shut off, continue anyway and attempt
3661 		 * to put untransferred data back on the queue.
3662 		 */
3663 		if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) {
3664 			ecpp_error(pp->dip,
3665 				"ecpp_xfer_timeout: failed dma_stop\n");
3666 		}
3667 
3668 		ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len);
3669 
3670 		if (ddi_dma_unbind_handle(pp->dma_handle) == DDI_FAILURE) {
3671 			ecpp_error(pp->dip,
3672 				"ecpp_xfer_timeout: failed unbind\n");
3673 		}
3674 
3675 		/*
3676 		 * if the bcr is zero, then DMA is complete and
3677 		 * we are waiting for the fifo to drain.  So let
3678 		 * ecpp_fifo_timer() look after the clean up.
3679 		 */
3680 		if (unx_len == 0) {
3681 			qenable(pp->writeq);
3682 			mutex_exit(&pp->umutex);
3683 			return;
3684 		} else {
3685 			xferd = pp->dma_cookie.dmac_size - unx_len;
3686 			pp->resid -= xferd;
3687 			unx_len = pp->resid;
3688 
3689 			/* update statistics */
3690 			pp->obytes[pp->current_mode] += xferd;
3691 			pp->joblen += xferd;
3692 
3693 			if (pp->msg != NULL) {
3694 				unx_addr = (caddr_t)pp->msg->b_wptr - unx_len;
3695 			} else {
3696 				unx_addr = pp->ioblock +
3697 							(pp->xfercnt - unx_len);
3698 			}
3699 		}
3700 	}
3701 
3702 	/* Following code is common for PIO and DMA modes */
3703 
3704 	ecpp_putback_untransfered(pp, (caddr_t)unx_addr, unx_len);
3705 
3706 	if (pp->msg != NULL) {
3707 		freemsg(pp->msg);
3708 		pp->msg = NULL;
3709 	}
3710 
3711 	/* mark the error status structure */
3712 	pp->timeout_error = 1;
3713 	pp->e_busy = ECPP_ERR;
3714 	fifo_timer_id = pp->fifo_timer_id;
3715 	pp->fifo_timer_id = 0;
3716 
3717 	qenable(pp->writeq);
3718 
3719 	mutex_exit(&pp->umutex);
3720 
3721 	if (fifo_timer_id) {
3722 		(void) untimeout(fifo_timer_id);
3723 	}
3724 }
3725 
3726 static void
3727 ecpp_putback_untransfered(struct ecppunit *pp, void *startp, uint_t len)
3728 {
3729 	mblk_t *new_mp;
3730 
3731 	ecpp_error(pp->dip, "ecpp_putback_untrans=%d\n", len);
3732 
3733 	if (len == 0) {
3734 		return;
3735 	}
3736 
3737 	new_mp = allocb(len, BPRI_MED);
3738 	if (new_mp == NULL) {
3739 		ecpp_error(pp->dip,
3740 			"ecpp_putback_untransfered: allocb FAILURE.\n");
3741 		return;
3742 	}
3743 
3744 	bcopy(startp, new_mp->b_rptr, len);
3745 	new_mp->b_wptr = new_mp->b_rptr + len;
3746 
3747 	if (!putbq(pp->writeq, new_mp)) {
3748 		freemsg(new_mp);
3749 	}
3750 }
3751 
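/*
 * Write ECR and verify the value stuck by reading it back (the two
 * low-order bits are read-only and are masked off in the comparison);
 * retry up to ECPP_REG_WRITE_MAX_LOOP times before reporting FAILURE.
 */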
3752 static uchar_t
3753 ecr_write(struct ecppunit *pp, uint8_t ecr_byte)
3754 {
3755 	int i, current_ecr;
3756 
3757 	for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
3758 		ECR_WRITE(pp, ecr_byte);
3759 
3760 		current_ecr = ECR_READ(pp);
3761 
3762 		/* mask off the lower two read-only bits */
3763 		if ((ecr_byte & 0xFC) == (current_ecr & 0xFC))
3764 			return (SUCCESS);
3765 	}
3766 	return (FAILURE);
3767 }
3768 
3769 static uchar_t
3770 dcr_write(struct ecppunit *pp, uint8_t dcr_byte)
3771 {
3772 	uint8_t current_dcr;
3773 	int i;
3774 
3775 	for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
3776 		DCR_WRITE(pp, dcr_byte);
3777 
3778 		current_dcr = DCR_READ(pp);
3779 
3780 		/* compare only bits 0-4 (the direction bit reads back as 1) */
3781 		if ((dcr_byte & 0x1F) == (current_dcr & 0x1F))
3782 			return (SUCCESS);
3783 	}
3784 	ecpp_error(pp->dip,
3785 		"(%d)dcr_write: dcr written =%x, dcr readback =%x\n",
3786 		i, dcr_byte, current_dcr);
3787 
3788 	return (FAILURE);
3789 }
3790 
3791 static uchar_t
3792 ecpp_reset_port_regs(struct ecppunit *pp)
3793 {
3794 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
3795 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
3796 	return (SUCCESS);
3797 }
3798 
3799 /*
3800  * The data transferred by the DMA engine goes through the FIFO,
3801  * so that when the DMA counter reaches zero (and an interrupt occurs)
3802  * the FIFO can still contain data. If this is the case, the ISR will
3803  * schedule this callback to wait until the FIFO drains or a timeout occurs.
3804  */
3805 static void
3806 ecpp_fifo_timer(void *arg)
3807 {
3808 	struct ecppunit *pp = arg;
3809 	uint8_t	ecr;
3810 	timeout_id_t	timeout_id;
3811 
3812 	mutex_enter(&pp->umutex);
3813 
3814 	/*
3815 	 * If the FIFO timer has been turned off, exit.
3816 	 */
3817 	if (pp->fifo_timer_id == 0) {
3818 		ecpp_error(pp->dip, "ecpp_fifo_timer: untimedout\n");
3819 		mutex_exit(&pp->umutex);
3820 		return;
3821 	} else {
3822 		pp->fifo_timer_id = 0;
3823 	}
3824 
3825 	/*
3826 	 * If the FIFO is not empty restart timer.  Wait FIFO_DRAIN_PERIOD
3827 	 * (250 ms) and check FIFO_EMPTY bit again. Repeat until FIFO is
3828 	 * empty or until 10 * FIFO_DRAIN_PERIOD expires.
3829 	 */
3830 	ecr = ECR_READ(pp);
3831 
3832 	if ((pp->current_mode != ECPP_DIAG_MODE) &&
3833 	    (((ecr & ECPP_FIFO_EMPTY) == 0) &&
3834 	    (pp->ecpp_drain_counter < 10))) {
3835 
3836 		ecpp_error(pp->dip,
3837 			"ecpp_fifo_timer(%d):FIFO not empty:ecr=%x\n",
3838 			pp->ecpp_drain_counter, ecr);
3839 
3840 		pp->fifo_timer_id = timeout(ecpp_fifo_timer,
3841 				(caddr_t)pp, drv_usectohz(FIFO_DRAIN_PERIOD));
3842 		++pp->ecpp_drain_counter;
3843 
3844 		mutex_exit(&pp->umutex);
3845 		return;
3846 	}
3847 
3848 	if (pp->current_mode != ECPP_DIAG_MODE) {
3849 		/*
3850 		 * If the FIFO won't drain after 10 FIFO_DRAIN_PERIODs
3851 		 * then don't wait any longer.  Simply clean up the transfer.
3852 		 */
3853 		if (pp->ecpp_drain_counter >= 10) {
3854 			ecpp_error(pp->dip, "ecpp_fifo_timer(%d):"
3855 				" clearing FIFO,can't wait:ecr=%x\n",
3856 				pp->ecpp_drain_counter, ecr);
3857 		} else {
3858 			ecpp_error(pp->dip,
3859 				"ecpp_fifo_timer(%d):FIFO empty:ecr=%x\n",
3860 				pp->ecpp_drain_counter, ecr);
3861 		}
3862 
3863 		pp->ecpp_drain_counter = 0;
3864 	}
3865 
3866 	/*
3867 	 * Main section of routine:
3868 	 *  - stop the DMA transfer timer
3869 	 *  - program DMA with next cookie/window or unbind the DMA mapping
3870 	 *  - update stats
3871 	 *  - if last mblk in queue, signal to close() & return to idle state
3872 	 */
3873 
3874 	/* Stop the DMA transfer timeout timer */
3875 	timeout_id = pp->timeout_id;
3876 	pp->timeout_id = 0;
3877 
3878 	/* data has drained from fifo, it is ok to free dma resource */
3879 	if (pp->current_mode == ECPP_ECP_MODE ||
3880 	    pp->current_mode == ECPP_DIAG_MODE ||
3881 	    COMPAT_DMA(pp)) {
3882 		off_t	off;
3883 		size_t	len;
3884 
3885 		/* update residual */
3886 		pp->resid -= pp->dma_cookie.dmac_size;
3887 
3888 		/* update statistics */
3889 		pp->joblen += pp->dma_cookie.dmac_size;
3890 		if (pp->dma_dir == DDI_DMA_WRITE) {
3891 			pp->obytes[pp->current_mode] +=
3892 						pp->dma_cookie.dmac_size;
3893 		} else {
3894 			pp->ibytes[pp->current_mode] +=
3895 						pp->dma_cookie.dmac_size;
3896 		}
3897 
3898 		/*
3899 		 * Check whether any cookies/windows are left
3900 		 */
3901 		if (--pp->dma_cookie_count > 0) {
3902 			/* process the next cookie */
3903 			ddi_dma_nextcookie(pp->dma_handle,
3904 						&pp->dma_cookie);
3905 		} else if (pp->dma_curwin < pp->dma_nwin) {
3906 			/* process the next window */
3907 			if (ddi_dma_getwin(pp->dma_handle,
3908 			    pp->dma_curwin, &off, &len,
3909 			    &pp->dma_cookie,
3910 			    &pp->dma_cookie_count) != DDI_SUCCESS) {
3911 				ecpp_error(pp->dip,
3912 				    "ecpp_fifo_timer: ddi_dma_getwin failed\n");
3913 				goto dma_done;
3914 			}
3915 
3916 			pp->dma_curwin++;
3917 		} else {
3918 			goto dma_done;
3919 		}
3920 
3921 		ecpp_error(pp->dip, "ecpp_fifo_timer: next addr=%llx len=%d\n",
3922 			pp->dma_cookie.dmac_address,
3923 			pp->dma_cookie.dmac_size);
3924 
3925 		/* kick off new transfer */
3926 		if (ECPP_DMA_START(pp) != SUCCESS) {
3927 			ecpp_error(pp->dip,
3928 					"ecpp_fifo_timer: dma_start failed\n");
3929 			goto dma_done;
3930 		}
3931 
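		/*
		 * Note: (ecr & 0xe0) preserves the current mode field in
		 * ECR[7:5]; DMA is re-enabled with the interrupt masked.
		 */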
3932 		(void) ecr_write(pp, (ecr & 0xe0) |
3933 					ECPP_DMA_ENABLE | ECPP_INTR_MASK);
3934 
3935 		mutex_exit(&pp->umutex);
3936 
3937 		if (timeout_id) {
3938 			(void) untimeout(timeout_id);
3939 		}
3940 		return;
3941 
3942 	dma_done:
3943 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
3944 			ecpp_error(pp->dip, "ecpp_fifo_timer: unbind failed\n");
3945 		} else {
3946 			ecpp_error(pp->dip, "ecpp_fifo_timer: unbind ok\n");
3947 		}
3948 	}
3949 
3950 	/*
3951 	 * if we did not use the dmablock, the mblk that
3952 	 * was used should be freed.
3953 	 */
3954 	if (pp->msg != NULL) {
3955 		freemsg(pp->msg);
3956 		pp->msg = NULL;
3957 	}
3958 
3959 	/* The port is no longer active */
3960 	pp->e_busy = ECPP_IDLE;
3961 
3962 	qenable(pp->writeq);
3963 
3964 	mutex_exit(&pp->umutex);
3965 
3966 	if (timeout_id) {
3967 		(void) untimeout(timeout_id);
3968 	}
3969 }
3970 
3971 /*
3972  * In Compatibility mode, check if the peripheral is ready to accept data
3973  */
3974 static uint8_t
3975 ecpp_check_status(struct ecppunit *pp)
3976 {
3977 	uint8_t	dsr;
3978 	uint8_t statmask;
3979 
3980 	if (pp->current_mode == ECPP_ECP_MODE ||
3981 	    pp->current_mode == ECPP_DIAG_MODE)
3982 		return (SUCCESS);
3983 
3984 	statmask = ECPP_nERR | ECPP_SLCT | ECPP_nBUSY | ECPP_nACK;
3985 
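	/*
	 * The peripheral is considered ready only if PE is low and
	 * nErr, Select, nBusy and nAck are all high.
	 */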
3986 	dsr = DSR_READ(pp);
3987 	if ((dsr & ECPP_PE) || ((dsr & statmask) != statmask)) {
3988 		pp->e_busy = ECPP_ERR;
3989 		return (FAILURE);
3990 	} else {
3991 		return (SUCCESS);
3992 	}
3993 }
3994 
3995 /*
3996  * If the peripheral is not ready to accept data, the write service routine
3997  * periodically reschedules itself to recheck the peripheral status
3998  * and start the data transfer as soon as possible
3999  */
4000 static void
4001 ecpp_wsrv_timer(void *arg)
4002 {
4003 	struct ecppunit *pp = arg;
4004 
4005 	ecpp_error(pp->dip, "ecpp_wsrv_timer: starting\n");
4006 
4007 	mutex_enter(&pp->umutex);
4008 
4009 	if (pp->wsrv_timer_id == 0) {
4010 		mutex_exit(&pp->umutex);
4011 		return;
4012 	} else {
4013 		pp->wsrv_timer_id = 0;
4014 	}
4015 
4016 	ecpp_error(pp->dip, "ecpp_wsrv_timer: qenabling...\n");
4017 
4018 	qenable(pp->writeq);
4019 
4020 	mutex_exit(&pp->umutex);
4021 }
4022 
4023 /*
4024  * Allocate a message indicating a backchannel request
4025  * and put it on the write queue
4026  */
4027 static int
4028 ecpp_backchan_req(struct ecppunit *pp)
4029 {
4030 	mblk_t	*mp;
4031 
4032 	if ((mp = allocb(sizeof (int), BPRI_MED)) == NULL) {
4033 		ecpp_error(pp->dip, "ecpp_backchan_req: allocb failed\n");
4034 		return (FAILURE);
4035 	} else {
4036 		mp->b_datap->db_type = M_CTL;
4037 		*(int *)mp->b_rptr = ECPP_BACKCHANNEL;
4038 		mp->b_wptr = mp->b_rptr + sizeof (int);
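		/*
		 * putbq() places the request at the head of the write queue,
		 * so the write service routine sees it before any queued
		 * forward data.
		 */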
4039 		if (!putbq(pp->writeq, mp)) {
4040 			ecpp_error(pp->dip, "ecpp_backchan_req:putbq failed\n");
4041 			freemsg(mp);
4042 			return (FAILURE);
4043 		}
4044 		return (SUCCESS);
4045 	}
4046 }
4047 
4048 /*
4049  * Cancel the function scheduled with timeout(9F)
4050  * This function is to be called with the mutex held
4051  */
4052 static void
4053 ecpp_untimeout_unblock(struct ecppunit *pp, timeout_id_t *id)
4054 {
4055 	timeout_id_t	saved_id;
4056 
4057 	ASSERT(mutex_owned(&pp->umutex));
4058 
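	/*
	 * Drop the mutex around untimeout(9F): if the timeout handler is
	 * already running and blocked on umutex, untimeout() would otherwise
	 * deadlock waiting for it to complete.
	 */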
4059 	if (*id) {
4060 		saved_id = *id;
4061 		*id = 0;
4062 		mutex_exit(&pp->umutex);
4063 		(void) untimeout(saved_id);
4064 		mutex_enter(&pp->umutex);
4065 	}
4066 }
4067 
4068 /*
4069  * get prnio interface capabilities
4070  */
4071 static uint_t
4072 ecpp_get_prn_ifcap(struct ecppunit *pp)
4073 {
4074 	uint_t	ifcap;
4075 
4076 	ifcap = PRN_1284_DEVID | PRN_TIMEOUTS | PRN_STREAMS;
4077 
4078 	/* status (DSR) only makes sense in Centronics & Compat modes */
4079 	if (pp->current_mode == ECPP_CENTRONICS ||
4080 	    pp->current_mode == ECPP_COMPAT_MODE) {
4081 		ifcap |= PRN_1284_STATUS;
4082 	} else if (pp->current_mode == ECPP_NIBBLE_MODE ||
4083 		    pp->current_mode == ECPP_ECP_MODE) {
4084 		ifcap |= PRN_BIDI;
4085 	}
4086 
4087 	return (ifcap);
4088 }
4089 
4090 /*
4091  * Determine SuperI/O type
4092  */
4093 static struct ecpp_hw_bind *
4094 ecpp_determine_sio_type(struct ecppunit *pp)
4095 {
4096 	struct ecpp_hw_bind	*hw_bind;
4097 	char			*name;
4098 	int			i;
4099 
4100 	name = ddi_binding_name(pp->dip);
4101 
4102 	for (hw_bind = NULL, i = 0; i < NELEM(ecpp_hw_bind); i++) {
4103 		if (strcmp(name, ecpp_hw_bind[i].name) == 0) {
4104 			hw_bind = &ecpp_hw_bind[i];
4105 			break;
4106 		}
4107 	}
4108 
4109 	return (hw_bind);
4110 }
4111 
4112 
4113 /*
4114  *
4115  * IEEE 1284 support routines:
4116  * 	negotiation and termination;
4117  *	phase transitions;
4118  *	device ID;
4119  *
4120  */
4121 
4122 /*
4123  * Interface initialization, abnormal termination into Compatibility mode
4124  *
4125  * Peripheral may be non-1284, so we set current mode to ECPP_CENTRONICS
4126  */
4127 static void
4128 ecpp_1284_init_interface(struct ecppunit *pp)
4129 {
4130 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4131 
4132 	/*
4133 	 * Toggle the nInit signal if configured in ecpp.conf;
4134 	 * for most peripherals it is not needed
4135 	 */
4136 	if (pp->init_seq == TRUE) {
4137 		DCR_WRITE(pp, ECPP_SLCTIN);
4138 		drv_usecwait(50);	/* T(ER) = 50us */
4139 	}
4140 
4141 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4142 
4143 	pp->current_mode = pp->backchannel = ECPP_CENTRONICS;
4144 	pp->current_phase = ECPP_PHASE_C_IDLE;
4145 	ECPP_CONFIG_MODE(pp);
4146 	pp->to_mode[pp->current_mode]++;
4147 
4148 	ecpp_error(pp->dip, "ecpp_1284_init_interface: ok\n");
4149 }
4150 
4151 /*
4152  * ECP mode negotiation
4153  */
4154 static int
4155 ecp_negotiation(struct ecppunit *pp)
4156 {
4157 	uint8_t dsr;
4158 
4159 	/* ECP mode negotiation */
4160 
4161 	if (ecpp_1284_negotiation(pp, ECPP_XREQ_ECP, &dsr) == FAILURE)
4162 		return (FAILURE);
4163 
4164 	/* Event 5: peripheral deasserts PError and Busy, asserts Select */
4165 	if ((dsr & (ECPP_PE | ECPP_nBUSY | ECPP_SLCT)) !=
4166 		(ECPP_nBUSY | ECPP_SLCT)) {
4167 		ecpp_error(pp->dip,
4168 			"ecp_negotiation: failed event 5 %x\n", DSR_READ(pp));
4169 		(void) ecpp_1284_termination(pp);
4170 		return (FAILURE);
4171 	}
4172 
4173 	/* entered Setup Phase */
4174 	pp->current_phase = ECPP_PHASE_ECP_SETUP;
4175 
4176 	/* Event 30: host asserts nAutoFd */
4177 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4178 
4179 	/* Event 31: peripheral asserts PError */
4180 	if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) {
4181 		ecpp_error(pp->dip,
4182 			"ecp_negotiation: failed event 31 %x\n", DSR_READ(pp));
4183 		(void) ecpp_1284_termination(pp);
4184 		return (FAILURE);
4185 	}
4186 
4187 	/* entered Forward Idle Phase */
4188 	pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
4189 
4190 	/* successful negotiation into ECP mode */
4191 	pp->current_mode = ECPP_ECP_MODE;
4192 	pp->backchannel = ECPP_ECP_MODE;
4193 
4194 	ecpp_error(pp->dip, "ecp_negotiation: ok\n");
4195 
4196 	return (SUCCESS);
4197 }
4198 
4199 /*
4200  * Nibble mode negotiation
4201  */
4202 static int
4203 nibble_negotiation(struct ecppunit *pp)
4204 {
4205 	uint8_t	dsr;
4206 
4207 	if (ecpp_1284_negotiation(pp, ECPP_XREQ_NIBBLE, &dsr) == FAILURE) {
4208 		return (FAILURE);
4209 	}
4210 
4211 	/*
4212 	 * If the peripheral has data available, PE and nErr will
4213 	 * be set low at Events 5 & 6.
4214 	 */
4215 	if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) {
4216 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
4217 	} else {
4218 		pp->current_phase = ECPP_PHASE_NIBT_NAVAIL;
4219 	}
4220 
4221 	/* successful negotiation into Nibble mode */
4222 	pp->current_mode = ECPP_NIBBLE_MODE;
4223 	pp->backchannel = ECPP_NIBBLE_MODE;
4224 
4225 	ecpp_error(pp->dip, "nibble_negotiation: ok (phase=%x)\n",
4226 			pp->current_phase);
4227 
4228 	return (SUCCESS);
4229 
4230 }
4231 
4232 /*
4233  * Wait ptimeout usec for periph to set 'mask' bits to 'val' state
4234  *
4235  * return value < 0 indicates timeout
4236  */
4237 static int
4238 wait_dsr(struct ecppunit *pp, uint8_t mask, uint8_t val, int ptimeout)
4239 {
4240 	while (((DSR_READ(pp) & mask) != val) && ptimeout--) {
4241 		drv_usecwait(1);
4242 	}
4243 
4244 	return (ptimeout);
4245 }
4246 
4247 /*
4248  * 1284 negotiation Events 0..6
4249  * required mode is indicated by extensibility request value
4250  *
4251  * After successful negotiation SUCCESS is returned and
4252  * current mode is set according to xreq,
4253  * otherwise FAILURE is returned and current mode is set to
4254  * either COMPAT (1284 periph) or CENTRONICS (non-1284 periph)
4255  *
4256  * Current phase must be set by the caller (mode-specific negotiation)
4257  *
4258  * If rdsr is not NULL, DSR value after Event 6 is stored here
4259  */
4260 static int
4261 ecpp_1284_negotiation(struct ecppunit *pp, uint8_t xreq, uint8_t *rdsr)
4262 {
4263 	int xflag;
4264 
4265 	ecpp_error(pp->dip, "nego(%x): entering...\n", xreq);
4266 
4267 	/* negotiation should start in Compatibility mode */
4268 	(void) ecpp_1284_termination(pp);
4269 
4270 	/* Set host into Compat mode */
4271 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4272 
4273 	pp->current_phase = ECPP_PHASE_NEGO;
4274 
4275 	/* Event 0: host sets extensibility request on data lines */
4276 	DATAR_WRITE(pp, xreq);
4277 
4278 	/* Event 1: host deasserts nSelectIn and asserts nAutoFd */
4279 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4280 
4281 	drv_usecwait(1);	/* Tp(ecp) == 0.5us */
4282 
4283 	/*
4284 	 * Event 2: peripheral asserts nAck, deasserts nFault,
4285 	 * 			asserts Select, asserts PError
4286 	 */
4287 	if (wait_dsr(pp, ECPP_nERR | ECPP_SLCT | ECPP_PE | ECPP_nACK,
4288 			ECPP_nERR | ECPP_SLCT | ECPP_PE, 35000) < 0) {
4289 		/* peripheral is not 1284-compliant */
4290 		ecpp_error(pp->dip,
4291 			"nego(%x): failed event 2 %x\n", xreq, DSR_READ(pp));
4292 		(void) ecpp_1284_termination(pp);
4293 		return (FAILURE);
4294 	}
4295 
4296 	/*
4297 	 * Event 3: host asserts nStrobe, latching extensibility value into
4298 	 * the peripheral's input latch.
4299 	 */
4300 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_STB);
4301 
4302 	drv_usecwait(2);	/* Tp(ecp) = 0.5us */
4303 
4304 	/*
4305 	 * Event 4: host deasserts nStrobe and nAutoFd to acknowledge that
4306 	 * it has recognized a 1284-compatible peripheral
4307 	 */
4308 	DCR_WRITE(pp, ECPP_nINIT);
4309 
4310 	/*
4311 	 * Event 5: peripheral confirms it supports the requested extension.
4312 	 * For Nibble mode Xflag must be low, otherwise it must be high
4313 	 */
4314 	xflag = (xreq == ECPP_XREQ_NIBBLE) ? 0 : ECPP_SLCT;
4315 
4316 	/*
4317 	 * Event 6: Peripheral sets nAck high
4318 	 * indicating that status lines are valid
4319 	 */
4320 	if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4321 		/* Something wrong with peripheral */
4322 		ecpp_error(pp->dip,
4323 			"nego(%x): failed event 6 %x\n", xreq, DSR_READ(pp));
4324 		(void) ecpp_1284_termination(pp);
4325 		return (FAILURE);
4326 	}
4327 
4328 	if ((DSR_READ(pp) & ECPP_SLCT) != xflag) {
4329 		/* Extensibility value is not supported */
4330 		ecpp_error(pp->dip,
4331 			"nego(%x): failed event 5 %x\n", xreq, DSR_READ(pp));
4332 		(void) ecpp_1284_termination(pp);
4333 		return (FAILURE);
4334 	}
4335 
4336 	if (rdsr) {
4337 		*rdsr = DSR_READ(pp);
4338 	}
4339 
4340 	return (SUCCESS);
4341 }
4342 
4343 /*
4344  * 1284 Termination: Events 22..28 - set link to Compatibility mode
4345  *
4346  * This routine is not designed for Immediate termination,
4347  * caller must take care of waiting for a valid state,
4348  * (in particular, in ECP mode current phase must be Forward Idle)
4349  * otherwise interface will be reinitialized
4350  *
4351  * In case of Valid state termination SUCCESS is returned and
4352  * current_mode is ECPP_COMPAT_MODE, current phase is ECPP_PHASE_C_IDLE
4353  * Otherwise interface is reinitialized, FAILURE is returned and
4354  * current mode is ECPP_CENTRONICS, current phase is ECPP_PHASE_C_IDLE
4355  */
4356 static int
4357 ecpp_1284_termination(struct ecppunit *pp)
4358 {
4359 	int	previous_mode = pp->current_mode;
4360 
4361 	if (((pp->current_mode == ECPP_COMPAT_MODE ||
4362 	    pp->current_mode == ECPP_CENTRONICS) &&
4363 	    pp->current_phase == ECPP_PHASE_C_IDLE) ||
4364 	    pp->current_mode == ECPP_DIAG_MODE) {
4365 		ecpp_error(pp->dip, "termination: not needed\n");
4366 		return (SUCCESS);
4367 	}
4368 
4369 	/* Set host into Compat mode, interrupts disabled */
4370 	ECPP_MASK_INTR(pp);
4371 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4372 
4373 	pp->current_mode = ECPP_COMPAT_MODE;	/* needed by next function */
4374 
4375 	ECPP_CONFIG_MODE(pp);
4376 
4377 	/*
4378 	 * EPP mode uses simple nInit pulse for termination
4379 	 */
4380 	if (previous_mode == ECPP_EPP_MODE) {
4381 		/* Event 68: host sets nInit low */
4382 		DCR_WRITE(pp, 0);
4383 
4384 		drv_usecwait(55);	/* T(ER) = 50us */
4385 
4386 		/* Event 69: host sets nInit high */
4387 		DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4388 
4389 		goto endterm;
4390 	}
4391 
4392 	/* terminate peripheral to Compat mode */
4393 	pp->current_phase = ECPP_PHASE_TERM;
4394 
4395 	/* Event 22: host sets nSelectIn low and nAutoFd high */
4396 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4397 
4398 	/* Event 23: peripheral deasserts nFault and nBusy */
4399 	/* Event 24: peripheral asserts nAck */
4400 	if (wait_dsr(pp, ECPP_nERR | ECPP_nBUSY | ECPP_nACK,
4401 			ECPP_nERR, 35000) < 0) {
4402 		ecpp_error(pp->dip,
4403 			"termination: failed events 23,24 %x\n", DSR_READ(pp));
4404 		ecpp_1284_init_interface(pp);
4405 		return (FAILURE);
4406 	}
4407 
4408 	drv_usecwait(1);	/* Tp = 0.5us */
4409 
4410 	/* Event 25: host sets nAutoFd low */
4411 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN | ECPP_AFX);
4412 
4413 	/* Event 26: the peripheral puts itself in Compatible mode */
4414 
4415 	/* Event 27: peripheral deasserts nAck */
4416 	if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4417 		ecpp_error(pp->dip,
4418 			"termination: failed event 27 %x\n", DSR_READ(pp));
4419 		ecpp_1284_init_interface(pp);
4420 		return (FAILURE);
4421 	}
4422 
4423 	drv_usecwait(1);	/* Tp = 0.5us */
4424 
4425 	/* Event 28: host deasserts nAutoFd */
4426 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4427 
4428 	drv_usecwait(1);	/* Tp = 0.5us */
4429 
4430 endterm:
4431 	/* Compatible mode Idle Phase */
4432 	pp->current_phase = ECPP_PHASE_C_IDLE;
4433 
4434 	ecpp_error(pp->dip, "termination: completed %x %x\n",
4435 			DSR_READ(pp), DCR_READ(pp));
4436 
4437 	return (SUCCESS);
4438 }
4439 
4440 /*
4441  * Initiate ECP backchannel DMA transfer
4442  */
4443 static uchar_t
4444 ecp_peripheral2host(struct ecppunit *pp)
4445 {
4446 	mblk_t		*mp = NULL;
4447 	size_t		len;
4448 	uint32_t	xfer_time;
4449 
4450 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4451 		pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
4452 
4453 	/*
4454 	 * the hardware generates the cycles to receive data from the peripheral;
4455 	 * we only need to read from the FIFO
4456 	 */
4457 
4458 	/*
4459 	 * If user issued read(2) of rev_resid bytes, xfer exactly this amount
4460 	 * unless it exceeds ECP_REV_BLKSZ_MAX; otherwise try to read
4461 	 * ECP_REV_BLKSZ_MAX or at least ECP_REV_BLKSZ bytes
4462 	 */
4463 	if (pp->nread > 0) {
4464 		len = min(pp->nread, ECP_REV_BLKSZ_MAX);
4465 	} else {
4466 		len = ECP_REV_BLKSZ_MAX;
4467 	}
4468 
4469 	pp->nread = 0;	/* clear after use */
4470 
4471 	/*
4472 	 * Allocate an mblk for data, making at most 2 attempts:
4473 	 * if a len-byte block fails, try our block size
4474 	 */
4475 	while ((mp = allocb(len, BPRI_MED)) == NULL) {
4476 		ecpp_error(pp->dip,
4477 				"ecp_periph2host: failed allocb(%d)\n", len);
4478 		if (len > ECP_REV_BLKSZ) {
4479 			len = ECP_REV_BLKSZ;
4480 		} else {
4481 			break;
4482 		}
4483 	}
4484 
4485 	if (mp == NULL) {
4486 		goto fail;
4487 	}
4488 
4489 	pp->msg = mp;
4490 	pp->e_busy = ECPP_BUSY;
4491 	pp->dma_dir = DDI_DMA_READ;
4492 	pp->current_phase = ECPP_PHASE_ECP_REV_XFER;
4493 
4494 	if (ecpp_init_dma_xfer(pp, (caddr_t)mp->b_rptr, len) == FAILURE) {
4495 		goto fail;
4496 	}
4497 
4498 	/*
4499 	 * there are two problems with defining ECP backchannel xfer timeout
4500 	 *
4501 	 * a) IEEE 1284 allows infinite time between backchannel bytes,
4502 	 *    but we must stop at some point to send the data upstream,
4503 	 *    look if any forward transfer requests are pending, etc;
4504 	 *    all that done, we can continue with backchannel data;
4505 	 *
4506 	 * b) we don't know how much data the peripheral has;
4507 	 *    DMA counter is set to our buffer size, which can be bigger
4508 	 *    than needed - in this case a timeout must detect this;
4509 	 *
4510 	 * The timeout we schedule here serves as both the transfer timeout
4511 	 * and a means of detecting backchannel stalls; in fact, there are
4512 	 * two timeouts in one:
4513 	 *
4514 	 * - transfer timeout is based on the ECP bandwidth of ~1MB/sec and
4515 	 *   equals the time needed to transfer the whole buffer
4516 	 *   (but not less than ECP_REV_MINTOUT ms); if it occurs,
4517 	 *   DMA is stopped and the data is sent upstream;
4518 	 *
4519 	 * - backchannel watchdog, which would look at DMA counter
4520 	 *   every rev_watchdog ms and stop the transfer only
4521 	 *   if the counter hasn't changed since the last time;
4522 	 *   otherwise it would save DMA counter value and restart itself;
4523 	 *
4524 	 * transfer timeout is a multiple of rev_watchdog
4525 	 * and implemented as a downward counter
4526 	 *
4527 	 * on Grover, we can't access DMAC registers while DMA is in flight,
4528 	 * so we can't have a watchdog on Grover, only a timeout
4529 	 */
4530 
4531 	/* calculate number of watchdog invocations equal to the xfer timeout */
4532 	xfer_time = max((1000 * len) / pp->ecp_rev_speed, ECP_REV_MINTOUT);
4533 #if defined(__x86)
4534 	pp->rev_timeout_cnt = (pp->hw == &x86) ? 1 :
4535 #else
4536 	pp->rev_timeout_cnt = (pp->hw == &m1553) ? 1 :
4537 #endif
4538 		max(xfer_time / pp->rev_watchdog, 1);
4539 
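	/*
	 * ecpp_ecp_read_timeout() decrements rev_timeout_cnt on every
	 * watchdog tick and declares a transfer timeout when it reaches
	 * zero, so the count above bounds the total number of watchdog
	 * periods spent on this transfer.
	 */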
4540 	pp->last_dmacnt = len;	/* nothing xferred yet */
4541 
4542 	pp->timeout_id = timeout(ecpp_ecp_read_timeout, (caddr_t)pp,
4543 			drv_usectohz(pp->rev_watchdog * 1000));
4544 
4545 	ecpp_error(pp->dip, "ecp_periph2host: DMA started len=%d\n"
4546 			"xfer_time=%d wdog=%d cnt=%d\n",
4547 			len, xfer_time, pp->rev_watchdog, pp->rev_timeout_cnt);
4548 
4549 	return (SUCCESS);
4550 
4551 fail:
4552 	if (mp) {
4553 		freemsg(mp);
4554 	}
4555 	pp->e_busy = ECPP_IDLE;
4556 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4557 
4558 	return (FAILURE);
4559 }
4560 
4561 /*
4562  * ECP backchannel read timeout
4563  * implements both the backchannel watchdog and the transfer timeout in ECP mode:
4564  * if the transfer is still in progress, it reschedules itself,
4565  * otherwise it calls the completion routine
4566  */
4567 static void
4568 ecpp_ecp_read_timeout(void *arg)
4569 {
4570 	struct ecppunit	*pp = arg;
4571 	size_t		dmacnt;
4572 
4573 	mutex_enter(&pp->umutex);
4574 
4575 	if (pp->timeout_id == 0) {
4576 		mutex_exit(&pp->umutex);
4577 		return;
4578 	} else {
4579 		pp->timeout_id = 0;
4580 	}
4581 
4582 	if (--pp->rev_timeout_cnt == 0) {
4583 		/*
4584 		 * Transfer timed out
4585 		 */
4586 		ecpp_error(pp->dip, "ecp_read_timeout: timeout\n");
4587 		pp->xfer_tout++;
4588 		ecpp_ecp_read_completion(pp);
4589 	} else {
4590 		/*
4591 		 * Backchannel watchdog:
4592 		 * check whether DMA has made any progress since the last time
4593 		 */
4594 		dmacnt = ECPP_DMA_GETCNT(pp);
4595 		if (dmacnt - pp->last_dmacnt == 0) {
4596 			/*
4597 			 * No progress - stop the transfer and send
4598 			 * whatever has been read so far up the stream
4599 			 */
4600 			ecpp_error(pp->dip, "ecp_read_timeout: no progress\n");
4601 			pp->xfer_tout++;
4602 			ecpp_ecp_read_completion(pp);
4603 		} else {
4604 			/*
4605 			 * Something was transferred - restart ourselves
4606 			 */
4607 			ecpp_error(pp->dip, "ecp_read_timeout: restarting\n");
4608 			pp->last_dmacnt = dmacnt;
4609 			pp->timeout_id = timeout(ecpp_ecp_read_timeout,
4610 					(caddr_t)pp,
4611 					drv_usectohz(pp->rev_watchdog * 1000));
4612 		}
4613 	}
4614 
4615 	mutex_exit(&pp->umutex);
4616 }
4617 
4618 /*
4619  * ECP backchannel read completion:
4620  * stop the DMA, free DMA resources and send read data upstream
4621  */
4622 static void
4623 ecpp_ecp_read_completion(struct ecppunit *pp)
4624 {
4625 	size_t	xfer_len, unx_len;
4626 	mblk_t	*mp;
4627 
4628 	ASSERT(mutex_owned(&pp->umutex));
4629 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4630 		pp->current_phase == ECPP_PHASE_ECP_REV_XFER);
4631 	ASSERT(pp->msg != NULL);
4632 
4633 	/*
4634 	 * Stop the transfer and unbind DMA handle
4635 	 */
4636 	if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) {
4637 		unx_len = pp->resid;
4638 		ecpp_error(pp->dip, "ecp_read_completion: failed dma_stop\n");
4639 	}
4640 
4641 	mp = pp->msg;
4642 	xfer_len = pp->resid - unx_len;	/* how much data was transferred */
4643 
4644 	if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
4645 		ecpp_error(pp->dip, "ecp_read_completion: unbind failed.\n");
4646 	}
4647 
4648 	ecpp_error(pp->dip, "ecp_read_completion: xfered %d bytes of %d\n",
4649 			xfer_len, pp->resid);
4650 
4651 	/* clean up and update statistics */
4652 	pp->msg = NULL;
4653 	pp->resid -= xfer_len;
4654 	pp->ibytes[pp->current_mode] += xfer_len;
4655 	pp->e_busy = ECPP_IDLE;
4656 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4657 
4658 	/*
4659 	 * Send the read data up the stream
4660 	 */
4661 	mp->b_wptr += xfer_len;
4662 	if (canputnext(pp->readq)) {
4663 		mutex_exit(&pp->umutex);
4664 		putnext(pp->readq, mp);
4665 		mutex_enter(&pp->umutex);
4666 	} else {
4667 		ecpp_error(pp->dip, "ecp_read_completion: fail canputnext\n");
4668 		if (!putq(pp->readq, mp)) {
4669 			freemsg(mp);
4670 		}
4671 	}
4672 
4673 	/* if bytes left in the FIFO another transfer is needed */
4674 	if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
4675 		(void) ecpp_backchan_req(pp);
4676 	}
4677 
4678 	qenable(pp->writeq);
4679 }
4680 
4681 /*
4682  * Read one byte in the Nibble mode
4683  */
4684 static uchar_t
4685 nibble_peripheral2host(struct ecppunit *pp, uint8_t *byte)
4686 {
4687 	uint8_t	n[2];	/* two nibbles */
4688 	int	i;
4689 
4690 	/*
4691 	 * One byte is made of two nibbles
4692 	 */
4693 	for (i = 0; i < 2; i++) {
4694 		/* Event 7, 12: host asserts nAutoFd to move to read a nibble */
4695 		DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4696 
4697 		/* Event 8: peripheral puts data on the status lines */
4698 
4699 		/* Event 9: peripheral asserts nAck, data available */
4700 		if (wait_dsr(pp, ECPP_nACK, 0, 35000) < 0) {
4701 			ecpp_error(pp->dip,
4702 				"nibble_periph2host(%d): failed event 9 %x\n",
4703 				i + 1, DSR_READ(pp));
4704 			(void) ecpp_1284_termination(pp);
4705 			return (FAILURE);
4706 		}
4707 
4708 		n[i] = DSR_READ(pp);	/* get a nibble */
4709 
4710 		/* Event 10: host deasserts nAutoFd to say it grabbed data */
4711 		DCR_WRITE(pp, ECPP_nINIT);
4712 
4713 		/* (2) Event 13: peripheral asserts PE - end of data phase */
4714 
4715 		/* Event 11: peripheral deasserts nAck to finish handshake */
4716 		if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4717 			ecpp_error(pp->dip,
4718 				"nibble_periph2host(%d): failed event 11 %x\n",
4719 				i + 1, DSR_READ(pp));
4720 			(void) ecpp_1284_termination(pp);
4721 			return (FAILURE);
4722 		}
4723 	}
4724 
4725 	/* extract data byte from two nibbles - optimized formula */
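	/*
	 * Per the standard DSR layout, each nibble arrives on the status
	 * lines: bits 0-2 come from nFault, Select and PError, and bit 3
	 * is the complement of the nBusy status bit; the expression below
	 * repacks those DSR bits into a data byte.
	 */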
4726 	*byte = ((((n[1] & ~ECPP_nACK) << 1) | (~n[1] & ECPP_nBUSY)) & 0xf0) |
4727 	    ((((n[0] & ~ECPP_nACK) >> 3) | ((~n[0] & ECPP_nBUSY) >> 4)) & 0x0f);
4728 
4729 	pp->ibytes[ECPP_NIBBLE_MODE]++;
4730 	return (SUCCESS);
4731 }
4732 
4733 /*
4734  * process data transfers requested by the peripheral
4735  */
4736 static uint_t
4737 ecpp_peripheral2host(struct ecppunit *pp)
4738 {
4739 	if (!canputnext(pp->readq)) {
4740 		ecpp_error(pp->dip, "ecpp_peripheral2host: readq full\n");
4741 		return (SUCCESS);
4742 	}
4743 
4744 	switch (pp->backchannel) {
4745 	case ECPP_CENTRONICS:
4746 		/* no backchannel */
4747 		return (SUCCESS);
4748 
4749 	case ECPP_NIBBLE_MODE:
4750 		ASSERT(pp->current_mode == ECPP_NIBBLE_MODE);
4751 
4752 		/*
4753 		 * Event 20: Host sets nAutoFd high to ack request
4754 		 */
4755 		DCR_WRITE(pp, ECPP_nINIT);
4756 
4757 		/* Event 21: Periph sets PError low to ack host */
4758 		if (wait_dsr(pp, ECPP_PE, 0, 35000) < 0) {
4759 			ecpp_error(pp->dip,
4760 				"ecpp_periph2host: failed event 21 %x\n",
4761 				DSR_READ(pp));
4762 			(void) ecpp_1284_termination(pp);
4763 			return (FAILURE);
4764 		}
4765 
4766 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
4767 
4768 		/* this routine will read the data in Nibble mode */
4769 		return (ecpp_idle_phase(pp));
4770 
4771 	case ECPP_ECP_MODE:
4772 		if ((pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE) &&
4773 		    (ecp_forward2reverse(pp) == FAILURE)) {
4774 			return (FAILURE);
4775 		}
4776 
4777 		return (ecp_peripheral2host(pp));	/* start the transfer */
4778 
4779 	case ECPP_DIAG_MODE: {
4780 		mblk_t		*mp;
4781 		int		i;
4782 
4783 		if (ECR_READ(pp) & ECPP_FIFO_EMPTY) {
4784 			ecpp_error(pp->dip, "ecpp_periph2host: fifo empty\n");
4785 			return (SUCCESS);
4786 		}
4787 
4788 		/* allocate the FIFO size */
4789 		if ((mp = allocb(ECPP_FIFO_SZ, BPRI_MED)) == NULL) {
4790 			ecpp_error(pp->dip,
4791 				"ecpp_periph2host: allocb FAILURE.\n");
4792 			return (FAILURE);
4793 		}
4794 
4795 		/*
4796 		 * For the time being just read it byte by byte
4797 		 */
4798 		i = ECPP_FIFO_SZ;
4799 		while (i-- && (!(ECR_READ(pp) & ECPP_FIFO_EMPTY))) {
4800 			*mp->b_wptr++ = TFIFO_READ(pp);
4801 			drv_usecwait(1); /* ECR is sometimes slow to update */
4802 		}
4803 
4804 		if (canputnext(pp->readq)) {
4805 			mutex_exit(&pp->umutex);
4806 			mp->b_datap->db_type = M_DATA;
4807 			ecpp_error(pp->dip,
4808 				"ecpp_periph2host: sending %d bytes\n",
4809 				mp->b_wptr - mp->b_rptr);
4810 			putnext(pp->readq, mp);
4811 			mutex_enter(&pp->umutex);
4812 			return (SUCCESS);
4813 		} else {
4814 			ecpp_error(pp->dip,
4815 				"ecpp_periph2host: !canputnext data lost\n");
4816 			freemsg(mp);
4817 			return (FAILURE);
4818 		}
4819 	}
4820 
4821 	default:
4822 		ecpp_error(pp->dip, "ecpp_peripheral2host: illegal backchannel\n");
4823 		return (FAILURE);
4824 	}
4825 }
4826 
4827 /*
4828  * Negotiate from ECP Forward Idle to Reverse Idle Phase
4829  *
4830  * (manipulations with dcr/ecr are according to ECP Specification)
4831  */
4832 static int
4833 ecp_forward2reverse(struct ecppunit *pp)
4834 {
4835 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4836 		pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE);
4837 
4838 	/* place port into PS2 mode */
4839 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4840 
4841 	/* set direction bit (DCR3-0 must be 0100 - National) */
4842 	DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT);
4843 
4844 	/* enable hardware assist */
4845 	ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4846 
4847 	drv_usecwait(1);	/* Tp(ecp) = 0.5us */
4848 
4849 	/* Event 39: host sets nInit low */
4850 	DCR_WRITE(pp, ECPP_REV_DIR);
4851 
4852 	/* Event 40: peripheral sets PError low */
4853 
4854 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4855 
4856 	ecpp_error(pp->dip, "ecp_forward2reverse ok\n");
4857 
4858 	return (SUCCESS);
4859 }
4860 
4861 /*
4862  * Negotiate from ECP Reverse Idle to Forward Idle Phase
4863  *
4864  * (manipulations with dcr/ecr are according to ECP Specification)
4865  */
4866 static int
4867 ecp_reverse2forward(struct ecppunit *pp)
4868 {
4869 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4870 		pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
4871 
4872 	/* Event 47: host deasserts nInit */
4873 	DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT);
4874 
4875 	/*
4876 	 * Event 48: peripheral deasserts nAck
4877 	 * Event 49: peripheral asserts PError
4878 	 */
4879 	if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) {
4880 		ecpp_error(pp->dip,
4881 		    "ecp_reverse2forward: failed event 49 %x\n", DSR_READ(pp));
4882 		(void) ecpp_1284_termination(pp);
4883 		return (FAILURE);
4884 	}
4885 
4886 	/* place port into PS2 mode */
4887 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4888 
4889 	/* clear direction bit */
4890 	DCR_WRITE(pp, ECPP_nINIT);
4891 
4892 	/* reenable hardware assist */
4893 	ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4894 
4895 	pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
4896 
4897 	ecpp_error(pp->dip, "ecp_reverse2forward ok\n");
4898 
4899 	return (SUCCESS);
4900 }
4901 
4902 /*
4903  * Default negotiation chooses the best mode supported by peripheral
4904  * Note that backchannel mode may be different from forward mode
4905  */
4906 static void
4907 ecpp_default_negotiation(struct ecppunit *pp)
4908 {
4909 	if (!noecp && (ecpp_mode_negotiation(pp, ECPP_ECP_MODE) == SUCCESS)) {
4910 		/* 1284 compatible device */
4911 		pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO;
4912 		return;
4913 	} else if (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == SUCCESS) {
4914 		/* 1284 compatible device */
4915 		pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO;
4916 	} else {
4917 		/* Centronics device */
4918 		pp->io_mode =
4919 			(pp->fast_centronics == TRUE) ? ECPP_DMA : ECPP_PIO;
4920 	}
4921 	ECPP_CONFIG_MODE(pp);
4922 }
4923 
4924 /*
4925  * Negotiate to the mode indicated by newmode
4926  */
4927 static int
4928 ecpp_mode_negotiation(struct ecppunit *pp, uchar_t newmode)
4929 {
4930 	/* any other mode is impossible */
4931 	ASSERT(pp->current_mode == ECPP_CENTRONICS ||
4932 		pp->current_mode == ECPP_COMPAT_MODE ||
4933 		pp->current_mode == ECPP_NIBBLE_MODE ||
4934 		pp->current_mode == ECPP_ECP_MODE ||
4935 		pp->current_mode == ECPP_DIAG_MODE);
4936 
4937 	if (pp->current_mode == newmode) {
4938 		return (SUCCESS);
4939 	}
4940 
4941 	/* termination from ECP is only allowed from the Forward Idle Phase */
4942 	if ((pp->current_mode == ECPP_ECP_MODE) &&
4943 	    (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
4944 		/* this may break into Centronics */
4945 		(void) ecp_reverse2forward(pp);
4946 	}
4947 
4948 	switch (newmode) {
4949 	case ECPP_CENTRONICS:
4950 		(void) ecpp_1284_termination(pp);
4951 
4952 		/* put superio into PIO mode */
4953 		ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
4954 
4955 		pp->current_mode = ECPP_CENTRONICS;
4956 		pp->backchannel = ECPP_CENTRONICS;
4957 		ECPP_CONFIG_MODE(pp);
4958 
4959 		pp->to_mode[pp->current_mode]++;
4960 		return (SUCCESS);
4961 
4962 	case ECPP_COMPAT_MODE:
4963 		/* ECPP_COMPAT_MODE should support Nibble as a backchannel */
4964 		if (pp->current_mode == ECPP_NIBBLE_MODE) {
4965 			if (ecpp_1284_termination(pp) == SUCCESS) {
4966 				pp->current_mode = ECPP_COMPAT_MODE;
4967 				pp->backchannel = ECPP_NIBBLE_MODE;
4968 				ECPP_CONFIG_MODE(pp);
4969 				pp->to_mode[pp->current_mode]++;
4970 				return (SUCCESS);
4971 			} else {
4972 				return (FAILURE);
4973 			}
4974 		}
4975 
4976 		if ((nibble_negotiation(pp) == SUCCESS) &&
4977 		    (ecpp_1284_termination(pp) == SUCCESS)) {
4978 			pp->backchannel = ECPP_NIBBLE_MODE;
4979 			pp->current_mode = ECPP_COMPAT_MODE;
4980 			ECPP_CONFIG_MODE(pp);
4981 			pp->to_mode[pp->current_mode]++;
4982 			return (SUCCESS);
4983 		} else {
4984 			return (FAILURE);
4985 		}
4986 
4987 	case ECPP_NIBBLE_MODE:
4988 		if (nibble_negotiation(pp) == FAILURE) {
4989 			return (FAILURE);
4990 		}
4991 
4992 		pp->backchannel = ECPP_NIBBLE_MODE;
4993 		ECPP_CONFIG_MODE(pp);
4994 		pp->to_mode[pp->current_mode]++;
4995 
4996 		return (SUCCESS);
4997 
4998 	case ECPP_ECP_MODE:
4999 		if (pp->noecpregs)
5000 			return (FAILURE);
5001 		if (ecp_negotiation(pp) == FAILURE) {
5002 			return (FAILURE);
5003 		}
5004 
5005 		/*
5006 		 * National says CTR[3:0] should be 0100b before moving to 011
5007 		 */
5008 		DCR_WRITE(pp, ECPP_nINIT);
5009 
5010 		if (ecr_write(pp, ECR_mode_011 |
5011 			ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5012 			ecpp_error(pp->dip, "mode_nego:ECP: failed w/ecr\n");
5013 			return (FAILURE);
5014 		}
5015 
5016 		ECPP_CONFIG_MODE(pp);
5017 		pp->to_mode[pp->current_mode]++;
5018 
5019 		return (SUCCESS);
5020 
5021 	case ECPP_DIAG_MODE:
5022 		/*
5023 		 * In DIAG mode the application can do nasty things (e.g. drive pins).
5024 		 * To keep the peripheral sane, terminate to Compatibility mode
5025 		 */
5026 		(void) ecpp_1284_termination(pp);
5027 
5028 		/* put superio into TFIFO mode */
5029 		if (ecr_write(pp, ECR_mode_001 |
5030 		    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5031 			ecpp_error(pp->dip, "put to TFIFO: failed w/ecr\n");
5032 			return (FAILURE);
5033 		}
5034 
5035 		pp->current_mode = ECPP_DIAG_MODE;
5036 		pp->backchannel = ECPP_DIAG_MODE;
5037 		ECPP_CONFIG_MODE(pp);
5038 		pp->to_mode[pp->current_mode]++;
5039 
5040 		return (SUCCESS);
5041 
5042 	default:
5043 		ecpp_error(pp->dip,
5044 		    "ecpp_mode_negotiation: mode %d not supported\n", newmode);
5045 		return (FAILURE);
5046 	}
5047 }
5048 
5049 /*
5050  * Standard (9.1): Peripheral data is available only when the host places
5051  * the interface in a mode capable of peripheral-to-host data transfer.
5052  * This requires the host periodically to place the interface in such a mode.
5053  * Polling can be eliminated by leaving the interface in a 1284 idle phase.
5054  */
5055 static uchar_t
5056 ecpp_idle_phase(struct ecppunit *pp)
5057 {
5058 	uchar_t		rval = FAILURE;
5059 
5060 	/*
5061 	 * If there is no space on the read queue, do not reverse channel
5062 	 */
5063 	if (!canputnext(pp->readq)) {
5064 		ecpp_error(pp->dip, "ecpp_idle_phase: readq full\n");
5065 		return (SUCCESS);
5066 	}
5067 
5068 	switch (pp->backchannel) {
5069 	case ECPP_CENTRONICS:
5070 	case ECPP_COMPAT_MODE:
5071 	case ECPP_DIAG_MODE:
5072 		/* nothing */
5073 		ecpp_error(pp->dip, "ecpp_idle_phase: compat idle\n");
5074 		return (SUCCESS);
5075 
5076 	case ECPP_NIBBLE_MODE:
5077 		/*
5078 		 * read as much data as possible, ending up in either
5079 		 * Reverse Idle or Host Busy Data Available phase
5080 		 */
5081 		ecpp_error(pp->dip, "ecpp_idle_phase: nibble backchannel\n");
5082 		if ((pp->current_mode != ECPP_NIBBLE_MODE) &&
5083 		    (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == FAILURE)) {
5084 			break;
5085 		}
5086 
5087 		rval = read_nibble_backchan(pp);
5088 
5089 		/* put interface into Reverse Idle phase */
5090 		if (pp->current_phase == ECPP_PHASE_NIBT_NAVAIL &&
5091 		    canputnext(pp->readq)) {
5092 			ecpp_error(pp->dip, "ecpp_idle_phase: going revidle\n");
5093 
5094 			/*
5095 			 * Event 7: host asserts nAutoFd
5096 			 * enable nAck interrupt to get a backchannel request
5097 			 */
5098 			DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_INTR_EN);
5099 
5100 			ECPP_UNMASK_INTR(pp);
5101 		}
5102 
5103 		break;
5104 
5105 	case ECPP_ECP_MODE:
5106 		/*
5107 		 * if data is already available, request the backchannel xfer
5108 		 * otherwise stay in Forward Idle and enable nErr interrupts
5109 		 */
5110 		ecpp_error(pp->dip, "ecpp_idle_phase: ECP forward\n");
5111 
5112 		ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
5113 			pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
5114 
5115 		/* put interface into Forward Idle phase */
5116 		if ((pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) &&
5117 		    (ecp_reverse2forward(pp) == FAILURE)) {
5118 			return (FAILURE);
5119 		}
5120 
5121 		/*
5122 		 * if data already available, put backchannel request on the wq
5123 		 * otherwise enable nErr interrupts
5124 		 */
5125 		if ((DSR_READ(pp) & ECPP_nERR) == 0) {
5126 			(void) ecpp_backchan_req(pp);
5127 		} else {
5128 			ECR_WRITE(pp,
5129 				(ECR_READ(pp) & ~ECPP_INTR_MASK) | ECPP_INTR_SRV);
5130 
5131 			ECPP_UNMASK_INTR(pp);
5132 		}
5133 
5134 		return (SUCCESS);
5135 
5136 	default:
5137 		ecpp_error(pp->dip, "ecpp_idle_phase: illegal backchannel");
5138 	}
5139 
5140 	return (rval);
5141 }
5142 
5143 /*
5144  * This routine will leave the port in ECPP_PHASE_NIBT_REVIDLE
5145  * Due to flow control, though, it may stop at ECPP_PHASE_NIBT_AVAIL,
5146  * and continue later as the user consumes data from the read queue
5147  *
5148  * The current phase should be NIBT_AVAIL or NIBT_NAVAIL
5149  * If some events fail during transfer, termination puts link
5150  * to Compatibility mode and FAILURE is returned
5151  */
5152 static int
5153 read_nibble_backchan(struct ecppunit *pp)
5154 {
5155 	mblk_t		*mp;
5156 	int		i;
5157 	int		rval = SUCCESS;
5158 
5159 	ASSERT(pp->current_mode == ECPP_NIBBLE_MODE);
5160 
5161 	pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE))
5162 					? ECPP_PHASE_NIBT_NAVAIL
5163 					: ECPP_PHASE_NIBT_AVAIL;
5164 
5165 	ecpp_error(pp->dip, "read_nibble_backchan: %x\n", DSR_READ(pp));
5166 
5167 	/*
5168 	 * While data is available, read it in NIBBLE_REV_BLKSZ byte chunks
5169 	 * and send up the stream
5170 	 */
5171 	while (pp->current_phase == ECPP_PHASE_NIBT_AVAIL && rval == SUCCESS) {
5172 		/* see if there's space on the queue */
5173 		if (!canputnext(pp->readq)) {
5174 			ecpp_error(pp->dip,
5175 				"read_nibble_backchan: canputnext failed\n");
5176 			return (SUCCESS);
5177 		}
5178 
5179 		if ((mp = allocb(NIBBLE_REV_BLKSZ, BPRI_MED)) == NULL) {
5180 			ecpp_error(pp->dip,
5181 				"read_nibble_backchan: allocb failed\n");
5182 			return (SUCCESS);
5183 		}
5184 
5185 		/* read a chunk of data from the peripheral byte by byte */
5186 		i = NIBBLE_REV_BLKSZ;
5187 		while (i-- && !(DSR_READ(pp) & ECPP_nERR)) {
5188 			if (nibble_peripheral2host(pp, mp->b_wptr) != SUCCESS) {
5189 				rval = FAILURE;
5190 				break;
5191 			}
5192 			mp->b_wptr++;
5193 		}
5194 
5195 		pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE))
5196 						? ECPP_PHASE_NIBT_NAVAIL
5197 						: ECPP_PHASE_NIBT_AVAIL;
5198 
5199 		if (mp->b_wptr - mp->b_rptr > 0) {
5200 			ecpp_error(pp->dip,
5201 				"read_nibble_backchan: sending %d bytes\n",
5202 				mp->b_wptr - mp->b_rptr);
5203 			pp->nread = 0;
5204 			mutex_exit(&pp->umutex);
5205 			putnext(pp->readq, mp);
5206 			mutex_enter(&pp->umutex);
5207 		} else {
5208 			freemsg(mp);
5209 		}
5210 	}
5211 
5212 	return (rval);
5213 }
5214 
5215 /*
5216  * 'Request Device ID using nibble mode' negotiation
5217  */
5218 static int
5219 devidnib_negotiation(struct ecppunit *pp)
5220 {
5221 	uint8_t dsr;
5222 
5223 	if (ecpp_1284_negotiation(pp,
5224 			ECPP_XREQ_NIBBLE | ECPP_XREQ_ID, &dsr) == FAILURE) {
5225 		return (FAILURE);
5226 	}
5227 
5228 	/*
5229 	 * If the peripheral has data available, PE and nErr will
5230 	 * be set low at Events 5 & 6.
5231 	 */
5232 	if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) {
5233 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
5234 	} else {
5235 		pp->current_phase = ECPP_PHASE_NIBT_NAVAIL;
5236 	}
5237 
5238 	ecpp_error(pp->dip, "ecpp_devidnib_nego: current_phase=%x\n",
5239 			pp->current_phase);
5240 
5241 	/* successful negotiation into Nibble mode */
5242 	pp->current_mode = ECPP_NIBBLE_MODE;
5243 	pp->backchannel = ECPP_NIBBLE_MODE;
5244 
5245 	ecpp_error(pp->dip, "ecpp_devidnib_nego: ok\n");
5246 
5247 	return (SUCCESS);
5248 }
5249 
5250 /*
5251  * Read 1284 device ID sequence
5252  *
5253  * This function should be called two times:
5254  * 1) ecpp_getdevid(pp, NULL, &len) - to retrieve ID length;
5255  * 2) ecpp_getdevid(pp, buffer, &len) - to read len bytes into buffer
5256  *
5257  * After 2) port is in Compatible mode
5258  * If the caller fails to make second call, it must reset port to Centronics
5259  *
5260  */
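
/*
 * A minimal usage sketch of the two-call sequence described above
 * (hypothetical caller; buffer management and error handling are
 * illustrative only, not part of this driver):
 *
 *	int len;
 *	uint8_t *buf;
 *
 *	if (ecpp_getdevid(pp, NULL, &len, ECPP_NIBBLE_MODE) == 0) {
 *		buf = kmem_zalloc(len, KM_SLEEP);
 *		if (ecpp_getdevid(pp, buf, &len, ECPP_NIBBLE_MODE) != 0)
 *			ecpp_error(pp->dip, "device ID read failed\n");
 *		kmem_free(buf, len);
 *	}
 */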
5261 static int
5262 ecpp_getdevid(struct ecppunit *pp, uint8_t *id, int *lenp, int mode)
5263 {
5264 	uint8_t lenhi, lenlo;
5265 	uint8_t dsr;
5266 	int i;
5267 
5268 	switch (mode) {
5269 	case ECPP_NIBBLE_MODE:
5270 		/* negotiate only if necessary */
5271 		if ((pp->current_mode != mode) || (id == NULL)) {
5272 			if (devidnib_negotiation(pp) == FAILURE) {
5273 				return (EIO);
5274 			}
5275 		}
5276 
5277 		if (pp->current_phase != ECPP_PHASE_NIBT_AVAIL) {
5278 			return (EIO);
5279 		}
5280 
5281 		/*
5282 		 * Event 14: host tristates the data bus; the peripheral
5283 		 * asserts nErr if data is available. The data arrives on
5284 		 * the status bits (7-0) and requires two reads since
5285 		 * only nibbles are transferred.
5286 		 */
5287 		dsr = DSR_READ(pp);
5288 
5289 		if (id == NULL) {
5290 			/*
5291 			 * the first two bytes are the length of the sequence
5292 			 * (incl. these bytes);
5293 			 * the first byte is the MSB
5294 			 */
5295 			if ((dsr & ECPP_nERR) ||
5296 			    (nibble_peripheral2host(pp, &lenhi) == FAILURE) ||
5297 			    (dsr & ECPP_nERR) ||
5298 			    (nibble_peripheral2host(pp, &lenlo) == FAILURE)) {
5299 				ecpp_error(pp->dip,
5300 				    "ecpp_getdevid: id length read error\n");
5301 				return (EIO);
5302 			}
5303 
5304 			*lenp = (lenhi << 8) | (lenlo);
5305 
5306 			ecpp_error(pp->dip,
5307 				"ecpp_getdevid: id length = %d\n", *lenp);
5308 
5309 			if (*lenp < 2) {
5310 				return (EIO);
5311 			}
5312 		} else {
5313 			/*
5314 			 * read the rest of the data
5315 			 */
5316 			i = *lenp;
5317 			while (i && ((dsr & ECPP_nERR) == 0)) {
5318 				if (nibble_peripheral2host(pp, id++) == FAILURE)
5319 					break;
5320 
5321 				i--;
5322 				dsr = DSR_READ(pp);
5323 			}
5324 			ecpp_error(pp->dip,
5325 				"ecpp_getdevid: read %d bytes\n", *lenp - i);
5326 
5327 			/*
5328 			 * 1284: After receiving the sequence, the host is
5329 			 * required to return the link to the Compatibility mode
5330 			 */
5331 			(void) ecpp_1284_termination(pp);
5332 		}
5333 
5334 		break;
5335 
5336 	/* Other modes are not yet supported */
5337 	default:
5338 		return (EINVAL);
5339 	}
5340 
5341 	return (0);
5342 }
5343 
5344 /*
5345  * Various hardware support
5346  *
5347  * First define some stubs for functions that do nothing
5348  */
5349 
5350 /*ARGSUSED*/
5351 static void
5352 empty_config_mode(struct ecppunit *pp)
5353 {
5354 }
5355 
5356 /*ARGSUSED*/
5357 static void
5358 empty_mask_intr(struct ecppunit *pp)
5359 {
5360 }
5361 
5362 #if defined(__x86)
5363 static size_t
5364 x86_getcnt(struct ecppunit *pp)
5365 {
5366 	int count;
5367 
5368 	(void) ddi_dmae_getcnt(pp->dip, pp->uh.x86.chn, &count);
5369 	return (count);
5370 }
5371 #endif
5372 
5373 /*
5374  *
5375  * National PC87332 and PC97317 SuperIOs support routines
5376  * These chips are used in PCI-based Darwin, Quark, Quasar, Excalibur
5377  * and use EBus DMA facilities (Cheerio or RIO)
5378  *
5379  */
5380 
5381 static int
5382 pc87332_map_regs(struct ecppunit *pp)
5383 {
5384 	if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.ebus.c_reg, 0,
5385 	    sizeof (struct config_reg), &acc_attr,
5386 	    &pp->uh.ebus.c_handle) != DDI_SUCCESS) {
5387 		ecpp_error(pp->dip, "pc87332_map_regs: failed c_reg\n");
5388 		goto fail;
5389 	}
5390 
5391 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5392 	    sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5393 	    != DDI_SUCCESS) {
5394 		ecpp_error(pp->dip, "pc87332_map_regs: failed i_reg\n");
5395 		goto fail;
5396 	}
5397 
5398 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400,
5399 	    sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5400 	    != DDI_SUCCESS) {
5401 		ecpp_error(pp->dip, "pc87332_map_regs: failed f_reg\n");
5402 		goto fail;
5403 	}
5404 
5405 	if (ddi_regs_map_setup(pp->dip, 2, (caddr_t *)&pp->uh.ebus.dmac, 0,
5406 	    sizeof (struct cheerio_dma_reg), &acc_attr,
5407 	    &pp->uh.ebus.d_handle) != DDI_SUCCESS) {
5408 		ecpp_error(pp->dip, "pc87332_map_regs: failed dmac\n");
5409 		goto fail;
5410 	}
5411 
5412 	return (SUCCESS);
5413 
5414 fail:
5415 	pc87332_unmap_regs(pp);
5416 	return (FAILURE);
5417 }
5418 
5419 static void
5420 pc87332_unmap_regs(struct ecppunit *pp)
5421 {
5422 	if (pp->uh.ebus.c_handle) {
5423 		ddi_regs_map_free(&pp->uh.ebus.c_handle);
5424 	}
5425 	if (pp->uh.ebus.d_handle) {
5426 		ddi_regs_map_free(&pp->uh.ebus.d_handle);
5427 	}
5428 	if (pp->i_handle) {
5429 		ddi_regs_map_free(&pp->i_handle);
5430 	}
5431 	if (pp->f_handle) {
5432 		ddi_regs_map_free(&pp->f_handle);
5433 	}
5434 }
5435 
5436 static uint8_t
5437 pc87332_read_config_reg(struct ecppunit *pp, uint8_t reg_num)
5438 {
5439 	uint8_t retval;
5440 
5441 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num);
5442 	retval = PP_GETB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data);
5443 
5444 	return (retval);
5445 }
5446 
5447 static void
5448 pc87332_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val)
5449 {
5450 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num);
5451 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val);
5452 
5453 	/*
5454 	 * A second write to this register is needed: the register behaves as
5455 	 * a FIFO.  The first value written goes to the data register; the
5456 	 * second write pushes the initial value to the indexed register.
5457 	 */
5458 
5459 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val);
5460 }
5461 
5462 static int
5463 pc87332_config_chip(struct ecppunit *pp)
5464 {
5465 	uint8_t pmc, fcr;
5466 
5467 	pp->current_phase = ECPP_PHASE_INIT;
5468 
5469 	/* ECP DMA configuration bit (PMC4) must be set */
5470 	pmc = pc87332_read_config_reg(pp, PMC);
5471 	if (!(pmc & PC87332_PMC_ECP_DMA_CONFIG)) {
5472 		pc87332_write_config_reg(pp, PMC,
5473 					pmc | PC87332_PMC_ECP_DMA_CONFIG);
5474 	}
5475 
5476 	/*
5477 	 * The Parallel Port Multiplexor pins must be driven.
5478 	 * Check to see whether FCR3 is zero; if not, clear FCR3.
5479 	 */
5480 	fcr = pc87332_read_config_reg(pp, FCR);
5481 	if (fcr & PC87332_FCR_PPM_FLOAT_CTL) {
5482 		pc87332_write_config_reg(pp, FCR,
5483 					fcr & ~PC87332_FCR_PPM_FLOAT_CTL);
5484 	}
5485 
5486 	/*
5487 	 * Clear bits 3-0 in CTR (aka DCR) prior to enabling ECP mode.
5488 	 * CTR5 cannot be cleared in SPP mode; CTR5 will return 1.
5489 	 * "FAILURE" in this case is OK.  Better to use dcr_write()
5490 	 * to ensure reliable writing to the DCR.
5491 	 */
5492 	if (dcr_write(pp, ECPP_DCR_SET | ECPP_nINIT) == FAILURE) {
5493 		ecpp_error(pp->dip, "ecpp_config_87332: DCR config\n");
5494 	}
5495 
5496 	/* enable ECP mode, level intr (note that DCR bits 3-0 == 0x0) */
5497 	pc87332_write_config_reg(pp, PCR,
5498 				PC87332_PCR_INTR_LEVL | PC87332_PCR_ECP_EN);
5499 
5500 	/* put SuperIO in initial state */
5501 	if (ecr_write(pp, ECR_mode_001 |
5502 			ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5503 		ecpp_error(pp->dip, "ecpp_config_87332: ECR\n");
5504 	}
5505 
5506 	if (dcr_write(pp, ECPP_DCR_SET | ECPP_SLCTIN | ECPP_nINIT) == FAILURE) {
5507 		ecpp_error(pp->dip, "ecpp_config_87332: w/DCR failed2.\n");
5508 		return (FAILURE);
5509 
5510 	}
5511 	/* we are in Centronics mode */
5512 	pp->current_mode = ECPP_CENTRONICS;
5513 
5514 	/* in compatible mode with no data transfer in progress */
5515 	pp->current_phase = ECPP_PHASE_C_IDLE;
5516 
5517 	return (SUCCESS);
5518 }
5519 
5520 /*
5521  * A new mode was set, do some mode specific reconfiguration
5522  * in this case - set interrupt characteristic
5523  */
5524 static void
5525 pc87332_config_mode(struct ecppunit *pp)
5526 {
5527 	if (COMPAT_PIO(pp)) {
5528 		pc87332_write_config_reg(pp, PCR, 0x04);
5529 	} else {
5530 		pc87332_write_config_reg(pp, PCR, 0x14);
5531 	}
5532 }
5533 
5534 static int
5535 pc97317_map_regs(struct ecppunit *pp)
5536 {
5537 	if (pc87332_map_regs(pp) != SUCCESS) {
5538 		return (FAILURE);
5539 	}
5540 
5541 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->uh.ebus.c2_reg,
5542 			0x403, sizeof (struct config2_reg), &acc_attr,
5543 			&pp->uh.ebus.c2_handle) != DDI_SUCCESS) {
5544 		ecpp_error(pp->dip, "pc97317_map_regs: failed c2_reg\n");
5545 		pc87332_unmap_regs(pp);
5546 		return (FAILURE);
5547 	} else {
5548 		return (SUCCESS);
5549 	}
5550 }
5551 
5552 static void
5553 pc97317_unmap_regs(struct ecppunit *pp)
5554 {
5555 	if (pp->uh.ebus.c2_handle) {
5556 		ddi_regs_map_free(&pp->uh.ebus.c2_handle);
5557 	}
5558 
5559 	pc87332_unmap_regs(pp);
5560 }
5561 
5562 /*
5563  * OBP should configure the PC97317 such that it does not need further
5564  * configuration.  During sustaining, it may be necessary to examine
5565  * or change the configuration registers.  This routine is left in
5566  * the file for that purpose.
5567  */
5568 static int
5569 pc97317_config_chip(struct ecppunit *pp)
5570 {
5571 	uint8_t conreg;
5572 
5573 	/* set the logical device name */
5574 	pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4);
5575 
5576 	/* SPP Compatibility */
5577 	PP_PUTB(pp->uh.ebus.c2_handle,
5578 		&pp->uh.ebus.c2_reg->eir, PC97317_CONFIG2_CONTROL2);
5579 	PP_PUTB(pp->uh.ebus.c2_handle, &pp->uh.ebus.c2_reg->edr, 0x80);
5580 
5581 	/* low interrupt polarity */
5582 	pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00);
5583 
5584 	/* ECP mode */
5585 	pc87332_write_config_reg(pp, PC97317_CONFIG_PP_CONFIG, 0xf2);
5586 
5587 	if (dcr_write(pp, ECPP_SLCTIN | ECPP_nINIT) == FAILURE) {
5588 		ecpp_error(pp->dip, "pc97317_config_chip: failed w/DCR\n");
5589 	}
5590 
5591 	if (ecr_write(pp, ECR_mode_001 |
5592 			ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5593 		ecpp_error(pp->dip, "pc97317_config_chip: failed w/ECR\n");
5594 	}
5595 
5596 #ifdef DEBUG
5597 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DEV_NO);
5598 	ecpp_error(pp->dip, "97317:conreg7(logical dev)=%x\n", conreg);
5599 
5600 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_MSB);
5601 	ecpp_error(pp->dip, "97317:conreg60(addrHi)=%x\n", conreg);
5602 
5603 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_LSB);
5604 	ecpp_error(pp->dip, "97317:conreg61(addrLo)=%x\n", conreg);
5605 
5606 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_SEL);
5607 	ecpp_error(pp->dip, "97317:conreg70(IRQL)=%x\n", conreg);
5608 
5609 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_TYPE);
5610 	ecpp_error(pp->dip, "97317:conreg71(intr type)=%x\n", conreg);
5611 
5612 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_ACTIVATE);
5613 	ecpp_error(pp->dip, "97317:conreg30(Active)=%x\n", conreg);
5614 
5615 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_IO_RANGE);
5616 	ecpp_error(pp->dip, "97317:conreg31(IO Range Check)=%x\n", conreg);
5617 
5618 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA0_CHAN);
5619 	ecpp_error(pp->dip, "97317:conreg74(DMA0 Chan)=%x\n", conreg);
5620 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA1_CHAN);
5621 	ecpp_error(pp->dip, "97317:conreg75(DMA1 Chan)=%x\n", conreg);
5622 
5623 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG);
5624 	ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg);
5625 
5626 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG);
5627 	ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg);
5628 #endif /* DEBUG */
5629 
5630 	return (SUCCESS);
5631 }
5632 
5633 /*
5634  * A new mode was set, do some mode specific reconfiguration
5635  * in this case - set interrupt polarity
5636  */
5637 static void
5638 pc97317_config_mode(struct ecppunit *pp)
5639 {
5640 	/* set the logical device name */
5641 	pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4);
5642 
5643 	if (COMPAT_PIO(pp) || pp->current_mode == ECPP_NIBBLE_MODE) {
5644 		pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x02);
5645 	} else {
5646 		pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00);
5647 	}
5648 }
5649 
5650 static void
5651 cheerio_mask_intr(struct ecppunit *pp)
5652 {
5653 	/* mask Cheerio interrupts */
5654 	AND_SET_LONG_R(pp->uh.ebus.d_handle,
5655 			&pp->uh.ebus.dmac->csr, ~DCSR_INT_EN);
5656 }
5657 
5658 static void
5659 cheerio_unmask_intr(struct ecppunit *pp)
5660 {
5661 	/* unmask Cheerio interrupts */
5662 	OR_SET_LONG_R(pp->uh.ebus.d_handle,
5663 			&pp->uh.ebus.dmac->csr, DCSR_INT_EN | DCSR_TCI_DIS);
5664 }
5665 
5666 static int
5667 cheerio_dma_start(struct ecppunit *pp)
5668 {
5669 	cheerio_reset_dcsr(pp);
5670 	SET_DMAC_BCR(pp, pp->dma_cookie.dmac_size);
5671 	SET_DMAC_ACR(pp, pp->dma_cookie.dmac_address);
5672 
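	/*
	 * For a peripheral-to-host transfer (DDI_DMA_READ) the DCSR_WRITE
	 * bit is set so the DMA engine writes the incoming data to memory;
	 * forward (host-to-peripheral) transfers leave it clear.
	 */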
5673 	if (pp->dma_dir == DDI_DMA_READ) {
5674 		SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA |
5675 		    DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0 | DCSR_WRITE);
5676 	} else {
5677 		SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA |
5678 				DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0);
5679 	}
5680 
5681 	return (SUCCESS);
5682 }
5683 
5684 /*
5685  * Note: BCR is reset to 0, so counter should always be read before dma_stop
5686  */
5687 static int
5688 cheerio_dma_stop(struct ecppunit *pp, size_t *countp)
5689 {
5690 	uint8_t ecr;
5691 
5692 	/* disable DMA and byte counter */
5693 	AND_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr,
5694 		~(DCSR_EN_DMA | DCSR_EN_CNT| DCSR_INT_EN));
5695 
5696 	/* ACK and disable the TC interrupt */
5697 	OR_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr,
5698 		DCSR_TC | DCSR_TCI_DIS);
5699 
5700 	/* read DMA count if requested */
5701 	if (countp) {
5702 		*countp = cheerio_getcnt(pp);
5703 	}
5704 
5705 	cheerio_reset_dcsr(pp);
5706 	SET_DMAC_BCR(pp, 0);
5707 
5708 	/* turn off SuperIO's DMA */
5709 	ecr = ECR_READ(pp);
5710 	if (ecr_write(pp, ecr & ~ECPP_DMA_ENABLE) == FAILURE) {
5711 		return (FAILURE);
5712 	}
5713 
5714 	/* Disable SuperIO interrupts and DMA */
5715 	ecr = ECR_READ(pp);
5716 
5717 	return (ecr_write(pp, ecr | ECPP_INTR_SRV));
5718 }
5719 
5720 static size_t
5721 cheerio_getcnt(struct ecppunit *pp)
5722 {
5723 	return (GET_DMAC_BCR(pp));
5724 }
5725 
5726 /*
5727  * Reset the DCSR by first setting the RESET bit to 1.  Poll the
5728  * DCSR_CYC_PEND bit to make sure there are no more pending DMA cycles.
5729  * If there are no more pending cycles, clear the RESET bit.
5730  */
5731 static void
5732 cheerio_reset_dcsr(struct ecppunit *pp)
5733 {
5734 	int	timeout = DMAC_RESET_TIMEOUT;
5735 
5736 	SET_DMAC_CSR(pp, DCSR_RESET);
5737 
5738 	while (GET_DMAC_CSR(pp) & DCSR_CYC_PEND) {
5739 		if (timeout == 0) {
5740 			ecpp_error(pp->dip, "cheerio_reset_dcsr: timeout\n");
5741 			break;
5742 		} else {
5743 			drv_usecwait(1);
5744 			timeout--;
5745 		}
5746 	}
5747 
5748 	SET_DMAC_CSR(pp, 0);
5749 }
5750 
5751 /*
5752  *
5753  * Grover Southbridge (M1553) support routines
5754  * Southbridge contains an Intel 8237 DMAC onboard which is used
5755  * to transport data between PCI space and the SuperIO parallel port
5756  *
5757  */
5758 
5759 
5760 static int
5761 m1553_map_regs(struct ecppunit *pp)
5762 {
5763 	if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.m1553.isa_space,
5764 			0, sizeof (struct isaspace), &acc_attr,
5765 			&pp->uh.m1553.d_handle) != DDI_SUCCESS) {
5766 		ecpp_error(pp->dip, "m1553_map_regs: failed isa space\n");
5767 		goto fail;
5768 	}
5769 
5770 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5771 			sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5772 			!= DDI_SUCCESS) {
5773 		ecpp_error(pp->dip, "m1553_map_regs: failed i_reg\n");
5774 		goto fail;
5775 	}
5776 
5777 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400,
5778 			sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5779 			!= DDI_SUCCESS) {
5780 		ecpp_error(pp->dip, "m1553_map_regs: failed f_reg\n");
5781 		goto fail;
5782 	}
5783 
5784 	return (SUCCESS);
5785 
5786 fail:
5787 	m1553_unmap_regs(pp);
5788 	return (FAILURE);
5789 }
5790 
5791 static void
5792 m1553_unmap_regs(struct ecppunit *pp)
5793 {
5794 	if (pp->uh.m1553.d_handle) {
5795 		ddi_regs_map_free(&pp->uh.m1553.d_handle);
5796 	}
5797 	if (pp->i_handle) {
5798 		ddi_regs_map_free(&pp->i_handle);
5799 	}
5800 	if (pp->f_handle) {
5801 		ddi_regs_map_free(&pp->f_handle);
5802 	}
5803 }
5804 
5805 #if defined(__x86)
5806 static int
5807 x86_map_regs(struct ecppunit *pp)
5808 {
5809 	int nregs = 0;
5810 
5811 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5812 	    sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5813 	    != DDI_SUCCESS) {
5814 		ecpp_error(pp->dip, "x86_map_regs: failed i_reg\n");
5815 		goto fail;
5816 	}
5817 	if (ddi_dev_nregs(pp->dip, &nregs) == DDI_SUCCESS && nregs == 2) {
5818 		if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->f_reg, 0,
5819 		    sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5820 		    != DDI_SUCCESS) {
5821 			ecpp_error(pp->dip, "x86_map_regs: failed f_reg\n");
5822 			goto fail;
5823 		} else
5824 			pp->noecpregs = FALSE;
5825 	} else {
5826 		pp->noecpregs = TRUE;
5827 	}
5828 	return (SUCCESS);
5829 fail:
5830 	x86_unmap_regs(pp);
5831 	return (FAILURE);
5832 }
5833 
5834 static void
5835 x86_unmap_regs(struct ecppunit *pp)
5836 {
5837 	if (pp->i_handle) {
5838 		ddi_regs_map_free(&pp->i_handle);
5839 	}
5840 	if (pp->f_handle) {
5841 		ddi_regs_map_free(&pp->f_handle);
5842 	}
5843 }
5844 #endif
5845 
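/*
 * The M1553 PnP configuration registers are reached indirectly through
 * an index/data port pair in ISA space: the register number is written
 * to port 0x3F0 and its contents are read or written through port 0x3F1.
 * The ports themselves are unlocked/locked in m1553_config_chip().
 */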
5846 static uint8_t
5847 m1553_read_config_reg(struct ecppunit *pp, uint8_t reg_num)
5848 {
5849 	uint8_t retval;
5850 
5851 	dma8237_write(pp, 0x3F0, reg_num);
5852 	retval = dma8237_read(pp, 0x3F1);
5853 
5854 	return (retval);
5855 }
5856 
5857 static void
5858 m1553_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val)
5859 {
5860 	dma8237_write(pp, 0x3F0, reg_num);
5861 	dma8237_write(pp, 0x3F1, val);
5862 }
5863 
5864 static int
5865 m1553_config_chip(struct ecppunit *pp)
5866 {
5867 	uint8_t conreg;
5868 
5869 	/* Unlock configuration regs with "key sequence" */
5870 	dma8237_write(pp, 0x3F0, 0x51);
5871 	dma8237_write(pp, 0x3F0, 0x23);
5872 
5873 	m1553_write_config_reg(pp, PnP_CONFIG_DEV_NO, 0x3);
5874 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_DEV_NO);
5875 	ecpp_error(pp->dip, "M1553:conreg7(logical dev)=%x\n", conreg);
5876 
5877 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_ACTIVATE);
5878 	ecpp_error(pp->dip, "M1553:conreg30(Active)=%x\n", conreg);
5879 
5880 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_MSB);
5881 	ecpp_error(pp->dip, "M1553:conreg60(addrHi)=%x\n", conreg);
5882 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_LSB);
5883 	ecpp_error(pp->dip, "M1553:conreg61(addrLo)=%x\n", conreg);
5884 
5885 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_INTR_SEL);
5886 	ecpp_error(pp->dip, "M1553:conreg70(IRQL)=%x\n", conreg);
5887 
5888 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_DMA0_CHAN);
5889 	ecpp_error(pp->dip, "M1553:conreg74(DMA0 Chan)=%x\n", conreg);
5890 
5891 	/* set FIFO threshold 1 and ECP mode, preserve bit 7 (IRQ polarity) */
5892 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0);
5893 	conreg = (conreg & ~0x7F) | 0x0A;
5894 	m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG0, conreg);
5895 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0);
5896 	ecpp_error(pp->dip, "M1553:conregF0(pport conf)=%x\n", conreg);
5897 
5898 	m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG1, 0x04);
5899 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG1);
5900 	ecpp_error(pp->dip, "M1553:conregF1(outconf)=%x\n", conreg);
5901 
5902 	/* lock configuration regs with key */
5903 	dma8237_write(pp, 0x3F0, 0xBB);
5904 
5905 	/* Set ECR, DCR in known state */
5906 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
5907 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
5908 
5909 	ecpp_error(pp->dip, "m1553_config_chip: ecr=%x, dsr=%x, dcr=%x\n",
5910 		ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
5911 
5912 	return (SUCCESS);
5913 }
5914 
5915 #if defined(__x86)
5916 static int
5917 x86_config_chip(struct ecppunit *pp)
5918 {
5919 	if (ecr_write(pp, ECR_mode_001 |
5920 	    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5921 		ecpp_error(pp->dip, "config chip: failed w/ecr\n");
5922 		pp->noecpregs = TRUE;
5923 	}
5924 	if (pp->noecpregs)
5925 		pp->fast_compat = FALSE;
5926 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
5927 	ecpp_error(pp->dip, "x86_config_chip: ecr=%x, dsr=%x, dcr=%x\n",
5928 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
5929 	return (SUCCESS);
5930 }
5931 #endif
5932 
5933 /*
5934  * dma8237_dma_start() programs the selected 8 bit channel
5935  * of DMAC1 with the dma cookie.  pp->dma_cookie must
5936  * be set before this routine is called.
5937  */
5938 static int
5939 dma8237_dma_start(struct ecppunit *pp)
5940 {
5941 	uint8_t chn;
5942 
5943 	chn = pp->uh.m1553.chn;
5944 
5945 	ASSERT(chn <= DMAE_CH3 &&
5946 		pp->dma_cookie.dmac_size != 0 &&
5947 		pp->dma_cookie.dmac_address != NULL);
5948 
5949 	/* At this point Southbridge has not yet asserted DREQ */
5950 
5951 	/* DMAC2 in cascade mode; DMAC1 mode set per transfer direction */
5952 	dma8237_write(pp, DMAC2_MODE, DMAMODE_CASC);
5953 	if (pp->dma_dir == DDI_DMA_READ) {
5954 		dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE |
5955 							DMAMODE_READ | chn);
5956 	} else {
5957 		dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE |
5958 							DMAMODE_WRITE | chn);
5959 	}
5960 
5961 	dma8237_write_addr(pp, pp->dma_cookie.dmac_address);
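	/* the 8237 is programmed with (transfer length - 1) bytes */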
5962 	dma8237_write_count(pp, pp->dma_cookie.dmac_size - 1);
5963 
5964 	/*
5965 	 * The M1553 chip does not permit access to its DMA register banks
5966 	 * while DMA is in flight. As a result, the ecpp and floppy drivers
5967 	 * can potentially corrupt each other's DMA. The interlocking mechanism
5968 	 * is provided by the parent nexus driver (isadma) and is engaged
5969 	 * indirectly through a DMAC1_ALLMASK register access:
5970 	 *
5971 	 * writing a non-zero value to this register acquires the lock,
5972 	 * writing zero releases it.
5973 	 *
5974 	 * A DMA transfer must only occur while the lock is held.
5975 	 * If the lock is already owned by another driver, we will block.
5976 	 *
5977 	 * The following operation unmasks our channel and masks all others.
5978 	 */
5979 	dma8237_write(pp, DMAC1_ALLMASK, ~(1 << chn));
5980 	pp->uh.m1553.isadma_entered = 1;
5981 
5982 	return (SUCCESS);
5983 }
5984 
5985 static int
5986 dma8237_dma_stop(struct ecppunit *pp, size_t *countp)
5987 {
5988 	uint8_t ecr;
5989 
5990 	/* stop DMA */
5991 	ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV;
5992 	(void) ecr_write(pp, ecr);
5993 
5994 	if (pp->uh.m1553.isadma_entered) {
5995 		/* reset the channel mask so we can issue PIOs to our device */
5996 		dma8237_write(pp, DMAC1_ALLMASK, 0);
5997 		pp->uh.m1553.isadma_entered = 0;
5998 
5999 	}
6000 
6001 	/* read DMA count if requested */
6002 	if (countp) {
6003 		*countp = dma8237_getcnt(pp);
6004 		if (pp->dma_dir == DDI_DMA_READ && *countp > 0) {
6005 			(*countp)++;	/* need correction for reverse xfers */
6006 		}
6007 	}
6008 	return (SUCCESS);
6009 }
6010 #if defined(__x86)
6011 static int
6012 x86_dma_start(struct ecppunit *pp)
6013 {
6014 	uint8_t chn;
6015 	struct ddi_dmae_req dmaereq;
6016 
6017 	chn = pp->uh.x86.chn;
6018 	ASSERT(chn <= DMAE_CH3 &&
6019 	    pp->dma_cookie.dmac_size != 0 &&
6020 	    pp->dma_cookie.dmac_address != NULL);
6021 	bzero(&dmaereq, sizeof (struct ddi_dmae_req));
6022 	dmaereq.der_command =
6023 	    (pp->dma_dir & DDI_DMA_READ) ? DMAE_CMD_READ : DMAE_CMD_WRITE;
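	/*
	 * Hand the cookie to the parent nexus via the DMA engine interface;
	 * ddi_dmae_prog() programs the platform 8237 channel for us.
	 */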
6024 	if (ddi_dmae_prog(pp->dip, &dmaereq, &pp->dma_cookie, chn)
6025 	    != DDI_SUCCESS)
6026 		ecpp_error(pp->dip, "prog failed !!!\n");
6027 	ecpp_error(pp->dip, "dma_started..\n");
6028 	return (SUCCESS);
6029 }
6030 
6031 static int
6032 x86_dma_stop(struct ecppunit *pp, size_t *countp)
6033 {
6034 	uint8_t ecr;
6035 
6036 	/* stop DMA */
6037 	if (pp->uh.x86.chn == 0xff)
6038 		return (FAILURE);
6039 	ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV;
6040 	(void) ecr_write(pp, ecr);
6041 	ecpp_error(pp->dip, "dma_stop\n");
6042 
6043 	/* read DMA count if requested */
6044 	if (countp) {
6045 		*countp = x86_getcnt(pp);
6046 	}
6047 	ecpp_error(pp->dip, "dma_stopped..\n");
6048 	return (SUCCESS);
6049 }
6050 #endif
6051 
6052 /* channel must be masked */
6053 static void
6054 dma8237_write_addr(struct ecppunit *pp, uint32_t addr)
6055 {
6056 	uint8_t c_addr, c_lpage;
6057 	uint16_t c_hpage, *p;
6058 
6059 	switch (pp->uh.m1553.chn) {
6060 	case DMAE_CH0:
6061 		c_addr = DMA_0ADR;
6062 		c_lpage = DMA_0PAGE;
6063 		c_hpage = DMA_0HPG;
6064 		break;
6065 
6066 	case DMAE_CH1:
6067 		c_addr = DMA_1ADR;
6068 		c_lpage = DMA_1PAGE;
6069 		c_hpage = DMA_1HPG;
6070 		break;
6071 
6072 	case DMAE_CH2:
6073 		c_addr = DMA_2ADR;
6074 		c_lpage = DMA_2PAGE;
6075 		c_hpage = DMA_2HPG;
6076 		break;
6077 
6078 	case DMAE_CH3:
6079 		c_addr = DMA_3ADR;
6080 		c_lpage = DMA_3PAGE;
6081 		c_hpage = DMA_3HPG;
6082 		break;
6083 
6084 	default:
6085 		return;
6086 	}
6087 
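	/*
	 * Split the 32-bit address across the channel registers:
	 * bits 15:0 go into the address register, bits 23:16 into the
	 * low page register and bits 31:24 into the high page register.
	 */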
6088 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr];
6089 	ddi_put16(pp->uh.m1553.d_handle, p, addr & 0xFFFF);
6090 
6091 	dma8237_write(pp, c_lpage, (addr & 0xFF0000) >> 16);
6092 	dma8237_write(pp, c_hpage, (addr & 0xFF000000) >> 24);
6093 
6094 }
6095 
6096 /*
6097  * This function may be useful during debugging,
6098  * so we leave it in, but do not include in the binary
6099  * so we leave it in, but do not include it in the binary
6100 #ifdef INCLUDE_DMA8237_READ_ADDR
6101 static uint32_t
6102 dma8237_read_addr(struct ecppunit *pp)
6103 {
6104 	uint8_t rval3, rval4;
6105 	uint16_t rval16;
6106 	uint32_t rval;
6107 	uint8_t c_addr, c_lpage;
6108 	uint16_t c_hpage, *p;
6109 
6110 	switch (pp->uh.m1553.chn) {
6111 	case DMAE_CH0:
6112 		c_addr = DMA_0ADR;
6113 		c_lpage = DMA_0PAGE;
6114 		c_hpage = DMA_0HPG;
6115 		break;
6116 
6117 	case DMAE_CH1:
6118 		c_addr = DMA_1ADR;
6119 		c_lpage = DMA_1PAGE;
6120 		c_hpage = DMA_1HPG;
6121 		break;
6122 
6123 	case DMAE_CH2:
6124 		c_addr = DMA_2ADR;
6125 		c_lpage = DMA_2PAGE;
6126 		c_hpage = DMA_2HPG;
6127 		break;
6128 
6129 	case DMAE_CH3:
6130 		c_addr = DMA_3ADR;
6131 		c_lpage = DMA_3PAGE;
6132 		c_hpage = DMA_3HPG;
6133 		break;
6134 
6135 	default:
6136 		return (0);
6137 	}
6138 
6139 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr];
6140 	rval16 = ddi_get16(pp->uh.m1553.d_handle, p);
6141 
6142 	rval3 = dma8237_read(pp, c_lpage);
6143 	rval4 = dma8237_read(pp, c_hpage);
6144 
6145 	rval = rval16 | (rval3 << 16) | (rval4 << 24);
6146 
6147 	return (rval);
6148 }
6149 #endif
6150 
6151 static void
6152 dma8237_write_count(struct ecppunit *pp, uint32_t count)
6153 {
6154 	uint8_t c_wcnt;
6155 	uint16_t *p;
6156 
6157 	switch (pp->uh.m1553.chn) {
6158 	case DMAE_CH0:
6159 		c_wcnt = DMA_0WCNT;
6160 		break;
6161 
6162 	case DMAE_CH1:
6163 		c_wcnt = DMA_1WCNT;
6164 		break;
6165 
6166 	case DMAE_CH2:
6167 		c_wcnt = DMA_2WCNT;
6168 		break;
6169 
6170 	case DMAE_CH3:
6171 		c_wcnt = DMA_3WCNT;
6172 		break;
6173 
6174 	default:
6175 		return;
6176 	}
6177 
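	/*
	 * Only the low 16 bits are programmed: the 8237 byte counter is
	 * 16 bits wide, so a single cookie is presumably limited to 64KB
	 * by the DMA attributes used elsewhere in this driver.
	 */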
6178 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt];
6179 	ddi_put16(pp->uh.m1553.d_handle, p, count & 0xFFFF);
6180 
6181 }
6182 
6183 static uint32_t
6184 dma8237_read_count(struct ecppunit *pp)
6185 {
6186 	uint8_t c_wcnt;
6187 	uint16_t *p;
6188 
6189 	switch (pp->uh.m1553.chn) {
6190 	case DMAE_CH0:
6191 		c_wcnt = DMA_0WCNT;
6192 		break;
6193 
6194 	case DMAE_CH1:
6195 		c_wcnt = DMA_1WCNT;
6196 		break;
6197 
6198 	case DMAE_CH2:
6199 		c_wcnt = DMA_2WCNT;
6200 		break;
6201 
6202 	case DMAE_CH3:
6203 		c_wcnt = DMA_3WCNT;
6204 		break;
6205 
6206 	default:
6207 		return (0);
6208 	}
6209 
6210 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt];
6211 	return (ddi_get16(pp->uh.m1553.d_handle, p));
6212 
6213 }
6214 
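/*
 * 8-bit accessors for the mapped ISA I/O space;
 * reg_num is the ISA port address of the register being accessed.
 */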
6215 static void
6216 dma8237_write(struct ecppunit *pp, int reg_num, uint8_t val)
6217 {
6218 	ddi_put8(pp->uh.m1553.d_handle,
6219 		&pp->uh.m1553.isa_space->isa_reg[reg_num], val);
6220 }
6221 
6222 static uint8_t
6223 dma8237_read(struct ecppunit *pp, int reg_num)
6224 {
6225 	return (ddi_get8(pp->uh.m1553.d_handle,
6226 	    &pp->uh.m1553.isa_space->isa_reg[reg_num]));
6227 }
6228 
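/*
 * Residual byte count: the 8237 count register holds (remaining - 1)
 * and wraps to 0xFFFF once the programmed count is exhausted, so
 * 0xFFFF maps to 0 and any other value maps to (value + 1).
 */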
6229 static size_t
6230 dma8237_getcnt(struct ecppunit *pp)
6231 {
6232 	uint32_t cnt;
6233 
6234 	if ((cnt = dma8237_read_count(pp)) == 0xffff)
6235 		cnt = 0;
6236 	else
6237 		cnt++;
6238 	return (cnt);
6239 }
6240 
6241 
6242 /*
6243  *
6244  * Kstat support routines
6245  *
6246  */
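/*
 * Note: both kstats below can be examined from userland with kstat(1M),
 * e.g. "kstat -m ecpp" should dump the interrupt and misc statistics.
 */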
6247 static void
6248 ecpp_kstat_init(struct ecppunit *pp)
6249 {
6250 	struct ecppkstat *ekp;
6251 	char buf[16];
6252 
6253 	/*
6254 	 * Allocate, initialize and install interrupt counter kstat
6255 	 */
6256 	(void) sprintf(buf, "ecppc%d", pp->instance);
6257 	pp->intrstats = kstat_create("ecpp", pp->instance, buf, "controller",
6258 		KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
6259 	if (pp->intrstats == NULL) {
6260 		ecpp_error(pp->dip, "ecpp_kstat_init:1: kstat_create failed");
6261 	} else {
6262 		pp->intrstats->ks_update = ecpp_kstatintr_update;
6263 		pp->intrstats->ks_private = (void *) pp;
6264 		kstat_install(pp->intrstats);
6265 	}
6266 
6267 	/*
6268 	 * Allocate, initialize and install misc stats kstat
6269 	 */
6270 	pp->ksp = kstat_create("ecpp", pp->instance, NULL, "misc",
6271 		KSTAT_TYPE_NAMED,
6272 		sizeof (struct ecppkstat) / sizeof (kstat_named_t),
6273 		KSTAT_FLAG_PERSISTENT);
6274 	if (pp->ksp == NULL) {
6275 		ecpp_error(pp->dip, "ecpp_kstat_init:2: kstat_create failed");
6276 		return;
6277 	}
6278 
6279 	ekp = (struct ecppkstat *)pp->ksp->ks_data;
6280 
6281 #define	EK_NAMED_INIT(name) \
6282 	kstat_named_init(&ekp->ek_##name, #name, KSTAT_DATA_UINT32)
6283 
6284 	EK_NAMED_INIT(ctx_obytes);
6285 	EK_NAMED_INIT(ctxpio_obytes);
6286 	EK_NAMED_INIT(nib_ibytes);
6287 	EK_NAMED_INIT(ecp_obytes);
6288 	EK_NAMED_INIT(ecp_ibytes);
6289 	EK_NAMED_INIT(epp_obytes);
6290 	EK_NAMED_INIT(epp_ibytes);
6291 	EK_NAMED_INIT(diag_obytes);
6292 	EK_NAMED_INIT(to_ctx);
6293 	EK_NAMED_INIT(to_nib);
6294 	EK_NAMED_INIT(to_ecp);
6295 	EK_NAMED_INIT(to_epp);
6296 	EK_NAMED_INIT(to_diag);
6297 	EK_NAMED_INIT(xfer_tout);
6298 	EK_NAMED_INIT(ctx_cf);
6299 	EK_NAMED_INIT(joblen);
6300 	EK_NAMED_INIT(isr_reattempt_high);
6301 	EK_NAMED_INIT(mode);
6302 	EK_NAMED_INIT(phase);
6303 	EK_NAMED_INIT(backchan);
6304 	EK_NAMED_INIT(iomode);
6305 	EK_NAMED_INIT(state);
6306 
6307 	pp->ksp->ks_update = ecpp_kstat_update;
6308 	pp->ksp->ks_private = (void *) pp;
6309 	kstat_install(pp->ksp);
6310 }
6311 
6312 static int
6313 ecpp_kstat_update(kstat_t *ksp, int rw)
6314 {
6315 	struct ecppunit *pp;
6316 	struct ecppkstat *ekp;
6317 
6318 	/*
6319 	 * For the time being there is no point
6320 	 * in supporting writable kstats
6321 	 */
6322 	if (rw == KSTAT_WRITE) {
6323 		return (EACCES);
6324 	}
6325 
6326 	pp = (struct ecppunit *)ksp->ks_private;
6327 	ekp = (struct ecppkstat *)ksp->ks_data;
6328 
6329 	mutex_enter(&pp->umutex);
6330 
6331 	ekp->ek_ctx_obytes.value.ui32	= pp->obytes[ECPP_CENTRONICS] +
6332 						pp->obytes[ECPP_COMPAT_MODE];
6333 	ekp->ek_ctxpio_obytes.value.ui32 = pp->ctxpio_obytes;
6334 	ekp->ek_nib_ibytes.value.ui32	= pp->ibytes[ECPP_NIBBLE_MODE];
6335 	ekp->ek_ecp_obytes.value.ui32	= pp->obytes[ECPP_ECP_MODE];
6336 	ekp->ek_ecp_ibytes.value.ui32	= pp->ibytes[ECPP_ECP_MODE];
6337 	ekp->ek_epp_obytes.value.ui32	= pp->obytes[ECPP_EPP_MODE];
6338 	ekp->ek_epp_ibytes.value.ui32	= pp->ibytes[ECPP_EPP_MODE];
6339 	ekp->ek_diag_obytes.value.ui32	= pp->obytes[ECPP_DIAG_MODE];
6340 	ekp->ek_to_ctx.value.ui32	= pp->to_mode[ECPP_CENTRONICS] +
6341 						pp->to_mode[ECPP_COMPAT_MODE];
6342 	ekp->ek_to_nib.value.ui32	= pp->to_mode[ECPP_NIBBLE_MODE];
6343 	ekp->ek_to_ecp.value.ui32	= pp->to_mode[ECPP_ECP_MODE];
6344 	ekp->ek_to_epp.value.ui32	= pp->to_mode[ECPP_EPP_MODE];
6345 	ekp->ek_to_diag.value.ui32	= pp->to_mode[ECPP_DIAG_MODE];
6346 	ekp->ek_xfer_tout.value.ui32	= pp->xfer_tout;
6347 	ekp->ek_ctx_cf.value.ui32	= pp->ctx_cf;
6348 	ekp->ek_joblen.value.ui32	= pp->joblen;
6349 	ekp->ek_isr_reattempt_high.value.ui32	= pp->isr_reattempt_high;
6350 	ekp->ek_mode.value.ui32		= pp->current_mode;
6351 	ekp->ek_phase.value.ui32	= pp->current_phase;
6352 	ekp->ek_backchan.value.ui32	= pp->backchannel;
6353 	ekp->ek_iomode.value.ui32	= pp->io_mode;
6354 	ekp->ek_state.value.ui32	= pp->e_busy;
6355 
6356 	mutex_exit(&pp->umutex);
6357 
6358 	return (0);
6359 }
6360 
6361 static int
6362 ecpp_kstatintr_update(kstat_t *ksp, int rw)
6363 {
6364 	struct ecppunit *pp;
6365 
6366 	/*
6367 	 * For the time being there is no point
6368 	 * in supporting writable kstats
6369 	 */
6370 	if (rw == KSTAT_WRITE) {
6371 		return (EACCES);
6372 	}
6373 
6374 	pp = (struct ecppunit *)ksp->ks_private;
6375 
6376 	mutex_enter(&pp->umutex);
6377 
6378 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_HARD] = pp->intr_hard;
6379 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SPURIOUS] = pp->intr_spurious;
6380 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SOFT] = pp->intr_soft;
6381 
6382 	mutex_exit(&pp->umutex);
6383 
6384 	return (0);
6385 }
6386