xref: /illumos-gate/usr/src/uts/common/io/ecpp.c (revision f70049b72ff8162093254e3d617172d6df9705f1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  *
29  * IEEE 1284 Parallel Port Device Driver
30  *
31  */
32 
33 #include <sys/param.h>
34 #include <sys/errno.h>
35 #include <sys/file.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stropts.h>
38 #include <sys/debug.h>
39 #include <sys/stream.h>
40 #include <sys/strsun.h>
41 #include <sys/kmem.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/conf.h>		/* req. by dev_ops flags MTSAFE etc. */
45 #include <sys/modctl.h>		/* for modldrv */
46 #include <sys/stat.h>		/* ddi_create_minor_node S_IFCHR */
47 #include <sys/open.h>
48 #include <sys/ddi_impldefs.h>
49 #include <sys/kstat.h>
50 
51 #include <sys/prnio.h>
52 #include <sys/ecppreg.h>	/* hw description */
53 #include <sys/ecppio.h>		/* ioctl description */
54 #include <sys/ecppvar.h>	/* driver description */
55 #include <sys/dma_engine.h>
56 #include <sys/dma_i8237A.h>
57 
58 /*
59  * Background
60  * ==========
61  * IEEE 1284-1994 standard defines "a signalling method for asynchronous,
62  * fully interlocked, bidirectional parallel communications between hosts
63  * and printers or other peripherals." (1.1) The standard defines 5 modes
64  * of operation - Compatibility, Nibble, Byte, ECP and EPP - which differ
65  * in direction, bandwidth, pin assignment, DMA capability, etc.
66  *
67  * Negotiation is a mechanism for moving between modes. Compatibility mode
68  * is a default mode, from which negotiations to other modes occur and
69  * to which both host and peripheral break in case of interface errors.
70  * Compatibility mode provides a unidirectional (forward) channel for
71  * communicating with old pre-1284 peripherals.
72  *
73  * Each mode has a number of phases. [Mode, phase] pair represents the
74  * interface state. Host initiates all transfers, though peripheral can
75  * request backchannel transfer by asserting nErr pin.
76  *
77  * Ecpp driver implements an IEEE 1284-compliant host using a combination
78  * of hardware and software. Hardware part is represented by a controller,
79  * which is a part of the SuperIO chip. Ecpp supports the following SuperIOs:
80  * PC82332/PC82336 (U5/U10/U60), PC97317 (U100), M1553 (Grover).
81  * Struct ecpp_hw describes each SuperIO and is determined in ecpp_attach().
82  *
83  * Negotiation is performed in software. Transfer may be performed either
84  * in software by driving output pins for each byte (PIO method), or with
85  * hardware assistance - SuperIO has a 16-byte FIFO, which is filled by
86  * the driver (normally using DMA), while the chip performs the actual xfer.
87  * PIO is used for Nibble and Compat, DMA is used for ECP and Compat modes.
88  *
89  * Driver currently supports the following modes:
90  *
91  * - Compatibility mode: byte-wide forward channel ~50KB/sec;
92  *   pp->io_mode defines PIO or DMA method of transfer;
93  * - Nibble mode: nibble-wide (4-bit) reverse channel ~30KB/sec;
94  * - ECP mode: byte-wide bidirectional channel (~1MB/sec);
95  *
96  * Theory of operation
97  * ===================
98  * The manner in which ecpp drives 1284 interface is that of a state machine.
99  * State is a combination of 1284 mode {ECPP_*_MODE}, 1284 phase {ECPP_PHASE_*}
100  * and transfer method {PIO, DMA}. State is a function of application actions
101  * {write(2), ioctl(2)} and peripheral reaction.
102  *
103  * 1284 interface state is described by the following variables:
104  *   pp->current_mode  -- 1284 mode used for forward transfers;
105  *   pp->backchannel   -- 1284 mode used for backward transfers;
106  *   pp->current_phase -- 1284 phase;
107  *
108  * Bidirectional operation in Compatibility mode is provided by a combination:
109  * pp->current_mode == ECPP_COMPAT_MODE && pp->backchannel == ECPP_NIBBLE_MODE
110  * ECPP_CENTRONICS means no backchannel
111  *
112  * Driver internal state is defined by pp->e_busy as follows:
113  *   ECPP_IDLE	-- idle, no active transfers;
114  *   ECPP_BUSY	-- transfer is in progress;
115  *   ECPP_ERR	-- have data to transfer, but peripheral can`t receive data;
116  *   ECPP_FLUSH	-- flushing the queues;
117  *
118  * When opened, driver is in ECPP_IDLE state, current mode is ECPP_CENTRONICS
119  * Default negotiation tries to negotiate to the best mode supported by printer,
120  * sets pp->current_mode and pp->backchannel accordingly.
121  *
122  * When output data arrives in M_DATA mblks ecpp_wput() puts them on the queue
123  * to let ecpp_wsrv() concatenate small blocks into one big transfer
124  * by copying them into pp->ioblock. If the first mblk's data is bigger than
125  * pp->ioblock, it is used instead of the i/o block (and pointed to by pp->msg)
126  *
127  * Before starting the transfer the driver will check if peripheral is ready
128  * by calling ecpp_check_status() and if it is not, driver goes ECPP_ERR state
129  * and schedules ecpp_wsrv_timer() which would qenable() the wq, effectively
130  * rechecking the peripheral readiness and restarting itself until it is ready.
131  * The transfer is then started by calling ecpp_start(), driver goes ECPP_BUSY
132  *
133  * While transfer is in progress all arriving messages will be queued up.
134  * Transfer can end up in either of two ways:
135  * - interrupt occurs, ecpp_isr() checks if all the data was transferred, if so
136  *   cleanup and go ECPP_IDLE, otherwise putback untransferred and qenable();
137  * - ecpp_xfer_timeout() cancels the transfer and puts back untransferred data;
138  *
139  * PIO transfer method is very CPU intensive: for each sent byte the peripheral
140  * state is checked, then the byte is transferred and the driver waits for an nAck
141  * interrupt; ecpp_isr() will then look if there is more data and if so
142  * triggers the soft interrupt, which transfers the next byte. PIO method
143  * is needed only for legacy printers which are sensitive to strobe problem
144  * (Bugid 4192788).
145  *
146  * ecpp_wsrv() is responsible for both starting transfers (ecpp_start()) and
147  * going idle (ecpp_idle_phase()). Many routines qenable() the write queue,
148  * meaning "check if there are pending requests, process them and go idle".
149  *
150  * In its idle state the driver will always try to listen to the backchannel
151  * (as advised by 1284).
152  *
153  * The mechanism for handling backchannel requests is as follows:
154  * - when the peripheral has data to send it asserts nErr pin
155  *   (and also nAck in Nibble Mode) which results in an interrupt on the host;
156  * - ISR creates M_CTL message containing an ECPP_BACKCHANNEL byte and
157  *   puts it back on the write queue;
158  * - ecpp_wsrv() gets M_CTL and calls ecpp_peripheral2host(), which kicks off
159  *   the transfer;
160  *
161  * This way Nibble and ECP mode backchannel are implemented.
162  * If the read queue gets full, backchannel request is rejected.
163  * As the application reads data and queue size falls below the low watermark,
164  * ecpp_rsrv() gets called and enables the backchannel again.
165  *
166  * Future enhancements
167  * ===================
168  *
169  * Support new modes: Byte and EPP.
170  */
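
/*
 * Illustration only (not part of the driver): a minimal user-space sketch
 * of the interface described above, assuming the device node is /dev/ecpp0
 * and using the ioctls declared in <sys/ecppio.h>.  Error handling is
 * omitted; this is a usage sketch, not a reference program.
 *
 *	int fd = open("/dev/ecpp0", O_WRONLY);
 *
 *	struct ecpp_transfer_parms xp;
 *	(void) ioctl(fd, ECPPIOC_GETPARMS, &xp);	-- current mode/timeout
 *	xp.write_timeout = 60;				-- seconds
 *	xp.mode = ECPP_ECP_MODE;			-- request ECP if supported
 *	(void) ioctl(fd, ECPPIOC_SETPARMS, &xp);
 *
 *	(void) write(fd, buf, len);	-- forward transfer; small writes are
 *					   queued and concatenated by ecpp_wsrv()
 *
 *	(void) close(fd);		-- drains the queue, terminates back
 *					   to Compatibility mode
 */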
171 
172 #ifndef ECPP_DEBUG
173 #define	ECPP_DEBUG 0
174 #endif	/* ECPP_DEBUG */
175 int ecpp_debug = ECPP_DEBUG;
176 
177 int noecp = 0;	/* flag not to use ECP mode */
178 
179 /* driver entry point fn definitions */
180 static int	ecpp_open(queue_t *, dev_t *, int, int, cred_t *);
181 static int	ecpp_close(queue_t *, int, cred_t *);
182 static uint_t	ecpp_isr(caddr_t);
183 static uint_t	ecpp_softintr(caddr_t);
184 
185 /* configuration entry point fn definitions */
186 static int	ecpp_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
187 static int	ecpp_attach(dev_info_t *, ddi_attach_cmd_t);
188 static int	ecpp_detach(dev_info_t *, ddi_detach_cmd_t);
189 static struct ecpp_hw_bind *ecpp_determine_sio_type(struct ecppunit *);
190 
191 /* isr support routines */
192 static uint_t	ecpp_nErr_ihdlr(struct ecppunit *);
193 static uint_t	ecpp_pio_ihdlr(struct ecppunit *);
194 static uint_t	ecpp_dma_ihdlr(struct ecppunit *);
195 static uint_t	ecpp_M1553_intr(struct ecppunit *);
196 
197 /* configuration support routines */
198 static void	ecpp_get_props(struct ecppunit *);
199 
200 /* Streams Routines */
201 static int	ecpp_wput(queue_t *, mblk_t *);
202 static int	ecpp_wsrv(queue_t *);
203 static int	ecpp_rsrv(queue_t *);
204 static void	ecpp_flush(struct ecppunit *, int);
205 static void	ecpp_start(struct ecppunit *, caddr_t, size_t);
206 
207 /* ioctl handling */
208 static void	ecpp_putioc(queue_t *, mblk_t *);
209 static void	ecpp_srvioc(queue_t *, mblk_t *);
210 static void	ecpp_wput_iocdata_devid(queue_t *, mblk_t *, uintptr_t);
211 static void	ecpp_putioc_copyout(queue_t *, mblk_t *, void *, int);
212 static void	ecpp_putioc_stateful_copyin(queue_t *, mblk_t *, size_t);
213 static void	ecpp_srvioc_devid(queue_t *, mblk_t *,
214 				struct ecpp_device_id *, int *);
215 static void	ecpp_srvioc_prnif(queue_t *, mblk_t *);
216 static void	ecpp_ack_ioctl(queue_t *, mblk_t *);
217 static void	ecpp_nack_ioctl(queue_t *, mblk_t *, int);
218 
219 /* kstat routines */
220 static void	ecpp_kstat_init(struct ecppunit *);
221 static int	ecpp_kstat_update(kstat_t *, int);
222 static int	ecpp_kstatintr_update(kstat_t *, int);
223 
224 /* dma routines */
225 static void	ecpp_putback_untransfered(struct ecppunit *, void *, uint_t);
226 static uint8_t	ecpp_setup_dma_resources(struct ecppunit *, caddr_t, size_t);
227 static uint8_t	ecpp_init_dma_xfer(struct ecppunit *, caddr_t, size_t);
228 
229 /* pio routines */
230 static void	ecpp_pio_writeb(struct ecppunit *);
231 static void	ecpp_xfer_cleanup(struct ecppunit *);
232 static uint8_t	ecpp_prep_pio_xfer(struct ecppunit *, caddr_t, size_t);
233 
234 /* misc */
235 static uchar_t	ecpp_reset_port_regs(struct ecppunit *);
236 static void	ecpp_xfer_timeout(void *);
237 static void	ecpp_fifo_timer(void *);
238 static void	ecpp_wsrv_timer(void *);
239 static uchar_t	dcr_write(struct ecppunit *, uint8_t);
240 static uchar_t	ecr_write(struct ecppunit *, uint8_t);
241 static uchar_t	ecpp_check_status(struct ecppunit *);
242 static int	ecpp_backchan_req(struct ecppunit *);
243 static void	ecpp_untimeout_unblock(struct ecppunit *, timeout_id_t *);
244 static uint_t	ecpp_get_prn_ifcap(struct ecppunit *);
245 
246 /* stubs */
247 static void	empty_config_mode(struct ecppunit *);
248 static void	empty_mask_intr(struct ecppunit *);
249 
250 /* PC87332 support */
251 static int	pc87332_map_regs(struct ecppunit *);
252 static void	pc87332_unmap_regs(struct ecppunit *);
253 static int	pc87332_config_chip(struct ecppunit *);
254 static void	pc87332_config_mode(struct ecppunit *);
255 static uint8_t	pc87332_read_config_reg(struct ecppunit *, uint8_t);
256 static void	pc87332_write_config_reg(struct ecppunit *, uint8_t, uint8_t);
257 static void	cheerio_mask_intr(struct ecppunit *);
258 static void	cheerio_unmask_intr(struct ecppunit *);
259 static int	cheerio_dma_start(struct ecppunit *);
260 static int	cheerio_dma_stop(struct ecppunit *, size_t *);
261 static size_t	cheerio_getcnt(struct ecppunit *);
262 static void	cheerio_reset_dcsr(struct ecppunit *);
263 
264 /* PC97317 support */
265 static int	pc97317_map_regs(struct ecppunit *);
266 static void	pc97317_unmap_regs(struct ecppunit *);
267 static int	pc97317_config_chip(struct ecppunit *);
268 static void	pc97317_config_mode(struct ecppunit *);
269 
270 /* M1553 Southbridge support */
271 static int	m1553_map_regs(struct ecppunit *pp);
272 static void	m1553_unmap_regs(struct ecppunit *pp);
273 static int	m1553_config_chip(struct ecppunit *);
274 static uint8_t	m1553_read_config_reg(struct ecppunit *, uint8_t);
275 static void	m1553_write_config_reg(struct ecppunit *, uint8_t, uint8_t);
276 
277 /* M1553 Southbridge DMAC 8237 support routines */
278 static int	dma8237_dma_start(struct ecppunit *);
279 static int	dma8237_dma_stop(struct ecppunit *, size_t *);
280 static size_t	dma8237_getcnt(struct ecppunit *);
281 static void	dma8237_write_addr(struct ecppunit *, uint32_t);
282 static void	dma8237_write_count(struct ecppunit *, uint32_t);
283 static uint32_t	dma8237_read_count(struct ecppunit *);
284 static void	dma8237_write(struct ecppunit *, int, uint8_t);
285 static uint8_t	dma8237_read(struct ecppunit *, int);
286 #ifdef INCLUDE_DMA8237_READ_ADDR
287 static uint32_t	dma8237_read_addr(struct ecppunit *);
288 #endif
289 
290 /* i86 PC support routines */
291 
292 #if defined(__x86)
293 static int	x86_dma_start(struct ecppunit *);
294 static int	x86_dma_stop(struct ecppunit *, size_t *);
295 static int	x86_map_regs(struct ecppunit *);
296 static void	x86_unmap_regs(struct ecppunit *);
297 static int	x86_config_chip(struct ecppunit *);
298 static size_t	x86_getcnt(struct ecppunit *);
299 #endif
300 
301 /* IEEE 1284 phase transitions */
302 static void	ecpp_1284_init_interface(struct ecppunit *);
303 static int	ecpp_1284_termination(struct ecppunit *);
304 static uchar_t	ecpp_idle_phase(struct ecppunit *);
305 static int	ecp_forward2reverse(struct ecppunit *);
306 static int	ecp_reverse2forward(struct ecppunit *);
307 static int	read_nibble_backchan(struct ecppunit *);
308 
309 /* reverse transfers */
310 static uint_t	ecpp_peripheral2host(struct ecppunit *);
311 static uchar_t	ecp_peripheral2host(struct ecppunit *);
312 static uchar_t	nibble_peripheral2host(struct ecppunit *pp, uint8_t *);
313 static int	ecpp_getdevid(struct ecppunit *, uint8_t *, int *, int);
314 static void	ecpp_ecp_read_timeout(void *);
315 static void	ecpp_ecp_read_completion(struct ecppunit *);
316 
317 /* IEEE 1284 mode transitions */
318 static void	ecpp_default_negotiation(struct ecppunit *);
319 static int	ecpp_mode_negotiation(struct ecppunit *, uchar_t);
320 static int	ecpp_1284_negotiation(struct ecppunit *, uint8_t, uint8_t *);
321 static int	ecp_negotiation(struct ecppunit *);
322 static int	nibble_negotiation(struct ecppunit *);
323 static int	devidnib_negotiation(struct ecppunit *);
324 
325 /* IEEE 1284 utility routines */
326 static int	wait_dsr(struct ecppunit *, uint8_t, uint8_t, int);
327 
328 /* debugging functions */
329 static void	ecpp_error(dev_info_t *, char *, ...);
330 static uchar_t	ecpp_get_error_status(uchar_t);
331 
332 /*
333  * Chip-dependent structures
334  */
335 static ddi_dma_attr_t cheerio_dma_attr = {
336 	DMA_ATTR_VERSION,	/* version */
337 	0x00000000ull,		/* dlim_addr_lo */
338 	0xfffffffeull,		/* dlim_addr_hi */
339 	0xffffff,		/* DMA counter register */
340 	1,			/* DMA address alignment */
341 	0x74,			/* burst sizes */
342 	0x0001,			/* min effective DMA size */
343 	0xffff,			/* maximum transfer size */
344 	0xffff,			/* segment boundary */
345 	1,			/* s/g list length */
346 	1,			/* granularity of device */
347 	0			/* DMA flags */
348 };
349 
350 static struct ecpp_hw pc87332 = {
351 	pc87332_map_regs,
352 	pc87332_unmap_regs,
353 	pc87332_config_chip,
354 	pc87332_config_mode,
355 	cheerio_mask_intr,
356 	cheerio_unmask_intr,
357 	cheerio_dma_start,
358 	cheerio_dma_stop,
359 	cheerio_getcnt,
360 	&cheerio_dma_attr
361 };
362 
363 static struct ecpp_hw pc97317 = {
364 	pc97317_map_regs,
365 	pc97317_unmap_regs,
366 	pc97317_config_chip,
367 	pc97317_config_mode,
368 	cheerio_mask_intr,
369 	cheerio_unmask_intr,
370 	cheerio_dma_start,
371 	cheerio_dma_stop,
372 	cheerio_getcnt,
373 	&cheerio_dma_attr
374 };
375 
376 static ddi_dma_attr_t i8237_dma_attr = {
377 	DMA_ATTR_VERSION,	/* version */
378 	0x00000000ull,		/* dlim_addr_lo */
379 	0xfffffffeull,		/* dlim_addr_hi */
380 	0xffff,			/* DMA counter register */
381 	1,			/* DMA address alignment */
382 	0x01,			/* burst sizes */
383 	0x0001,			/* min effective DMA size */
384 	0xffff,			/* maximum transfer size */
385 	0x7fff,			/* segment boundary */
386 	1,			/* s/g list length */
387 	1,			/* granularity of device */
388 	0			/* DMA flags */
389 };
390 
391 static struct ecpp_hw m1553 = {
392 	m1553_map_regs,
393 	m1553_unmap_regs,
394 	m1553_config_chip,
395 	empty_config_mode,	/* no config_mode */
396 	empty_mask_intr,	/* no mask_intr */
397 	empty_mask_intr,	/* no unmask_intr */
398 	dma8237_dma_start,
399 	dma8237_dma_stop,
400 	dma8237_getcnt,
401 	&i8237_dma_attr
402 };
403 
404 #if defined(__x86)
405 static ddi_dma_attr_t sb_dma_attr = {
406 	DMA_ATTR_VERSION,	/* version */
407 	0x00000000ull,		/* dlim_addr_lo */
408 	0xffffff,		/* dlim_addr_hi */
409 	0xffff,			/* DMA counter register */
410 	1,			/* DMA address alignment */
411 	0x01,			/* burst sizes */
412 	0x0001,			/* min effective DMA size */
413 	0xffffffff,		/* maximum transfer size */
414 	0xffff,			/* segment boundary */
415 	1,			/* s/g list length */
416 	1,			/* granularity of device */
417 	0			/* DMA flags */
418 };
419 
420 static struct ecpp_hw x86 = {
421 	x86_map_regs,
422 	x86_unmap_regs,
423 	x86_config_chip,
424 	empty_config_mode,	/* no config_mode */
425 	empty_mask_intr,	/* no mask_intr */
426 	empty_mask_intr,	/* no unmask_intr */
427 	x86_dma_start,
428 	x86_dma_stop,
429 	x86_getcnt,
430 	&sb_dma_attr
431 };
432 #endif
433 
434 /*
435  * list of supported devices
436  */
437 struct ecpp_hw_bind ecpp_hw_bind[] = {
438 	{ "ns87317-ecpp",	&pc97317,	"PC97317" },
439 	{ "pnpALI,1533,3",	&m1553,		"M1553" },
440 	{ "ecpp",		&pc87332,	"PC87332" },
441 #if defined(__x86)
442 	{ "lp",			&x86,		"i86pc"},
443 #endif
444 };
445 
446 static ddi_device_acc_attr_t acc_attr = {
447 	DDI_DEVICE_ATTR_V0,
448 	DDI_STRUCTURE_LE_ACC,
449 	DDI_STRICTORDER_ACC
450 };
451 
452 static struct ecpp_transfer_parms default_xfer_parms = {
453 	FWD_TIMEOUT_DEFAULT,	/* write timeout in seconds */
454 	ECPP_CENTRONICS		/* supported mode */
455 };
456 
457 /* prnio interface info string */
458 static const char prn_ifinfo[] = PRN_PARALLEL;
459 
460 /* prnio timeouts */
461 static const struct prn_timeouts prn_timeouts_default = {
462 	FWD_TIMEOUT_DEFAULT,	/* forward timeout */
463 	REV_TIMEOUT_DEFAULT	/* reverse timeout */
464 };
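
/*
 * Hypothetical usage sketch for these prnio timeouts (field names per
 * <sys/prnio.h>; the values are examples only):
 *
 *	struct prn_timeouts tmo;
 *
 *	(void) ioctl(fd, PRNIOC_GET_TIMEOUTS, &tmo);
 *	tmo.tmo_forward = 120;		-- seconds to wait on forward transfer
 *	(void) ioctl(fd, PRNIOC_SET_TIMEOUTS, &tmo);
 */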
465 
466 static int ecpp_isr_max_delay = ECPP_ISR_MAX_DELAY;
467 static int ecpp_def_timeout = 90;  /* left in for 2.7 compatibility */
468 
469 static void    *ecppsoft_statep;
470 
471 /*
472  * STREAMS framework manages locks for these structures
473  */
474 _NOTE(SCHEME_PROTECTS_DATA("unique per call", iocblk))
475 _NOTE(SCHEME_PROTECTS_DATA("unique per call", datab))
476 _NOTE(SCHEME_PROTECTS_DATA("unique per call", msgb))
477 _NOTE(SCHEME_PROTECTS_DATA("unique per call", queue))
478 _NOTE(SCHEME_PROTECTS_DATA("unique per call", copyreq))
479 _NOTE(SCHEME_PROTECTS_DATA("unique per call", stroptions))
480 
481 struct module_info ecppinfo = {
482 	/* id, name, min pkt siz, max pkt siz, hi water, low water */
483 	42, "ecpp", 0, IO_BLOCK_SZ, ECPPHIWAT, ECPPLOWAT
484 };
485 
486 static struct qinit ecpp_rinit = {
487 	putq, ecpp_rsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
488 };
489 
490 static struct qinit ecpp_wint = {
491 	ecpp_wput, ecpp_wsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
492 };
493 
494 struct streamtab ecpp_str_info = {
495 	&ecpp_rinit, &ecpp_wint, NULL, NULL
496 };
497 
498 static struct cb_ops ecpp_cb_ops = {
499 	nodev,			/* cb_open */
500 	nodev,			/* cb_close */
501 	nodev,			/* cb_strategy */
502 	nodev,			/* cb_print */
503 	nodev,			/* cb_dump */
504 	nodev,			/* cb_read */
505 	nodev,			/* cb_write */
506 	nodev,			/* cb_ioctl */
507 	nodev,			/* cb_devmap */
508 	nodev,			/* cb_mmap */
509 	nodev,			/* cb_segmap */
510 	nochpoll,		/* cb_chpoll */
511 	ddi_prop_op,		/* cb_prop_op */
512 	&ecpp_str_info,		/* cb_stream */
513 	(D_NEW | D_MP | D_MTPERQ)	/* cb_flag */
514 };
515 
516 /*
517  * Declare ops vectors for auto configuration.
518  */
519 struct dev_ops  ecpp_ops = {
520 	DEVO_REV,		/* devo_rev */
521 	0,			/* devo_refcnt */
522 	ecpp_getinfo,		/* devo_getinfo */
523 	nulldev,		/* devo_identify */
524 	nulldev,		/* devo_probe */
525 	ecpp_attach,		/* devo_attach */
526 	ecpp_detach,		/* devo_detach */
527 	nodev,			/* devo_reset */
528 	&ecpp_cb_ops,		/* devo_cb_ops */
529 	(struct bus_ops *)NULL,	/* devo_bus_ops */
530 	nulldev,		/* devo_power */
531 	ddi_quiesce_not_needed,	/* devo_quiesce */
532 };
533 
534 extern struct mod_ops mod_driverops;
535 
536 static struct modldrv ecppmodldrv = {
537 	&mod_driverops,		/* type of module - driver */
538 	"parallel port driver",
539 	&ecpp_ops,
540 };
541 
542 static struct modlinkage ecppmodlinkage = {
543 	MODREV_1,
544 	&ecppmodldrv,
545 	0
546 };
547 
548 
549 /*
550  *
551  * DDI/DKI entry points and supplementary routines
552  *
553  */
554 
555 
556 int
557 _init(void)
558 {
559 	int    error;
560 
561 	if ((error = mod_install(&ecppmodlinkage)) == 0) {
562 		(void) ddi_soft_state_init(&ecppsoft_statep,
563 		    sizeof (struct ecppunit), 1);
564 	}
565 
566 	return (error);
567 }
568 
569 int
570 _fini(void)
571 {
572 	int    error;
573 
574 	if ((error = mod_remove(&ecppmodlinkage)) == 0) {
575 		ddi_soft_state_fini(&ecppsoft_statep);
576 	}
577 
578 	return (error);
579 }
580 
581 int
582 _info(struct modinfo *modinfop)
583 {
584 	return (mod_info(&ecppmodlinkage, modinfop));
585 }
586 
587 static int
588 ecpp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
589 {
590 	int			instance;
591 	char			name[16];
592 	struct ecppunit		*pp;
593 	struct ecpp_hw_bind	*hw_bind;
594 
595 	instance = ddi_get_instance(dip);
596 
597 	switch (cmd) {
598 	case DDI_ATTACH:
599 		break;
600 
601 	case DDI_RESUME:
602 		if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
603 			return (DDI_FAILURE);
604 		}
605 
606 		mutex_enter(&pp->umutex);
607 
608 		pp->suspended = FALSE;
609 
610 		/*
611 		 * Initialize the chip and restore current mode if needed
612 		 */
613 		(void) ECPP_CONFIG_CHIP(pp);
614 		(void) ecpp_reset_port_regs(pp);
615 
616 		if (pp->oflag == TRUE) {
617 			int current_mode = pp->current_mode;
618 
619 			(void) ecpp_1284_termination(pp);
620 			(void) ecpp_mode_negotiation(pp, current_mode);
621 		}
622 
623 		mutex_exit(&pp->umutex);
624 
625 		return (DDI_SUCCESS);
626 
627 	default:
628 		return (DDI_FAILURE);
629 	}
630 
631 	if (ddi_soft_state_zalloc(ecppsoft_statep, instance) != 0) {
632 		ecpp_error(dip, "ddi_soft_state_zalloc failed\n");
633 		goto fail;
634 	}
635 
636 	pp = ddi_get_soft_state(ecppsoft_statep, instance);
637 
638 	pp->dip = dip;
639 	pp->suspended = FALSE;
640 
641 	/*
642 	 * Determine SuperIO type and set chip-dependent variables
643 	 */
644 	hw_bind = ecpp_determine_sio_type(pp);
645 
646 	if (hw_bind == NULL) {
647 		cmn_err(CE_NOTE, "parallel port controller not supported");
648 		goto fail_sio;
649 	} else {
650 		pp->hw = hw_bind->hw;
651 		ecpp_error(pp->dip, "SuperIO type: %s\n", hw_bind->info);
652 	}
653 
654 	/*
655 	 * Map registers
656 	 */
657 	if (ECPP_MAP_REGS(pp) != SUCCESS) {
658 		goto fail_map;
659 	}
660 
661 	if (ddi_dma_alloc_handle(dip, pp->hw->attr, DDI_DMA_DONTWAIT,
662 	    NULL, &pp->dma_handle) != DDI_SUCCESS) {
663 		ecpp_error(dip, "ecpp_attach: failed ddi_dma_alloc_handle\n");
664 		goto fail_dma;
665 	}
666 
667 	if (ddi_get_iblock_cookie(dip, 0,
668 	    &pp->ecpp_trap_cookie) != DDI_SUCCESS) {
669 		ecpp_error(dip, "ecpp_attach: failed ddi_get_iblock_cookie\n");
670 		goto fail_ibc;
671 	}
672 
673 	mutex_init(&pp->umutex, NULL, MUTEX_DRIVER,
674 	    (void *)pp->ecpp_trap_cookie);
675 
676 	cv_init(&pp->pport_cv, NULL, CV_DRIVER, NULL);
677 
678 	if (ddi_add_intr(dip, 0, &pp->ecpp_trap_cookie, NULL, ecpp_isr,
679 	    (caddr_t)pp) != DDI_SUCCESS) {
680 		ecpp_error(dip, "ecpp_attach: failed to add hard intr\n");
681 		goto fail_intr;
682 	}
683 
684 	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW,
685 	    &pp->softintr_id, 0, 0, ecpp_softintr,
686 	    (caddr_t)pp) != DDI_SUCCESS) {
687 		ecpp_error(dip, "ecpp_attach: failed to add soft intr\n");
688 		goto fail_softintr;
689 	}
690 
691 	(void) sprintf(name, "ecpp%d", instance);
692 
693 	if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
694 	    DDI_NT_PRINTER, 0) == DDI_FAILURE) {
695 		ecpp_error(dip, "ecpp_attach: create_minor_node failed\n");
696 		goto fail_minor;
697 	}
698 
699 	pp->ioblock = (caddr_t)kmem_alloc(IO_BLOCK_SZ, KM_SLEEP);
700 	if (pp->ioblock == NULL) {
701 		ecpp_error(dip, "ecpp_attach: kmem_alloc failed\n");
702 		goto fail_iob;
703 	} else {
704 		ecpp_error(pp->dip, "ecpp_attach: ioblock=0x%x\n", pp->ioblock);
705 	}
706 
707 	ecpp_get_props(pp);
708 #if defined(__x86)
709 	if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) {
710 		if (ddi_dmae_alloc(dip, pp->uh.x86.chn,
711 		    DDI_DMA_DONTWAIT, NULL) == DDI_SUCCESS)
712 			ecpp_error(pp->dip, "dmae_alloc success!\n");
713 	}
714 #endif
715 	if (ECPP_CONFIG_CHIP(pp) == FAILURE) {
716 		ecpp_error(pp->dip, "config_chip failed.\n");
717 		goto fail_config;
718 	}
719 
720 	ecpp_kstat_init(pp);
721 
722 	ddi_report_dev(dip);
723 
724 	return (DDI_SUCCESS);
725 
726 fail_config:
727 	ddi_prop_remove_all(dip);
728 	kmem_free(pp->ioblock, IO_BLOCK_SZ);
729 fail_iob:
730 	ddi_remove_minor_node(dip, NULL);
731 fail_minor:
732 	ddi_remove_softintr(pp->softintr_id);
733 fail_softintr:
734 	ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);
735 fail_intr:
736 	mutex_destroy(&pp->umutex);
737 	cv_destroy(&pp->pport_cv);
738 fail_ibc:
739 	ddi_dma_free_handle(&pp->dma_handle);
740 fail_dma:
741 	ECPP_UNMAP_REGS(pp);
742 fail_map:
743 fail_sio:
744 	ddi_soft_state_free(ecppsoft_statep, instance);
745 fail:
746 	ecpp_error(dip, "ecpp_attach: failed.\n");
747 
748 	return (DDI_FAILURE);
749 }
750 
751 static int
752 ecpp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
753 {
754 	int		instance;
755 	struct ecppunit *pp;
756 
757 	instance = ddi_get_instance(dip);
758 
759 	switch (cmd) {
760 	case DDI_DETACH:
761 		break;
762 
763 	case DDI_SUSPEND:
764 		if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
765 			return (DDI_FAILURE);
766 		}
767 
768 		mutex_enter(&pp->umutex);
769 		ASSERT(pp->suspended == FALSE);
770 
771 		pp->suspended = TRUE;	/* prevent new transfers */
772 
773 		/*
774 		 * Wait if there's any activity on the port
775 		 */
776 		if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) {
777 			(void) cv_reltimedwait(&pp->pport_cv, &pp->umutex,
778 			    SUSPEND_TOUT * drv_usectohz(1000000),
779 			    TR_CLOCK_TICK);
780 			if ((pp->e_busy == ECPP_BUSY) ||
781 			    (pp->e_busy == ECPP_FLUSH)) {
782 				pp->suspended = FALSE;
783 				mutex_exit(&pp->umutex);
784 				ecpp_error(pp->dip,
785 				    "ecpp_detach: suspend timeout\n");
786 				return (DDI_FAILURE);
787 			}
788 		}
789 
790 		mutex_exit(&pp->umutex);
791 		return (DDI_SUCCESS);
792 
793 	default:
794 		return (DDI_FAILURE);
795 	}
796 
797 	pp = ddi_get_soft_state(ecppsoft_statep, instance);
798 #if defined(__x86)
799 	if (pp->hw == &x86 && pp->uh.x86.chn != 0xff)
800 		(void) ddi_dmae_release(pp->dip, pp->uh.x86.chn);
801 #endif
802 	if (pp->dma_handle != NULL)
803 		ddi_dma_free_handle(&pp->dma_handle);
804 
805 	ddi_remove_minor_node(dip, NULL);
806 
807 	ddi_remove_softintr(pp->softintr_id);
808 
809 	ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);
810 
811 	if (pp->ksp) {
812 		kstat_delete(pp->ksp);
813 	}
814 	if (pp->intrstats) {
815 		kstat_delete(pp->intrstats);
816 	}
817 
818 	cv_destroy(&pp->pport_cv);
819 
820 	mutex_destroy(&pp->umutex);
821 
822 	ECPP_UNMAP_REGS(pp);
823 
824 	kmem_free(pp->ioblock, IO_BLOCK_SZ);
825 
826 	ddi_prop_remove_all(dip);
827 
828 	ddi_soft_state_free(ecppsoft_statep, instance);
829 
830 	return (DDI_SUCCESS);
831 
832 }
833 
834 /*
835  * ecpp_get_props() reads ecpp.conf for user-definable tunables.
836  * If the file or a particular variable is not there, a default value
837  * is assigned.
838  */
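
/*
 * For illustration, a hypothetical ecpp.conf fragment overriding a few of
 * these tunables (property names match the lookups below; the values are
 * examples, not recommendations):
 *
 *	fast-1284-compatible="false";
 *	centronics-init-seq="true";
 *	centronics-retry=750;
 *	ecpp-transfer-timeout=60;
 */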
839 
840 static void
841 ecpp_get_props(struct ecppunit *pp)
842 {
843 	char	*prop;
844 #if defined(__x86)
845 	int	len;
846 	int	value;
847 #endif
848 	/*
849 	 * If fast_centronics is TRUE, non-compliant IEEE 1284
850  * peripherals (Centronics peripherals) will operate in DMA mode.
851  * Transfers between main memory and the device will be via DMA;
852 	 * peripheral handshaking will be conducted by superio logic.
853  * If ecpp cannot read the variable correctly, fast_centronics will
854 	 * be set to FALSE.  In this case, transfers and handshaking
855 	 * will be conducted by PIO for Centronics devices.
856 	 */
857 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
858 	    "fast-centronics", &prop) == DDI_PROP_SUCCESS) {
859 		pp->fast_centronics =
860 		    (strcmp(prop, "true") == 0) ? TRUE : FALSE;
861 		ddi_prop_free(prop);
862 	} else {
863 		pp->fast_centronics = FALSE;
864 	}
865 
866 	/*
867 	 * If fast-1284-compatible is set to TRUE, when ecpp communicates
868 	 * with IEEE 1284 compliant peripherals, data transfers between
869 	 * main memory and the parallel port will be conducted by DMA.
870 	 * Handshaking between the port and peripheral will be conducted
871 	 * by superio logic.  This is the default characteristic.  If
872 	 * fast-1284-compatible is set to FALSE, transfers and handshaking
873 	 * will be conducted by PIO.
874 	 */
875 
876 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
877 	    "fast-1284-compatible", &prop) == DDI_PROP_SUCCESS) {
878 		pp->fast_compat = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
879 		ddi_prop_free(prop);
880 	} else {
881 		pp->fast_compat = TRUE;
882 	}
883 
884 	/*
885 	 * Some centronics peripherals require the nInit signal to be
886 	 * toggled to reset the device.  If centronics_init_seq is set
887 	 * to TRUE, ecpp will toggle the nInit signal upon every ecpp_open().
888 	 * Applications have the opportunity to toggle the nInit signal
889 	 * with ioctl(2) calls as well.  The default is to set it to FALSE.
890 	 */
891 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
892 	    "centronics-init-seq", &prop) == DDI_PROP_SUCCESS) {
893 		pp->init_seq = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
894 		ddi_prop_free(prop);
895 	} else {
896 		pp->init_seq = FALSE;
897 	}
898 
899 	/*
900  * If one of the centronics status signals is in an erroneous
901  * state, ecpp_wsrv() will be reinvoked every centronics-retry ms to
902 	 * check if the status is ok to transfer.  If the property is not
903 	 * found, wsrv_retry will be set to CENTRONICS_RETRY ms.
904 	 */
905 	pp->wsrv_retry = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
906 	    "centronics-retry", CENTRONICS_RETRY);
907 
908 	/*
909  * In PIO mode, ecpp_isr() will loop waiting for the busy signal
910  * to be deasserted before transferring the next byte. wait_for_busy
911  * is specified in microseconds.  If the property is not found
912 	 * ecpp_isr() will wait for a maximum of WAIT_FOR_BUSY us.
913 	 */
914 	pp->wait_for_busy = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
915 	    "centronics-wait-for-busy", WAIT_FOR_BUSY);
916 
917 	/*
918 	 * In PIO mode, centronics transfers must hold the data signals
919  * for data_setup_time milliseconds before the strobe is asserted.
920 	 */
921 	pp->data_setup_time = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
922 	    "centronics-data-setup-time", DATA_SETUP_TIME);
923 
924 	/*
925  * In PIO mode, a centronics transfer asserts the strobe signal
926 	 * for a period of strobe_pulse_width milliseconds.
927 	 */
928 	pp->strobe_pulse_width = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
929 	    "centronics-strobe-pulse-width", STROBE_PULSE_WIDTH);
930 
931 	/*
932  * Upon a transfer to the peripheral, ecpp waits write_timeout seconds
933 	 * for the transmission to complete.
934 	 */
935 	default_xfer_parms.write_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
936 	    pp->dip, 0, "ecpp-transfer-timeout", ecpp_def_timeout);
937 
938 	pp->xfer_parms = default_xfer_parms;
939 
940 	/*
941 	 * Get dma channel for M1553
942 	 */
943 	if (pp->hw == &m1553) {
944 		pp->uh.m1553.chn = ddi_prop_get_int(DDI_DEV_T_ANY,
945 		    pp->dip, 0, "dma-channel", 0x1);
946 		ecpp_error(pp->dip, "ecpp_get_prop:chn=%x\n", pp->uh.m1553.chn);
947 	}
948 #if defined(__x86)
949 	len = sizeof (value);
950 	/* Get dma channel for i86 pc */
951 	if (pp->hw == &x86) {
952 		if (ddi_prop_op(DDI_DEV_T_ANY, pp->dip, PROP_LEN_AND_VAL_BUF,
953 		    DDI_PROP_DONTPASS, "dma-channels", (caddr_t)&value, &len)
954 		    != DDI_PROP_SUCCESS) {
955 			ecpp_error(pp->dip, "No dma channel found\n");
956 			pp->uh.x86.chn = 0xff;
957 			pp->fast_compat = FALSE;
958 			pp->noecpregs = TRUE;
959 		} else
960 			pp->uh.x86.chn = (uint8_t)value;
961 	}
962 #endif
963 	/*
964 	 * these properties are not yet public
965 	 */
966 	pp->ecp_rev_speed = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
967 	    "ecp-rev-speed", ECP_REV_SPEED);
968 
969 	pp->rev_watchdog = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
970 	    "rev-watchdog", REV_WATCHDOG);
971 
972 	ecpp_error(pp->dip,
973 	    "ecpp_get_prop: fast_centronics=%x, fast-1284=%x\n"
974 	    "ecpp_get_prop: wsrv_retry=%d, wait_for_busy=%d\n"
975 	    "ecpp_get_prop: data_setup=%d, strobe_pulse=%d\n"
976 	    "ecpp_get_prop: transfer-timeout=%d\n",
977 	    pp->fast_centronics, pp->fast_compat,
978 	    pp->wsrv_retry, pp->wait_for_busy,
979 	    pp->data_setup_time, pp->strobe_pulse_width,
980 	    pp->xfer_parms.write_timeout);
981 }
982 
983 /*ARGSUSED*/
984 int
985 ecpp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
986 {
987 	dev_t	dev = (dev_t)arg;
988 	struct ecppunit *pp;
989 	int	instance, ret;
990 
991 	instance = getminor(dev);
992 
993 	switch (infocmd) {
994 	case DDI_INFO_DEVT2DEVINFO:
995 		pp = ddi_get_soft_state(ecppsoft_statep, instance);
996 		if (pp != NULL) {
997 			*result = pp->dip;
998 			ret = DDI_SUCCESS;
999 		} else {
1000 			ret = DDI_FAILURE;
1001 		}
1002 		break;
1003 
1004 	case DDI_INFO_DEVT2INSTANCE:
1005 		*result = (void *)(uintptr_t)instance;
1006 		ret = DDI_SUCCESS;
1007 		break;
1008 
1009 	default:
1010 		ret = DDI_FAILURE;
1011 		break;
1012 	}
1013 
1014 	return (ret);
1015 }
1016 
1017 /*ARGSUSED2*/
1018 static int
1019 ecpp_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *credp)
1020 {
1021 	struct ecppunit *pp;
1022 	int		instance;
1023 	struct stroptions *sop;
1024 	mblk_t		*mop;
1025 
1026 	instance = getminor(*dev);
1027 
1028 	if (instance < 0) {
1029 		return (ENXIO);
1030 	}
1031 
1032 	pp = (struct ecppunit *)ddi_get_soft_state(ecppsoft_statep, instance);
1033 
1034 	if (pp == NULL) {
1035 		return (ENXIO);
1036 	}
1037 
1038 	mutex_enter(&pp->umutex);
1039 
1040 	/*
1041 	 * Parallel port is an exclusive-use device
1042 	 * thus providing print job integrity
1043 	 */
1044 	if (pp->oflag == TRUE) {
1045 		ecpp_error(pp->dip, "ecpp open failed");
1046 		mutex_exit(&pp->umutex);
1047 		return (EBUSY);
1048 	}
1049 
1050 	pp->oflag = TRUE;
1051 
1052 	/* initialize state variables */
1053 	pp->prn_timeouts = prn_timeouts_default;
1054 	pp->xfer_parms = default_xfer_parms;
1055 	pp->current_mode = ECPP_CENTRONICS;
1056 	pp->backchannel = ECPP_CENTRONICS;
1057 	pp->current_phase = ECPP_PHASE_PO;
1058 	pp->port = ECPP_PORT_DMA;
1059 	pp->instance = instance;
1060 	pp->timeout_error = 0;
1061 	pp->saved_dsr = DSR_READ(pp);
1062 	pp->ecpp_drain_counter = 0;
1063 	pp->dma_cancelled = FALSE;
1064 	pp->io_mode = ECPP_DMA;
1065 	pp->joblen = 0;
1066 	pp->tfifo_intr = 0;
1067 	pp->softintr_pending = 0;
1068 	pp->nread = 0;
1069 
1070 	/* clear the state flag */
1071 	pp->e_busy = ECPP_IDLE;
1072 
1073 	pp->readq = RD(q);
1074 	pp->writeq = WR(q);
1075 	pp->msg = NULL;
1076 
1077 	RD(q)->q_ptr = WR(q)->q_ptr = (caddr_t)pp;
1078 
1079 	/*
1080 	 * Get ready: check host/peripheral, negotiate into default mode
1081 	 */
1082 	if (ecpp_reset_port_regs(pp) == FAILURE) {
1083 		mutex_exit(&pp->umutex);
1084 		return (EIO);
1085 	}
1086 
1087 	mutex_exit(&pp->umutex);
1088 
1089 	/*
1090 	 * Configure the Stream head and enable the Stream
1091 	 */
1092 	if (!(mop = allocb(sizeof (struct stroptions), BPRI_MED))) {
1093 		return (EAGAIN);
1094 	}
1095 
1096 	mop->b_datap->db_type = M_SETOPTS;
1097 	mop->b_wptr += sizeof (struct stroptions);
1098 
1099 	/*
1100 	 * if device is open with O_NONBLOCK flag set, let read(2) return 0
1101 	 * if no data waiting to be read.  Writes will block on flow control.
1102 	 */
1103 	sop = (struct stroptions *)mop->b_rptr;
1104 	sop->so_flags = SO_HIWAT | SO_LOWAT | SO_NDELON | SO_MREADON;
1105 	sop->so_hiwat = ECPPHIWAT;
1106 	sop->so_lowat = ECPPLOWAT;
1107 
1108 	/* enable the stream */
1109 	qprocson(q);
1110 
1111 	putnext(q, mop);
1112 
1113 	mutex_enter(&pp->umutex);
1114 
1115 	ecpp_default_negotiation(pp);
1116 
1117 	/* go revidle */
1118 	(void) ecpp_idle_phase(pp);
1119 
1120 	ecpp_error(pp->dip,
1121 	    "ecpp_open: mode=%x, phase=%x ecr=%x, dsr=%x, dcr=%x\n",
1122 	    pp->current_mode, pp->current_phase,
1123 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
1124 
1125 	mutex_exit(&pp->umutex);
1126 
1127 	return (0);
1128 }
1129 
1130 /*ARGSUSED1*/
1131 static int
1132 ecpp_close(queue_t *q, int flag, cred_t *cred_p)
1133 {
1134 	struct ecppunit *pp;
1135 	timeout_id_t	timeout_id, fifo_timer_id, wsrv_timer_id;
1136 
1137 	pp = (struct ecppunit *)q->q_ptr;
1138 
1139 	ecpp_error(pp->dip, "ecpp_close: entering ...\n");
1140 
1141 	mutex_enter(&pp->umutex);
1142 
1143 	/*
1144 	 * ecpp_close() will continue to loop until the
1145 	 * queue has been drained or if the thread
1146 	 * has received a SIG.  Typically, when the queue
1147 	 * has data, the port will be ECPP_BUSY.  However,
1148 	 * after a dma completes and before the wsrv
1149 	 * starts the next transfer, the port may be IDLE.
1150 	 * In this case, ecpp_close() will loop within this
1151 	 * while(qsize) segment.  Since ecpp_wsrv() runs
1152 	 * at software interrupt level, this shouldn't loop
1153 	 * very long.
1154 	 */
1155 	while (pp->e_busy != ECPP_IDLE || qsize(WR(q))) {
1156 		if (!cv_wait_sig(&pp->pport_cv, &pp->umutex)) {
1157 			ecpp_error(pp->dip, "ecpp_close:B: received SIG\n");
1158 			/*
1159 			 * Returning from a signal such as
1160 			 * SIGTERM or SIGKILL
1161 			 */
1162 			ecpp_flush(pp, FWRITE);
1163 			break;
1164 		} else {
1165 			ecpp_error(pp->dip, "ecpp_close:rcvd cv-sig\n");
1166 		}
1167 	}
1168 
1169 	ecpp_error(pp->dip, "ecpp_close: joblen=%d, ctx_cf=%d, "
1170 	    "qsize(WR(q))=%d, qsize(RD(q))=%d\n",
1171 	    pp->joblen, pp->ctx_cf, qsize(pp->writeq), qsize(q));
1172 
1173 	/*
1174 	 * Cancel all timeouts, disable interrupts
1175 	 *
1176 	 * Note that we can`t call untimeout(9F) with mutex held:
1177 	 * callout may be blocked on the same mutex, and untimeout() will
1178 	 * cv_wait() while callout is executing, thus creating a deadlock
1179 	 * So we zero the timeout id's inside mutex and call untimeout later
1180 	 */
1181 	timeout_id = pp->timeout_id;
1182 	fifo_timer_id = pp->fifo_timer_id;
1183 	wsrv_timer_id = pp->wsrv_timer_id;
1184 
1185 	pp->timeout_id = pp->fifo_timer_id = pp->wsrv_timer_id = 0;
1186 
1187 	pp->softintr_pending = 0;
1188 	pp->dma_cancelled = TRUE;
1189 	ECPP_MASK_INTR(pp);
1190 
1191 	mutex_exit(&pp->umutex);
1192 
1193 	qprocsoff(q);
1194 
1195 	if (timeout_id) {
1196 		(void) untimeout(timeout_id);
1197 	}
1198 	if (fifo_timer_id) {
1199 		(void) untimeout(fifo_timer_id);
1200 	}
1201 	if (wsrv_timer_id) {
1202 		(void) untimeout(wsrv_timer_id);
1203 	}
1204 
1205 	mutex_enter(&pp->umutex);
1206 
1207 	/* set link to Compatible mode */
1208 	if ((pp->current_mode == ECPP_ECP_MODE) &&
1209 	    (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
1210 		(void) ecp_reverse2forward(pp);
1211 	}
1212 
1213 	(void) ecpp_1284_termination(pp);
1214 
1215 	pp->oflag = FALSE;
1216 	q->q_ptr = WR(q)->q_ptr = NULL;
1217 	pp->readq = pp->writeq = NULL;
1218 	pp->msg = NULL;
1219 
1220 	ecpp_error(pp->dip, "ecpp_close: ecr=%x, dsr=%x, dcr=%x\n",
1221 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
1222 
1223 	mutex_exit(&pp->umutex);
1224 
1225 	return (0);
1226 }
1227 
1228 /*
1229  * standard put procedure for ecpp
1230  */
1231 static int
1232 ecpp_wput(queue_t *q, mblk_t *mp)
1233 {
1234 	struct msgb *nmp;
1235 	struct ecppunit *pp;
1236 
1237 	pp = (struct ecppunit *)q->q_ptr;
1238 
1239 	if (!mp) {
1240 		return (0);
1241 	}
1242 
1243 	if ((mp->b_wptr - mp->b_rptr) <= 0) {
1244 		ecpp_error(pp->dip,
1245 		    "ecpp_wput:bogus packet recieved mp=%x\n", mp);
1246 		freemsg(mp);
1247 		return (0);
1248 	}
1249 
1250 	switch (DB_TYPE(mp)) {
1251 	case M_DATA:
1252 		/*
1253 		 * This is a quick fix for multiple message block problem,
1254 		 * it will be changed later with better performance code.
1255 		 */
1256 		if (mp->b_cont) {
1257 			/*
1258 			 * mblk has scattered data ... do msgpullup
1259 			 * if it fails, continue with the current mblk
1260 			 */
1261 			if ((nmp = msgpullup(mp, -1)) != NULL) {
1262 				freemsg(mp);
1263 				mp = nmp;
1264 				ecpp_error(pp->dip,
1265 				    "ecpp_wput:msgpullup: mp=%p len=%d\n",
1266 				    mp, mp->b_wptr - mp->b_rptr);
1267 			}
1268 		}
1269 
1270 		/* let ecpp_wsrv() concatenate small blocks */
1271 		(void) putq(q, mp);
1272 
1273 		break;
1274 
1275 	case M_CTL:
1276 		(void) putq(q, mp);
1277 
1278 		break;
1279 
1280 	case M_IOCTL: {
1281 		struct iocblk *iocbp;
1282 
1283 		iocbp = (struct iocblk *)mp->b_rptr;
1284 
1285 		ecpp_error(pp->dip, "ecpp_wput:M_IOCTL %x\n", iocbp->ioc_cmd);
1286 
1287 		mutex_enter(&pp->umutex);
1288 
1289 		/* TESTIO and GET_STATUS can be used during transfer */
1290 		if ((pp->e_busy == ECPP_BUSY) &&
1291 		    (iocbp->ioc_cmd != BPPIOC_TESTIO) &&
1292 		    (iocbp->ioc_cmd != PRNIOC_GET_STATUS)) {
1293 			mutex_exit(&pp->umutex);
1294 			(void) putq(q, mp);
1295 		} else {
1296 			mutex_exit(&pp->umutex);
1297 			ecpp_putioc(q, mp);
1298 		}
1299 
1300 		break;
1301 	}
1302 
1303 	case M_IOCDATA: {
1304 		struct copyresp *csp;
1305 
1306 		ecpp_error(pp->dip, "ecpp_wput:M_IOCDATA\n");
1307 
1308 		csp = (struct copyresp *)mp->b_rptr;
1309 
1310 		/*
1311 		 * If copy request failed, quit now
1312 		 */
1313 		if (csp->cp_rval != 0) {
1314 			freemsg(mp);
1315 			return (0);
1316 		}
1317 
1318 		switch (csp->cp_cmd) {
1319 		case ECPPIOC_SETPARMS:
1320 		case ECPPIOC_SETREGS:
1321 		case ECPPIOC_SETPORT:
1322 		case ECPPIOC_SETDATA:
1323 		case PRNIOC_SET_IFCAP:
1324 		case PRNIOC_SET_TIMEOUTS:
1325 			/*
1326 			 * need to retrieve and use the data, but if the
1327 			 * device is busy, wait.
1328 			 */
1329 			(void) putq(q, mp);
1330 			break;
1331 
1332 		case ECPPIOC_GETPARMS:
1333 		case ECPPIOC_GETREGS:
1334 		case ECPPIOC_GETPORT:
1335 		case ECPPIOC_GETDATA:
1336 		case BPPIOC_GETERR:
1337 		case BPPIOC_TESTIO:
1338 		case PRNIOC_GET_IFCAP:
1339 		case PRNIOC_GET_STATUS:
1340 		case PRNIOC_GET_1284_STATUS:
1341 		case PRNIOC_GET_TIMEOUTS:
1342 			/* data transferred to user space okay */
1343 			ecpp_ack_ioctl(q, mp);
1344 			break;
1345 
1346 		case ECPPIOC_GETDEVID:
1347 			ecpp_wput_iocdata_devid(q, mp,
1348 			    offsetof(struct ecpp_device_id, rlen));
1349 			break;
1350 
1351 		case PRNIOC_GET_1284_DEVID:
1352 			ecpp_wput_iocdata_devid(q, mp,
1353 			    offsetof(struct prn_1284_device_id, id_rlen));
1354 			break;
1355 
1356 		case PRNIOC_GET_IFINFO:
1357 			ecpp_wput_iocdata_devid(q, mp,
1358 			    offsetof(struct prn_interface_info, if_rlen));
1359 			break;
1360 
1361 		default:
1362 			ecpp_nack_ioctl(q, mp, EINVAL);
1363 			break;
1364 		}
1365 
1366 		break;
1367 	}
1368 
1369 	case M_FLUSH:
1370 		ecpp_error(pp->dip, "ecpp_wput:M_FLUSH\n");
1371 
1372 		if (*mp->b_rptr & FLUSHW) {
1373 			mutex_enter(&pp->umutex);
1374 			ecpp_flush(pp, FWRITE);
1375 			mutex_exit(&pp->umutex);
1376 		}
1377 
1378 		if (*mp->b_rptr & FLUSHR) {
1379 			mutex_enter(&pp->umutex);
1380 			ecpp_flush(pp, FREAD);
1381 			mutex_exit(&pp->umutex);
1382 			qreply(q, mp);
1383 		} else {
1384 			freemsg(mp);
1385 		}
1386 
1387 		break;
1388 
1389 	case M_READ:
1390 		/*
1391 		 * When the user calls read(2), M_READ message is sent to us,
1392 		 * first byte of which is the number of requested bytes
1393 		 * We add up user requests and use resulting number
1394 		 * to calculate the reverse transfer block size
1395 		 */
1396 		mutex_enter(&pp->umutex);
1397 		if (pp->e_busy == ECPP_IDLE) {
1398 			pp->nread += *(size_t *)mp->b_rptr;
1399 			ecpp_error(pp->dip, "ecpp_wput: M_READ %d", pp->nread);
1400 			freemsg(mp);
1401 		} else {
1402 			ecpp_error(pp->dip, "ecpp_wput: M_READ queueing");
1403 			(void) putq(q, mp);
1404 		}
1405 		mutex_exit(&pp->umutex);
1406 		break;
1407 
1408 	default:
1409 		ecpp_error(pp->dip, "ecpp_wput: bad messagetype 0x%x\n",
1410 		    DB_TYPE(mp));
1411 		freemsg(mp);
1412 		break;
1413 	}
1414 
1415 	return (0);
1416 }
1417 
1418 /*
1419  * Process ECPPIOC_GETDEVID-like ioctls
1420  */
1421 static void
1422 ecpp_wput_iocdata_devid(queue_t *q, mblk_t *mp, uintptr_t rlen_offset)
1423 {
1424 	struct copyresp		*csp;
1425 	struct ecpp_copystate	*stp;
1426 	mblk_t			*datamp;
1427 
1428 	csp = (struct copyresp *)mp->b_rptr;
1429 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
1430 
1431 	/* determine the state of copyin/copyout process */
1432 	switch (stp->state) {
1433 	case ECPP_STRUCTIN:
1434 		/* user structure has arrived */
1435 		(void) putq(q, mp);
1436 		break;
1437 
1438 	case ECPP_ADDROUT:
1439 		/*
1440 		 * data transferred to user space okay
1441 		 * now update user structure
1442 		 */
1443 		datamp = allocb(sizeof (int), BPRI_MED);
1444 		if (datamp == NULL) {
1445 			ecpp_nack_ioctl(q, mp, ENOSR);
1446 			break;
1447 		}
1448 
1449 		*(int *)datamp->b_rptr =
1450 		    *(int *)((char *)&stp->un + rlen_offset);
1451 		stp->state = ECPP_STRUCTOUT;
1452 
1453 		mcopyout(mp, csp->cp_private, sizeof (int),
1454 		    (char *)stp->uaddr + rlen_offset, datamp);
1455 		qreply(q, mp);
1456 		break;
1457 
1458 	case ECPP_STRUCTOUT:
1459 		/* user structure was updated okay */
1460 		freemsg(csp->cp_private);
1461 		ecpp_ack_ioctl(q, mp);
1462 		break;
1463 
1464 	default:
1465 		ecpp_nack_ioctl(q, mp, EINVAL);
1466 		break;
1467 	}
1468 }
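
/*
 * For reference, the same copy sequence as seen from a hypothetical
 * application issuing ECPPIOC_GETDEVID (structure layout per
 * <sys/ecppio.h>; error handling omitted):
 *
 *	char id[1024];
 *	struct ecpp_device_id devid;
 *
 *	devid.mode = ECPP_NIBBLE_MODE;	-- mode in which to read the ID
 *	devid.len  = sizeof (id);	-- user buffer length
 *	devid.addr = id;		-- user buffer
 *	(void) ioctl(fd, ECPPIOC_GETDEVID, &devid);
 *	-- on return devid.rlen holds the actual device ID length
 *
 * Driver-side this corresponds to ECPP_STRUCTIN (structure copied in),
 * ECPP_ADDROUT (ID string copied out to devid.addr) and ECPP_STRUCTOUT
 * (the updated rlen copied back into the user structure).
 */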
1469 
1470 static uchar_t
1471 ecpp_get_error_status(uchar_t status)
1472 {
1473 	uchar_t pin_status = 0;
1474 
1475 	if (!(status & ECPP_nERR)) {
1476 		pin_status |= BPP_ERR_ERR;
1477 	}
1478 
1479 	if (status & ECPP_PE) {
1480 		pin_status |= BPP_PE_ERR;
1481 	}
1482 
1483 	if (!(status & ECPP_SLCT)) {
1484 		pin_status |= BPP_SLCT_ERR;
1485 	}
1486 
1487 	if (!(status & ECPP_nBUSY)) {
1488 		pin_status |= BPP_SLCT_ERR;
1489 	}
1490 
1491 	return (pin_status);
1492 }
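
/*
 * Example (illustrative only): how an application might interpret the
 * pin_status assembled above, via the BPPIOC_GETERR ioctl handled below;
 * the BPP_*_ERR bits come from <sys/bpp_io.h>:
 *
 *	struct bpp_error_status es;
 *
 *	(void) ioctl(fd, BPPIOC_GETERR, &es);
 *	if (es.timeout_occurred)
 *		(void) fprintf(stderr, "transfer timed out\n");
 *	if (es.pin_status & BPP_PE_ERR)
 *		(void) fprintf(stderr, "paper out\n");
 *	if (es.pin_status & BPP_ERR_ERR)
 *		(void) fprintf(stderr, "peripheral signalled error\n");
 */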
1493 
1494 /*
1495  * ioctl handler for output PUT procedure.
1496  */
1497 static void
1498 ecpp_putioc(queue_t *q, mblk_t *mp)
1499 {
1500 	struct iocblk	*iocbp;
1501 	struct ecppunit *pp;
1502 
1503 	pp = (struct ecppunit *)q->q_ptr;
1504 
1505 	iocbp = (struct iocblk *)mp->b_rptr;
1506 
1507 	/* I_STR ioctls are invalid */
1508 	if (iocbp->ioc_count != TRANSPARENT) {
1509 		ecpp_nack_ioctl(q, mp, EINVAL);
1510 		return;
1511 	}
1512 
1513 	switch (iocbp->ioc_cmd) {
1514 	case ECPPIOC_SETPARMS: {
1515 		mcopyin(mp, NULL, sizeof (struct ecpp_transfer_parms), NULL);
1516 		qreply(q, mp);
1517 		break;
1518 	}
1519 
1520 	case ECPPIOC_GETPARMS: {
1521 		struct ecpp_transfer_parms xfer_parms;
1522 
1523 		mutex_enter(&pp->umutex);
1524 
1525 		pp->xfer_parms.mode = pp->current_mode;
1526 		xfer_parms = pp->xfer_parms;
1527 
1528 		mutex_exit(&pp->umutex);
1529 
1530 		ecpp_putioc_copyout(q, mp, &xfer_parms, sizeof (xfer_parms));
1531 		break;
1532 	}
1533 
1534 	case ECPPIOC_SETREGS: {
1535 		mutex_enter(&pp->umutex);
1536 		if (pp->current_mode != ECPP_DIAG_MODE) {
1537 			mutex_exit(&pp->umutex);
1538 			ecpp_nack_ioctl(q, mp, EINVAL);
1539 			break;
1540 		}
1541 		mutex_exit(&pp->umutex);
1542 
1543 		mcopyin(mp, NULL, sizeof (struct ecpp_regs), NULL);
1544 		qreply(q, mp);
1545 		break;
1546 	}
1547 
1548 	case ECPPIOC_GETREGS: {
1549 		struct ecpp_regs rg;
1550 
1551 		mutex_enter(&pp->umutex);
1552 
1553 		if (pp->current_mode != ECPP_DIAG_MODE) {
1554 			mutex_exit(&pp->umutex);
1555 			ecpp_nack_ioctl(q, mp, EINVAL);
1556 			break;
1557 		}
1558 
1559 		rg.dsr = DSR_READ(pp);
1560 		rg.dcr = DCR_READ(pp);
1561 
1562 		mutex_exit(&pp->umutex);
1563 
1564 		ecpp_error(pp->dip, "ECPPIOC_GETREGS: dsr=%x,dcr=%x\n",
1565 		    rg.dsr, rg.dcr);
1566 
1567 		/* these bits must be 1 */
1568 		rg.dsr |= ECPP_SETREGS_DSR_MASK;
1569 		rg.dcr |= ECPP_SETREGS_DCR_MASK;
1570 
1571 		ecpp_putioc_copyout(q, mp, &rg, sizeof (rg));
1572 		break;
1573 	}
1574 
1575 	case ECPPIOC_SETPORT:
1576 	case ECPPIOC_SETDATA: {
1577 		mutex_enter(&pp->umutex);
1578 		if (pp->current_mode != ECPP_DIAG_MODE) {
1579 			mutex_exit(&pp->umutex);
1580 			ecpp_nack_ioctl(q, mp, EINVAL);
1581 			break;
1582 		}
1583 		mutex_exit(&pp->umutex);
1584 
1585 		/*
1586 		 * each of the commands fetches a byte quantity.
1587 		 */
1588 		mcopyin(mp, NULL, sizeof (uchar_t), NULL);
1589 		qreply(q, mp);
1590 		break;
1591 	}
1592 
1593 	case ECPPIOC_GETDATA:
1594 	case ECPPIOC_GETPORT: {
1595 		uchar_t	byte;
1596 
1597 		mutex_enter(&pp->umutex);
1598 
1599 		/* must be in diagnostic mode for these commands to work */
1600 		if (pp->current_mode != ECPP_DIAG_MODE) {
1601 			mutex_exit(&pp->umutex);
1602 			ecpp_nack_ioctl(q, mp, EINVAL);
1603 			break;
1604 		}
1605 
1606 		if (iocbp->ioc_cmd == ECPPIOC_GETPORT) {
1607 			byte = pp->port;
1608 		} else if (iocbp->ioc_cmd == ECPPIOC_GETDATA) {
1609 			switch (pp->port) {
1610 			case ECPP_PORT_PIO:
1611 				byte = DATAR_READ(pp);
1612 				break;
1613 			case ECPP_PORT_TDMA:
1614 				byte = TFIFO_READ(pp);
1615 				ecpp_error(pp->dip, "GETDATA=0x%x\n", byte);
1616 				break;
1617 			default:
1618 				ecpp_nack_ioctl(q, mp, EINVAL);
1619 				break;
1620 			}
1621 		} else {
1622 			mutex_exit(&pp->umutex);
1623 			ecpp_error(pp->dip, "weird command");
1624 			ecpp_nack_ioctl(q, mp, EINVAL);
1625 			break;
1626 		}
1627 
1628 		mutex_exit(&pp->umutex);
1629 
1630 		ecpp_putioc_copyout(q, mp, &byte, sizeof (byte));
1631 
1632 		break;
1633 	}
1634 
1635 	case BPPIOC_GETERR: {
1636 		struct bpp_error_status bpp_status;
1637 
1638 		mutex_enter(&pp->umutex);
1639 
1640 		bpp_status.timeout_occurred = pp->timeout_error;
1641 		bpp_status.bus_error = 0;	/* not used */
1642 		bpp_status.pin_status = ecpp_get_error_status(pp->saved_dsr);
1643 
1644 		mutex_exit(&pp->umutex);
1645 
1646 		ecpp_putioc_copyout(q, mp, &bpp_status, sizeof (bpp_status));
1647 
1648 		break;
1649 	}
1650 
1651 	case BPPIOC_TESTIO: {
1652 		mutex_enter(&pp->umutex);
1653 
1654 		if (!((pp->current_mode == ECPP_CENTRONICS) ||
1655 		    (pp->current_mode == ECPP_COMPAT_MODE))) {
1656 			ecpp_nack_ioctl(q, mp, EINVAL);
1657 		} else {
1658 			pp->saved_dsr = DSR_READ(pp);
1659 
1660 			if ((pp->saved_dsr & ECPP_PE) ||
1661 			    !(pp->saved_dsr & ECPP_SLCT) ||
1662 			    !(pp->saved_dsr & ECPP_nERR)) {
1663 				ecpp_nack_ioctl(q, mp, EIO);
1664 			} else {
1665 				ecpp_ack_ioctl(q, mp);
1666 			}
1667 		}
1668 
1669 		mutex_exit(&pp->umutex);
1670 
1671 		break;
1672 	}
1673 
1674 	case PRNIOC_RESET:
1675 		/*
1676 		 * Initialize interface only if no transfer is in progress
1677 		 */
1678 		mutex_enter(&pp->umutex);
1679 		if (pp->e_busy == ECPP_BUSY) {
1680 			mutex_exit(&pp->umutex);
1681 			ecpp_nack_ioctl(q, mp, EIO);
1682 		} else {
1683 			(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
1684 
1685 			DCR_WRITE(pp, ECPP_SLCTIN);
1686 			drv_usecwait(2);
1687 			DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
1688 
1689 			ecpp_default_negotiation(pp);
1690 
1691 			mutex_exit(&pp->umutex);
1692 			ecpp_ack_ioctl(q, mp);
1693 		}
1694 		break;
1695 
1696 	case PRNIOC_GET_IFCAP: {
1697 		uint_t		ifcap;
1698 
1699 		mutex_enter(&pp->umutex);
1700 
1701 		ifcap = ecpp_get_prn_ifcap(pp);
1702 
1703 		mutex_exit(&pp->umutex);
1704 
1705 		ecpp_putioc_copyout(q, mp, &ifcap, sizeof (ifcap));
1706 		break;
1707 	}
1708 
1709 	case PRNIOC_SET_IFCAP: {
1710 		mcopyin(mp, NULL, sizeof (uint_t), NULL);
1711 		qreply(q, mp);
1712 		break;
1713 	}
1714 
1715 	case PRNIOC_GET_TIMEOUTS: {
1716 		struct prn_timeouts timeouts;
1717 
1718 		mutex_enter(&pp->umutex);
1719 		timeouts = pp->prn_timeouts;
1720 		mutex_exit(&pp->umutex);
1721 
1722 		ecpp_putioc_copyout(q, mp, &timeouts, sizeof (timeouts));
1723 
1724 		break;
1725 	}
1726 
1727 	case PRNIOC_SET_TIMEOUTS:
1728 		mcopyin(mp, NULL, sizeof (struct prn_timeouts),
1729 		    *(caddr_t *)(void *)mp->b_cont->b_rptr);
1730 		qreply(q, mp);
1731 		break;
1732 
1733 	case PRNIOC_GET_STATUS: {
1734 		uint8_t	dsr;
1735 		uint_t	status;
1736 
1737 		mutex_enter(&pp->umutex);
1738 
1739 		/* DSR only makes sense in Centronics & Compat mode */
1740 		if (pp->current_mode == ECPP_CENTRONICS ||
1741 		    pp->current_mode == ECPP_COMPAT_MODE) {
1742 			dsr = DSR_READ(pp);
1743 			if ((dsr & ECPP_PE) ||
1744 			    !(dsr & ECPP_SLCT) || !(dsr & ECPP_nERR)) {
1745 				status = PRN_ONLINE;
1746 			} else {
1747 				status = PRN_ONLINE | PRN_READY;
1748 			}
1749 		} else {
1750 			status = PRN_ONLINE | PRN_READY;
1751 		}
1752 
1753 		mutex_exit(&pp->umutex);
1754 
1755 		ecpp_putioc_copyout(q, mp, &status, sizeof (status));
1756 		break;
1757 	}
1758 
1759 	case PRNIOC_GET_1284_STATUS: {
1760 		uint8_t	dsr;
1761 		uchar_t	status;
1762 
1763 		mutex_enter(&pp->umutex);
1764 
1765 		/* status only makes sense in Centronics & Compat mode */
1766 		if (pp->current_mode != ECPP_COMPAT_MODE &&
1767 		    pp->current_mode != ECPP_CENTRONICS) {
1768 			mutex_exit(&pp->umutex);
1769 			ecpp_nack_ioctl(q, mp, EINVAL);
1770 			break;
1771 		}
1772 
1773 		dsr = DSR_READ(pp);		/* read status */
1774 
1775 		mutex_exit(&pp->umutex);
1776 
1777 		ecpp_error(pp->dip, "PRNIOC_GET_STATUS: %x\n", dsr);
1778 
1779 		status = (dsr & (ECPP_SLCT | ECPP_PE | ECPP_nERR)) |
1780 		    (~dsr & ECPP_nBUSY);
1781 
1782 		ecpp_putioc_copyout(q, mp, &status, sizeof (status));
1783 		break;
1784 	}
1785 
1786 	case ECPPIOC_GETDEVID:
1787 		ecpp_putioc_stateful_copyin(q, mp,
1788 		    sizeof (struct ecpp_device_id));
1789 		break;
1790 
1791 	case PRNIOC_GET_1284_DEVID:
1792 		ecpp_putioc_stateful_copyin(q, mp,
1793 		    sizeof (struct prn_1284_device_id));
1794 		break;
1795 
1796 	case PRNIOC_GET_IFINFO:
1797 		ecpp_putioc_stateful_copyin(q, mp,
1798 		    sizeof (struct prn_interface_info));
1799 		break;
1800 
1801 	default:
1802 		ecpp_error(pp->dip, "putioc: unknown IOCTL: %x\n",
1803 		    iocbp->ioc_cmd);
1804 		ecpp_nack_ioctl(q, mp, EINVAL);
1805 		break;
1806 	}
1807 }
1808 
1809 /*
1810  * allocate mblk and copyout the requested number of bytes
1811  */
1812 static void
1813 ecpp_putioc_copyout(queue_t *q, mblk_t *mp, void *buf, int len)
1814 {
1815 	mblk_t	*tmp;
1816 
1817 	if ((tmp = allocb(len, BPRI_MED)) == NULL) {
1818 		ecpp_nack_ioctl(q, mp, ENOSR);
1819 		return;
1820 	}
1821 
1822 	bcopy(buf, tmp->b_wptr, len);
1823 
1824 	mcopyout(mp, NULL, len, NULL, tmp);
1825 	qreply(q, mp);
1826 }
1827 
1828 /*
1829  * copyin the structure using struct ecpp_copystate
1830  */
1831 static void
1832 ecpp_putioc_stateful_copyin(queue_t *q, mblk_t *mp, size_t size)
1833 {
1834 	mblk_t *tmp;
1835 	struct ecpp_copystate *stp;
1836 
1837 	if ((tmp = allocb(sizeof (struct ecpp_copystate), BPRI_MED)) == NULL) {
1838 		ecpp_nack_ioctl(q, mp, EAGAIN);
1839 		return;
1840 	}
1841 
1842 	stp = (struct ecpp_copystate *)tmp->b_rptr;
1843 	stp->state = ECPP_STRUCTIN;
1844 	stp->uaddr = *(caddr_t *)mp->b_cont->b_rptr;
1845 
1846 	tmp->b_wptr += sizeof (struct ecpp_copystate);
1847 
1848 	mcopyin(mp, tmp, size, stp->uaddr);
1849 	qreply(q, mp);
1850 }
1851 
1852 /*
1853  * the read queue is only used when the peripheral sends data faster
1854  * than the application consumes it;
1855  * once the low water mark is reached, this routine will be scheduled
1856  */
1857 static int
1858 ecpp_rsrv(queue_t *q)
1859 {
1860 	struct msgb	*mp;
1861 
1862 	/*
1863 	 * send data upstream until next queue is full or the queue is empty
1864 	 */
1865 	while (canputnext(q) && (mp = getq(q))) {
1866 		putnext(q, mp);
1867 	}
1868 
1869 	/*
1870 	 * if there is still space on the queue, enable backchannel
1871 	 */
1872 	if (canputnext(q)) {
1873 		struct ecppunit	*pp = (struct ecppunit *)q->q_ptr;
1874 
1875 		mutex_enter(&pp->umutex);
1876 
1877 		if (pp->e_busy == ECPP_IDLE) {
1878 			(void) ecpp_idle_phase(pp);
1879 			cv_signal(&pp->pport_cv);  /* signal ecpp_close() */
1880 		}
1881 
1882 		mutex_exit(&pp->umutex);
1883 	}
1884 
1885 	return (0);
1886 }
1887 
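/*
 * write service procedure:
 * gather small M_DATA messages from the write queue into one large
 * transfer, dispatch queued M_IOCTL/M_IOCDATA messages and M_CTL
 * backchannel requests, then either start the transfer or go idle
 */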
1888 static int
1889 ecpp_wsrv(queue_t *q)
1890 {
1891 	struct ecppunit	*pp = (struct ecppunit *)q->q_ptr;
1892 	struct msgb	*mp;
1893 	size_t		len, total_len;
1894 	size_t		my_ioblock_sz;
1895 	caddr_t		my_ioblock;
1896 	caddr_t		start_addr;
1897 
1898 	mutex_enter(&pp->umutex);
1899 
1900 	ecpp_error(pp->dip, "ecpp_wsrv: e_busy=%x\n", pp->e_busy);
1901 
1902 	/* if channel is actively doing work, wait till completed */
1903 	if (pp->e_busy == ECPP_BUSY || pp->e_busy == ECPP_FLUSH) {
1904 		mutex_exit(&pp->umutex);
1905 		return (0);
1906 	} else if (pp->suspended == TRUE) {
1907 		/*
1908 		 * if the system is about to suspend and ecpp_detach()
1909 		 * is blocked due to active transfers, wake it up and exit
1910 		 */
1911 		cv_signal(&pp->pport_cv);
1912 		mutex_exit(&pp->umutex);
1913 		return (0);
1914 	}
1915 
1916 	/* peripheral status should be okay before starting transfer */
1917 	if (pp->e_busy == ECPP_ERR) {
1918 		if (ecpp_check_status(pp) == FAILURE) {
1919 			if (pp->wsrv_timer_id == 0) {
1920 				ecpp_error(pp->dip, "wsrv: start wrsv_timer\n");
1921 				pp->wsrv_timer_id = timeout(ecpp_wsrv_timer,
1922 				    (caddr_t)pp,
1923 				    drv_usectohz(pp->wsrv_retry * 1000));
1924 			} else {
1925 				ecpp_error(pp->dip,
1926 				    "ecpp_wsrv: wrsv_timer is active\n");
1927 			}
1928 
1929 			mutex_exit(&pp->umutex);
1930 			return (0);
1931 		} else {
1932 			pp->e_busy = ECPP_IDLE;
1933 		}
1934 	}
1935 
1936 	my_ioblock = pp->ioblock;
1937 	my_ioblock_sz = IO_BLOCK_SZ;
1938 
1939 	/*
1940 	 * it's important to null pp->msg here,
1941 	 * cleaning up from the previous transfer attempts
1942 	 */
1943 	pp->msg = NULL;
1944 
1945 	start_addr = NULL;
1946 	len = total_len = 0;
1947 	/*
1948 	 * The following loop is implemented to gather the
1949 	 * many small writes that the lp subsystem makes and
1950 	 * compile them into one large dma transfer. The len and
1951 	 * total_len variables are a running count of the number of
1952 	 * bytes that have been gathered; the data is bcopied into the
1953 	 * ioblock buffer.  pp->e_busy is set to ECPP_BUSY as soon as we
1954 	 * start gathering data, to indicate that a transfer is pending.
1955 	 */
1956 	while (mp = getq(q)) {
1957 		switch (DB_TYPE(mp)) {
1958 		case M_DATA:
1959 			pp->e_busy = ECPP_BUSY;
1960 			len = mp->b_wptr - mp->b_rptr;
1961 
1962 			if ((total_len == 0) && (len >= my_ioblock_sz)) {
1963 				/*
1964 				 * if the first M_DATA is bigger than ioblock,
1965 				 * just use this mblk and start the transfer
1966 				 */
1967 				total_len = len;
1968 				start_addr = (caddr_t)mp->b_rptr;
1969 				pp->msg = mp;
1970 				goto breakout;
1971 			} else if (total_len + len > my_ioblock_sz) {
1972 				/*
1973 				 * current M_DATA does not fit in ioblock,
1974 				 * put it back and start the transfer
1975 				 */
1976 				(void) putbq(q, mp);
1977 				goto breakout;
1978 			} else {
1979 				/*
1980 				 * otherwise add data to ioblock and free mblk
1981 				 */
1982 				bcopy(mp->b_rptr, my_ioblock, len);
1983 				my_ioblock += len;
1984 				total_len += len;
1985 				start_addr = (caddr_t)pp->ioblock;
1986 				freemsg(mp);
1987 			}
1988 			break;
1989 
1990 		case M_IOCTL:
1991 			/*
1992 			 * Assume a simple loopback test: an application
1993 			 * writes data into the TFIFO, reads it using
1994 			 * ECPPIOC_GETDATA and compares. If the transfer
1995 			 * times out (which is only possible on Grover),
1996 			 * the ioctl might be processed before the data
1997 			 * got to the TFIFO, which leads to a miscompare.
1998 			 * So if we meet an ioctl, postpone it until after the xfer.
1999 			 */
2000 			if (total_len > 0) {
2001 				(void) putbq(q, mp);
2002 				goto breakout;
2003 			}
2004 
2005 			ecpp_error(pp->dip, "M_IOCTL.\n");
2006 
2007 			mutex_exit(&pp->umutex);
2008 
2009 			ecpp_putioc(q, mp);
2010 
2011 			mutex_enter(&pp->umutex);
2012 
2013 			break;
2014 
2015 		case M_IOCDATA: {
2016 			struct copyresp *csp = (struct copyresp *)mp->b_rptr;
2017 
2018 			ecpp_error(pp->dip, "M_IOCDATA\n");
2019 
2020 			/*
2021 			 * If copy request failed, quit now
2022 			 */
2023 			if (csp->cp_rval != 0) {
2024 				freemsg(mp);
2025 				break;
2026 			}
2027 
2028 			switch (csp->cp_cmd) {
2029 			case ECPPIOC_SETPARMS:
2030 			case ECPPIOC_SETREGS:
2031 			case ECPPIOC_SETPORT:
2032 			case ECPPIOC_SETDATA:
2033 			case ECPPIOC_GETDEVID:
2034 			case PRNIOC_SET_IFCAP:
2035 			case PRNIOC_GET_1284_DEVID:
2036 			case PRNIOC_SET_TIMEOUTS:
2037 			case PRNIOC_GET_IFINFO:
2038 				ecpp_srvioc(q, mp);
2039 				break;
2040 
2041 			default:
2042 				ecpp_nack_ioctl(q, mp, EINVAL);
2043 				break;
2044 			}
2045 
2046 			break;
2047 		}
2048 
2049 		case M_CTL:
2050 			if (pp->e_busy != ECPP_IDLE) {
2051 				ecpp_error(pp->dip, "wsrv: M_CTL postponed\n");
2052 				(void) putbq(q, mp);
2053 				goto breakout;
2054 			} else {
2055 				ecpp_error(pp->dip, "wsrv: M_CTL\n");
2056 			}
2057 
2058 			/* sanity check */
2059 			if ((mp->b_wptr - mp->b_rptr != sizeof (int)) ||
2060 			    (*(int *)mp->b_rptr != ECPP_BACKCHANNEL)) {
2061 				ecpp_error(pp->dip, "wsrv: bogus M_CTL");
2062 				freemsg(mp);
2063 				break;
2064 			} else {
2065 				freemsg(mp);
2066 			}
2067 
2068 			/* This was a backchannel request */
2069 			(void) ecpp_peripheral2host(pp);
2070 
2071 			/* exit if a transfer has been initiated */
2072 			if (pp->e_busy == ECPP_BUSY) {
2073 				goto breakout;
2074 			}
2075 			break;
2076 
2077 		case M_READ:
2078 			pp->nread += *(size_t *)mp->b_rptr;
2079 			freemsg(mp);
2080 			ecpp_error(pp->dip, "wsrv: M_READ %d", pp->nread);
2081 			break;
2082 
2083 		default:
2084 			ecpp_error(pp->dip, "wsrv: should never get here\n");
2085 			freemsg(mp);
2086 			break;
2087 		}
2088 	}
2089 breakout:
2090 	/*
2091 	 * If total_len > 0 then start the transfer, otherwise go to the idle state
2092 	 */
2093 	if (total_len > 0) {
2094 		ecpp_error(pp->dip, "wsrv:starting: total_len=%d\n", total_len);
2095 		pp->e_busy = ECPP_BUSY;
2096 		ecpp_start(pp, start_addr, total_len);
2097 	} else {
2098 		ecpp_error(pp->dip, "wsrv:finishing: ebusy=%x\n", pp->e_busy);
2099 
2100 		/* IDLE if xfer_timeout, or FIFO_EMPTY */
2101 		if (pp->e_busy == ECPP_IDLE) {
2102 			(void) ecpp_idle_phase(pp);
2103 			cv_signal(&pp->pport_cv);  /* signal ecpp_close() */
2104 		}
2105 	}
2106 
2107 	mutex_exit(&pp->umutex);
2108 	return (1);
2109 }
2110 
2111 /*
2112  * Ioctl processor for queued ioctl data transfer messages.
2113  */
2114 static void
2115 ecpp_srvioc(queue_t *q, mblk_t *mp)
2116 {
2117 	struct iocblk	*iocbp;
2118 	struct ecppunit *pp;
2119 
2120 	iocbp = (struct iocblk *)mp->b_rptr;
2121 	pp = (struct ecppunit *)q->q_ptr;
2122 
2123 	switch (iocbp->ioc_cmd) {
2124 	case ECPPIOC_SETPARMS: {
2125 		struct ecpp_transfer_parms *xferp;
2126 
2127 		xferp = (struct ecpp_transfer_parms *)mp->b_cont->b_rptr;
2128 
2129 		if (xferp->write_timeout <= 0 ||
2130 		    xferp->write_timeout >= ECPP_MAX_TIMEOUT) {
2131 			ecpp_nack_ioctl(q, mp, EINVAL);
2132 			break;
2133 		}
2134 
2135 		if (!((xferp->mode == ECPP_CENTRONICS) ||
2136 		    (xferp->mode == ECPP_COMPAT_MODE) ||
2137 		    (xferp->mode == ECPP_NIBBLE_MODE) ||
2138 		    (xferp->mode == ECPP_ECP_MODE) ||
2139 		    (xferp->mode == ECPP_DIAG_MODE))) {
2140 			ecpp_nack_ioctl(q, mp, EINVAL);
2141 			break;
2142 		}
2143 
2144 		pp->xfer_parms = *xferp;
2145 		pp->prn_timeouts.tmo_forward = pp->xfer_parms.write_timeout;
2146 
2147 		ecpp_error(pp->dip, "srvioc: current_mode =%x new mode=%x\n",
2148 		    pp->current_mode, pp->xfer_parms.mode);
2149 
2150 		if (ecpp_mode_negotiation(pp, pp->xfer_parms.mode) == FAILURE) {
2151 			ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2152 		} else {
2153 			/*
2154 			 * mode negotiation was a success.  If in nibble mode,
2155 			 * check the backchannel and set into REVIDLE.
2156 			 */
2157 			if ((pp->current_mode == ECPP_NIBBLE_MODE) &&
2158 			    (read_nibble_backchan(pp) == FAILURE)) {
2159 				/*
2160 				 * problems reading the backchannel;
2161 				 * we have returned to Centronics,
2162 				 * so the ioctl fails.
2163 				 */
2164 				ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2165 				break;
2166 			}
2167 
2168 			ecpp_ack_ioctl(q, mp);
2169 		}
2170 		if (pp->current_mode != ECPP_DIAG_MODE) {
2171 			pp->port = ECPP_PORT_DMA;
2172 		} else {
2173 			pp->port = ECPP_PORT_PIO;
2174 		}
2175 
2176 		pp->xfer_parms.mode = pp->current_mode;
2177 
2178 		break;
2179 	}
2180 
2181 	case ECPPIOC_SETREGS: {
2182 		struct ecpp_regs *rg;
2183 		uint8_t dcr;
2184 
2185 		rg = (struct ecpp_regs *)mp->b_cont->b_rptr;
2186 
2187 		/* must be in diagnostic mode for these commands to work */
2188 		if (pp->current_mode != ECPP_DIAG_MODE) {
2189 			ecpp_nack_ioctl(q, mp, EINVAL);
2190 			break;
2191 		}
2192 
2193 		/* bits 4-7 must be 1 or return EINVAL */
2194 		if ((rg->dcr & ECPP_SETREGS_DCR_MASK) !=
2195 		    ECPP_SETREGS_DCR_MASK) {
2196 			ecpp_nack_ioctl(q, mp, EINVAL);
2197 			break;
2198 		}
2199 
2200 		/* get the old dcr */
2201 		dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
2202 		/* get the new dcr */
2203 		dcr = (dcr & ECPP_SETREGS_DCR_MASK) |
2204 		    (rg->dcr & ~ECPP_SETREGS_DCR_MASK);
2205 		DCR_WRITE(pp, dcr);
2206 		ecpp_error(pp->dip, "ECPPIOC_SETREGS:dcr=%x\n", dcr);
2207 		ecpp_ack_ioctl(q, mp);
2208 		break;
2209 	}
2210 
2211 	case ECPPIOC_SETPORT: {
2212 		uchar_t *port;
2213 
2214 		port = (uchar_t *)mp->b_cont->b_rptr;
2215 
2216 		/* must be in diagnostic mode for these commands to work */
2217 		if (pp->current_mode != ECPP_DIAG_MODE) {
2218 			ecpp_nack_ioctl(q, mp, EINVAL);
2219 			break;
2220 		}
2221 
2222 		switch (*port) {
2223 		case ECPP_PORT_PIO:
2224 			/* put superio into PIO mode */
2225 			ECR_WRITE(pp,
2226 			    ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
2227 			pp->port = *port;
2228 			ecpp_ack_ioctl(q, mp);
2229 			break;
2230 
2231 		case ECPP_PORT_TDMA:
2232 			ecpp_error(pp->dip, "SETPORT: to TDMA\n");
2233 			pp->tfifo_intr = 1;
2234 			/* change to mode 110 */
2235 			ECR_WRITE(pp,
2236 			    ECR_mode_110 | ECPP_INTR_MASK | ECPP_INTR_SRV);
2237 			pp->port = *port;
2238 			ecpp_ack_ioctl(q, mp);
2239 			break;
2240 
2241 		default:
2242 			ecpp_nack_ioctl(q, mp, EINVAL);
2243 		}
2244 
2245 		break;
2246 	}
2247 
2248 	case ECPPIOC_SETDATA: {
2249 		uchar_t *data;
2250 
2251 		data = (uchar_t *)mp->b_cont->b_rptr;
2252 
2253 		/* must be in diagnostic mode for these commands to work */
2254 		if (pp->current_mode != ECPP_DIAG_MODE) {
2255 			ecpp_nack_ioctl(q, mp, EINVAL);
2256 			break;
2257 		}
2258 
2259 		switch (pp->port) {
2260 		case ECPP_PORT_PIO:
2261 			DATAR_WRITE(pp, *data);
2262 			ecpp_ack_ioctl(q, mp);
2263 			break;
2264 
2265 		case ECPP_PORT_TDMA:
2266 			TFIFO_WRITE(pp, *data);
2267 			ecpp_ack_ioctl(q, mp);
2268 			break;
2269 
2270 		default:
2271 			ecpp_nack_ioctl(q, mp, EINVAL);
2272 		}
2273 
2274 		break;
2275 	}
2276 
2277 	case ECPPIOC_GETDEVID: {
2278 		struct copyresp		*csp;
2279 		struct ecpp_copystate	*stp;
2280 		struct ecpp_device_id	*dp;
2281 		struct ecpp_device_id	id;
2282 
2283 		csp = (struct copyresp *)mp->b_rptr;
2284 		stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2285 		dp = (struct ecpp_device_id *)mp->b_cont->b_rptr;
2286 
2287 #ifdef _MULTI_DATAMODEL
2288 		if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2289 			struct ecpp_device_id32 *dp32;
2290 
2291 			dp32 = (struct ecpp_device_id32 *)dp;
2292 			id.mode = dp32->mode;
2293 			id.len = dp32->len;
2294 			id.addr = (char *)(uintptr_t)dp32->addr;
2295 		} else {
2296 #endif /* _MULTI_DATAMODEL */
2297 			id = *dp;
2298 #ifdef _MULTI_DATAMODEL
2299 		}
2300 #endif /* _MULTI_DATAMODEL */
2301 
2302 		ecpp_srvioc_devid(q, mp, &id, &stp->un.devid.rlen);
2303 		break;
2304 	}
2305 
2306 	case PRNIOC_GET_1284_DEVID: {
2307 		struct copyresp			*csp;
2308 		struct ecpp_copystate		*stp;
2309 		struct prn_1284_device_id	*dp;
2310 		struct ecpp_device_id		id;
2311 
2312 		csp = (struct copyresp *)mp->b_rptr;
2313 		stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2314 		dp = (struct prn_1284_device_id *)mp->b_cont->b_rptr;
2315 
2316 		/* imitate struct ecpp_device_id */
2317 		id.mode = ECPP_NIBBLE_MODE;
2318 
2319 #ifdef _MULTI_DATAMODEL
2320 		if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2321 			struct prn_1284_device_id32 *dp32;
2322 
2323 			dp32 = (struct prn_1284_device_id32 *)dp;
2324 			id.len = dp32->id_len;
2325 			id.addr = (char *)(uintptr_t)dp32->id_data;
2326 		} else {
2327 #endif /* _MULTI_DATAMODEL */
2328 			id.len = dp->id_len;
2329 			id.addr = (char *)dp->id_data;
2330 #ifdef _MULTI_DATAMODEL
2331 		}
2332 #endif /* _MULTI_DATAMODEL */
2333 
2334 		ecpp_srvioc_devid(q, mp, &id,
2335 		    (int *)&stp->un.prn_devid.id_rlen);
2336 		break;
2337 	}
2338 
2339 	case PRNIOC_SET_IFCAP: {
2340 		uint_t	ifcap, new_ifcap;
2341 
2342 		ifcap = ecpp_get_prn_ifcap(pp);
2343 		new_ifcap = *(uint_t *)mp->b_cont->b_rptr;
2344 
2345 		if (ifcap == new_ifcap) {
2346 			ecpp_ack_ioctl(q, mp);
2347 			break;
2348 		}
2349 
2350 		/* only changing PRN_BIDI is supported */
2351 		if ((ifcap ^ new_ifcap) & ~PRN_BIDI) {
2352 			ecpp_nack_ioctl(q, mp, EINVAL);
2353 			break;
2354 		}
2355 
2356 		if (new_ifcap & PRN_BIDI) {	/* go bidirectional */
2357 			ecpp_default_negotiation(pp);
2358 		} else {			/* go unidirectional */
2359 			(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
2360 		}
2361 
2362 		ecpp_ack_ioctl(q, mp);
2363 		break;
2364 	}
2365 
2366 	case PRNIOC_SET_TIMEOUTS: {
2367 		struct prn_timeouts	*prn_timeouts;
2368 
2369 		prn_timeouts = (struct prn_timeouts *)mp->b_cont->b_rptr;
2370 
2371 		if (prn_timeouts->tmo_forward > ECPP_MAX_TIMEOUT) {
2372 			ecpp_nack_ioctl(q, mp, EINVAL);
2373 			break;
2374 		}
2375 
2376 		pp->prn_timeouts = *prn_timeouts;
2377 		pp->xfer_parms.write_timeout = (int)prn_timeouts->tmo_forward;
2378 
2379 		ecpp_ack_ioctl(q, mp);
2380 		break;
2381 	}
2382 
2383 	case PRNIOC_GET_IFINFO:
2384 		ecpp_srvioc_prnif(q, mp);
2385 		break;
2386 
2387 	default:		/* unexpected ioctl type */
2388 		ecpp_nack_ioctl(q, mp, EINVAL);
2389 		break;
2390 	}
2391 }
2392 
2393 static void
2394 ecpp_srvioc_devid(queue_t *q, mblk_t *mp, struct ecpp_device_id *id, int *rlen)
2395 {
2396 	struct ecppunit		*pp;
2397 	struct copyresp		*csp;
2398 	struct ecpp_copystate	*stp;
2399 	int			error;
2400 	int			len;
2401 	int			mode;
2402 	mblk_t			*datamp;
2403 
2404 	pp = (struct ecppunit *)q->q_ptr;
2405 	csp = (struct copyresp *)mp->b_rptr;
2406 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2407 	mode = id->mode;
2408 
2409 	/* check arguments */
2410 	if ((mode < ECPP_CENTRONICS) || (mode > ECPP_ECP_MODE)) {
2411 		ecpp_error(pp->dip, "ecpp_srvioc_devid: mode=%x, len=%x\n",
2412 		    mode, id->len);
2413 		ecpp_nack_ioctl(q, mp, EINVAL);
2414 		return;
2415 	}
2416 
2417 	/* Currently only Nibble mode is supported */
2418 	if (mode != ECPP_NIBBLE_MODE) {
2419 		ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2420 		return;
2421 	}
2422 
2423 	if ((id->addr == NULL) && (id->len != 0)) {
2424 		ecpp_nack_ioctl(q, mp, EFAULT);
2425 		return;
2426 	}
2427 
2428 	/* read device ID length */
2429 	if (error = ecpp_getdevid(pp, NULL, &len, mode)) {
2430 		ecpp_nack_ioctl(q, mp, error);
2431 		goto breakout;
2432 	}
2433 
2434 	/* don't take into account two length bytes */
2435 	len -= 2;
2436 	*rlen = len;
2437 
2438 	/* limit transfer to user buffer length */
2439 	if (id->len < len) {
2440 		len = id->len;
2441 	}
2442 
2443 	if (len == 0) {
2444 		/* just return rlen */
2445 		stp->state = ECPP_ADDROUT;
2446 		ecpp_wput_iocdata_devid(q, mp,
2447 		    (uintptr_t)rlen - (uintptr_t)&stp->un);
2448 		goto breakout;
2449 	}
2450 
2451 	if ((datamp = allocb(len, BPRI_MED)) == NULL) {
2452 		ecpp_nack_ioctl(q, mp, ENOSR);
2453 		goto breakout;
2454 	}
2455 
2456 	/* read ID string */
2457 	error = ecpp_getdevid(pp, datamp->b_rptr, &len, mode);
2458 	if (error) {
2459 		freemsg(datamp);
2460 		ecpp_nack_ioctl(q, mp, error);
2461 		goto breakout;
2462 	} else {
2463 		datamp->b_wptr += len;
2464 
2465 		stp->state = ECPP_ADDROUT;
2466 		mcopyout(mp, csp->cp_private, len, id->addr, datamp);
2467 		qreply(q, mp);
2468 	}
2469 
2470 	return;
2471 
2472 breakout:
2473 	(void) ecpp_1284_termination(pp);
2474 }
2475 
2476 /*
2477  * PRNIOC_GET_IFINFO: return prnio interface info string
2478  */
2479 static void
2480 ecpp_srvioc_prnif(queue_t *q, mblk_t *mp)
2481 {
2482 	struct copyresp			*csp;
2483 	struct ecpp_copystate		*stp;
2484 	uint_t				len;
2485 	struct prn_interface_info	*ip;
2486 	struct prn_interface_info	info;
2487 	mblk_t				*datamp;
2488 #ifdef _MULTI_DATAMODEL
2489 	struct iocblk		*iocbp = (struct iocblk *)mp->b_rptr;
2490 #endif
2491 
2492 	csp = (struct copyresp *)mp->b_rptr;
2493 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2494 	ip = (struct prn_interface_info *)mp->b_cont->b_rptr;
2495 
2496 #ifdef _MULTI_DATAMODEL
2497 	if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2498 		struct prn_interface_info32 *ip32;
2499 
2500 		ip32 = (struct prn_interface_info32 *)ip;
2501 		info.if_len = ip32->if_len;
2502 		info.if_data = (char *)(uintptr_t)ip32->if_data;
2503 	} else {
2504 #endif /* _MULTI_DATAMODEL */
2505 		info = *ip;
2506 #ifdef _MULTI_DATAMODEL
2507 	}
2508 #endif /* _MULTI_DATAMODEL */
2509 
2510 	len = strlen(prn_ifinfo);
2511 	stp->un.prn_if.if_rlen = len;
2512 	stp->state = ECPP_ADDROUT;
2513 
2514 	/* check arguments */
2515 	if ((info.if_data == NULL) && (info.if_len != 0)) {
2516 		ecpp_nack_ioctl(q, mp, EFAULT);
2517 		return;
2518 	}
2519 
2520 	if (info.if_len == 0) {
2521 		/* just copyout rlen */
2522 		ecpp_wput_iocdata_devid(q, mp,
2523 		    offsetof(struct prn_interface_info, if_rlen));
2524 		return;
2525 	}
2526 
2527 	/* if needed, trim to the buffer size */
2528 	if (len > info.if_len) {
2529 		len = info.if_len;
2530 	}
2531 
2532 	if ((datamp = allocb(len, BPRI_MED)) == NULL) {
2533 		ecpp_nack_ioctl(q, mp, ENOSR);
2534 		return;
2535 	}
2536 
2537 	bcopy(&prn_ifinfo[0], datamp->b_wptr, len);
2538 	datamp->b_wptr += len;
2539 
2540 	mcopyout(mp, csp->cp_private, len, info.if_data, datamp);
2541 	qreply(q, mp);
2542 }
2543 
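/*
 * Flush the write side: cancel any transfer in progress (DMA or PIO),
 * stop the related timers, discard queued data and return the port
 * to the idle state, waking up ecpp_close() if it is waiting
 */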
2544 static void
2545 ecpp_flush(struct ecppunit *pp, int cmd)
2546 {
2547 	queue_t		*q;
2548 	uint8_t		ecr, dcr;
2549 	timeout_id_t	timeout_id, fifo_timer_id, wsrv_timer_id;
2550 
2551 	ASSERT(mutex_owned(&pp->umutex));
2552 
2553 	if (!(cmd & FWRITE)) {
2554 		return;
2555 	}
2556 
2557 	q = pp->writeq;
2558 	timeout_id = fifo_timer_id = wsrv_timer_id = 0;
2559 
2560 	ecpp_error(pp->dip, "ecpp_flush e_busy=%x\n", pp->e_busy);
2561 
2562 	/* if there is an ongoing DMA, it needs to be turned off. */
2563 	switch (pp->e_busy) {
2564 	case ECPP_BUSY:
2565 		/*
2566 		 * Change the port status to ECPP_FLUSH to
2567 		 * indicate to ecpp_wsrv that the wq is being flushed.
2568 		 */
2569 		pp->e_busy = ECPP_FLUSH;
2570 
2571 		/*
2572 		 * dma_cancelled indicates to ecpp_isr() that we have
2573 		 * turned off the DMA.  Since the mutex is held, ecpp_isr()
2574 		 * may be blocked.  Once ecpp_flush() finishes and ecpp_isr()
2575 		 * gains the mutex, ecpp_isr() will have a _reset_ DMAC.  Most
2576 		 * significantly, the DMAC will be reset after ecpp_isr() was
2577 		 * invoked.  Therefore we need to have a flag "dma_cancelled"
2578 		 * to signify when the described condition has occurred.  If
2579 		 * ecpp_isr() notes dma_cancelled, it will ignore the DMAC csr
2580 		 * and simply claim the interrupt.
2581 		 */
2582 
2583 		pp->dma_cancelled = TRUE;
2584 
2585 		/* either DMA or PIO transfer */
2586 		if (COMPAT_DMA(pp) ||
2587 		    (pp->current_mode == ECPP_ECP_MODE) ||
2588 		    (pp->current_mode == ECPP_DIAG_MODE)) {
2589 			/*
2590 			 * if the bcr is zero, then DMA is complete and
2591 			 * we are waiting for the fifo to drain.  Therefore,
2592 			 * turn off dma.
2593 			 */
2594 			if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
2595 				ecpp_error(pp->dip,
2596 				    "ecpp_flush: dma_stop failed.\n");
2597 			}
2598 
2599 			/*
2600 			 * If the status of the port is ECPP_BUSY,
2601 			 * the DMA was stopped either explicitly above or by
2602 			 * ecpp_isr(), but the FIFO hasn't drained yet.  In either
2603 			 * case, we need to unbind the dma mappings.
2604 			 */
2605 			if (ddi_dma_unbind_handle(
2606 			    pp->dma_handle) != DDI_SUCCESS)
2607 				ecpp_error(pp->dip,
2608 				    "ecpp_flush: unbind failed.\n");
2609 
2610 			if (pp->msg != NULL) {
2611 				freemsg(pp->msg);
2612 				pp->msg = NULL;
2613 			}
2614 		} else {
2615 			/*
2616 			 * PIO transfer: disable nAck interrupts
2617 			 */
2618 			dcr = DCR_READ(pp);
2619 			dcr &= ~(ECPP_REV_DIR | ECPP_INTR_EN);
2620 			DCR_WRITE(pp, dcr);
2621 			ECPP_MASK_INTR(pp);
2622 		}
2623 
2624 		/*
2625 		 * The transfer is cleaned up.  There may or may not be data
2626 		 * in the fifo.  We don't care at this point: SuperIO may or
2627 		 * may not transfer the remaining bytes in the fifo; it doesn't
2628 		 * matter.  All that is important at this stage is that no more
2629 		 * fifo timers are started.
2630 		 */
2631 
2632 		timeout_id = pp->timeout_id;
2633 		fifo_timer_id = pp->fifo_timer_id;
2634 		pp->timeout_id = pp->fifo_timer_id = 0;
2635 		pp->softintr_pending = 0;
2636 
2637 		break;
2638 
2639 	case ECPP_ERR:
2640 		/*
2641 		 * Change the port status to ECPP_FLUSH to
2642 		 * indicate to ecpp_wsrv that the wq is being flushed.
2643 		 */
2644 		pp->e_busy = ECPP_FLUSH;
2645 
2646 		/*
2647 		 *  Most likely there are mblks in the queue,
2648 		 *  but the driver cannot transmit because
2649 		 *  of the bad port status.  In this case,
2650 		 *  ecpp_flush() should make sure ecpp_wsrv_timer()
2651 		 *  is turned off.
2652 		 */
2653 		wsrv_timer_id = pp->wsrv_timer_id;
2654 		pp->wsrv_timer_id = 0;
2655 
2656 		break;
2657 
2658 	case ECPP_IDLE:
2659 		/* No work to do. Ready to flush */
2660 		break;
2661 
2662 	default:
2663 		ecpp_error(pp->dip,
2664 		    "ecpp_flush: illegal state %x\n", pp->e_busy);
2665 	}
2666 
2667 	/* in DIAG mode clear TFIFO if needed */
2668 	if (pp->current_mode == ECPP_DIAG_MODE) {
2669 		ecr = ECR_READ(pp);
2670 		if (!(ecr & ECPP_FIFO_EMPTY)) {
2671 			ECR_WRITE(pp,
2672 			    ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
2673 			ECR_WRITE(pp, ecr);
2674 		}
2675 	}
2676 
2677 	/* Discard all messages on the output queue. */
2678 	flushq(q, FLUSHDATA);
2679 
2680 	/* The port is no longer flushing or dma'ing for that matter. */
2681 	pp->e_busy = ECPP_IDLE;
2682 
2683 	/* Set the right phase */
2684 	if (pp->current_mode == ECPP_ECP_MODE) {
2685 		if (pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
2686 			pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
2687 		} else {
2688 			pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
2689 		}
2690 	}
2691 
2692 	/* cancel timeouts if any */
2693 	mutex_exit(&pp->umutex);
2694 
2695 	if (timeout_id) {
2696 		(void) untimeout(timeout_id);
2697 	}
2698 	if (fifo_timer_id) {
2699 		(void) untimeout(fifo_timer_id);
2700 	}
2701 	if (wsrv_timer_id) {
2702 		(void) untimeout(wsrv_timer_id);
2703 	}
2704 
2705 	mutex_enter(&pp->umutex);
2706 
2707 	cv_signal(&pp->pport_cv);	/* wake up ecpp_close() */
2708 }
2709 
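/*
 * Start a forward transfer of len bytes at addr:
 * depending on the current mode, use either DMA or PIO,
 * then schedule the transfer timeout
 */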
2710 static void
2711 ecpp_start(struct ecppunit *pp, caddr_t addr, size_t len)
2712 {
2713 	ASSERT(mutex_owned(&pp->umutex));
2714 	ASSERT(pp->e_busy == ECPP_BUSY);
2715 
2716 	ecpp_error(pp->dip,
2717 	    "ecpp_start:current_mode=%x,current_phase=%x,ecr=%x,len=%d\n",
2718 	    pp->current_mode, pp->current_phase, ECR_READ(pp), len);
2719 
2720 	pp->dma_dir = DDI_DMA_WRITE;	/* this is a forward transfer */
2721 
2722 	switch (pp->current_mode) {
2723 	case ECPP_NIBBLE_MODE:
2724 		(void) ecpp_1284_termination(pp);
2725 
2726 		/* After termination we are either Compatible or Centronics */
2727 
2728 		/* FALLTHRU */
2729 
2730 	case ECPP_CENTRONICS:
2731 	case ECPP_COMPAT_MODE:
2732 		if (pp->io_mode == ECPP_DMA) {
2733 			if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2734 				return;
2735 			}
2736 		} else {
2737 			/* PIO mode */
2738 			if (ecpp_prep_pio_xfer(pp, addr, len) == FAILURE) {
2739 				return;
2740 			}
2741 			(void) ecpp_pio_writeb(pp);
2742 		}
2743 		break;
2744 
2745 	case ECPP_DIAG_MODE: {
2746 		int	oldlen;
2747 
2748 		/* put superio into TFIFO mode, if not already */
2749 		ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
2750 		/*
2751 		 * DMA would block if the TFIFO is not empty;
2752 		 * if nobody has read these bytes by now, they're gone
2753 		 */
2754 		drv_usecwait(1);
2755 		if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
2756 			ecpp_error(pp->dip,
2757 			    "ecpp_start: TFIFO not empty, clearing\n");
2758 			ECR_WRITE(pp,
2759 			    ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
2760 			ECR_WRITE(pp,
2761 			    ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
2762 		}
2763 
2764 		/* we can DMA at most 16 bytes into TFIFO */
2765 		oldlen = len;
2766 		if (len > ECPP_FIFO_SZ) {
2767 			len = ECPP_FIFO_SZ;
2768 		}
2769 
2770 		if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2771 			return;
2772 		}
2773 
2774 		/* put the rest of data back on the queue */
2775 		if (oldlen > len) {
2776 			ecpp_putback_untransfered(pp, addr + len, oldlen - len);
2777 		}
2778 
2779 		break;
2780 	}
2781 
2782 	case ECPP_ECP_MODE:
2783 		ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
2784 		    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
2785 
2786 		/* if in Reverse Phase negotiate to Forward */
2787 		if (pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) {
2788 			if (ecp_reverse2forward(pp) == FAILURE) {
2789 				if (pp->msg) {
2790 					(void) putbq(pp->writeq, pp->msg);
2791 				} else {
2792 					ecpp_putback_untransfered(pp,
2793 					    addr, len);
2794 				}
2795 			}
2796 		}
2797 
2798 		if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2799 			return;
2800 		}
2801 
2802 		break;
2803 	}
2804 
2805 	/* schedule transfer timeout */
2806 	pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp,
2807 	    pp->xfer_parms.write_timeout * drv_usectohz(1000000));
2808 }
2809 
2810 /*
2811  * Transfer a PIO "block" a byte at a time.
2812  * The block starts at addr and ends at pp->last_byte
2813  */
2814 static uint8_t
2815 ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
2816 {
2817 	pp->next_byte = addr;
2818 	pp->last_byte = (caddr_t)((ulong_t)addr + len);
2819 
2820 	if (ecpp_check_status(pp) == FAILURE) {
2821 		/*
2822 		 * if status signals are bad, do not start PIO,
2823 		 * put everything back on the queue.
2824 		 */
2825 		ecpp_error(pp->dip,
2826 		    "ecpp_prep_pio_xfer:suspend PIO len=%d\n", len);
2827 
2828 		if (pp->msg != NULL) {
2829 			/*
2830 			 * in this circumstance we want to copy the
2831 			 * untransferred section of msg to a new mblk,
2832 			 * then free the original one.
2833 			 */
2834 			ecpp_putback_untransfered(pp,
2835 			    (void *)pp->msg->b_rptr, len);
2836 			ecpp_error(pp->dip,
2837 			    "ecpp_prep_pio_xfer: len1=%d\n", len);
2838 
2839 			freemsg(pp->msg);
2840 			pp->msg = NULL;
2841 		} else {
2842 			ecpp_putback_untransfered(pp, pp->ioblock, len);
2843 			ecpp_error(pp->dip,
2844 			    "ecpp_prep_pio_xfer: len2=%d\n", len);
2845 		}
2846 		qenable(pp->writeq);
2847 
2848 		return (FAILURE);
2849 	}
2850 
2851 	pp->dma_cancelled = FALSE;
2852 
2853 	/* pport must be in PIO mode */
2854 	if (ecr_write(pp, ECR_mode_001 |
2855 	    ECPP_INTR_MASK | ECPP_INTR_SRV) != SUCCESS) {
2856 		ecpp_error(pp->dip, "ecpp_prep_pio_xfer: failed w/ECR.\n");
2857 	}
2858 
2859 	ecpp_error(pp->dip, "ecpp_prep_pio_xfer: dcr=%x ecr=%x\n",
2860 	    DCR_READ(pp), ECR_READ(pp));
2861 
2862 	return (SUCCESS);
2863 }
2864 
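/*
 * Set up and start a DMA transfer: bind the buffer, check peripheral
 * status, program the ECR mode and start the DMAC
 */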
2865 static uint8_t
2866 ecpp_init_dma_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
2867 {
2868 	uint8_t ecr_mode[] = {
2869 		0,
2870 		ECR_mode_010,	/* Centronics */
2871 		ECR_mode_010,	/* Compat */
2872 		0,		/* Byte */
2873 		0,		/* Nibble */
2874 		ECR_mode_011,	/* ECP */
2875 		0,		/* Failure */
2876 		ECR_mode_110,	/* Diag */
2877 	};
2878 	uint8_t	ecr;
2879 
2880 	ASSERT((pp->current_mode <= ECPP_DIAG_MODE) &&
2881 	    (ecr_mode[pp->current_mode] != 0));
2882 
2883 	if (ecpp_setup_dma_resources(pp, addr, len) == FAILURE) {
2884 		qenable(pp->writeq);
2885 		return (FAILURE);
2886 	}
2887 
2888 	if (ecpp_check_status(pp) == FAILURE) {
2889 		/*
2890 		 * if status signals are bad, do not start DMA, but
2891 		 * rather put everything back on the queue.
2892 		 */
2893 		ecpp_error(pp->dip,
2894 		    "ecpp_init_dma_xfer: suspending DMA len=%d\n",
2895 		    pp->dma_cookie.dmac_size);
2896 
2897 		if (pp->msg != NULL) {
2898 			/*
2899 			 * in this circumstance we want to copy the
2900 			 * untransferred section of msg to a new mblk,
2901 			 * then free the original one.
2902 			 */
2903 			ecpp_putback_untransfered(pp,
2904 			    (void *)pp->msg->b_rptr, len);
2905 			ecpp_error(pp->dip,
2906 			    "ecpp_init_dma_xfer:a:len=%d\n", len);
2907 
2908 			freemsg(pp->msg);
2909 			pp->msg = NULL;
2910 		} else {
2911 			ecpp_putback_untransfered(pp, pp->ioblock, len);
2912 			ecpp_error(pp->dip,
2913 			    "ecpp_init_dma_xfer:b:len=%d\n", len);
2914 		}
2915 
2916 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
2917 			ecpp_error(pp->dip,
2918 			    "ecpp_init_dma_xfer: unbind FAILURE.\n");
2919 		}
2920 		qenable(pp->writeq);
2921 		return (FAILURE);
2922 	}
2923 
2924 	pp->xfercnt = pp->resid = len;
2925 	pp->dma_cancelled = FALSE;
2926 	pp->tfifo_intr = 0;
2927 
2928 	/* set the right ECR mode and disable DMA */
2929 	ecr = ecr_mode[pp->current_mode];
2930 	(void) ecr_write(pp, ecr | ECPP_INTR_SRV | ECPP_INTR_MASK);
2931 
2932 	/* prepare DMAC for a transfer */
2933 	if (ECPP_DMA_START(pp) == FAILURE) {
2934 		ecpp_error(pp->dip, "ecpp_init_dma_xfer: dma_start FAILED.\n");
2935 		return (FAILURE);
2936 	}
2937 
2938 	/* GO! */
2939 	(void) ecr_write(pp, ecr | ECPP_DMA_ENABLE | ECPP_INTR_MASK);
2940 
2941 	return (SUCCESS);
2942 }
2943 
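/*
 * Bind the [addr, addr + len) buffer to the DMA handle,
 * handling both full and partial (windowed) mappings
 */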
2944 static uint8_t
2945 ecpp_setup_dma_resources(struct ecppunit *pp, caddr_t addr, size_t len)
2946 {
2947 	int	err;
2948 	off_t	woff;
2949 	size_t	wlen;
2950 
2951 	ASSERT(pp->dma_dir == DDI_DMA_READ || pp->dma_dir == DDI_DMA_WRITE);
2952 
2953 	err = ddi_dma_addr_bind_handle(pp->dma_handle, NULL,
2954 	    addr, len, pp->dma_dir | DDI_DMA_PARTIAL,
2955 	    DDI_DMA_DONTWAIT, NULL,
2956 	    &pp->dma_cookie, &pp->dma_cookie_count);
2957 
2958 	switch (err) {
2959 	case DDI_DMA_MAPPED:
2960 		ecpp_error(pp->dip, "ecpp_setup_dma: DMA_MAPPED\n");
2961 
2962 		pp->dma_nwin = 1;
2963 		pp->dma_curwin = 1;
2964 		break;
2965 
2966 	case DDI_DMA_PARTIAL_MAP: {
2967 		ecpp_error(pp->dip, "ecpp_setup_dma: DMA_PARTIAL_MAP\n");
2968 
2969 		if (ddi_dma_numwin(pp->dma_handle,
2970 		    &pp->dma_nwin) != DDI_SUCCESS) {
2971 			(void) ddi_dma_unbind_handle(pp->dma_handle);
2972 			return (FAILURE);
2973 		}
2974 		pp->dma_curwin = 1;
2975 
2976 		/*
2977 		 * The very first window is returned by bind_handle,
2978 		 * but we must do this explicitly here, otherwise the
2979 		 * next getwin would return the wrong cookie dmac_size
2980 		 */
2981 		if (ddi_dma_getwin(pp->dma_handle, 0, &woff, &wlen,
2982 		    &pp->dma_cookie, &pp->dma_cookie_count) != DDI_SUCCESS) {
2983 			ecpp_error(pp->dip,
2984 			    "ecpp_setup_dma: ddi_dma_getwin failed!");
2985 			(void) ddi_dma_unbind_handle(pp->dma_handle);
2986 			return (FAILURE);
2987 		}
2988 
2989 		ecpp_error(pp->dip,
2990 		    "ecpp_setup_dma: cookies=%d, windows=%d"
2991 		    " addr=%lx len=%d\n",
2992 		    pp->dma_cookie_count, pp->dma_nwin,
2993 		    pp->dma_cookie.dmac_address, pp->dma_cookie.dmac_size);
2994 
2995 		break;
2996 	}
2997 
2998 	default:
2999 		ecpp_error(pp->dip, "ecpp_setup_dma: err=%x\n", err);
3000 		return (FAILURE);
3001 	}
3002 
3003 	return (SUCCESS);
3004 }
3005 
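/*
 * Acknowledge an ioctl: turn the message into an M_IOCACK
 * and send it upstream
 */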
3006 static void
3007 ecpp_ack_ioctl(queue_t *q, mblk_t *mp)
3008 {
3009 	struct iocblk  *iocbp;
3010 
3011 	mp->b_datap->db_type = M_IOCACK;
3012 	mp->b_wptr = mp->b_rptr + sizeof (struct iocblk);
3013 
3014 	if (mp->b_cont) {
3015 		freemsg(mp->b_cont);
3016 		mp->b_cont = NULL;
3017 	}
3018 
3019 	iocbp = (struct iocblk *)mp->b_rptr;
3020 	iocbp->ioc_error = 0;
3021 	iocbp->ioc_count = 0;
3022 	iocbp->ioc_rval = 0;
3023 
3024 	qreply(q, mp);
3025 }
3026 
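/*
 * Reject an ioctl: turn the message into an M_IOCNAK carrying err
 * and send it upstream
 */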
3027 static void
3028 ecpp_nack_ioctl(queue_t *q, mblk_t *mp, int err)
3029 {
3030 	struct iocblk  *iocbp;
3031 
3032 	mp->b_datap->db_type = M_IOCNAK;
3033 	mp->b_wptr = mp->b_rptr + sizeof (struct iocblk);
3034 	iocbp = (struct iocblk *)mp->b_rptr;
3035 	iocbp->ioc_error = err;
3036 
3037 	if (mp->b_cont) {
3038 		freemsg(mp->b_cont);
3039 		mp->b_cont = NULL;
3040 	}
3041 
3042 	qreply(q, mp);
3043 }
3044 
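/*
 * Hardware interrupt handler: determine whether the interrupt is ours,
 * then dispatch it to the DMA, PIO or backchannel (nErr) handler;
 * unexpected interrupts are counted and throttled
 */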
3045 uint_t
3046 ecpp_isr(caddr_t arg)
3047 {
3048 	struct ecppunit *pp = (struct ecppunit *)(void *)arg;
3049 	uint32_t	dcsr;
3050 	uint8_t		dsr;
3051 	int		cheerio_pend_counter;
3052 	int		retval = DDI_INTR_UNCLAIMED;
3053 	hrtime_t	now;
3054 
3055 	dsr = 0;
3056 	dcsr = 0;
3057 	mutex_enter(&pp->umutex);
3058 	/*
3059 	 * interrupt may occur while other thread is holding the lock
3060 	 * and cancels DMA transfer (e.g. ecpp_flush())
3061 	 * since it cannot cancel the interrupt thread,
3062 	 * it just sets dma_cancelled to TRUE,
3063 	 * telling interrupt handler to exit immediately
3064 	 */
3065 	if (pp->dma_cancelled == TRUE) {
3066 		ecpp_error(pp->dip, "dma-cancel isr\n");
3067 
3068 		pp->intr_hard++;
3069 		pp->dma_cancelled = FALSE;
3070 
3071 		mutex_exit(&pp->umutex);
3072 		return (DDI_INTR_CLAIMED);
3073 	}
3074 
3075 	/* Southbridge interrupts are handled separately */
3076 #if defined(__x86)
3077 	if (pp->hw == &x86)
3078 #else
3079 	if (pp->hw == &m1553)
3080 #endif
3081 	{
3082 		retval = ecpp_M1553_intr(pp);
3083 		if (retval == DDI_INTR_UNCLAIMED) {
3084 			goto unexpected;
3085 		}
3086 		mutex_exit(&pp->umutex);
3087 		return (DDI_INTR_CLAIMED);
3088 	}
3089 
3090 	/*
3091 	 * the intr is through the motherboard; it is faster than the PCI route.
3092 	 * Sometimes ecpp_isr() is invoked before the cheerio csr is updated.
3093 	 */
3094 	cheerio_pend_counter = ecpp_isr_max_delay;
3095 	dcsr = GET_DMAC_CSR(pp);
3096 
3097 	while (!(dcsr & DCSR_INT_PEND) && cheerio_pend_counter-- > 0) {
3098 		drv_usecwait(1);
3099 		dcsr = GET_DMAC_CSR(pp);
3100 	}
3101 
3102 	/*
3103 	 * This is a workaround for what seems to be a timing problem
3104 	 * with the delivery of interrupts and CSR updating with the
3105 	 * ebus2 csr, superio and the n_ERR pin from the peripheral.
3106 	 *
3107 	 * delay is not needed for PIO mode
3108 	 */
3109 	if (!COMPAT_PIO(pp)) {
3110 		drv_usecwait(100);
3111 		dcsr = GET_DMAC_CSR(pp);
3112 	}
3113 
3114 	/* on 97317 in Extended mode IRQ_ST of DSR is deasserted when read */
3115 	dsr = DSR_READ(pp);
3116 
3117 	/*
3118 	 * check if interrupt is for this device:
3119 	 * it should be reflected either in cheerio DCSR register
3120 	 * or in IRQ_ST bit of DSR on 97317
3121 	 */
3122 	if ((dcsr & DCSR_INT_PEND) == 0) {
3123 		if (pp->hw != &pc97317) {
3124 			goto unclaimed;
3125 		}
3126 		/*
3127 		 * on Excalibur, reading DSR will deassert the SuperIO IRQx line;
3128 		 * RIO's DCSR_INT_PEND seems to follow IRQx transitions,
3129 		 * so if DSR is read after the interrupt occurred, but before
3130 		 * we get here, IRQx and hence INT_PEND will be deasserted;
3131 		 * as a result, we can miss a service interrupt in PIO mode.
3132 		 *
3133 		 * The malicious DSR reader is BPPIOC_TESTIO, which is called
3134 		 * by LP in between data blocks to check printer status;
3135 		 * this workaround lets us not miss an interrupt.
3136 		 *
3137 		 * Also, the nErr interrupt (ECP mode) is not always reflected in DCSR.
3138 		 */
3139 		if (((dsr & ECPP_IRQ_ST) == 0) ||
3140 		    ((COMPAT_PIO(pp)) && (pp->e_busy == ECPP_BUSY)) ||
3141 		    (((dsr & ECPP_nERR) == 0) &&
3142 		    (pp->current_mode == ECPP_ECP_MODE))) {
3143 			dcsr = 0;
3144 		} else {
3145 			goto unclaimed;
3146 		}
3147 	}
3148 
3149 	pp->intr_hard++;
3150 
3151 	/* the intr is for us - check all possible interrupt sources */
3152 	if (dcsr & DCSR_ERR_PEND) {
3153 		size_t	bcr;
3154 
3155 		/* we are expecting a data transfer interrupt */
3156 		ASSERT(pp->e_busy == ECPP_BUSY);
3157 
3158 		/*
3159 		 * some kind of DMA error
3160 		 */
3161 		if (ECPP_DMA_STOP(pp, &bcr) == FAILURE) {
3162 			ecpp_error(pp->dip, "ecpp_isr: dma_stop failed\n");
3163 		}
3164 
3165 		ecpp_error(pp->dip, "ecpp_isr: DMAC ERROR bcr=%d\n", bcr);
3166 
3167 		ecpp_xfer_cleanup(pp);
3168 
3169 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
3170 			ecpp_error(pp->dip, "ecpp_isr(e): unbind failed\n");
3171 		}
3172 
3173 		mutex_exit(&pp->umutex);
3174 		return (DDI_INTR_CLAIMED);
3175 	}
3176 
3177 	if (dcsr & DCSR_TC) {
3178 		retval = ecpp_dma_ihdlr(pp);
3179 		mutex_exit(&pp->umutex);
3180 		return (DDI_INTR_CLAIMED);
3181 	}
3182 
3183 	if (COMPAT_PIO(pp)) {
3184 		retval = ecpp_pio_ihdlr(pp);
3185 		mutex_exit(&pp->umutex);
3186 		return (DDI_INTR_CLAIMED);
3187 	}
3188 
3189 	/* does peripheral need attention? */
3190 	if ((dsr & ECPP_nERR) == 0) {
3191 		retval = ecpp_nErr_ihdlr(pp);
3192 		mutex_exit(&pp->umutex);
3193 		return (DDI_INTR_CLAIMED);
3194 	}
3195 
3196 	pp->intr_hard--;
3197 
3198 unexpected:
3199 
3200 	pp->intr_spurious++;
3201 
3202 	/*
3203 	 * The following procedure tries to prevent soft hangs
3204 	 * in the event of peripheral/superio misbehaviour:
3205 	 * if the number of unexpected interrupts in the last SPUR_PERIOD ns
3206 	 * exceeds SPUR_CRITICAL, then shut off interrupts
3207 	 */
3208 	now = gethrtime();
3209 	if (pp->lastspur == 0 || now - pp->lastspur > SPUR_PERIOD) {
3210 		/* last unexpected interrupt was long ago */
3211 		pp->lastspur = now;
3212 		pp->nspur = 1;
3213 	} else {
3214 		/* last unexpected interrupt was recently */
3215 		pp->nspur++;
3216 	}
3217 
3218 	if (pp->nspur >= SPUR_CRITICAL) {
3219 		ECPP_MASK_INTR(pp);
3220 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK | ECPP_INTR_SRV);
3221 		pp->nspur = 0;
3222 		cmn_err(CE_NOTE, "%s%d: too many interrupt requests",
3223 		    ddi_get_name(pp->dip), ddi_get_instance(pp->dip));
3224 	} else {
3225 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_SRV | ECPP_INTR_MASK);
3226 	}
3227 
3228 	ecpp_error(pp->dip,
3229 	    "isr:unknown: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n",
3230 	    dcsr, ECR_READ(pp), dsr, DCR_READ(pp),
3231 	    pp->current_mode, pp->current_phase);
3232 
3233 	mutex_exit(&pp->umutex);
3234 	return (DDI_INTR_CLAIMED);
3235 
3236 unclaimed:
3237 
3238 	pp->intr_spurious++;
3239 
3240 	ecpp_error(pp->dip,
3241 	    "isr:UNCL: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n",
3242 	    dcsr, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp),
3243 	    pp->current_mode, pp->current_phase);
3244 
3245 	mutex_exit(&pp->umutex);
3246 	return (DDI_INTR_UNCLAIMED);
3247 }
3248 
3249 /*
3250  * M1553 intr handler
3251  */
3252 static uint_t
3253 ecpp_M1553_intr(struct ecppunit *pp)
3254 {
3255 	int retval = DDI_INTR_UNCLAIMED;
3256 
3257 	pp->intr_hard++;
3258 
3259 	if (pp->e_busy == ECPP_BUSY) {
3260 		/* Centronics or Compat PIO transfer */
3261 		if (COMPAT_PIO(pp)) {
3262 			return (ecpp_pio_ihdlr(pp));
3263 		}
3264 
3265 		/* Centronics or Compat DMA transfer */
3266 		if (COMPAT_DMA(pp) ||
3267 		    (pp->current_mode == ECPP_ECP_MODE) ||
3268 		    (pp->current_mode == ECPP_DIAG_MODE)) {
3269 			return (ecpp_dma_ihdlr(pp));
3270 		}
3271 	}
3272 
3273 	/* Nibble or ECP backchannel request? */
3274 	if ((DSR_READ(pp) & ECPP_nERR) == 0) {
3275 		return (ecpp_nErr_ihdlr(pp));
3276 	}
3277 
3278 	return (retval);
3279 }
3280 
3281 /*
3282  * DMA completion interrupt handler
3283  */
3284 static uint_t
3285 ecpp_dma_ihdlr(struct ecppunit *pp)
3286 {
3287 	clock_t	tm;
3288 
3289 	ecpp_error(pp->dip, "ecpp_dma_ihdlr(%x): ecr=%x, dsr=%x, dcr=%x\n",
3290 	    pp->current_mode, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
3291 
3292 	/* we are expecting a data transfer interrupt */
3293 	ASSERT(pp->e_busy == ECPP_BUSY);
3294 
3295 	/* Intr generated while invoking TFIFO mode. Exit */
3296 	if (pp->tfifo_intr == 1) {
3297 		pp->tfifo_intr = 0;
3298 		ecpp_error(pp->dip, "ecpp_dma_ihdlr: tfifo_intr is 1\n");
3299 		return (DDI_INTR_CLAIMED);
3300 	}
3301 
3302 	if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
3303 		ecpp_error(pp->dip, "ecpp_dma_ihdlr: dma_stop failed\n");
3304 	}
3305 
3306 	if (pp->current_mode == ECPP_ECP_MODE &&
3307 	    pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
3308 		ecpp_ecp_read_completion(pp);
3309 	} else {
3310 		/*
3311 		 * fifo_timer() will do the cleanup when the FIFO drains
3312 		 */
3313 		if ((ECR_READ(pp) & ECPP_FIFO_EMPTY) ||
3314 		    (pp->current_mode == ECPP_DIAG_MODE)) {
3315 			tm = 0;	/* no use in waiting if FIFO is already empty */
3316 		} else {
3317 			tm = drv_usectohz(FIFO_DRAIN_PERIOD);
3318 		}
3319 		pp->fifo_timer_id = timeout(ecpp_fifo_timer, (caddr_t)pp, tm);
3320 	}
3321 
3322 	/*
3323 	 * Stop the DMA transfer timeout timer
3324 	 * this operation will temporarily give up the mutex,
3325 	 * so we do it in the end of the handler to avoid races
3326 	 */
3327 	ecpp_untimeout_unblock(pp, &pp->timeout_id);
3328 
3329 	return (DDI_INTR_CLAIMED);
3330 }
3331 
3332 /*
3333  * ecpp_pio_ihdlr() is a PIO interrupt processing routine
3334  * It masks interrupts, updates statistics and initiates next byte transfer
3335  */
3336 static uint_t
3337 ecpp_pio_ihdlr(struct ecppunit *pp)
3338 {
3339 	ASSERT(mutex_owned(&pp->umutex));
3340 	ASSERT(pp->e_busy == ECPP_BUSY);
3341 
3342 	/* update statistics */
3343 	pp->joblen++;
3344 	pp->ctxpio_obytes++;
3345 
3346 	/* disable nAck interrupts */
3347 	ECPP_MASK_INTR(pp);
3348 	DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_REV_DIR | ECPP_INTR_EN));
3349 
3350 	/*
3351 	 * If it was the last byte of the data block, clean up;
3352 	 * otherwise trigger a soft interrupt to send the next byte
3353 	 */
3354 	if (pp->next_byte >= pp->last_byte) {
3355 		ecpp_xfer_cleanup(pp);
3356 		ecpp_error(pp->dip,
3357 		    "ecpp_pio_ihdlr: pp->joblen=%d,pp->ctx_cf=%d,\n",
3358 		    pp->joblen, pp->ctx_cf);
3359 	} else {
3360 		if (pp->softintr_pending) {
3361 			ecpp_error(pp->dip,
3362 			    "ecpp_pio_ihdlr:E: next byte in progress\n");
3363 		} else {
3364 			pp->softintr_flags = ECPP_SOFTINTR_PIONEXT;
3365 			pp->softintr_pending = 1;
3366 			ddi_trigger_softintr(pp->softintr_id);
3367 		}
3368 	}
3369 
3370 	return (DDI_INTR_CLAIMED);
3371 }
3372 
3373 /*
3374  * ecpp_pio_writeb() sends a byte using Centronics handshake
3375  */
3376 static void
3377 ecpp_pio_writeb(struct ecppunit *pp)
3378 {
3379 	uint8_t	dcr;
3380 
3381 	dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
3382 	dcr |= ECPP_INTR_EN;
3383 
3384 	/* send the next byte */
3385 	DATAR_WRITE(pp, *(pp->next_byte++));
3386 
3387 	drv_usecwait(pp->data_setup_time);
3388 
3389 	/* Now Assert (neg logic) nStrobe */
3390 	if (dcr_write(pp, dcr | ECPP_STB) == FAILURE) {
3391 		ecpp_error(pp->dip, "ecpp_pio_writeb:1: failed w/DCR\n");
3392 	}
3393 
3394 	/* Enable nAck interrupts */
3395 	(void) DSR_READ(pp);	/* ensure IRQ_ST is armed */
3396 	ECPP_UNMASK_INTR(pp);
3397 
3398 	drv_usecwait(pp->strobe_pulse_width);
3399 
3400 	if (dcr_write(pp, dcr & ~ECPP_STB) == FAILURE) {
3401 		ecpp_error(pp->dip, "ecpp_pio_writeb:2: failed w/DCR\n");
3402 	}
3403 }
3404 
3405 /*
3406  * Backchannel request interrupt handler
3407  */
3408 static uint_t
3409 ecpp_nErr_ihdlr(struct ecppunit *pp)
3410 {
3411 	ecpp_error(pp->dip, "ecpp_nErr_ihdlr: mode=%x, phase=%x\n",
3412 	    pp->current_mode, pp->current_phase);
3413 
3414 	if (pp->oflag != TRUE) {
3415 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: not open!\n");
3416 		return (DDI_INTR_UNCLAIMED);
3417 	}
3418 
3419 	if (pp->e_busy == ECPP_BUSY) {
3420 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: busy\n");
3421 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK);
3422 		return (DDI_INTR_CLAIMED);
3423 	}
3424 
3425 	/* mask nErr & nAck interrupts */
3426 	ECPP_MASK_INTR(pp);
3427 	DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_INTR_EN | ECPP_REV_DIR));
3428 	ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK);
3429 
3430 	/* going reverse */
3431 	switch (pp->current_mode) {
3432 	case ECPP_ECP_MODE:
3433 		/*
3434 		 * Peripheral asserts nPeriphRequest (nFault)
3435 		 */
3436 		break;
3437 	case ECPP_NIBBLE_MODE:
3438 		/*
3439 		 * Event 18: Periph asserts nErr to indicate data avail
3440 		 * Event 19: After waiting minimum pulse width,
3441 		 *   periph sets nAck high to generate an interrupt
3442 		 *
3443 		 * Interface is in Interrupt Phase
3444 		 */
3445 		pp->current_phase = ECPP_PHASE_NIBT_REVINTR;
3446 
3447 		break;
3448 	default:
3449 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: wrong mode!\n");
3450 		return (DDI_INTR_UNCLAIMED);
3451 	}
3452 
3453 	(void) ecpp_backchan_req(pp);	/* put backchannel request on the wq */
3454 
3455 	return (DDI_INTR_CLAIMED);
3456 }
3457 
3458 /*
3459  * Softintr handler does work according to softintr_flags:
3460  * in case of ECPP_SOFTINTR_PIONEXT it sends next byte of PIO transfer
3461  */
3462 static uint_t
3463 ecpp_softintr(caddr_t arg)
3464 {
3465 	struct ecppunit *pp = (struct ecppunit *)arg;
3466 	uint32_t unx_len, ecpp_reattempts = 0;
3467 
3468 	mutex_enter(&pp->umutex);
3469 
3470 	pp->intr_soft++;
3471 
3472 	if (!pp->softintr_pending) {
3473 		mutex_exit(&pp->umutex);
3474 		return (DDI_INTR_CLAIMED);
3475 	} else {
3476 		pp->softintr_pending = 0;
3477 	}
3478 
3479 	if (pp->softintr_flags & ECPP_SOFTINTR_PIONEXT) {
3480 		pp->softintr_flags &= ~ECPP_SOFTINTR_PIONEXT;
3481 		/*
3482 		 * Send the next byte in PIO mode
3483 		 */
3484 		ecpp_reattempts = 0;
3485 		do {
3486 			if (ecpp_check_status(pp) == SUCCESS) {
3487 				pp->e_busy = ECPP_BUSY;
3488 				break;
3489 			}
3490 			drv_usecwait(1);
3491 			if (pp->isr_reattempt_high < ecpp_reattempts) {
3492 				pp->isr_reattempt_high = ecpp_reattempts;
3493 			}
3494 		} while (++ecpp_reattempts < pp->wait_for_busy);
3495 
3496 		/* if the peripheral still has not recovered, suspend the transfer */
3497 		if (pp->e_busy == ECPP_ERR) {
3498 			++pp->ctx_cf; /* check status fail */
3499 			ecpp_error(pp->dip, "ecpp_softintr:check_status:F: "
3500 			    "dsr=%x jl=%d cf_isr=%d\n",
3501 			    DSR_READ(pp), pp->joblen, pp->ctx_cf);
3502 
3503 			/*
3504 			 * if status signals are bad,
3505 			 * put everything back on the wq.
3506 			 */
3507 			unx_len = pp->last_byte - pp->next_byte;
3508 			if (pp->msg != NULL) {
3509 				ecpp_putback_untransfered(pp,
3510 				    (void *)pp->msg->b_rptr, unx_len);
3511 				ecpp_error(pp->dip,
3512 				    "ecpp_softintr:e1:unx_len=%d\n", unx_len);
3513 
3514 				freemsg(pp->msg);
3515 				pp->msg = NULL;
3516 			} else {
3517 				ecpp_putback_untransfered(pp,
3518 				    pp->next_byte, unx_len);
3519 				ecpp_error(pp->dip,
3520 				    "ecpp_softintr:e2:unx_len=%d\n", unx_len);
3521 			}
3522 
3523 			ecpp_xfer_cleanup(pp);
3524 			pp->e_busy = ECPP_ERR;
3525 			qenable(pp->writeq);
3526 		} else {
3527 			/* send the next one */
3528 			pp->e_busy = ECPP_BUSY;
3529 			(void) ecpp_pio_writeb(pp);
3530 		}
3531 	}
3532 
3533 	mutex_exit(&pp->umutex);
3534 	return (DDI_INTR_CLAIMED);
3535 }
3536 
3537 
3538 /*
3539  * Transfer clean-up:
3540  *	shut down the DMAC
3541  *	stop the transfer timer
3542  *	enable write queue
3543  */
3544 static void
3545 ecpp_xfer_cleanup(struct ecppunit *pp)
3546 {
3547 	ASSERT(mutex_owned(&pp->umutex));
3548 
3549 	/*
3550 	 * if we did not use the ioblock, the mblk that
3551 	 * was used should be freed.
3552 	 */
3553 	if (pp->msg != NULL) {
3554 		freemsg(pp->msg);
3555 		pp->msg = NULL;
3556 	}
3557 
3558 	/* The port is no longer active */
3559 	pp->e_busy = ECPP_IDLE;
3560 
3561 	/* Stop the transfer timeout timer */
3562 	ecpp_untimeout_unblock(pp, &pp->timeout_id);
3563 
3564 	qenable(pp->writeq);
3565 }
3566 
3567 /*VARARGS*/
3568 static void
3569 ecpp_error(dev_info_t *dip, char *fmt, ...)
3570 {
3571 	static	long	last;
3572 	static	char	*lastfmt;
3573 	char		msg_buffer[255];
3574 	va_list	ap;
3575 	time_t	now;
3576 
3577 	if (!ecpp_debug) {
3578 		return;
3579 	}
3580 
3581 	/*
3582 	 * This function is supposed to be a quick non-blockable
3583 	 * wrapper for cmn_err(9F), which provides a sensible degree
3584 	 * of debug message throttling.  Not using any type of lock
3585 	 * is a requirement, but this also leaves two static variables
3586 	 * - last and lastfmt - unprotected. However, this will not do
3587 	 * any harm to driver functionality, it can only weaken throttling.
3588 	 * The following directive asks warlock to not worry about these
3589 	 * variables.
3590 	 */
3591 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(last, lastfmt))
3592 
3593 	/*
3594 	 * Don't print same error message too often.
3595 	 */
3596 	now = gethrestime_sec();
3597 	if ((last == (now & ~1)) && (lastfmt == fmt))
3598 		return;
3599 
3600 	last = now & ~1;
3601 	lastfmt = fmt;
3602 
3603 	va_start(ap, fmt);
3604 	(void) vsprintf(msg_buffer, fmt, ap);
3605 	cmn_err(CE_CONT, "%s%d: %s", ddi_get_name(dip),
3606 	    ddi_get_instance(dip), msg_buffer);
3607 	va_end(ap);
3608 }
3609 
3610 /*
3611  * Forward transfer timeout
3612  */
3613 static void
3614 ecpp_xfer_timeout(void *arg)
3615 {
3616 	struct ecppunit	*pp = arg;
3617 	void		*unx_addr;
3618 	size_t		unx_len, xferd;
3619 	uint8_t		dcr;
3620 	timeout_id_t	fifo_timer_id;
3621 
3622 	mutex_enter(&pp->umutex);
3623 
3624 	if (pp->timeout_id == 0) {
3625 		mutex_exit(&pp->umutex);
3626 		return;
3627 	} else {
3628 		pp->timeout_id = 0;
3629 	}
3630 
3631 	pp->xfer_tout++;
3632 
3633 	pp->dma_cancelled = TRUE;	/* prevent race with isr() */
3634 
3635 	if (COMPAT_PIO(pp)) {
3636 		/*
3637 		 * PIO mode timeout
3638 		 */
3639 
3640 		/* turn off nAck interrupts */
3641 		dcr = DCR_READ(pp);
3642 		(void) dcr_write(pp, dcr & ~(ECPP_REV_DIR | ECPP_INTR_EN));
3643 		ECPP_MASK_INTR(pp);
3644 
3645 		pp->softintr_pending = 0;
3646 		unx_len = pp->last_byte - pp->next_byte;
3647 		ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len);
3648 
3649 		if (unx_len > 0) {
3650 			unx_addr = pp->next_byte;
3651 		} else {
3652 			ecpp_xfer_cleanup(pp);
3653 			qenable(pp->writeq);
3654 			mutex_exit(&pp->umutex);
3655 			return;
3656 		}
3657 	} else {
3658 		/*
3659 		 * DMA mode timeout
3660 		 *
3661 		 * If the DMAC fails to shut off, continue anyway and attempt
3662 		 * to put untransferred data back on the queue.
3663 		 */
3664 		if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) {
3665 			ecpp_error(pp->dip,
3666 			    "ecpp_xfer_timeout: failed dma_stop\n");
3667 		}
3668 
3669 		ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len);
3670 
3671 		if (ddi_dma_unbind_handle(pp->dma_handle) == DDI_FAILURE) {
3672 			ecpp_error(pp->dip,
3673 			    "ecpp_xfer_timeout: failed unbind\n");
3674 		}
3675 
3676 		/*
3677 		 * if the bcr is zero, then DMA is complete and
3678 		 * we are waiting for the fifo to drain.  So let
3679 		 * ecpp_fifo_timer() look after the clean up.
3680 		 */
3681 		if (unx_len == 0) {
3682 			qenable(pp->writeq);
3683 			mutex_exit(&pp->umutex);
3684 			return;
3685 		} else {
3686 			xferd = pp->dma_cookie.dmac_size - unx_len;
3687 			pp->resid -= xferd;
3688 			unx_len = pp->resid;
3689 
3690 			/* update statistics */
3691 			pp->obytes[pp->current_mode] += xferd;
3692 			pp->joblen += xferd;
3693 
3694 			if (pp->msg != NULL) {
3695 				unx_addr = (caddr_t)pp->msg->b_wptr - unx_len;
3696 			} else {
3697 				unx_addr = pp->ioblock +
3698 				    (pp->xfercnt - unx_len);
3699 			}
3700 		}
3701 	}
3702 
3703 	/* Following code is common for PIO and DMA modes */
3704 
3705 	ecpp_putback_untransfered(pp, (caddr_t)unx_addr, unx_len);
3706 
3707 	if (pp->msg != NULL) {
3708 		freemsg(pp->msg);
3709 		pp->msg = NULL;
3710 	}
3711 
3712 	/* mark the error status structure */
3713 	pp->timeout_error = 1;
3714 	pp->e_busy = ECPP_ERR;
3715 	fifo_timer_id = pp->fifo_timer_id;
3716 	pp->fifo_timer_id = 0;
3717 
3718 	qenable(pp->writeq);
3719 
3720 	mutex_exit(&pp->umutex);
3721 
3722 	if (fifo_timer_id) {
3723 		(void) untimeout(fifo_timer_id);
3724 	}
3725 }
3726 
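/*
 * Copy the untransferred portion of a buffer into a fresh mblk
 * and put it back at the head of the write queue
 */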
3727 static void
3728 ecpp_putback_untransfered(struct ecppunit *pp, void *startp, uint_t len)
3729 {
3730 	mblk_t *new_mp;
3731 
3732 	ecpp_error(pp->dip, "ecpp_putback_untrans=%d\n", len);
3733 
3734 	if (len == 0) {
3735 		return;
3736 	}
3737 
3738 	new_mp = allocb(len, BPRI_MED);
3739 	if (new_mp == NULL) {
3740 		ecpp_error(pp->dip,
3741 		    "ecpp_putback_untransfered: allocb FAILURE.\n");
3742 		return;
3743 	}
3744 
3745 	bcopy(startp, new_mp->b_rptr, len);
3746 	new_mp->b_wptr = new_mp->b_rptr + len;
3747 
3748 	if (!putbq(pp->writeq, new_mp)) {
3749 		freemsg(new_mp);
3750 	}
3751 }
3752 
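/*
 * Write the ECR and verify by reading it back (ignoring the two
 * read-only low bits), retrying up to ECPP_REG_WRITE_MAX_LOOP times
 */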
3753 static uchar_t
3754 ecr_write(struct ecppunit *pp, uint8_t ecr_byte)
3755 {
3756 	int i, current_ecr;
3757 
3758 	for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
3759 		ECR_WRITE(pp, ecr_byte);
3760 
3761 		current_ecr = ECR_READ(pp);
3762 
3763 		/* mask off the lower two read-only bits */
3764 		if ((ecr_byte & 0xFC) == (current_ecr & 0xFC))
3765 			return (SUCCESS);
3766 	}
3767 	return (FAILURE);
3768 }
3769 
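/*
 * Write the DCR and verify bits 0-4 by reading it back,
 * retrying up to ECPP_REG_WRITE_MAX_LOOP times
 */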
3770 static uchar_t
3771 dcr_write(struct ecppunit *pp, uint8_t dcr_byte)
3772 {
3773 	uint8_t current_dcr;
3774 	int i;
3775 
3776 	for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
3777 		DCR_WRITE(pp, dcr_byte);
3778 
3779 		current_dcr = DCR_READ(pp);
3780 
3781 		/* compare only bits 0-4 (the direction bit reads back as 1) */
3782 		if ((dcr_byte & 0x1F) == (current_dcr & 0x1F))
3783 			return (SUCCESS);
3784 	}
3785 	ecpp_error(pp->dip,
3786 	    "(%d)dcr_write: dcr written =%x, dcr readback =%x\n",
3787 	    i, dcr_byte, current_dcr);
3788 
3789 	return (FAILURE);
3790 }
3791 
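/*
 * Reset the port registers: write ECPP_SLCTIN | ECPP_nINIT to the DCR
 * and put the ECR back into mode 001 (PIO) with interrupts masked
 */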
3792 static uchar_t
3793 ecpp_reset_port_regs(struct ecppunit *pp)
3794 {
3795 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
3796 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
3797 	return (SUCCESS);
3798 }
3799 
3800 /*
3801  * The data transferred by the DMA engine goes through the FIFO,
3802  * so that when the DMA counter reaches zero (and an interrupt occurs)
3803  * the FIFO can still contain data. If this is the case, the ISR will
3804  * schedule this callback to wait until the FIFO drains or a timeout occurs.
3805  */
3806 static void
3807 ecpp_fifo_timer(void *arg)
3808 {
3809 	struct ecppunit *pp = arg;
3810 	uint8_t	ecr;
3811 	timeout_id_t	timeout_id;
3812 
3813 	mutex_enter(&pp->umutex);
3814 
3815 	/*
3816 	 * If the FIFO timer has been turned off, exit.
3817 	 */
3818 	if (pp->fifo_timer_id == 0) {
3819 		ecpp_error(pp->dip, "ecpp_fifo_timer: untimedout\n");
3820 		mutex_exit(&pp->umutex);
3821 		return;
3822 	} else {
3823 		pp->fifo_timer_id = 0;
3824 	}
3825 
3826 	/*
3827 	 * If the FIFO is not empty, restart the timer.  Wait FIFO_DRAIN_PERIOD
3828 	 * (250 ms) and check FIFO_EMPTY bit again. Repeat until FIFO is
3829 	 * empty or until 10 * FIFO_DRAIN_PERIOD expires.
3830 	 */
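	/* i.e. at most 10 * FIFO_DRAIN_PERIOD = 2.5 seconds of waiting */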
3831 	ecr = ECR_READ(pp);
3832 
3833 	if ((pp->current_mode != ECPP_DIAG_MODE) &&
3834 	    (((ecr & ECPP_FIFO_EMPTY) == 0) &&
3835 	    (pp->ecpp_drain_counter < 10))) {
3836 
3837 		ecpp_error(pp->dip,
3838 		    "ecpp_fifo_timer(%d):FIFO not empty:ecr=%x\n",
3839 		    pp->ecpp_drain_counter, ecr);
3840 
3841 		pp->fifo_timer_id = timeout(ecpp_fifo_timer,
3842 		    (caddr_t)pp, drv_usectohz(FIFO_DRAIN_PERIOD));
3843 		++pp->ecpp_drain_counter;
3844 
3845 		mutex_exit(&pp->umutex);
3846 		return;
3847 	}
3848 
3849 	if (pp->current_mode != ECPP_DIAG_MODE) {
3850 		/*
3851 		 * If the FIFO won't drain after 10 FIFO_DRAIN_PERIODs
3852 		 * then don't wait any longer.  Simply clean up the transfer.
3853 		 */
3854 		if (pp->ecpp_drain_counter >= 10) {
3855 			ecpp_error(pp->dip, "ecpp_fifo_timer(%d):"
3856 			    " clearing FIFO,can't wait:ecr=%x\n",
3857 			    pp->ecpp_drain_counter, ecr);
3858 		} else {
3859 			ecpp_error(pp->dip,
3860 			    "ecpp_fifo_timer(%d):FIFO empty:ecr=%x\n",
3861 			    pp->ecpp_drain_counter, ecr);
3862 		}
3863 
3864 		pp->ecpp_drain_counter = 0;
3865 	}
3866 
3867 	/*
3868 	 * Main section of routine:
3869 	 *  - stop the DMA transfer timer
3870 	 *  - program DMA with next cookie/window or unbind the DMA mapping
3871 	 *  - update stats
3872 	 *  - if last mblk in queue, signal to close() & return to idle state
3873 	 */
3874 
3875 	/* Stop the DMA transfer timeout timer */
3876 	timeout_id = pp->timeout_id;
3877 	pp->timeout_id = 0;
3878 
3879 	/* data has drained from the FIFO; it is now OK to free DMA resources */
3880 	if (pp->current_mode == ECPP_ECP_MODE ||
3881 	    pp->current_mode == ECPP_DIAG_MODE ||
3882 	    COMPAT_DMA(pp)) {
3883 		off_t	off;
3884 		size_t	len;
3885 
3886 		/* update residual */
3887 		pp->resid -= pp->dma_cookie.dmac_size;
3888 
3889 		/* update statistics */
3890 		pp->joblen += pp->dma_cookie.dmac_size;
3891 		if (pp->dma_dir == DDI_DMA_WRITE) {
3892 			pp->obytes[pp->current_mode] +=
3893 			    pp->dma_cookie.dmac_size;
3894 		} else {
3895 			pp->ibytes[pp->current_mode] +=
3896 			    pp->dma_cookie.dmac_size;
3897 		}
3898 
3899 		/*
3900 		 * See whether any cookies/windows are left
3901 		 */
3902 		if (--pp->dma_cookie_count > 0) {
3903 			/* process the next cookie */
3904 			ddi_dma_nextcookie(pp->dma_handle,
3905 			    &pp->dma_cookie);
3906 		} else if (pp->dma_curwin < pp->dma_nwin) {
3907 			/* process the next window */
3908 			if (ddi_dma_getwin(pp->dma_handle,
3909 			    pp->dma_curwin, &off, &len,
3910 			    &pp->dma_cookie,
3911 			    &pp->dma_cookie_count) != DDI_SUCCESS) {
3912 				ecpp_error(pp->dip,
3913 				    "ecpp_fifo_timer: ddi_dma_getwin failed\n");
3914 				goto dma_done;
3915 			}
3916 
3917 			pp->dma_curwin++;
3918 		} else {
3919 			goto dma_done;
3920 		}
3921 
3922 		ecpp_error(pp->dip, "ecpp_fifo_timer: next addr=%llx len=%d\n",
3923 		    pp->dma_cookie.dmac_address,
3924 		    pp->dma_cookie.dmac_size);
3925 
3926 		/* kick off new transfer */
3927 		if (ECPP_DMA_START(pp) != SUCCESS) {
3928 			ecpp_error(pp->dip,
3929 			    "ecpp_fifo_timer: dma_start failed\n");
3930 			goto dma_done;
3931 		}
3932 
3933 		(void) ecr_write(pp, (ecr & 0xe0) |
3934 		    ECPP_DMA_ENABLE | ECPP_INTR_MASK);
3935 
3936 		mutex_exit(&pp->umutex);
3937 
3938 		if (timeout_id) {
3939 			(void) untimeout(timeout_id);
3940 		}
3941 		return;
3942 
3943 	dma_done:
3944 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
3945 			ecpp_error(pp->dip, "ecpp_fifo_timer: unbind failed\n");
3946 		} else {
3947 			ecpp_error(pp->dip, "ecpp_fifo_timer: unbind ok\n");
3948 		}
3949 	}
3950 
3951 	/*
3952 	 * if we did not use the dmablock, the mblk that
3953 	 * was used should be freed.
3954 	 */
3955 	if (pp->msg != NULL) {
3956 		freemsg(pp->msg);
3957 		pp->msg = NULL;
3958 	}
3959 
3960 	/* The port is no longer active */
3961 	pp->e_busy = ECPP_IDLE;
3962 
3963 	qenable(pp->writeq);
3964 
3965 	mutex_exit(&pp->umutex);
3966 
3967 	if (timeout_id) {
3968 		(void) untimeout(timeout_id);
3969 	}
3970 }
3971 
3972 /*
3973  * In Compatibility mode, check if the peripheral is ready to accept data
3974  */
3975 static uint8_t
3976 ecpp_check_status(struct ecppunit *pp)
3977 {
3978 	uint8_t	dsr;
3979 	uint8_t statmask;
3980 
3981 	if (pp->current_mode == ECPP_ECP_MODE ||
3982 	    pp->current_mode == ECPP_DIAG_MODE)
3983 		return (SUCCESS);
3984 
3985 	statmask = ECPP_nERR | ECPP_SLCT | ECPP_nBUSY | ECPP_nACK;
3986 
3987 	dsr = DSR_READ(pp);
3988 	if ((dsr & ECPP_PE) || ((dsr & statmask) != statmask)) {
3989 		pp->e_busy = ECPP_ERR;
3990 		return (FAILURE);
3991 	} else {
3992 		return (SUCCESS);
3993 	}
3994 }
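
/*
 * Illustrative failure cases for the check above: a printer that is out
 * of paper raises PE, and one that has gone offline drops SLCT; either
 * condition marks the port ECPP_ERR, and the write service path will
 * recheck the status later via ecpp_wsrv_timer() below.
 */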
3995 
3996 /*
3997  * if the peripheral is not ready to accept data, write service routine
3998  * periodically reschedules itself to recheck peripheral status
3999  * and start data transfer as soon as possible
4000  */
4001 static void
4002 ecpp_wsrv_timer(void *arg)
4003 {
4004 	struct ecppunit *pp = arg;
4005 
4006 	ecpp_error(pp->dip, "ecpp_wsrv_timer: starting\n");
4007 
4008 	mutex_enter(&pp->umutex);
4009 
4010 	if (pp->wsrv_timer_id == 0) {
4011 		mutex_exit(&pp->umutex);
4012 		return;
4013 	} else {
4014 		pp->wsrv_timer_id = 0;
4015 	}
4016 
4017 	ecpp_error(pp->dip, "ecpp_wsrv_timer: qenabling...\n");
4018 
4019 	qenable(pp->writeq);
4020 
4021 	mutex_exit(&pp->umutex);
4022 }
4023 
4024 /*
4025  * Allocate a message indicating a backchannel request
4026  * and put it on the write queue
4027  */
4028 static int
4029 ecpp_backchan_req(struct ecppunit *pp)
4030 {
4031 	mblk_t	*mp;
4032 
4033 	if ((mp = allocb(sizeof (int), BPRI_MED)) == NULL) {
4034 		ecpp_error(pp->dip, "ecpp_backchan_req: allocb failed\n");
4035 		return (FAILURE);
4036 	} else {
4037 		mp->b_datap->db_type = M_CTL;
4038 		*(int *)mp->b_rptr = ECPP_BACKCHANNEL;
4039 		mp->b_wptr = mp->b_rptr + sizeof (int);
4040 		if (!putbq(pp->writeq, mp)) {
4041 			ecpp_error(pp->dip, "ecpp_backchan_req:putbq failed\n");
4042 			freemsg(mp);
4043 			return (FAILURE);
4044 		}
4045 		return (SUCCESS);
4046 	}
4047 }
4048 
4049 /*
4050  * Cancel the function scheduled with timeout(9F)
4051  * This function is to be called with the mutex held
4052  */
4053 static void
4054 ecpp_untimeout_unblock(struct ecppunit *pp, timeout_id_t *id)
4055 {
4056 	timeout_id_t	saved_id;
4057 
4058 	ASSERT(mutex_owned(&pp->umutex));
4059 
4060 	if (*id) {
4061 		saved_id = *id;
4062 		*id = 0;
4063 		mutex_exit(&pp->umutex);
4064 		(void) untimeout(saved_id);
4065 		mutex_enter(&pp->umutex);
4066 	}
4067 }
4068 
4069 /*
4070  * get prnio interface capabilities
4071  */
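/*
 * Illustrative user-land sketch (not part of this driver; it assumes the
 * prnio(7I) PRNIOC_GET_IFCAP ioctl and a /dev/printers/0 node) showing
 * how the capability word built below might be consumed:
 *
 *	uint_t ifcap;
 *	int fd = open("/dev/printers/0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, PRNIOC_GET_IFCAP, &ifcap) == 0 &&
 *	    (ifcap & PRN_BIDI)) {
 *		... peripheral-to-host transfers are possible ...
 *	}
 */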
4072 static uint_t
4073 ecpp_get_prn_ifcap(struct ecppunit *pp)
4074 {
4075 	uint_t	ifcap;
4076 
4077 	ifcap = PRN_1284_DEVID | PRN_TIMEOUTS | PRN_STREAMS;
4078 
4079 	/* status (DSR) only makes sense in Centronics & Compat modes */
4080 	if (pp->current_mode == ECPP_CENTRONICS ||
4081 	    pp->current_mode == ECPP_COMPAT_MODE) {
4082 		ifcap |= PRN_1284_STATUS;
4083 	} else if (pp->current_mode == ECPP_NIBBLE_MODE ||
4084 	    pp->current_mode == ECPP_ECP_MODE) {
4085 		ifcap |= PRN_BIDI;
4086 	}
4087 
4088 	return (ifcap);
4089 }
4090 
4091 /*
4092  * Determine SuperI/O type
4093  */
4094 static struct ecpp_hw_bind *
4095 ecpp_determine_sio_type(struct ecppunit *pp)
4096 {
4097 	struct ecpp_hw_bind	*hw_bind;
4098 	char			*name;
4099 	int			i;
4100 
4101 	name = ddi_binding_name(pp->dip);
4102 
4103 	for (hw_bind = NULL, i = 0; i < NELEM(ecpp_hw_bind); i++) {
4104 		if (strcmp(name, ecpp_hw_bind[i].name) == 0) {
4105 			hw_bind = &ecpp_hw_bind[i];
4106 			break;
4107 		}
4108 	}
4109 
4110 	return (hw_bind);
4111 }
4112 
4113 
4114 /*
4115  *
4116  * IEEE 1284 support routines:
4117  *	negotiation and termination;
4118  *	phase transitions;
4119  *	device ID;
4120  *
4121  */
4122 
4123 /*
4124  * Interface initialization, abnormal termination into Compatibility mode
4125  *
4126  * Peripheral may be non-1284, so we set current mode to ECPP_CENTRONICS
4127  */
4128 static void
4129 ecpp_1284_init_interface(struct ecppunit *pp)
4130 {
4131 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4132 
4133 	/*
4134 	 * Toggle the nInit signal if configured in ecpp.conf
4135 	 * for most peripherals it is not needed
4136 	 */
4137 	if (pp->init_seq == TRUE) {
4138 		DCR_WRITE(pp, ECPP_SLCTIN);
4139 		drv_usecwait(50);	/* T(ER) = 50us */
4140 	}
4141 
4142 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4143 
4144 	pp->current_mode = pp->backchannel = ECPP_CENTRONICS;
4145 	pp->current_phase = ECPP_PHASE_C_IDLE;
4146 	ECPP_CONFIG_MODE(pp);
4147 	pp->to_mode[pp->current_mode]++;
4148 
4149 	ecpp_error(pp->dip, "ecpp_1284_init_interface: ok\n");
4150 }
4151 
4152 /*
4153  * ECP mode negotiation
4154  */
4155 static int
4156 ecp_negotiation(struct ecppunit *pp)
4157 {
4158 	uint8_t dsr;
4159 
4160 	/* ECP mode negotiation */
4161 
4162 	if (ecpp_1284_negotiation(pp, ECPP_XREQ_ECP, &dsr) == FAILURE)
4163 		return (FAILURE);
4164 
4165 	/* Event 5: peripheral deasserts PError and Busy, asserts Select */
4166 	if ((dsr & (ECPP_PE | ECPP_nBUSY | ECPP_SLCT)) !=
4167 	    (ECPP_nBUSY | ECPP_SLCT)) {
4168 		ecpp_error(pp->dip,
4169 		    "ecp_negotiation: failed event 5 %x\n", DSR_READ(pp));
4170 		(void) ecpp_1284_termination(pp);
4171 		return (FAILURE);
4172 	}
4173 
4174 	/* entered Setup Phase */
4175 	pp->current_phase = ECPP_PHASE_ECP_SETUP;
4176 
4177 	/* Event 30: host asserts nAutoFd */
4178 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4179 
4180 	/* Event 31: peripheral asserts PError */
4181 	if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) {
4182 		ecpp_error(pp->dip,
4183 		    "ecp_negotiation: failed event 31 %x\n", DSR_READ(pp));
4184 		(void) ecpp_1284_termination(pp);
4185 		return (FAILURE);
4186 	}
4187 
4188 	/* entered Forward Idle Phase */
4189 	pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
4190 
4191 	/* successful negotiation into ECP mode */
4192 	pp->current_mode = ECPP_ECP_MODE;
4193 	pp->backchannel = ECPP_ECP_MODE;
4194 
4195 	ecpp_error(pp->dip, "ecp_negotiation: ok\n");
4196 
4197 	return (SUCCESS);
4198 }
4199 
4200 /*
4201  * Nibble mode negotiation
4202  */
4203 static int
4204 nibble_negotiation(struct ecppunit *pp)
4205 {
4206 	uint8_t	dsr;
4207 
4208 	if (ecpp_1284_negotiation(pp, ECPP_XREQ_NIBBLE, &dsr) == FAILURE) {
4209 		return (FAILURE);
4210 	}
4211 
4212 	/*
4213 	 * If peripheral has data available, PE and nErr will
4214 	 * be set low at Event 5 & 6.
4215 	 */
4216 	if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) {
4217 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
4218 	} else {
4219 		pp->current_phase = ECPP_PHASE_NIBT_NAVAIL;
4220 	}
4221 
4222 	/* successful negotiation into Nibble mode */
4223 	pp->current_mode = ECPP_NIBBLE_MODE;
4224 	pp->backchannel = ECPP_NIBBLE_MODE;
4225 
4226 	ecpp_error(pp->dip, "nibble_negotiation: ok (phase=%x)\n",
4227 	    pp->current_phase);
4228 
4229 	return (SUCCESS);
4230 
4231 }
4232 
4233 /*
4234  * Wait ptimeout usec for periph to set 'mask' bits to 'val' state
4235  *
4236  * return value < 0 indicates timeout
4237  */
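/*
 * Usage example: callers in this file typically pass ptimeout = 35000,
 * i.e. the DSR is sampled once per microsecond for up to ~35 ms;
 * wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) waits for nAck to go high.
 */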
4238 static int
4239 wait_dsr(struct ecppunit *pp, uint8_t mask, uint8_t val, int ptimeout)
4240 {
4241 	while (((DSR_READ(pp) & mask) != val) && ptimeout--) {
4242 		drv_usecwait(1);
4243 	}
4244 
4245 	return (ptimeout);
4246 }
4247 
4248 /*
4249  * 1284 negotiation Events 0..6
4250  * required mode is indicated by extensibility request value
4251  *
4252  * After successful negotiation SUCCESS is returned and
4253  * current mode is set according to xreq,
4254  * otherwise FAILURE is returned and current mode is set to
4255  * either COMPAT (1284 periph) or CENTRONICS (non-1284 periph)
4256  *
4257  * Current phase must be set by the caller (mode-specific negotiation)
4258  *
4259  * If rdsr is not NULL, DSR value after Event 6 is stored here
4260  */
4261 static int
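/*
 * Extensibility requests used elsewhere in this file, for example:
 * nibble_negotiation() passes ECPP_XREQ_NIBBLE, ecp_negotiation() passes
 * ECPP_XREQ_ECP, and devidnib_negotiation() passes
 * ECPP_XREQ_NIBBLE | ECPP_XREQ_ID to request the 1284 device ID.
 */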
4262 ecpp_1284_negotiation(struct ecppunit *pp, uint8_t xreq, uint8_t *rdsr)
4263 {
4264 	int xflag;
4265 
4266 	ecpp_error(pp->dip, "nego(%x): entering...\n", xreq);
4267 
4268 	/* negotiation should start in Compatibility mode */
4269 	(void) ecpp_1284_termination(pp);
4270 
4271 	/* Set host into Compat mode */
4272 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4273 
4274 	pp->current_phase = ECPP_PHASE_NEGO;
4275 
4276 	/* Event 0: host sets extensibility request on data lines */
4277 	DATAR_WRITE(pp, xreq);
4278 
4279 	/* Event 1: host deassert nSelectin and assert nAutoFd */
4280 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4281 
4282 	drv_usecwait(1);	/* Tp(ecp) == 0.5us */
4283 
4284 	/*
4285 	 * Event 2: peripheral asserts nAck, deasserts nFault,
4286 	 *			asserts Select, asserts PError
4287 	 */
4288 	if (wait_dsr(pp, ECPP_nERR | ECPP_SLCT | ECPP_PE | ECPP_nACK,
4289 	    ECPP_nERR | ECPP_SLCT | ECPP_PE, 35000) < 0) {
4290 		/* peripheral is not 1284-compliant */
4291 		ecpp_error(pp->dip,
4292 		    "nego(%x): failed event 2 %x\n", xreq, DSR_READ(pp));
4293 		(void) ecpp_1284_termination(pp);
4294 		return (FAILURE);
4295 	}
4296 
4297 	/*
4298 	 * Event 3: host asserts nStrobe, latching extensibility value into
4299 	 * the peripheral's input latch.
4300 	 */
4301 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_STB);
4302 
4303 	drv_usecwait(2);	/* Tp(ecp) = 0.5us */
4304 
4305 	/*
4306 	 * Event 4: host deasserts nStrobe and nAutoFd to acknowledge that
4307 	 * it has recognized a 1284-compatible peripheral
4308 	 */
4309 	DCR_WRITE(pp, ECPP_nINIT);
4310 
4311 	/*
4312 	 * Event 5: Peripheral confirms it supports requested extension
4313 	 * For Nibble mode Xflag must be low, otherwise it must be high
4314 	 */
4315 	xflag = (xreq == ECPP_XREQ_NIBBLE) ? 0 : ECPP_SLCT;
4316 
4317 	/*
4318 	 * Event 6: Peripheral sets nAck high
4319 	 * indicating that status lines are valid
4320 	 */
4321 	if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4322 		/* Something wrong with peripheral */
4323 		ecpp_error(pp->dip,
4324 		    "nego(%x): failed event 6 %x\n", xreq, DSR_READ(pp));
4325 		(void) ecpp_1284_termination(pp);
4326 		return (FAILURE);
4327 	}
4328 
4329 	if ((DSR_READ(pp) & ECPP_SLCT) != xflag) {
4330 		/* Extensibility value is not supported */
4331 		ecpp_error(pp->dip,
4332 		    "nego(%x): failed event 5 %x\n", xreq, DSR_READ(pp));
4333 		(void) ecpp_1284_termination(pp);
4334 		return (FAILURE);
4335 	}
4336 
4337 	if (rdsr) {
4338 		*rdsr = DSR_READ(pp);
4339 	}
4340 
4341 	return (SUCCESS);
4342 }
4343 
4344 /*
4345  * 1284 Termination: Events 22..28 - set link to Compatibility mode
4346  *
4347  * This routine is not designed for Immediate termination,
4348  * caller must take care of waiting for a valid state,
4349  * (in particular, in ECP mode current phase must be Forward Idle)
4350  * otherwise interface will be reinitialized
4351  *
4352  * In case of Valid state termination SUCCESS is returned and
4353  * current_mode is ECPP_COMPAT_MODE, current phase is ECPP_PHASE_C_IDLE
4354  * Otherwise interface is reinitialized, FAILURE is returned and
4355  * current mode is ECPP_CENTRONICS, current phase is ECPP_PHASE_C_IDLE
4356  */
4357 static int
4358 ecpp_1284_termination(struct ecppunit *pp)
4359 {
4360 	int	previous_mode = pp->current_mode;
4361 
4362 	if (((pp->current_mode == ECPP_COMPAT_MODE ||
4363 	    pp->current_mode == ECPP_CENTRONICS) &&
4364 	    pp->current_phase == ECPP_PHASE_C_IDLE) ||
4365 	    pp->current_mode == ECPP_DIAG_MODE) {
4366 		ecpp_error(pp->dip, "termination: not needed\n");
4367 		return (SUCCESS);
4368 	}
4369 
4370 	/* Set host into Compat mode, interrupts disabled */
4371 	ECPP_MASK_INTR(pp);
4372 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4373 
4374 	pp->current_mode = ECPP_COMPAT_MODE;	/* needed by next function */
4375 
4376 	ECPP_CONFIG_MODE(pp);
4377 
4378 	/*
4379 	 * EPP mode uses simple nInit pulse for termination
4380 	 */
4381 	if (previous_mode == ECPP_EPP_MODE) {
4382 		/* Event 68: host sets nInit low */
4383 		DCR_WRITE(pp, 0);
4384 
4385 		drv_usecwait(55);	/* T(ER) = 50us */
4386 
4387 		/* Event 69: host sets nInit high */
4388 		DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4389 
4390 		goto endterm;
4391 	}
4392 
4393 	/* terminate peripheral to Compat mode */
4394 	pp->current_phase = ECPP_PHASE_TERM;
4395 
4396 	/* Event 22: host sets nSelectIn low and nAutoFd high */
4397 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4398 
4399 	/* Event 23: peripheral deasserts nFault and nBusy */
4400 	/* Event 24: peripheral asserts nAck */
4401 	if (wait_dsr(pp, ECPP_nERR | ECPP_nBUSY | ECPP_nACK,
4402 	    ECPP_nERR, 35000) < 0) {
4403 		ecpp_error(pp->dip,
4404 		    "termination: failed events 23,24 %x\n", DSR_READ(pp));
4405 		ecpp_1284_init_interface(pp);
4406 		return (FAILURE);
4407 	}
4408 
4409 	drv_usecwait(1);	/* Tp = 0.5us */
4410 
4411 	/* Event 25: host sets nAutoFd low */
4412 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN | ECPP_AFX);
4413 
4414 	/* Event 26: the peripheral puts itself in Compatible mode */
4415 
4416 	/* Event 27: peripheral deasserts nAck */
4417 	if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4418 		ecpp_error(pp->dip,
4419 		    "termination: failed event 27 %x\n", DSR_READ(pp));
4420 		ecpp_1284_init_interface(pp);
4421 		return (FAILURE);
4422 	}
4423 
4424 	drv_usecwait(1);	/* Tp = 0.5us */
4425 
4426 	/* Event 28: host deasserts nAutoFd */
4427 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4428 
4429 	drv_usecwait(1);	/* Tp = 0.5us */
4430 
4431 endterm:
4432 	/* Compatible mode Idle Phase */
4433 	pp->current_phase = ECPP_PHASE_C_IDLE;
4434 
4435 	ecpp_error(pp->dip, "termination: completed %x %x\n",
4436 	    DSR_READ(pp), DCR_READ(pp));
4437 
4438 	return (SUCCESS);
4439 }
4440 
4441 /*
4442  * Initiate ECP backchannel DMA transfer
4443  */
4444 static uchar_t
4445 ecp_peripheral2host(struct ecppunit *pp)
4446 {
4447 	mblk_t		*mp = NULL;
4448 	size_t		len;
4449 	uint32_t	xfer_time;
4450 
4451 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4452 	    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
4453 
4454 	/*
4455 	 * hardware generates cycles to receive data from the peripheral
4456 	 * we only need to read from FIFO
4457 	 */
4458 
4459 	/*
4460 	 * If user issued read(2) of rev_resid bytes, xfer exactly this amount
4461 	 * unless it exceeds ECP_REV_BLKSZ_MAX; otherwise try to read
4462 	 * ECP_REV_BLKSZ_MAX or at least ECP_REV_BLKSZ bytes
4463 	 */
4464 	if (pp->nread > 0) {
4465 		len = min(pp->nread, ECP_REV_BLKSZ_MAX);
4466 	} else {
4467 		len = ECP_REV_BLKSZ_MAX;
4468 	}
4469 
4470 	pp->nread = 0;	/* clear after use */
4471 
4472 	/*
4473 	 * Allocate an mblk for the data, making at most two attempts:
4474 	 * if a len-byte block fails, fall back to our block size
4475 	 */
4476 	while ((mp = allocb(len, BPRI_MED)) == NULL) {
4477 		ecpp_error(pp->dip,
4478 		    "ecp_periph2host: failed allocb(%d)\n", len);
4479 		if (len > ECP_REV_BLKSZ) {
4480 			len = ECP_REV_BLKSZ;
4481 		} else {
4482 			break;
4483 		}
4484 	}
4485 
4486 	if (mp == NULL) {
4487 		goto fail;
4488 	}
4489 
4490 	pp->msg = mp;
4491 	pp->e_busy = ECPP_BUSY;
4492 	pp->dma_dir = DDI_DMA_READ;
4493 	pp->current_phase = ECPP_PHASE_ECP_REV_XFER;
4494 
4495 	if (ecpp_init_dma_xfer(pp, (caddr_t)mp->b_rptr, len) == FAILURE) {
4496 		goto fail;
4497 	}
4498 
4499 	/*
4500 	 * there are two problems with defining ECP backchannel xfer timeout
4501 	 *
4502 	 * a) IEEE 1284 allows infinite time between backchannel bytes,
4503 	 *    but we must stop at some point to send the data upstream,
4504 	 *    look if any forward transfer requests are pending, etc;
4505 	 *    all that done, we can continue with backchannel data;
4506 	 *
4507 	 * b) we don't know how much data the peripheral has;
4508 	 *    DMA counter is set to our buffer size, which can be bigger
4509 	 *    than needed - in this case a timeout must detect this;
4510 	 *
4511 	 * The timeout we schedule here serves as both the transfer timeout
4512 	 * and a means of detecting backchannel stalls; in fact, there are
4513 	 * two timeouts in one:
4514 	 *
4515 	 * - transfer timeout is based on the ECP bandwidth of ~1MB/sec and
4516 	 *   equals the time needed to transfer the whole buffer
4517 	 *   (but not less than ECP_REV_MINTOUT ms); if it occurs,
4518 	 *   DMA is stopped and the data is sent upstream;
4519 	 *
4520 	 * - backchannel watchdog, which would look at DMA counter
4521 	 *   every rev_watchdog ms and stop the transfer only
4522 	 *   if the counter hasn't changed since the last time;
4523 	 *   otherwise it would save DMA counter value and restart itself;
4524 	 *
4525 	 * transfer timeout is a multiple of rev_watchdog
4526 	 * and implemented as a downward counter
4527 	 *
4528 	 * on Grover, we can't access DMAC registers while DMA is in flight,
4529 	 * so we can't have a watchdog on Grover, only the timeout
4530 	 */
4531 
4532 	/* calculate number of watchdog invocations equal to the xfer timeout */
4533 	xfer_time = max((1000 * len) / pp->ecp_rev_speed, ECP_REV_MINTOUT);
4534 #if defined(__x86)
4535 	pp->rev_timeout_cnt = (pp->hw == &x86) ? 1 :
4536 	    max(xfer_time / pp->rev_watchdog, 1);
4537 #else
4538 	pp->rev_timeout_cnt = (pp->hw == &m1553) ? 1 :
4539 	    max(xfer_time / pp->rev_watchdog, 1);
4540 #endif
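
	/*
	 * Worked example with illustrative numbers: if xfer_time comes to
	 * 400 ms and rev_watchdog is 100 ms, rev_timeout_cnt is 4 - the
	 * watchdog fires every 100 ms to check for a stalled DMA counter,
	 * and the whole transfer times out after four such ticks.
	 */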
4541 
4542 	pp->last_dmacnt = len;	/* nothing xferred yet */
4543 
4544 	pp->timeout_id = timeout(ecpp_ecp_read_timeout, (caddr_t)pp,
4545 	    drv_usectohz(pp->rev_watchdog * 1000));
4546 
4547 	ecpp_error(pp->dip, "ecp_periph2host: DMA started len=%d\n"
4548 	    "xfer_time=%d wdog=%d cnt=%d\n",
4549 	    len, xfer_time, pp->rev_watchdog, pp->rev_timeout_cnt);
4550 
4551 	return (SUCCESS);
4552 
4553 fail:
4554 	if (mp) {
4555 		freemsg(mp);
4556 	}
4557 	pp->e_busy = ECPP_IDLE;
4558 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4559 
4560 	return (FAILURE);
4561 }
4562 
4563 /*
4564  * ECP backchannel read timeout
4565  * implements both backchannel watchdog and transfer timeout in ECP mode
4566  * if the transfer is still in progress, reschedule itself,
4567  * otherwise call completion routine
4568  */
4569 static void
4570 ecpp_ecp_read_timeout(void *arg)
4571 {
4572 	struct ecppunit	*pp = arg;
4573 	size_t		dmacnt;
4574 
4575 	mutex_enter(&pp->umutex);
4576 
4577 	if (pp->timeout_id == 0) {
4578 		mutex_exit(&pp->umutex);
4579 		return;
4580 	} else {
4581 		pp->timeout_id = 0;
4582 	}
4583 
4584 	if (--pp->rev_timeout_cnt == 0) {
4585 		/*
4586 		 * Transfer timed out
4587 		 */
4588 		ecpp_error(pp->dip, "ecp_read_timeout: timeout\n");
4589 		pp->xfer_tout++;
4590 		ecpp_ecp_read_completion(pp);
4591 	} else {
4592 		/*
4593 		 * Backchannel watchdog:
4594 		 * look if DMA made any progress from the last time
4595 		 */
4596 		dmacnt = ECPP_DMA_GETCNT(pp);
4597 		if (dmacnt - pp->last_dmacnt == 0) {
4598 			/*
4599 			 * No progress - stop the transfer and send
4600 			 * whatever has been read so far up the stream
4601 			 */
4602 			ecpp_error(pp->dip, "ecp_read_timeout: no progress\n");
4603 			pp->xfer_tout++;
4604 			ecpp_ecp_read_completion(pp);
4605 		} else {
4606 			/*
4607 			 * Something was transferred - restart ourselves
4608 			 */
4609 			ecpp_error(pp->dip, "ecp_read_timeout: restarting\n");
4610 			pp->last_dmacnt = dmacnt;
4611 			pp->timeout_id = timeout(ecpp_ecp_read_timeout,
4612 			    (caddr_t)pp,
4613 			    drv_usectohz(pp->rev_watchdog * 1000));
4614 		}
4615 	}
4616 
4617 	mutex_exit(&pp->umutex);
4618 }
4619 
4620 /*
4621  * ECP backchannel read completion:
4622  * stop the DMA, free DMA resources and send read data upstream
4623  */
4624 static void
4625 ecpp_ecp_read_completion(struct ecppunit *pp)
4626 {
4627 	size_t	xfer_len, unx_len;
4628 	mblk_t	*mp;
4629 
4630 	ASSERT(mutex_owned(&pp->umutex));
4631 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4632 	    pp->current_phase == ECPP_PHASE_ECP_REV_XFER);
4633 	ASSERT(pp->msg != NULL);
4634 
4635 	/*
4636 	 * Stop the transfer and unbind DMA handle
4637 	 */
4638 	if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) {
4639 		unx_len = pp->resid;
4640 		ecpp_error(pp->dip, "ecp_read_completion: failed dma_stop\n");
4641 	}
4642 
4643 	mp = pp->msg;
4644 	xfer_len = pp->resid - unx_len;	/* how much data was transferred */
4645 
4646 	if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
4647 		ecpp_error(pp->dip, "ecp_read_completion: unbind failed.\n");
4648 	}
4649 
4650 	ecpp_error(pp->dip, "ecp_read_completion: xfered %d bytes of %d\n",
4651 	    xfer_len, pp->resid);
4652 
4653 	/* clean up and update statistics */
4654 	pp->msg = NULL;
4655 	pp->resid -= xfer_len;
4656 	pp->ibytes[pp->current_mode] += xfer_len;
4657 	pp->e_busy = ECPP_IDLE;
4658 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4659 
4660 	/*
4661 	 * Send the read data up the stream
4662 	 */
4663 	mp->b_wptr += xfer_len;
4664 	if (canputnext(pp->readq)) {
4665 		mutex_exit(&pp->umutex);
4666 		putnext(pp->readq, mp);
4667 		mutex_enter(&pp->umutex);
4668 	} else {
4669 		ecpp_error(pp->dip, "ecp_read_completion: fail canputnext\n");
4670 		if (!putq(pp->readq, mp)) {
4671 			freemsg(mp);
4672 		}
4673 	}
4674 
4675 	/* if bytes left in the FIFO another transfer is needed */
4676 	if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
4677 		(void) ecpp_backchan_req(pp);
4678 	}
4679 
4680 	qenable(pp->writeq);
4681 }
4682 
4683 /*
4684  * Read one byte in the Nibble mode
4685  */
4686 static uchar_t
4687 nibble_peripheral2host(struct ecppunit *pp, uint8_t *byte)
4688 {
4689 	uint8_t	n[2];	/* two nibbles */
4690 	int	i;
4691 
4692 	/*
4693 	 * One byte is made of two nibbles
4694 	 */
4695 	for (i = 0; i < 2; i++) {
4696 		/* Events 7, 12: host asserts nAutoFd, ready to read a nibble */
4697 		DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4698 
4699 		/* Event 8: peripheral puts data on the status lines */
4700 
4701 		/* Event 9: peripheral asserts nAck, data available */
4702 		if (wait_dsr(pp, ECPP_nACK, 0, 35000) < 0) {
4703 			ecpp_error(pp->dip,
4704 			    "nibble_periph2host(%d): failed event 9 %x\n",
4705 			    i + 1, DSR_READ(pp));
4706 			(void) ecpp_1284_termination(pp);
4707 			return (FAILURE);
4708 		}
4709 
4710 		n[i] = DSR_READ(pp);	/* get a nibble */
4711 
4712 		/* Event 10: host deasserts nAutoFd to say it grabbed data */
4713 		DCR_WRITE(pp, ECPP_nINIT);
4714 
4715 		/* (2) Event 13: peripheral asserts PE - end of data phase */
4716 
4717 		/* Event 11: peripheral deasserts nAck to finish handshake */
4718 		if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4719 			ecpp_error(pp->dip,
4720 			    "nibble_periph2host(%d): failed event 11 %x\n",
4721 			    i + 1, DSR_READ(pp));
4722 			(void) ecpp_1284_termination(pp);
4723 			return (FAILURE);
4724 		}
4725 	}
4726 
4727 	/* extract data byte from two nibbles - optimized formula */
4728 	*byte = ((((n[1] & ~ECPP_nACK) << 1) | (~n[1] & ECPP_nBUSY)) & 0xf0) |
4729 	    ((((n[0] & ~ECPP_nACK) >> 3) | ((~n[0] & ECPP_nBUSY) >> 4)) & 0x0f);
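
	/*
	 * Worked example (assuming the conventional DSR layout in which
	 * nFault, Select, PError and Busy carry bits 0-3 of each nibble,
	 * with Busy inverted on the wire): for the byte 0x5A the peripheral
	 * presents 0xA on the status lines in the first handshake and 0x5
	 * in the second; the shifts and masks above move those bits into
	 * b3..b0 and b7..b4 and undo the Busy inversion, yielding 0x5A.
	 */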
4730 
4731 	pp->ibytes[ECPP_NIBBLE_MODE]++;
4732 	return (SUCCESS);
4733 }
4734 
4735 /*
4736  * process data transfers requested by the peripheral
4737  */
4738 static uint_t
4739 ecpp_peripheral2host(struct ecppunit *pp)
4740 {
4741 	if (!canputnext(pp->readq)) {
4742 		ecpp_error(pp->dip, "ecpp_peripheral2host: readq full\n");
4743 		return (SUCCESS);
4744 	}
4745 
4746 	switch (pp->backchannel) {
4747 	case ECPP_CENTRONICS:
4748 		/* no backchannel */
4749 		return (SUCCESS);
4750 
4751 	case ECPP_NIBBLE_MODE:
4752 		ASSERT(pp->current_mode == ECPP_NIBBLE_MODE);
4753 
4754 		/*
4755 		 * Event 20: Host sets nAutoFd high to ack request
4756 		 */
4757 		DCR_WRITE(pp, ECPP_nINIT);
4758 
4759 		/* Event 21: Periph sets PError low to ack host */
4760 		if (wait_dsr(pp, ECPP_PE, 0, 35000) < 0) {
4761 			ecpp_error(pp->dip,
4762 			    "ecpp_periph2host: failed event 21 %x\n",
4763 			    DSR_READ(pp));
4764 			(void) ecpp_1284_termination(pp);
4765 			return (FAILURE);
4766 		}
4767 
4768 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
4769 
4770 		/* this routine will read the data in Nibble mode */
4771 		return (ecpp_idle_phase(pp));
4772 
4773 	case ECPP_ECP_MODE:
4774 		if ((pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE) &&
4775 		    (ecp_forward2reverse(pp) == FAILURE)) {
4776 			return (FAILURE);
4777 		}
4778 
4779 		return (ecp_peripheral2host(pp));	/* start the transfer */
4780 
4781 	case ECPP_DIAG_MODE: {
4782 		mblk_t		*mp;
4783 		int		i;
4784 
4785 		if (ECR_READ(pp) & ECPP_FIFO_EMPTY) {
4786 			ecpp_error(pp->dip, "ecpp_periph2host: fifo empty\n");
4787 			return (SUCCESS);
4788 		}
4789 
4790 		/* allocate the FIFO size */
4791 		if ((mp = allocb(ECPP_FIFO_SZ, BPRI_MED)) == NULL) {
4792 			ecpp_error(pp->dip,
4793 			    "ecpp_periph2host: allocb FAILURE.\n");
4794 			return (FAILURE);
4795 		}
4796 
4797 		/*
4798 		 * For the time being just read it byte by byte
4799 		 */
4800 		i = ECPP_FIFO_SZ;
4801 		while (i-- && (!(ECR_READ(pp) & ECPP_FIFO_EMPTY))) {
4802 			*mp->b_wptr++ = TFIFO_READ(pp);
4803 			drv_usecwait(1); /* ECR is sometimes slow to update */
4804 		}
4805 
4806 		if (canputnext(pp->readq)) {
4807 			mutex_exit(&pp->umutex);
4808 			mp->b_datap->db_type = M_DATA;
4809 			ecpp_error(pp->dip,
4810 			    "ecpp_periph2host: sending %d bytes\n",
4811 			    mp->b_wptr - mp->b_rptr);
4812 			putnext(pp->readq, mp);
4813 			mutex_enter(&pp->umutex);
4814 			return (SUCCESS);
4815 		} else {
4816 			ecpp_error(pp->dip,
4817 			    "ecpp_periph2host: !canputnext data lost\n");
4818 			freemsg(mp);
4819 			return (FAILURE);
4820 		}
4821 	}
4822 
4823 	default:
4824 		ecpp_error(pp->dip, "ecpp_peripheral2host: illegal backchannel\n");
4825 		return (FAILURE);
4826 	}
4827 }
4828 
4829 /*
4830  * Negotiate from ECP Forward Idle to Reverse Idle Phase
4831  *
4832  * (manipulations with dcr/ecr are according to ECP Specification)
4833  */
4834 static int
4835 ecp_forward2reverse(struct ecppunit *pp)
4836 {
4837 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4838 	    pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE);
4839 
4840 	/* place port into PS2 mode */
4841 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4842 
4843 	/* set direction bit (DCR3-0 must be 0100 - National) */
4844 	DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT);
4845 
4846 	/* enable hardware assist */
4847 	ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4848 
4849 	drv_usecwait(1);	/* Tp(ecp) = 0.5us */
4850 
4851 	/* Event 39: host sets nInit low */
4852 	DCR_WRITE(pp, ECPP_REV_DIR);
4853 
4854 	/* Event 40: peripheral sets PError low */
4855 
4856 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4857 
4858 	ecpp_error(pp->dip, "ecp_forward2reverse ok\n");
4859 
4860 	return (SUCCESS);
4861 }
4862 
4863 /*
4864  * Negotiate from ECP Reverse Idle to Forward Idle Phase
4865  *
4866  * (manipulations with dcr/ecr are according to ECP Specification)
4867  */
4868 static int
4869 ecp_reverse2forward(struct ecppunit *pp)
4870 {
4871 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4872 	    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
4873 
4874 	/* Event 47: host deasserts nInit */
4875 	DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT);
4876 
4877 	/*
4878 	 * Event 48: peripheral deasserts nAck
4879 	 * Event 49: peripheral asserts PError
4880 	 */
4881 	if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) {
4882 		ecpp_error(pp->dip,
4883 		    "ecp_reverse2forward: failed event 49 %x\n", DSR_READ(pp));
4884 		(void) ecpp_1284_termination(pp);
4885 		return (FAILURE);
4886 	}
4887 
4888 	/* place port into PS2 mode */
4889 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4890 
4891 	/* clear direction bit */
4892 	DCR_WRITE(pp, ECPP_nINIT);
4893 
4894 	/* reenable hardware assist */
4895 	ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4896 
4897 	pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
4898 
4899 	ecpp_error(pp->dip, "ecp_reverse2forward ok\n");
4900 
4901 	return (SUCCESS);
4902 }
4903 
4904 /*
4905  * Default negotiation chooses the best mode supported by peripheral
4906  * Note that backchannel mode may be different from forward mode
4907  */
4908 static void
4909 ecpp_default_negotiation(struct ecppunit *pp)
4910 {
4911 	if (!noecp && (ecpp_mode_negotiation(pp, ECPP_ECP_MODE) == SUCCESS)) {
4912 		/* 1284 compatible device */
4913 		pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO;
4914 		return;
4915 	} else if (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == SUCCESS) {
4916 		/* 1284 compatible device */
4917 		pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO;
4918 	} else {
4919 		/* Centronics device */
4920 		pp->io_mode =
4921 		    (pp->fast_centronics == TRUE) ? ECPP_DMA : ECPP_PIO;
4922 	}
4923 	ECPP_CONFIG_MODE(pp);
4924 }
4925 
4926 /*
4927  * Negotiate to the mode indicated by newmode
4928  */
4929 static int
4930 ecpp_mode_negotiation(struct ecppunit *pp, uchar_t newmode)
4931 {
4932 	/* any other mode is impossible */
4933 	ASSERT(pp->current_mode == ECPP_CENTRONICS ||
4934 	    pp->current_mode == ECPP_COMPAT_MODE ||
4935 	    pp->current_mode == ECPP_NIBBLE_MODE ||
4936 	    pp->current_mode == ECPP_ECP_MODE ||
4937 	    pp->current_mode == ECPP_DIAG_MODE);
4938 
4939 	if (pp->current_mode == newmode) {
4940 		return (SUCCESS);
4941 	}
4942 
4943 	/* termination from ECP is only allowed from the Forward Idle Phase */
4944 	if ((pp->current_mode == ECPP_ECP_MODE) &&
4945 	    (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
4946 		/* this may break into Centronics */
4947 		(void) ecp_reverse2forward(pp);
4948 	}
4949 
4950 	switch (newmode) {
4951 	case ECPP_CENTRONICS:
4952 		(void) ecpp_1284_termination(pp);
4953 
4954 		/* put superio into PIO mode */
4955 		ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
4956 
4957 		pp->current_mode = ECPP_CENTRONICS;
4958 		pp->backchannel = ECPP_CENTRONICS;
4959 		ECPP_CONFIG_MODE(pp);
4960 
4961 		pp->to_mode[pp->current_mode]++;
4962 		return (SUCCESS);
4963 
4964 	case ECPP_COMPAT_MODE:
4965 		/* ECPP_COMPAT_MODE should support Nibble as a backchannel */
4966 		if (pp->current_mode == ECPP_NIBBLE_MODE) {
4967 			if (ecpp_1284_termination(pp) == SUCCESS) {
4968 				pp->current_mode = ECPP_COMPAT_MODE;
4969 				pp->backchannel = ECPP_NIBBLE_MODE;
4970 				ECPP_CONFIG_MODE(pp);
4971 				pp->to_mode[pp->current_mode]++;
4972 				return (SUCCESS);
4973 			} else {
4974 				return (FAILURE);
4975 			}
4976 		}
4977 
4978 		if ((nibble_negotiation(pp) == SUCCESS) &&
4979 		    (ecpp_1284_termination(pp) == SUCCESS)) {
4980 			pp->backchannel = ECPP_NIBBLE_MODE;
4981 			pp->current_mode = ECPP_COMPAT_MODE;
4982 			ECPP_CONFIG_MODE(pp);
4983 			pp->to_mode[pp->current_mode]++;
4984 			return (SUCCESS);
4985 		} else {
4986 			return (FAILURE);
4987 		}
4988 
4989 	case ECPP_NIBBLE_MODE:
4990 		if (nibble_negotiation(pp) == FAILURE) {
4991 			return (FAILURE);
4992 		}
4993 
4994 		pp->backchannel = ECPP_NIBBLE_MODE;
4995 		ECPP_CONFIG_MODE(pp);
4996 		pp->to_mode[pp->current_mode]++;
4997 
4998 		return (SUCCESS);
4999 
5000 	case ECPP_ECP_MODE:
5001 		if (pp->noecpregs)
5002 			return (FAILURE);
5003 		if (ecp_negotiation(pp) == FAILURE) {
5004 			return (FAILURE);
5005 		}
5006 
5007 		/*
5008 		 * National says CTR[3:0] should be 0100b before moving to 011
5009 		 */
5010 		DCR_WRITE(pp, ECPP_nINIT);
5011 
5012 		if (ecr_write(pp, ECR_mode_011 |
5013 		    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5014 			ecpp_error(pp->dip, "mode_nego:ECP: failed w/ecr\n");
5015 			return (FAILURE);
5016 		}
5017 
5018 		ECPP_CONFIG_MODE(pp);
5019 		pp->to_mode[pp->current_mode]++;
5020 
5021 		return (SUCCESS);
5022 
5023 	case ECPP_DIAG_MODE:
5024 		/*
5025 		 * In DIAG mode the application can do nasty things (e.g. drive pins);
5026 		 * to keep the peripheral sane, terminate to Compatibility mode
5027 		 */
5028 		(void) ecpp_1284_termination(pp);
5029 
5030 		/* put superio into TFIFO mode */
5031 		if (ecr_write(pp, ECR_mode_001 |
5032 		    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5033 			ecpp_error(pp->dip, "put to TFIFO: failed w/ecr\n");
5034 			return (FAILURE);
5035 		}
5036 
5037 		pp->current_mode = ECPP_DIAG_MODE;
5038 		pp->backchannel = ECPP_DIAG_MODE;
5039 		ECPP_CONFIG_MODE(pp);
5040 		pp->to_mode[pp->current_mode]++;
5041 
5042 		return (SUCCESS);
5043 
5044 	default:
5045 		ecpp_error(pp->dip,
5046 		    "ecpp_mode_negotiation: mode %d not supported\n", newmode);
5047 		return (FAILURE);
5048 	}
5049 }
5050 
5051 /*
5052  * Standard (9.1): Peripheral data is available only when the host places
5053  * the interface in a mode capable of peripheral-to-host data transfer.
5054  * This requires the host periodically to place the interface in such a mode.
5055  * Polling can be eliminated by leaving the interface in an 1284 idle phase.
5056  */
5057 static uchar_t
5058 ecpp_idle_phase(struct ecppunit *pp)
5059 {
5060 	uchar_t		rval = FAILURE;
5061 
5062 	/*
5063 	 * If there is no space on the read queue, do not reverse channel
5064 	 */
5065 	if (!canputnext(pp->readq)) {
5066 		ecpp_error(pp->dip, "ecpp_idle_phase: readq full\n");
5067 		return (SUCCESS);
5068 	}
5069 
5070 	switch (pp->backchannel) {
5071 	case ECPP_CENTRONICS:
5072 	case ECPP_COMPAT_MODE:
5073 	case ECPP_DIAG_MODE:
5074 		/* nothing */
5075 		ecpp_error(pp->dip, "ecpp_idle_phase: compat idle\n");
5076 		return (SUCCESS);
5077 
5078 	case ECPP_NIBBLE_MODE:
5079 		/*
5080 		 * read as much data as possible, ending up in either
5081 		 * Reverse Idle or Host Busy Data Available phase
5082 		 */
5083 		ecpp_error(pp->dip, "ecpp_idle_phase: nibble backchannel\n");
5084 		if ((pp->current_mode != ECPP_NIBBLE_MODE) &&
5085 		    (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == FAILURE)) {
5086 			break;
5087 		}
5088 
5089 		rval = read_nibble_backchan(pp);
5090 
5091 		/* put interface into Reverse Idle phase */
5092 		if (pp->current_phase == ECPP_PHASE_NIBT_NAVAIL &&
5093 		    canputnext(pp->readq)) {
5094 			ecpp_error(pp->dip, "ecpp_idle_phase: going revidle\n");
5095 
5096 			/*
5097 			 * Event 7: host asserts nAutoFd
5098 			 * enable nAck interrupt to get a backchannel request
5099 			 */
5100 			DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_INTR_EN);
5101 
5102 			ECPP_UNMASK_INTR(pp);
5103 		}
5104 
5105 		break;
5106 
5107 	case ECPP_ECP_MODE:
5108 		/*
5109 		 * if data is already available, request the backchannel xfer
5110 		 * otherwise stay in Forward Idle and enable nErr interrupts
5111 		 */
5112 		ecpp_error(pp->dip, "ecpp_idle_phase: ECP forward\n");
5113 
5114 		ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
5115 		    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
5116 
5117 		/* put interface into Forward Idle phase */
5118 		if ((pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) &&
5119 		    (ecp_reverse2forward(pp) == FAILURE)) {
5120 			return (FAILURE);
5121 		}
5122 
5123 		/*
5124 		 * if data already available, put backchannel request on the wq
5125 		 * otherwise enable nErr interrupts
5126 		 */
5127 		if ((DSR_READ(pp) & ECPP_nERR) == 0) {
5128 			(void) ecpp_backchan_req(pp);
5129 		} else {
5130 			ECR_WRITE(pp,
5131 			    ECR_READ(pp) & ~ECPP_INTR_MASK | ECPP_INTR_SRV);
5132 
5133 			ECPP_UNMASK_INTR(pp);
5134 		}
5135 
5136 		return (SUCCESS);
5137 
5138 	default:
5139 		ecpp_error(pp->dip, "ecpp_idle_phase: illegal backchannel");
5140 	}
5141 
5142 	return (rval);
5143 }
5144 
5145 /*
5146  * This routine will leave the port in ECPP_PHASE_NIBT_REVIDLE
5147  * Due to flow control, though, it may stop at ECPP_PHASE_NIBT_AVAIL,
5148  * and continue later as the user consumes data from the read queue
5149  *
5150  * The current phase should be NIBT_AVAIL or NIBT_NAVAIL
5151  * If some events fail during transfer, termination puts link
5152  * to Compatibility mode and FAILURE is returned
5153  */
5154 static int
5155 read_nibble_backchan(struct ecppunit *pp)
5156 {
5157 	mblk_t		*mp;
5158 	int		i;
5159 	int		rval = SUCCESS;
5160 
5161 	ASSERT(pp->current_mode == ECPP_NIBBLE_MODE);
5162 
5163 	pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE))
5164 	    ? ECPP_PHASE_NIBT_NAVAIL : ECPP_PHASE_NIBT_AVAIL;
5165 
5166 	ecpp_error(pp->dip, "read_nibble_backchan: %x\n", DSR_READ(pp));
5167 
5168 	/*
5169 	 * While data is available, read it in NIBBLE_REV_BLKSZ byte chunks
5170 	 * and send up the stream
5171 	 */
5172 	while (pp->current_phase == ECPP_PHASE_NIBT_AVAIL && rval == SUCCESS) {
5173 		/* see if there's space on the queue */
5174 		if (!canputnext(pp->readq)) {
5175 			ecpp_error(pp->dip,
5176 			    "read_nibble_backchan: canputnext failed\n");
5177 			return (SUCCESS);
5178 		}
5179 
5180 		if ((mp = allocb(NIBBLE_REV_BLKSZ, BPRI_MED)) == NULL) {
5181 			ecpp_error(pp->dip,
5182 			    "read_nibble_backchan: allocb failed\n");
5183 			return (SUCCESS);
5184 		}
5185 
5186 		/* read a chunk of data from the peripheral byte by byte */
5187 		i = NIBBLE_REV_BLKSZ;
5188 		while (i-- && !(DSR_READ(pp) & ECPP_nERR)) {
5189 			if (nibble_peripheral2host(pp, mp->b_wptr) != SUCCESS) {
5190 				rval = FAILURE;
5191 				break;
5192 			}
5193 			mp->b_wptr++;
5194 		}
5195 
5196 		pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE))
5197 		    ? ECPP_PHASE_NIBT_NAVAIL
5198 		    : ECPP_PHASE_NIBT_AVAIL;
5199 
5200 		if (mp->b_wptr - mp->b_rptr > 0) {
5201 			ecpp_error(pp->dip,
5202 			    "read_nibble_backchan: sending %d bytes\n",
5203 			    mp->b_wptr - mp->b_rptr);
5204 			pp->nread = 0;
5205 			mutex_exit(&pp->umutex);
5206 			putnext(pp->readq, mp);
5207 			mutex_enter(&pp->umutex);
5208 		} else {
5209 			freemsg(mp);
5210 		}
5211 	}
5212 
5213 	return (rval);
5214 }
5215 
5216 /*
5217  * 'Request Device ID using nibble mode' negotiation
5218  */
5219 static int
5220 devidnib_negotiation(struct ecppunit *pp)
5221 {
5222 	uint8_t dsr;
5223 
5224 	if (ecpp_1284_negotiation(pp,
5225 	    ECPP_XREQ_NIBBLE | ECPP_XREQ_ID, &dsr) == FAILURE) {
5226 		return (FAILURE);
5227 	}
5228 
5229 	/*
5230 	 * If peripheral has data available, PE and nErr will
5231 	 * be set low at Event 5 & 6.
5232 	 */
5233 	if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) {
5234 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
5235 	} else {
5236 		pp->current_phase = ECPP_PHASE_NIBT_NAVAIL;
5237 	}
5238 
5239 	ecpp_error(pp->dip, "ecpp_devidnib_nego: current_phase=%x\n",
5240 	    pp->current_phase);
5241 
5242 	/* successful negotiation into Nibble mode */
5243 	pp->current_mode = ECPP_NIBBLE_MODE;
5244 	pp->backchannel = ECPP_NIBBLE_MODE;
5245 
5246 	ecpp_error(pp->dip, "ecpp_devidnib_nego: ok\n");
5247 
5248 	return (SUCCESS);
5249 }
5250 
5251 /*
5252  * Read 1284 device ID sequence
5253  *
5254  * This function should be called two times:
5255  * 1) ecpp_getdevid(pp, NULL, &len) - to retrieve ID length;
5256  * 2) ecpp_getdevid(pp, buffer, &len) - to read len bytes into buffer
5257  *
5258  * After 2) port is in Compatible mode
5259  * If the caller fails to make second call, it must reset port to Centronics
5260  *
5261  */
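/*
 * Illustrative caller sketch (assumed, following the two-call contract
 * described above):
 *
 *	int len;
 *	uint8_t *buf;
 *
 *	if (ecpp_getdevid(pp, NULL, &len, ECPP_NIBBLE_MODE) == 0) {
 *		buf = kmem_alloc(len, KM_SLEEP);
 *		if (ecpp_getdevid(pp, buf, &len, ECPP_NIBBLE_MODE) != 0)
 *			(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
 *		...
 *		kmem_free(buf, len);
 *	}
 */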
5262 static int
5263 ecpp_getdevid(struct ecppunit *pp, uint8_t *id, int *lenp, int mode)
5264 {
5265 	uint8_t lenhi, lenlo;
5266 	uint8_t dsr;
5267 	int i;
5268 
5269 	switch (mode) {
5270 	case ECPP_NIBBLE_MODE:
5271 		/* negotiate only if necessary */
5272 		if ((pp->current_mode != mode) || (id == NULL)) {
5273 			if (devidnib_negotiation(pp) == FAILURE) {
5274 				return (EIO);
5275 			}
5276 		}
5277 
5278 		if (pp->current_phase != ECPP_PHASE_NIBT_AVAIL) {
5279 			return (EIO);
5280 		}
5281 
5282 		/*
5283 		 * Event 14: host tristates the data bus; the peripheral
5284 		 * asserts nERR if data is available.  The data arrives on
5285 		 * the status bits (7-0), and each byte requires two reads
5286 		 * since only nibbles are transferred.
5287 		 */
5288 		dsr = DSR_READ(pp);
5289 
5290 		if (id == NULL) {
5291 			/*
5292 			 * first two bytes are the length of the sequence
5293 			 * (incl. these bytes)
5294 			 * first byte is MSB
5295 			 */
5296 			if ((dsr & ECPP_nERR) ||
5297 			    (nibble_peripheral2host(pp, &lenhi) == FAILURE) ||
5298 			    (dsr & ECPP_nERR) ||
5299 			    (nibble_peripheral2host(pp, &lenlo) == FAILURE)) {
5300 				ecpp_error(pp->dip,
5301 				    "ecpp_getdevid: id length read error\n");
5302 				return (EIO);
5303 			}
5304 
5305 			*lenp = (lenhi << 8) | (lenlo);
5306 
5307 			ecpp_error(pp->dip,
5308 			    "ecpp_getdevid: id length = %d\n", *lenp);
5309 
5310 			if (*lenp < 2) {
5311 				return (EIO);
5312 			}
5313 		} else {
5314 			/*
5315 			 * read the rest of the data
5316 			 */
5317 			i = *lenp;
5318 			while (i && ((dsr & ECPP_nERR) == 0)) {
5319 				if (nibble_peripheral2host(pp, id++) == FAILURE)
5320 					break;
5321 
5322 				i--;
5323 				dsr = DSR_READ(pp);
5324 			}
5325 			ecpp_error(pp->dip,
5326 			    "ecpp_getdevid: read %d bytes\n", *lenp - i);
5327 
5328 			/*
5329 			 * 1284: After receiving the sequence, the host is
5330 			 * required to return the link to the Compatibility mode
5331 			 */
5332 			(void) ecpp_1284_termination(pp);
5333 		}
5334 
5335 		break;
5336 
5337 	/* Other modes are not yet supported */
5338 	default:
5339 		return (EINVAL);
5340 	}
5341 
5342 	return (0);
5343 }
5344 
5345 /*
5346  * Various hardware support
5347  *
5348  * First define some stubs for functions that do nothing
5349  */
5350 
5351 /*ARGSUSED*/
5352 static void
5353 empty_config_mode(struct ecppunit *pp)
5354 {
5355 }
5356 
5357 /*ARGSUSED*/
5358 static void
5359 empty_mask_intr(struct ecppunit *pp)
5360 {
5361 }
5362 
5363 #if defined(__x86)
5364 static size_t
5365 x86_getcnt(struct ecppunit *pp)
5366 {
5367 	int count;
5368 
5369 	(void) ddi_dmae_getcnt(pp->dip, pp->uh.x86.chn, &count);
5370 	return (count);
5371 }
5372 #endif
5373 
5374 /*
5375  *
5376  * National PC87332 and PC97317 SuperIOs support routines
5377  * These chips are used in PCI-based Darwin, Quark, Quasar, Excalibur
5378  * and use EBus DMA facilities (Cheerio or RIO)
5379  *
5380  */
5381 
5382 static int
5383 pc87332_map_regs(struct ecppunit *pp)
5384 {
5385 	if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.ebus.c_reg, 0,
5386 	    sizeof (struct config_reg), &acc_attr,
5387 	    &pp->uh.ebus.c_handle) != DDI_SUCCESS) {
5388 		ecpp_error(pp->dip, "pc87332_map_regs: failed c_reg\n");
5389 		goto fail;
5390 	}
5391 
5392 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5393 	    sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5394 	    != DDI_SUCCESS) {
5395 		ecpp_error(pp->dip, "pc87332_map_regs: failed i_reg\n");
5396 		goto fail;
5397 	}
5398 
5399 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400,
5400 	    sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5401 	    != DDI_SUCCESS) {
5402 		ecpp_error(pp->dip, "pc87332_map_regs: failed f_reg\n");
5403 		goto fail;
5404 	}
5405 
5406 	if (ddi_regs_map_setup(pp->dip, 2, (caddr_t *)&pp->uh.ebus.dmac, 0,
5407 	    sizeof (struct cheerio_dma_reg), &acc_attr,
5408 	    &pp->uh.ebus.d_handle) != DDI_SUCCESS) {
5409 		ecpp_error(pp->dip, "pc87332_map_regs: failed dmac\n");
5410 		goto fail;
5411 	}
5412 
5413 	return (SUCCESS);
5414 
5415 fail:
5416 	pc87332_unmap_regs(pp);
5417 	return (FAILURE);
5418 }
5419 
5420 static void
5421 pc87332_unmap_regs(struct ecppunit *pp)
5422 {
5423 	if (pp->uh.ebus.c_handle) {
5424 		ddi_regs_map_free(&pp->uh.ebus.c_handle);
5425 	}
5426 	if (pp->uh.ebus.d_handle) {
5427 		ddi_regs_map_free(&pp->uh.ebus.d_handle);
5428 	}
5429 	if (pp->i_handle) {
5430 		ddi_regs_map_free(&pp->i_handle);
5431 	}
5432 	if (pp->f_handle) {
5433 		ddi_regs_map_free(&pp->f_handle);
5434 	}
5435 }
5436 
5437 static uint8_t
5438 pc87332_read_config_reg(struct ecppunit *pp, uint8_t reg_num)
5439 {
5440 	uint8_t retval;
5441 
5442 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num);
5443 	retval = PP_GETB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data);
5444 
5445 	return (retval);
5446 }
5447 
5448 static void
5449 pc87332_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val)
5450 {
5451 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num);
5452 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val);
5453 
5454 	/*
5455 	 * second write to this register is needed.  the register behaves as
5456 	 * a fifo.  the first value written goes to the data register.  the
5457 	 * second write pushes the initial value to the register indexed.
5458 	 */
5459 
5460 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val);
5461 }
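
/*
 * For example, pc87332_config_chip() below uses this pair of accessors
 * to set the ECP DMA configuration bit:
 *
 *	pmc = pc87332_read_config_reg(pp, PMC);
 *	pc87332_write_config_reg(pp, PMC, pmc | PC87332_PMC_ECP_DMA_CONFIG);
 */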
5462 
5463 static int
5464 pc87332_config_chip(struct ecppunit *pp)
5465 {
5466 	uint8_t pmc, fcr;
5467 
5468 	pp->current_phase = ECPP_PHASE_INIT;
5469 
5470 	/* ECP DMA configuration bit (PMC4) must be set */
5471 	pmc = pc87332_read_config_reg(pp, PMC);
5472 	if (!(pmc & PC87332_PMC_ECP_DMA_CONFIG)) {
5473 		pc87332_write_config_reg(pp, PMC,
5474 		    pmc | PC87332_PMC_ECP_DMA_CONFIG);
5475 	}
5476 
5477 	/*
5478 	 * The Parallel Port Multiplexor pins must be driven.
5479 	 * Check to see if FCR3 is zero, if not clear FCR3.
5480 	 */
5481 	fcr = pc87332_read_config_reg(pp, FCR);
5482 	if (fcr & PC87332_FCR_PPM_FLOAT_CTL) {
5483 		pc87332_write_config_reg(pp, FCR,
5484 		    fcr & ~PC87332_FCR_PPM_FLOAT_CTL);
5485 	}
5486 
5487 	/*
5488 	 * clear bits 3-0 in CTR (aka DCR) prior to enabling ECP mode
5489 	 * CTR5 can not be cleared in SPP mode, CTR5 will return 1.
5490 	 * "FAILURE" in this case is ok.  Better to use dcr_write()
5491 	 * to ensure reliable writing to DCR.
5492 	 */
5493 	if (dcr_write(pp, ECPP_DCR_SET | ECPP_nINIT) == FAILURE) {
5494 		ecpp_error(pp->dip, "ecpp_config_87332: DCR config\n");
5495 	}
5496 
5497 	/* enable ECP mode, level intr (note that DCR bits 3-0 == 0x0) */
5498 	pc87332_write_config_reg(pp, PCR,
5499 	    PC87332_PCR_INTR_LEVL | PC87332_PCR_ECP_EN);
5500 
5501 	/* put SuperIO in initial state */
5502 	if (ecr_write(pp, ECR_mode_001 |
5503 	    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5504 		ecpp_error(pp->dip, "ecpp_config_87332: ECR\n");
5505 	}
5506 
5507 	if (dcr_write(pp, ECPP_DCR_SET | ECPP_SLCTIN | ECPP_nINIT) == FAILURE) {
5508 		ecpp_error(pp->dip, "ecpp_config_87332: w/DCR failed2.\n");
5509 		return (FAILURE);
5510 
5511 	}
5512 	/* we are in centronic mode */
5513 	pp->current_mode = ECPP_CENTRONICS;
5514 
5515 	/* in compatible mode with no data transfer in progress */
5516 	pp->current_phase = ECPP_PHASE_C_IDLE;
5517 
5518 	return (SUCCESS);
5519 }
5520 
5521 /*
5522  * A new mode was set, do some mode specific reconfiguration
5523  * in this case - set interrupt characteristic
5524  */
5525 static void
5526 pc87332_config_mode(struct ecppunit *pp)
5527 {
5528 	if (COMPAT_PIO(pp)) {
5529 		pc87332_write_config_reg(pp, PCR, 0x04);
5530 	} else {
5531 		pc87332_write_config_reg(pp, PCR, 0x14);
5532 	}
5533 }
5534 
5535 static int
5536 pc97317_map_regs(struct ecppunit *pp)
5537 {
5538 	if (pc87332_map_regs(pp) != SUCCESS) {
5539 		return (FAILURE);
5540 	}
5541 
5542 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->uh.ebus.c2_reg,
5543 	    0x403, sizeof (struct config2_reg), &acc_attr,
5544 	    &pp->uh.ebus.c2_handle) != DDI_SUCCESS) {
5545 		ecpp_error(pp->dip, "pc97317_map_regs: failed c2_reg\n");
5546 		pc87332_unmap_regs(pp);
5547 		return (FAILURE);
5548 	} else {
5549 		return (SUCCESS);
5550 	}
5551 }
5552 
5553 static void
5554 pc97317_unmap_regs(struct ecppunit *pp)
5555 {
5556 	if (pp->uh.ebus.c2_handle) {
5557 		ddi_regs_map_free(&pp->uh.ebus.c2_handle);
5558 	}
5559 
5560 	pc87332_unmap_regs(pp);
5561 }
5562 
5563 /*
5564  * OBP should configure the PC97317 such that it does not need further
5565  * configuration.  During sustaining work, it may be necessary to examine
5566  * or change the configuration registers.  This routine is left in
5567  * the file for that purpose.
5568  */
5569 static int
5570 pc97317_config_chip(struct ecppunit *pp)
5571 {
5572 	uint8_t conreg;
5573 
5574 	/* set the logical device name */
5575 	pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4);
5576 
5577 	/* SPP Compatibility */
5578 	PP_PUTB(pp->uh.ebus.c2_handle,
5579 	    &pp->uh.ebus.c2_reg->eir, PC97317_CONFIG2_CONTROL2);
5580 	PP_PUTB(pp->uh.ebus.c2_handle, &pp->uh.ebus.c2_reg->edr, 0x80);
5581 
5582 	/* low interrupt polarity */
5583 	pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00);
5584 
5585 	/* ECP mode */
5586 	pc87332_write_config_reg(pp, PC97317_CONFIG_PP_CONFIG, 0xf2);
5587 
5588 	if (dcr_write(pp, ECPP_SLCTIN | ECPP_nINIT) == FAILURE) {
5589 		ecpp_error(pp->dip, "pc97317_config_chip: failed w/DCR\n");
5590 	}
5591 
5592 	if (ecr_write(pp, ECR_mode_001 |
5593 	    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5594 		ecpp_error(pp->dip, "pc97317_config_chip: failed w/ECR\n");
5595 	}
5596 
5597 #ifdef DEBUG
5598 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DEV_NO);
5599 	ecpp_error(pp->dip, "97317:conreg7(logical dev)=%x\n", conreg);
5600 
5601 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_MSB);
5602 	ecpp_error(pp->dip, "97317:conreg60(addrHi)=%x\n", conreg);
5603 
5604 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_LSB);
5605 	ecpp_error(pp->dip, "97317:conreg61(addrLo)=%x\n", conreg);
5606 
5607 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_SEL);
5608 	ecpp_error(pp->dip, "97317:conreg70(IRQL)=%x\n", conreg);
5609 
5610 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_TYPE);
5611 	ecpp_error(pp->dip, "97317:conreg71(intr type)=%x\n", conreg);
5612 
5613 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_ACTIVATE);
5614 	ecpp_error(pp->dip, "97317:conreg30(Active)=%x\n", conreg);
5615 
5616 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_IO_RANGE);
5617 	ecpp_error(pp->dip, "97317:conreg31(IO Range Check)=%x\n", conreg);
5618 
5619 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA0_CHAN);
5620 	ecpp_error(pp->dip, "97317:conreg74(DMA0 Chan)=%x\n", conreg);
5621 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA1_CHAN);
5622 	ecpp_error(pp->dip, "97317:conreg75(DMA1 Chan)=%x\n", conreg);
5623 
5624 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG);
5625 	ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg);
5626 
5627 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG);
5628 	ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg);
5629 #endif /* DEBUG */
5630 
5631 	return (SUCCESS);
5632 }
5633 
5634 /*
5635  * A new mode was set, do some mode specific reconfiguration
5636  * in this case - set interrupt polarity
5637  */
5638 static void
5639 pc97317_config_mode(struct ecppunit *pp)
5640 {
5641 	/* set the logical device name */
5642 	pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4);
5643 
5644 	if (COMPAT_PIO(pp) || pp->current_mode == ECPP_NIBBLE_MODE) {
5645 		pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x02);
5646 	} else {
5647 		pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00);
5648 	}
5649 }
5650 
5651 static void
5652 cheerio_mask_intr(struct ecppunit *pp)
5653 {
5654 	/* mask Cheerio interrupts */
5655 	AND_SET_LONG_R(pp->uh.ebus.d_handle,
5656 	    &pp->uh.ebus.dmac->csr, ~DCSR_INT_EN);
5657 }
5658 
5659 static void
5660 cheerio_unmask_intr(struct ecppunit *pp)
5661 {
5662 	/* unmask Cheerio interrupts */
5663 	OR_SET_LONG_R(pp->uh.ebus.d_handle,
5664 	    &pp->uh.ebus.dmac->csr, DCSR_INT_EN | DCSR_TCI_DIS);
5665 }
5666 
5667 static int
5668 cheerio_dma_start(struct ecppunit *pp)
5669 {
5670 	cheerio_reset_dcsr(pp);
5671 	SET_DMAC_BCR(pp, pp->dma_cookie.dmac_size);
5672 	SET_DMAC_ACR(pp, pp->dma_cookie.dmac_address);
5673 
5674 	if (pp->dma_dir == DDI_DMA_READ) {
5675 		SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA |
5676 		    DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0 | DCSR_WRITE);
5677 	} else {
5678 		SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA |
5679 		    DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0);
5680 	}
5681 
5682 	return (SUCCESS);
5683 }
5684 
5685 /*
5686  * Note: BCR is reset to 0, so counter should always be read before dma_stop
5687  */
5688 static int
5689 cheerio_dma_stop(struct ecppunit *pp, size_t *countp)
5690 {
5691 	uint8_t ecr;
5692 
5693 	/* disable DMA and byte counter */
5694 	AND_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr,
5695 	    ~(DCSR_EN_DMA | DCSR_EN_CNT | DCSR_INT_EN));
5696 
5697 	/* ACK and disable the TC interrupt */
5698 	OR_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr,
5699 	    DCSR_TC | DCSR_TCI_DIS);
5700 
5701 	/* read DMA count if requested */
5702 	if (countp) {
5703 		*countp = cheerio_getcnt(pp);
5704 	}
5705 
5706 	cheerio_reset_dcsr(pp);
5707 	SET_DMAC_BCR(pp, 0);
5708 
5709 	/* turn off SuperIO's DMA */
5710 	ecr = ECR_READ(pp);
5711 	if (ecr_write(pp, ecr & ~ECPP_DMA_ENABLE) == FAILURE) {
5712 		return (FAILURE);
5713 	}
5714 
5715 	/* Disable SuperIO interrupts and DMA */
5716 	ecr = ECR_READ(pp);
5717 
5718 	return (ecr_write(pp, ecr | ECPP_INTR_SRV));
5719 }
5720 
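/*
 * A minimal usage sketch (editorial addition, not part of the driver):
 * capturing the residual byte count while stopping, per the note above
 * cheerio_dma_stop().  The variable name unx_len is hypothetical.
 */
#if 0
	size_t unx_len;		/* bytes left untransferred by the DMAC */

	if (cheerio_dma_stop(pp, &unx_len) == FAILURE) {
		ecpp_error(pp->dip, "dma stop failed, resid=%lu\n",
		    (unsigned long)unx_len);
	}
#endif
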
5721 static size_t
5722 cheerio_getcnt(struct ecppunit *pp)
5723 {
5724 	return (GET_DMAC_BCR(pp));
5725 }
5726 
5727 /*
5728  * Reset the DCSR by first setting the RESET bit to 1.  Poll the
5729  * DCSR_CYC_PEND bit to make sure there are no more pending DMA cycles.
5730  * If there are no more pending cycles, clear the RESET bit.
5731  */
5732 static void
5733 cheerio_reset_dcsr(struct ecppunit *pp)
5734 {
5735 	int	timeout = DMAC_RESET_TIMEOUT;
5736 
5737 	SET_DMAC_CSR(pp, DCSR_RESET);
5738 
5739 	while (GET_DMAC_CSR(pp) & DCSR_CYC_PEND) {
5740 		if (timeout == 0) {
5741 			ecpp_error(pp->dip, "cheerio_reset_dcsr: timeout\n");
5742 			break;
5743 		} else {
5744 			drv_usecwait(1);
5745 			timeout--;
5746 		}
5747 	}
5748 
5749 	SET_DMAC_CSR(pp, 0);
5750 }
5751 
5752 /*
5753  *
5754  * Grover Southbridge (M1553) support routines
5755  * The Southbridge contains an onboard Intel 8237 DMAC, which is used
5756  * to transport data between PCI space and the SuperIO parallel port
5757  *
5758  */
5759 
5760 
5761 static int
5762 m1553_map_regs(struct ecppunit *pp)
5763 {
5764 	if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.m1553.isa_space,
5765 	    0, sizeof (struct isaspace), &acc_attr,
5766 	    &pp->uh.m1553.d_handle) != DDI_SUCCESS) {
5767 		ecpp_error(pp->dip, "m1553_map_regs: failed isa space\n");
5768 		goto fail;
5769 	}
5770 
5771 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5772 	    sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5773 	    != DDI_SUCCESS) {
5774 		ecpp_error(pp->dip, "m1553_map_regs: failed i_reg\n");
5775 		goto fail;
5776 	}
5777 
5778 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400,
5779 	    sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5780 	    != DDI_SUCCESS) {
5781 		ecpp_error(pp->dip, "m1553_map_regs: failed f_reg\n");
5782 		goto fail;
5783 	}
5784 
5785 	return (SUCCESS);
5786 
5787 fail:
5788 	m1553_unmap_regs(pp);
5789 	return (FAILURE);
5790 }
5791 
5792 static void
5793 m1553_unmap_regs(struct ecppunit *pp)
5794 {
5795 	if (pp->uh.m1553.d_handle) {
5796 		ddi_regs_map_free(&pp->uh.m1553.d_handle);
5797 	}
5798 	if (pp->i_handle) {
5799 		ddi_regs_map_free(&pp->i_handle);
5800 	}
5801 	if (pp->f_handle) {
5802 		ddi_regs_map_free(&pp->f_handle);
5803 	}
5804 }
5805 
5806 #if defined(__x86)
5807 static int
5808 x86_map_regs(struct ecppunit *pp)
5809 {
5810 	int nregs = 0;
5811 
5812 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5813 	    sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5814 	    != DDI_SUCCESS) {
5815 		ecpp_error(pp->dip, "x86_map_regs: failed i_reg\n");
5816 		goto fail;
5817 	}
5818 	if (ddi_dev_nregs(pp->dip, &nregs) == DDI_SUCCESS && nregs == 2) {
5819 		if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->f_reg, 0,
5820 		    sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5821 		    != DDI_SUCCESS) {
5822 			ecpp_error(pp->dip, "x86_map_regs: failed f_reg\n");
5823 			goto fail;
5824 		} else
5825 			pp->noecpregs = FALSE;
5826 	} else {
5827 		pp->noecpregs = TRUE;
5828 	}
5829 	return (SUCCESS);
5830 fail:
5831 	x86_unmap_regs(pp);
5832 	return (FAILURE);
5833 }
5834 
5835 static void
5836 x86_unmap_regs(struct ecppunit *pp)
5837 {
5838 	if (pp->i_handle) {
5839 		ddi_regs_map_free(&pp->i_handle);
5840 	}
5841 	if (pp->f_handle) {
5842 		ddi_regs_map_free(&pp->f_handle);
5843 	}
5844 }
5845 #endif
5846 
5847 static uint8_t
5848 m1553_read_config_reg(struct ecppunit *pp, uint8_t reg_num)
5849 {
5850 	uint8_t retval;
5851 
5852 	dma8237_write(pp, 0x3F0, reg_num);
5853 	retval = dma8237_read(pp, 0x3F1);
5854 
5855 	return (retval);
5856 }
5857 
5858 static void
5859 m1553_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val)
5860 {
5861 	dma8237_write(pp, 0x3F0, reg_num);
5862 	dma8237_write(pp, 0x3F1, val);
5863 }
5864 
5865 static int
5866 m1553_config_chip(struct ecppunit *pp)
5867 {
5868 	uint8_t conreg;
5869 
5870 	/* Unlock configuration regs with "key sequence" */
5871 	dma8237_write(pp, 0x3F0, 0x51);
5872 	dma8237_write(pp, 0x3F0, 0x23);
5873 
5874 	m1553_write_config_reg(pp, PnP_CONFIG_DEV_NO, 0x3);
5875 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_DEV_NO);
5876 	ecpp_error(pp->dip, "M1553:conreg7(logical dev)=%x\n", conreg);
5877 
5878 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_ACTIVATE);
5879 	ecpp_error(pp->dip, "M1553:conreg30(Active)=%x\n", conreg);
5880 
5881 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_MSB);
5882 	ecpp_error(pp->dip, "M1553:conreg60(addrHi)=%x\n", conreg);
5883 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_LSB);
5884 	ecpp_error(pp->dip, "M1553:conreg61(addrLo)=%x\n", conreg);
5885 
5886 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_INTR_SEL);
5887 	ecpp_error(pp->dip, "M1553:conreg70(IRQL)=%x\n", conreg);
5888 
5889 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_DMA0_CHAN);
5890 	ecpp_error(pp->dip, "M1553:conreg74(DMA0 Chan)=%x\n", conreg);
5891 
5892 	/* set FIFO threshold 1 and ECP mode, preserve bit 7 (IRQ polarity) */
5893 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0);
5894 	conreg = (conreg & ~0x7F) | 0x0A;
5895 	m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG0, conreg);
5896 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0);
5897 	ecpp_error(pp->dip, "M1553:conregF0(pport conf)=%x\n", conreg);
5898 
5899 	m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG1, 0x04);
5900 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG1);
5901 	ecpp_error(pp->dip, "M1553:conregF1(outconf)=%x\n", conreg);
5902 
5903 	/* lock configuration regs with key */
5904 	dma8237_write(pp, 0x3F0, 0xBB);
5905 
5906 	/* Set ECR, DCR in known state */
5907 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
5908 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
5909 
5910 	ecpp_error(pp->dip, "m1553_config_chip: ecr=%x, dsr=%x, dcr=%x\n",
5911 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
5912 
5913 	return (SUCCESS);
5914 }
5915 
5916 #if defined(__x86)
5917 static int
5918 x86_config_chip(struct ecppunit *pp)
5919 {
5920 	if (ecr_write(pp, ECR_mode_001 |
5921 	    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5922 		ecpp_error(pp->dip, "config chip: failed w/ecr\n");
5923 		pp->noecpregs = TRUE;
5924 	}
5925 	if (pp->noecpregs)
5926 		pp->fast_compat = FALSE;
5927 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
5928 	ecpp_error(pp->dip, "x86_config_chip: ecr=%x, dsr=%x, dcr=%x\n",
5929 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
5930 	return (SUCCESS);
5931 }
5932 #endif
5933 
5934 /*
5935  * dma8237_dma_start() programs the selected 8-bit channel
5936  * of DMAC1 with the dma cookie.  pp->dma_cookie must
5937  * be set before this routine is called.
5938  */
5939 static int
5940 dma8237_dma_start(struct ecppunit *pp)
5941 {
5942 	uint8_t chn;
5943 
5944 	chn = pp->uh.m1553.chn;
5945 
5946 	ASSERT(chn <= DMAE_CH3 &&
5947 	    pp->dma_cookie.dmac_size != 0 &&
5948 	    pp->dma_cookie.dmac_address != 0);
5949 
5950 	/* At this point Southbridge has not yet asserted DREQ */
5951 
5952 	/* DMAC2 cascades DMAC1; program DMAC1 channel mode per transfer direction */
5953 	dma8237_write(pp, DMAC2_MODE, DMAMODE_CASC);
5954 	if (pp->dma_dir == DDI_DMA_READ) {
5955 		dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE |
5956 		    DMAMODE_READ | chn);
5957 	} else {
5958 		dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE |
5959 		    DMAMODE_WRITE | chn);
5960 	}
5961 
5962 	dma8237_write_addr(pp, pp->dma_cookie.dmac_address);
5963 	dma8237_write_count(pp, pp->dma_cookie.dmac_size - 1);
5964 
5965 	/*
5966 	 * The M1553 chip does not permit access to the DMA register banks
5967 	 * while DMA is in flight. As a result, the ecpp and floppy drivers
5968 	 * could potentially corrupt each other's DMA. The interlocking mechanism
5969 	 * is provided by the parent nexus driver (isadma) and is engaged
5970 	 * indirectly through DMAC1_ALLMASK register accesses:
5971 	 *
5972 	 * writing a non-zero value to this register enters the lock,
5973 	 * writing zero releases it.
5974 	 *
5975 	 * A DMA transfer must only occur after the lock has been entered.
5976 	 * If the lock is already owned by another driver, we will block.
5977 	 *
5978 	 * The following operation unmasks our channel and masks all others
5979 	 */
5980 	dma8237_write(pp, DMAC1_ALLMASK, ~(1 << chn));
5981 	pp->uh.m1553.isadma_entered = 1;
5982 
5983 	return (SUCCESS);
5984 }
5985 
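/*
 * A minimal caller sketch (editorial addition, not part of the driver):
 * the DMA cookie is typically obtained by binding a buffer to a DMA
 * handle before dma8237_dma_start() is invoked.  The names dma_handle,
 * addr and len below are assumptions for illustration only.
 */
#if 0
	uint_t ccount;

	if (ddi_dma_addr_bind_handle(pp->dma_handle, NULL, addr, len,
	    pp->dma_dir, DDI_DMA_DONTWAIT, NULL,
	    &pp->dma_cookie, &ccount) != DDI_DMA_MAPPED) {
		return (FAILURE);
	}

	(void) dma8237_dma_start(pp);
#endif
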
5986 static int
5987 dma8237_dma_stop(struct ecppunit *pp, size_t *countp)
5988 {
5989 	uint8_t ecr;
5990 
5991 	/* stop DMA */
5992 	ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV;
5993 	(void) ecr_write(pp, ecr);
5994 
5995 	if (pp->uh.m1553.isadma_entered) {
5996 		/* reset the channel mask so we can issue PIOs to our device */
5997 		dma8237_write(pp, DMAC1_ALLMASK, 0);
5998 		pp->uh.m1553.isadma_entered = 0;
5999 
6000 	}
6001 
6002 	/* read DMA count if requested */
6003 	if (countp) {
6004 		*countp = dma8237_getcnt(pp);
6005 		if (pp->dma_dir == DDI_DMA_READ && *countp > 0) {
6006 			(*countp)++;	/* need correction for reverse xfers */
6007 		}
6008 	}
6009 	return (SUCCESS);
6010 }
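
/*
 * Editorial sketch of the isadma lock pairing described above
 * dma8237_dma_start(): the two DMAC1_ALLMASK writes below are what
 * _start and _stop perform around a transfer.
 */
#if 0
	/* enter the lock: unmask our channel, mask all others */
	dma8237_write(pp, DMAC1_ALLMASK, ~(1 << pp->uh.m1553.chn));

	/* ... program the 8237 and let the transfer complete ... */

	/* release the lock so other isadma clients (e.g. fdc) may proceed */
	dma8237_write(pp, DMAC1_ALLMASK, 0);
#endif
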
6011 #if defined(__x86)
6012 static int
6013 x86_dma_start(struct ecppunit *pp)
6014 {
6015 	uint8_t chn;
6016 	struct ddi_dmae_req dmaereq;
6017 
6018 	chn = pp->uh.x86.chn;
6019 	ASSERT(chn <= DMAE_CH3 &&
6020 	    pp->dma_cookie.dmac_size != 0 &&
6021 	    pp->dma_cookie.dmac_address != 0);
6022 	bzero(&dmaereq, sizeof (struct ddi_dmae_req));
6023 	dmaereq.der_command =
6024 	    (pp->dma_dir & DDI_DMA_READ) ? DMAE_CMD_READ : DMAE_CMD_WRITE;
6025 	if (ddi_dmae_prog(pp->dip, &dmaereq, &pp->dma_cookie, chn)
6026 	    != DDI_SUCCESS)
6027 		ecpp_error(pp->dip, "prog failed !!!\n");
6028 	ecpp_error(pp->dip, "dma_started..\n");
6029 	return (SUCCESS);
6030 }
6031 
6032 static int
6033 x86_dma_stop(struct ecppunit *pp, size_t *countp)
6034 {
6035 	uint8_t ecr;
6036 
6037 	/* stop DMA */
6038 	if (pp->uh.x86.chn == 0xff)
6039 		return (FAILURE);
6040 	ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV;
6041 	(void) ecr_write(pp, ecr);
6042 	ecpp_error(pp->dip, "dma_stop\n");
6043 
6044 	/* read DMA count if requested */
6045 	if (countp) {
6046 		*countp = x86_getcnt(pp);
6047 	}
6048 	ecpp_error(pp->dip, "dma_stopped..\n");
6049 	return (SUCCESS);
6050 }
6051 #endif
6052 
6053 /* channel must be masked */
6054 static void
6055 dma8237_write_addr(struct ecppunit *pp, uint32_t addr)
6056 {
6057 	uint8_t c_addr, c_lpage;
6058 	uint16_t c_hpage, *p;
6059 
6060 	switch (pp->uh.m1553.chn) {
6061 	case DMAE_CH0:
6062 		c_addr = DMA_0ADR;
6063 		c_lpage = DMA_0PAGE;
6064 		c_hpage = DMA_0HPG;
6065 		break;
6066 
6067 	case DMAE_CH1:
6068 		c_addr = DMA_1ADR;
6069 		c_lpage = DMA_1PAGE;
6070 		c_hpage = DMA_1HPG;
6071 		break;
6072 
6073 	case DMAE_CH2:
6074 		c_addr = DMA_2ADR;
6075 		c_lpage = DMA_2PAGE;
6076 		c_hpage = DMA_2HPG;
6077 		break;
6078 
6079 	case DMAE_CH3:
6080 		c_addr = DMA_3ADR;
6081 		c_lpage = DMA_3PAGE;
6082 		c_hpage = DMA_3HPG;
6083 		break;
6084 
6085 	default:
6086 		return;
6087 	}
6088 
6089 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr];
6090 	ddi_put16(pp->uh.m1553.d_handle, p, addr & 0xFFFF);
6091 
6092 	dma8237_write(pp, c_lpage, (addr & 0xFF0000) >> 16);
6093 	dma8237_write(pp, c_hpage, (addr & 0xFF000000) >> 24);
6094 
6095 }
6096 
6097 /*
6098  * This function may be useful during debugging,
6099  * so we leave it in, but do not include it in the binary
6100  */
6101 #ifdef INCLUDE_DMA8237_READ_ADDR
6102 static uint32_t
6103 dma8237_read_addr(struct ecppunit *pp)
6104 {
6105 	uint8_t rval3, rval4;
6106 	uint16_t rval16;
6107 	uint32_t rval;
6108 	uint8_t c_addr, c_lpage;
6109 	uint16_t c_hpage, *p;
6110 
6111 	switch (pp->uh.m1553.chn) {
6112 	case DMAE_CH0:
6113 		c_addr = DMA_0ADR;
6114 		c_lpage = DMA_0PAGE;
6115 		c_hpage = DMA_0HPG;
6116 		break;
6117 
6118 	case DMAE_CH1:
6119 		c_addr = DMA_1ADR;
6120 		c_lpage = DMA_1PAGE;
6121 		c_hpage = DMA_1HPG;
6122 		break;
6123 
6124 	case DMAE_CH2:
6125 		c_addr = DMA_2ADR;
6126 		c_lpage = DMA_2PAGE;
6127 		c_hpage = DMA_2HPG;
6128 		break;
6129 
6130 	case DMAE_CH3:
6131 		c_addr = DMA_3ADR;
6132 		c_lpage = DMA_3PAGE;
6133 		c_hpage = DMA_3HPG;
6134 		break;
6135 
6136 	default:
6137 		return (0);
6138 	}
6139 
6140 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr];
6141 	rval16 = ddi_get16(pp->uh.m1553.d_handle, p);
6142 
6143 	rval3 = dma8237_read(pp, c_lpage);
6144 	rval4 = dma8237_read(pp, c_hpage);
6145 
6146 	rval = rval16 | (rval3 << 16) | (rval4 << 24);
6147 
6148 	return (rval);
6149 }
6150 #endif
6151 
6152 static void
6153 dma8237_write_count(struct ecppunit *pp, uint32_t count)
6154 {
6155 	uint8_t c_wcnt;
6156 	uint16_t *p;
6157 
6158 	switch (pp->uh.m1553.chn) {
6159 	case DMAE_CH0:
6160 		c_wcnt = DMA_0WCNT;
6161 		break;
6162 
6163 	case DMAE_CH1:
6164 		c_wcnt = DMA_1WCNT;
6165 		break;
6166 
6167 	case DMAE_CH2:
6168 		c_wcnt = DMA_2WCNT;
6169 		break;
6170 
6171 	case DMAE_CH3:
6172 		c_wcnt = DMA_3WCNT;
6173 		break;
6174 
6175 	default:
6176 		return;
6177 	}
6178 
6179 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt];
6180 	ddi_put16(pp->uh.m1553.d_handle, p, count & 0xFFFF);
6181 
6182 }
6183 
6184 static uint32_t
6185 dma8237_read_count(struct ecppunit *pp)
6186 {
6187 	uint8_t c_wcnt;
6188 	uint16_t *p;
6189 
6190 	switch (pp->uh.m1553.chn) {
6191 	case DMAE_CH0:
6192 		c_wcnt = DMA_0WCNT;
6193 		break;
6194 
6195 	case DMAE_CH1:
6196 		c_wcnt = DMA_1WCNT;
6197 		break;
6198 
6199 	case DMAE_CH2:
6200 		c_wcnt = DMA_2WCNT;
6201 		break;
6202 
6203 	case DMAE_CH3:
6204 		c_wcnt = DMA_3WCNT;
6205 		break;
6206 
6207 	default:
6208 		return (0);
6209 	}
6210 
6211 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt];
6212 	return (ddi_get16(pp->uh.m1553.d_handle, p));
6213 
6214 }
6215 
6216 static void
6217 dma8237_write(struct ecppunit *pp, int reg_num, uint8_t val)
6218 {
6219 	ddi_put8(pp->uh.m1553.d_handle,
6220 	    &pp->uh.m1553.isa_space->isa_reg[reg_num], val);
6221 }
6222 
6223 static uint8_t
6224 dma8237_read(struct ecppunit *pp, int reg_num)
6225 {
6226 	return (ddi_get8(pp->uh.m1553.d_handle,
6227 	    &pp->uh.m1553.isa_space->isa_reg[reg_num]));
6228 }
6229 
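/*
 * Editorial note: the 8237 word count register is programmed with
 * (size - 1) by dma8237_write_count() and decrements with each byte;
 * once the terminal count is reached it rolls over to 0xffff.  Hence a
 * reading of 0xffff below means nothing remains, while any other value
 * means (value + 1) bytes are left.
 */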
6230 static size_t
6231 dma8237_getcnt(struct ecppunit *pp)
6232 {
6233 	uint32_t cnt;
6234 
6235 	if ((cnt = dma8237_read_count(pp)) == 0xffff)
6236 		cnt = 0;
6237 	else
6238 		cnt++;
6239 	return (cnt);
6240 }
6241 
6242 
6243 /*
6244  *
6245  * Kstat support routines
6246  *
6247  */
6248 static void
6249 ecpp_kstat_init(struct ecppunit *pp)
6250 {
6251 	struct ecppkstat *ekp;
6252 	char buf[16];
6253 
6254 	/*
6255 	 * Allocate, initialize and install interrupt counter kstat
6256 	 */
6257 	(void) sprintf(buf, "ecppc%d", pp->instance);
6258 	pp->intrstats = kstat_create("ecpp", pp->instance, buf, "controller",
6259 	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
6260 	if (pp->intrstats == NULL) {
6261 		ecpp_error(pp->dip, "ecpp_kstat_init:1: kstat_create failed");
6262 	} else {
6263 		pp->intrstats->ks_update = ecpp_kstatintr_update;
6264 		pp->intrstats->ks_private = (void *) pp;
6265 		kstat_install(pp->intrstats);
6266 	}
6267 
6268 	/*
6269 	 * Allocate, initialize and install misc stats kstat
6270 	 */
6271 	pp->ksp = kstat_create("ecpp", pp->instance, NULL, "misc",
6272 	    KSTAT_TYPE_NAMED,
6273 	    sizeof (struct ecppkstat) / sizeof (kstat_named_t),
6274 	    KSTAT_FLAG_PERSISTENT);
6275 	if (pp->ksp == NULL) {
6276 		ecpp_error(pp->dip, "ecpp_kstat_init:2: kstat_create failed");
6277 		return;
6278 	}
6279 
6280 	ekp = (struct ecppkstat *)pp->ksp->ks_data;
6281 
6282 #define	EK_NAMED_INIT(name) \
6283 	kstat_named_init(&ekp->ek_##name, #name, KSTAT_DATA_UINT32)
6284 
6285 	EK_NAMED_INIT(ctx_obytes);
6286 	EK_NAMED_INIT(ctxpio_obytes);
6287 	EK_NAMED_INIT(nib_ibytes);
6288 	EK_NAMED_INIT(ecp_obytes);
6289 	EK_NAMED_INIT(ecp_ibytes);
6290 	EK_NAMED_INIT(epp_obytes);
6291 	EK_NAMED_INIT(epp_ibytes);
6292 	EK_NAMED_INIT(diag_obytes);
6293 	EK_NAMED_INIT(to_ctx);
6294 	EK_NAMED_INIT(to_nib);
6295 	EK_NAMED_INIT(to_ecp);
6296 	EK_NAMED_INIT(to_epp);
6297 	EK_NAMED_INIT(to_diag);
6298 	EK_NAMED_INIT(xfer_tout);
6299 	EK_NAMED_INIT(ctx_cf);
6300 	EK_NAMED_INIT(joblen);
6301 	EK_NAMED_INIT(isr_reattempt_high);
6302 	EK_NAMED_INIT(mode);
6303 	EK_NAMED_INIT(phase);
6304 	EK_NAMED_INIT(backchan);
6305 	EK_NAMED_INIT(iomode);
6306 	EK_NAMED_INIT(state);
6307 
6308 	pp->ksp->ks_update = ecpp_kstat_update;
6309 	pp->ksp->ks_private = (void *) pp;
6310 	kstat_install(pp->ksp);
6311 }
6312 
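/*
 * A minimal userland sketch (editorial addition, not part of the
 * driver) showing how the named kstat published above could be read
 * with libkstat; compile with -lkstat.  Field names match the
 * EK_NAMED_INIT() calls.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <kstat.h>

int
main(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;

	if ((kc = kstat_open()) == NULL)
		return (1);

	/* walk the chain looking for the ecpp named kstat */
	for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
		if (strcmp(ksp->ks_module, "ecpp") == 0 &&
		    ksp->ks_type == KSTAT_TYPE_NAMED)
			break;
	}

	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1 &&
	    (kn = kstat_data_lookup(ksp, "joblen")) != NULL)
		(void) printf("joblen = %u\n", kn->value.ui32);

	(void) kstat_close(kc);
	return (0);
}
#endif
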
6313 static int
6314 ecpp_kstat_update(kstat_t *ksp, int rw)
6315 {
6316 	struct ecppunit *pp;
6317 	struct ecppkstat *ekp;
6318 
6319 	/*
6320 	 * For the time being there is no point
6321 	 * in supporting writable kstats
6322 	 */
6323 	if (rw == KSTAT_WRITE) {
6324 		return (EACCES);
6325 	}
6326 
6327 	pp = (struct ecppunit *)ksp->ks_private;
6328 	ekp = (struct ecppkstat *)ksp->ks_data;
6329 
6330 	mutex_enter(&pp->umutex);
6331 
6332 	ekp->ek_ctx_obytes.value.ui32	= pp->obytes[ECPP_CENTRONICS] +
6333 	    pp->obytes[ECPP_COMPAT_MODE];
6334 	ekp->ek_ctxpio_obytes.value.ui32 = pp->ctxpio_obytes;
6335 	ekp->ek_nib_ibytes.value.ui32	= pp->ibytes[ECPP_NIBBLE_MODE];
6336 	ekp->ek_ecp_obytes.value.ui32	= pp->obytes[ECPP_ECP_MODE];
6337 	ekp->ek_ecp_ibytes.value.ui32	= pp->ibytes[ECPP_ECP_MODE];
6338 	ekp->ek_epp_obytes.value.ui32	= pp->obytes[ECPP_EPP_MODE];
6339 	ekp->ek_epp_ibytes.value.ui32	= pp->ibytes[ECPP_EPP_MODE];
6340 	ekp->ek_diag_obytes.value.ui32	= pp->obytes[ECPP_DIAG_MODE];
6341 	ekp->ek_to_ctx.value.ui32	= pp->to_mode[ECPP_CENTRONICS] +
6342 	    pp->to_mode[ECPP_COMPAT_MODE];
6343 	ekp->ek_to_nib.value.ui32	= pp->to_mode[ECPP_NIBBLE_MODE];
6344 	ekp->ek_to_ecp.value.ui32	= pp->to_mode[ECPP_ECP_MODE];
6345 	ekp->ek_to_epp.value.ui32	= pp->to_mode[ECPP_EPP_MODE];
6346 	ekp->ek_to_diag.value.ui32	= pp->to_mode[ECPP_DIAG_MODE];
6347 	ekp->ek_xfer_tout.value.ui32	= pp->xfer_tout;
6348 	ekp->ek_ctx_cf.value.ui32	= pp->ctx_cf;
6349 	ekp->ek_joblen.value.ui32	= pp->joblen;
6350 	ekp->ek_isr_reattempt_high.value.ui32	= pp->isr_reattempt_high;
6351 	ekp->ek_mode.value.ui32		= pp->current_mode;
6352 	ekp->ek_phase.value.ui32	= pp->current_phase;
6353 	ekp->ek_backchan.value.ui32	= pp->backchannel;
6354 	ekp->ek_iomode.value.ui32	= pp->io_mode;
6355 	ekp->ek_state.value.ui32	= pp->e_busy;
6356 
6357 	mutex_exit(&pp->umutex);
6358 
6359 	return (0);
6360 }
6361 
6362 static int
6363 ecpp_kstatintr_update(kstat_t *ksp, int rw)
6364 {
6365 	struct ecppunit *pp;
6366 
6367 	/*
6368 	 * For the time being there is no point
6369 	 * in supporting writable kstats
6370 	 */
6371 	if (rw == KSTAT_WRITE) {
6372 		return (EACCES);
6373 	}
6374 
6375 	pp = (struct ecppunit *)ksp->ks_private;
6376 
6377 	mutex_enter(&pp->umutex);
6378 
6379 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_HARD] = pp->intr_hard;
6380 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SPURIOUS] = pp->intr_spurious;
6381 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SOFT] = pp->intr_soft;
6382 
6383 	mutex_exit(&pp->umutex);
6384 
6385 	return (0);
6386 }
6387