1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2019, Joyent, Inc.
14  */
15 
16 /*
17  * USB CCID class driver
18  *
19  * Slot Detection
20  * --------------
21  *
22  * A CCID reader has one or more slots, each of which may or may not have an ICC
23  * (integrated circuit card) present. Some readers actually have a card that's
24  * permanently plugged in while other readers allow for cards to be inserted and
25  * removed. We model all CCID readers that don't have removable cards as ones
26  * that are removable, but never fire any events. Readers with removable cards
27  * are required to have an Interrupt-IN pipe.
28  *
29  * Each slot starts in an unknown state. After attaching we always kick off a
30  * discovery. When a change event comes in, that causes us to kick off a
31  * discovery again, though we focus it only on those slots that have noted a
32  * change. At attach time we logically mark that every slot has changed,
33  * allowing us to figure out what its actual state is. We don't rely on any
34  * initial Interrupt-IN polling, both to allow for the case where the hardware
35  * doesn't report changes and to better handle devices without an Interrupt-IN
36  * entry. Just because we open up the Interrupt-IN pipe, hardware is not
37  * obligated to tell us, as the detaching and reattaching of a driver will not
38  * cause a power cycle.
39  *
40  * The Interrupt-IN exception callback may need to restart polling. In addition,
41  * we may fail to start or restart polling due to a transient issue. In cases
42  * where the attempt to start polling has failed, we try again in one second
43  * with a timeout.
44  *
45  * Discovery is run through a taskq. The various slots are checked serially. If
46  * a discovery is running when another change event comes in, we flag ourselves
47  * for a follow up run. This means that it's possible that we end up processing
48  * items early and that the follow up run is ignored.
49  *
50  * Two state flags are used to keep track of this dance: CCID_F_WORKER_REQUESTED
51  * and CCID_F_WORKER_RUNNING. The first is used to indicate that discovery is
52  * desired. The second is to indicate that it is actively running. When
53  * discovery is requested, the caller first checks the current flags. If neither
54  * flag is set, then it knows that it can kick off discovery. Regardless of
55  * whether it can kick off the taskq, it always sets requested. Once the taskq entry
56  * starts, it removes CCID_F_WORKER_REQUESTED and sets CCID_F_WORKER_RUNNING. If
57  * at the end of discovery, we find that another request has been made, the
58  * discovery function will kick off another entry in the taskq.
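 *
 * As a rough sketch (simplified from the actual dispatch logic; the worker
 * function name is illustrative and error handling is elided), the request
 * side of this protocol looks like:
 *
 *	mutex_enter(&ccid->ccid_mutex);
 *	run = (ccid->ccid_flags & CCID_F_WORKER_MASK) == 0;
 *	ccid->ccid_flags |= CCID_F_WORKER_REQUESTED;
 *	if (run) {
 *		(void) ddi_taskq_dispatch(ccid->ccid_taskq, ccid_worker,
 *		    ccid, DDI_SLEEP);
 *	}
 *	mutex_exit(&ccid->ccid_mutex);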
59  *
60  * The one possible problem with this model is that it means that we aren't
61  * throttling the set of incoming requests with respect to taskq dispatches.
62  * However, because these are only driven by an Interrupt-IN pipe, it is hoped
63  * that the frequency will be fairly low. If that turns out not to be the
64  * case, we may need to use a timeout or another trick to ensure that only
65  * one discovery per tick or so is initiated. The main reason we don't just do
66  * that off the bat and add a delay is because of contactless cards which may
67  * need to be acted upon in a soft real-time fashion.
68  *
69  * Command Handling
70  * ----------------
71  *
72  * Commands are issued to a CCID reader on a Bulk-OUT pipe. Responses are
73  * generated as a series of one or more messages on a Bulk-IN pipe. To correlate
74  * these commands a sequence number is used. This sequence number is one byte
75  * and can be in the range [ CCID_SEQ_MIN, CCID_SEQ_MAX ]. To keep track of the
76  * allocated IDs we leverage an ID space.
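 *
 * For example (a sketch of the setup, not the verbatim attach code, and the
 * name passed to id_space_create() is illustrative), such an ID space is
 * created and used as follows, with id_alloc_nosleep() returning -1 once
 * the space is exhausted:
 *
 *	id_space_t *ids = id_space_create("ccid_seqs", CCID_SEQ_MIN,
 *	    CCID_SEQ_MAX + 1);
 *	id_t seq = id_alloc_nosleep(ids);
 *	...
 *	id_free(ids, seq);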
77  *
78  * A CCID reader contains a number of slots. Each slot can be addressed
79  * separately as each slot represents a separate place that a card may be
80  * inserted or not. A given slot may only have a single outstanding command. A
81  * given CCID reader may only have a number of commands outstanding to the CCID
82  * device as a whole based on a value in the class descriptor (see the
83  * ccd_bMaxCCIDBusySlots member of the ccid_class_descr_t).
84  *
85  * To simplify the driver, we only support issuing a single command to a CCID
86  * reader at any given time. All commands that are outstanding are queued in a
87  * per-device list ccid_command_queue. The head of the queue is the current
88  * command that we believe is outstanding to the reader or will be shortly. The
89  * command is issued by sending a Bulk-OUT request with a CCID header. Once we
90  * have the Bulk-OUT request acknowledged, we schedule a Bulk-IN request to
91  * receive the response from the controller. Once the Bulk-IN reply arrives,
92  * we complete the command and proceed to the next. This is summarized in the
93  * following state machine:
94  *
95  *              +-----------------------------------------------------+
96  *              |                                                     |
97  *              |                        ccid_command_queue           |
98  *              |                    +---+---+---------+---+---+      |
99  *              v                    | h |   |         |   | t |      |
100  *  +-------------------------+      | e |   |         |   | a |      |
101  *  | ccid_command_dispatch() |<-----| a |   |   ...   |   | i |      |
102  *  +-----------+-------------+      | d |   |         |   | l |      |
103  *              |                    +---+---+---------+---+---+      |
104  *              v                                                     |
105  *  +-------------------------+      +-------------------------+      |
106  *  | usb_pipe_bulk_xfer()    |----->| ccid_dispatch_bulk_cb() |      |
107  *  | ccid_bulkout_pipe       |      +------------+------------+      |
108  *  +-------------------------+                   |                   |
109  *                                                |                   |
110  *              |                                 v                   |
111  *              |                    +-------------------------+      |
112  *              |                    | ccid_bulkin_schedule()  |      |
113  *              v                    +------------+------------+      |
114  *                                                |                   |
115  *     /--------------------\                     |                   |
116  *    /                      \                    v                   |
117  *    |  ###    CCID HW      |       +-------------------------+      |
118  *    |  ###                 |       | usb_pipe_bulk_xfer()    |      |
119  *    |                      | ----> | ccid_bulkin_pipe        |      |
120  *    |                      |       +------------+------------+      |
121  *    \                      /                    |                   |
122  *     \--------------------/                     |                   |
123  *                                                v                   |
124  *                                   +-------------------------+      |
125  *                                   | ccid_reply_bulk_cb()    |      |
126  *                                   +------------+------------+      |
127  *                                                |                   |
128  *                                                |                   |
129  *                                                v                   |
130  *                                   +-------------------------+      |
131  *                                   | ccid_command_complete() +------+
132  *                                   +-------------------------+
133  *
134  *
135  * APDU and TPDU Processing and Parameter Selection
136  * ------------------------------------------------
137  *
138  * Readers provide four different modes for us to be able to transmit data to
139  * and from the card. These are:
140  *
141  *   1. Character Mode
142  *   2. TPDU Mode
143  *   3. Short APDU Mode
144  *   4. Extended APDU Mode
145  *
146  * Readers either support mode 1, mode 2, mode 3, or modes 3 and 4. All readers
147  * that support extended APDUs support short APDUs. At this time, we do not
148  * support character mode or TPDU mode, and we use only short APDUs even for
149  * readers that support extended APDUs.
150  *
151  * The ICC and the reader need to be in agreement in order for them to be able
152  * to exchange information. The ICC indicates what it supports by replying to a
153  * power on command with an ATR (answer to reset). This data can be parsed to
154  * indicate which of two protocols the ICC supports. These protocols are
155  * referred to as:
156  *
157  *  o T=0
158  *  o T=1
159  *
160  * These protocols are defined in the ISO/IEC 7816-3:2006 specification. When a
161  * reader supports an APDU mode, then the driver does not have to worry about
162  * the underlying protocol and can just send an application protocol data
163  * unit (APDU). Otherwise, the driver must take the APDU and transform it
164  * into the form required by the corresponding protocol.
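 *
 * For reference, a short APDU is simply a byte string: a four byte header
 * (CLA, INS, P1, P2), optionally followed by a length and a payload. For
 * example, an ISO/IEC 7816-4 SELECT-by-AID command with a 5-byte AID might
 * be encoded as:
 *
 *	0x00 0xA4 0x04 0x00 0x05 0xA0 0x00 0x00 0x03 0x08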
165  *
166  * There are several parameters that need to be negotiated to ensure that the
167  * protocols work correctly. To negotiate these parameters and to select a
168  * protocol, the driver must construct a PPS (protocol and parameters structure)
169  * request and exchange that with the ICC. A reader may optionally take care of
170  * performing this and indicates its support for this in the dwFeatures member of
171  * the USB class descriptor.
172  *
173  * In addition, the reader itself must often be told of these configuration
174  * changes through the means of a CCID_REQUEST_SET_PARAMS command. Once both of
175  * these have been performed, the reader and ICC can communicate to their
176  * hearts' content.
177  *
178  * Both the negotiation and the setting of the parameters can be performed
179  * automatically by the CCID reader. When the reader supports APDU exchanges,
180  * then it must support some aspects of this negotiation. Because of that, we
181  * never consider performing this and only support readers that offer this kind
182  * of automation.
183  *
184  * User I/O Basics
185  * ---------------
186  *
187  * A user performs I/O by writing APDUs (Application Protocol Data Units). A
188  * user issues a system call that ends up in write(9E) (write(2), writev(2),
189  * pwrite(2), pwritev(2), etc.). The user data is consumed by the CCID driver
190  * and a series of commands will then be issued to the device, depending on the
191  * protocol mode. The write(9E) call does not block for this to finish. Once
192  * write(9E) has returned, the user may block in a read(2) related system call
193  * or poll for POLLIN.
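 *
 * For example, a consumer of this driver might do the following (the device
 * path is illustrative and error handling is elided):
 *
 *	int fd = open("/dev/ccid/ccid0/slot0", O_RDWR);
 *	(void) write(fd, apdu, apdulen);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	(void) poll(&pfd, 1, -1);
 *	ssize_t len = read(fd, reply, sizeof (reply));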
194  *
195  * A thread may not call read(9E) without having called write(9E). This model is
196  * due to the limited capability of the hardware. Only a single command can be
197  * outstanding on a given slot, and because many commands change the hardware
198  * state, we do not try to multiplex multiple calls to write() or read().
199  *
200  *
201  * User I/O, Transaction Ends, ICC removals, and Reader Removals
202  * -------------------------------------------------------------
203  *
204  * While the I/O model given to user land is somewhat simple, there are a lot of
205  * tricky pieces to get right because we are in a multi-threaded pre-emptible
206  * system. In general, there are four different levels of state that we need to
207  * keep track of:
208  *
209  *   1. User threads in I/O
210  *   2. Kernel protocol level support (T=1, apdu, etc.).
211  *   3. Slot/ICC state
212  *   4. CCID Reader state
213  *
214  * Of course, each level cares about the state above it. The kernel protocol
215  * level state (2) cares about the User threads in I/O (1). The same is true
216  * with the other levels, each caring about the levels above it. With this in
217  * mind, there are three non-data-path things that can go wrong:
218  *
219  *   A. The user can end a transaction (whether through an ioctl or close(9E)).
220  *   B. The ICC can be removed
221  *   C. The CCID device can be removed or reset at a USB level.
222  *
223  * Each of these has implications on the outstanding I/O and other states of
224  * the world. When events of type A occur, we need to clean up states 1 and 2.
225  * When events of type B occur, we need to clean up states 1-3. When events of
226  * type C occur we need to clean up states 1-4. The following discusses how we
227  * should clean up these different states:
228  *
229  * Cleaning up State 1:
230  *
231  *   To clean up the User threads in I/O there are three different cases to
232  *   consider. The first is cleaning up a thread that is in the middle of
233  * write(9E). The second is cleaning up a thread that is blocked in read(9E).
234  *   The third is dealing with threads that are stuck in chpoll(9E).
235  *
236  *   To handle the write case, we have a series of flags that is on the CCID
237  *   slot's I/O structure (ccid_io_t, cs_io on the ccid_slot_t). When a thread
238  *   begins its I/O it will set the CCID_IO_F_PREPARING flag. This flag is used
239  *   to indicate that there is a thread that is performing a write(9E), but it
240  *   is not holding the ccid_mutex because of the operations that it is taking.
241  *   Once it has finished, the thread will remove that flag and instead
242  *   CCID_IO_F_IN_PROGRESS will be set. If we find that the CCID_IO_F_PREPARING
243  *   flag is set, then we will need to wait for it to be removed before
244  *   continuing. The fact that there is an outstanding physical I/O will be
245  *   dealt with when we clean up state 2.
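 *
 *   In rough code form, the write side of this dance looks like the
 *   following sketch (the actual command submission is elided):
 *
 *	slot->cs_io.ci_flags |= CCID_IO_F_PREPARING;
 *	mutex_exit(&ccid->ccid_mutex);
 *	(submit the protocol-level command without holding ccid_mutex)
 *	mutex_enter(&ccid->ccid_mutex);
 *	slot->cs_io.ci_flags &= ~CCID_IO_F_PREPARING;
 *	slot->cs_io.ci_flags |= CCID_IO_F_IN_PROGRESS;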
246  *
247  *   To handle the read case, we have a flag on the ccid_minor_t which indicates
248  *   that a thread is blocked on a condition variable (cm_read_cv), waiting for
249  *   the I/O to complete. The way this gets cleaned up varies a bit on each of
250  *   the different cases as each one will trigger a different error to the
251  *   thread. In all cases, the condition variable will be signaled. Then,
252  *   whenever the thread comes out of the condition variable it will always
253  *   check the state to see if it has been woken up because the transaction is
254  *   being closed, the ICC has been removed, or the reader is being
255  *   disconnected. In all such cases, the thread in read will end up receiving
256  *   an error (ECANCELED, ENXIO, and ENODEV respectively).
257  *
258  *   If we have hit the case that this needs to be cleaned up, then the
259  *   CCID_MINOR_F_READ_WAITING flag will be set on the ccid_minor_t's flags
260  *   member (cm_flags). In this case, the broader system must change the
261  *   corresponding system state flag for the appropriate condition, signal the
262  * read cv, and then wait on an additional cv in the minor, the
263  * cm_iowait_cv.
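 *
 *   In rough code form, the cleanup side of this handshake is (a sketch;
 *   the real logic lives in the reset and removal paths):
 *
 *	slot->cs_flags |= CCID_SLOT_F_NEED_TXN_RESET;	(or similar state)
 *	cv_signal(&cmp->cm_read_cv);
 *	while ((cmp->cm_flags & CCID_MINOR_F_READ_WAITING) != 0)
 *		cv_wait(&cmp->cm_iowait_cv, &ccid->ccid_mutex);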
264  *
265  *   Cleaning up the poll state is somewhat simpler. If any of the conditions
266  *   (A-C) occur, then we must flag POLLERR. In addition if B and C occur, then
267  *   we will flag POLLHUP at the same time. This will guarantee that any threads
268  *   in poll(9E) are woken up.
269  *
270  * Cleaning up State 2.
271  *
272  *   While the user I/O thread state is somewhat straightforward, the kernel
273  *   protocol level is a bit more complicated. The core problem is that when a
274  *   user issues a logical I/O through an APDU, that may result in a series of
275  *   one or more protocol-level physical commands. The crux of the issue
276  *   with cleaning up this state is twofold:
277  *
278  *     1. We don't want to block a user thread while I/O is outstanding
279  *     2. We need to take one of several steps to clean up the aforementioned
280  *        I/O
281  *
282  *   To deal with that, we do a number of different things. The first is to
283  *   clean up the user state as described in cleaning up State 1 above.
284  *   Importantly, we need to _block_ on this
285  *   activity.
286  *
287  *   Once that is done, we need to proceed to step 2. Since we're doing only
288  *   APDU processing, this is as simple as waiting for that command to complete
289  *   and/or potentially issue an abort or reset.
290  *
291  *   While this is ongoing an additional flag (CCID_SLOT_F_NEED_IO_TEARDOWN)
292  *   will be set on the slot to make sure that we know that we can't issue new
293  *   I/O or that we can't proceed to the next transaction until this phase is
294  *   finished.
295  *
296  * Cleaning up State 3
297  *
298  *   When the ICC is removed, this is not dissimilar to the previous states. To
299  *   handle this we need to first make sure that state 1 and state 2 are
300  *   finished being cleaned up. We will have to _block_ on this from the worker
301  *   thread. The problem is that we have certain values such as the operations
302  *   vector, the ATR data, etc. that we need to make sure are still valid while
303  *   we're in the process of cleaning up state. Once all that is done and the
304  *   worker thread proceeds we will consider processing a new ICC insertion.
305  *   The one good side is that if the ICC was removed, then it should be simpler
306  *   to handle all of the outstanding I/O.
307  *
308  * Cleaning up State 4
309  *
310  *   When the reader is removed, then we need to clean up all the prior states.
311  *   However, this is somewhat simpler than the other cases, as once this
312  *   happens our detach(9E) entry point will be called to clean up all of our
313  *   resources. Therefore, before detach runs, we need to explicitly clean up
314  *   state 1; however, we leave all the remaining state to be
315  *   cleaned up during detach(9E) as part of normal tear down.
316  */
317 
318 #include <sys/modctl.h>
319 #include <sys/errno.h>
320 #include <sys/conf.h>
321 #include <sys/ddi.h>
322 #include <sys/sunddi.h>
323 #include <sys/cmn_err.h>
324 #include <sys/sysmacros.h>
325 #include <sys/stream.h>
326 #include <sys/strsun.h>
327 #include <sys/strsubr.h>
328 #include <sys/filio.h>
329 
330 #define	USBDRV_MAJOR_VER	2
331 #define	USBDRV_MINOR_VER	0
332 #include <sys/usb/usba.h>
333 #include <sys/usb/usba/usbai_private.h>
334 #include <sys/usb/clients/ccid/ccid.h>
335 #include <sys/usb/clients/ccid/uccid.h>
336 
337 #include <atr.h>
338 
339 /*
340  * Set the amount of parallelism we'll want to have from kernel threads which
341  * are processing CCID requests. This is used to size the number of asynchronous
342  * requests in the pipe policy. A single command can only ever be outstanding to
343  * a single slot. While multiple slots could potentially be
344  * scheduled in parallel, we don't actually support this and
345  * we'll only ever issue a single command. This basically covers the ability to
346  * have some other asynchronous operation outstanding if needed.
347  */
348 #define	CCID_NUM_ASYNC_REQS	2
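
/*
 * A sketch of how this constant feeds a pipe policy when opening one of the
 * bulk pipes (not the verbatim attach code; error handling is elided):
 *
 *	usb_pipe_policy_t policy;
 *
 *	bzero(&policy, sizeof (policy));
 *	policy.pp_max_async_reqs = CCID_NUM_ASYNC_REQS;
 *	(void) usb_pipe_xopen(ccid->ccid_dip, &ccid->ccid_bulkin_xdesc,
 *	    &policy, USB_FLAGS_SLEEP, &ccid->ccid_bulkin_pipe);
 */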
349 
350 /*
351  * This is the number of Bulk-IN requests that we will have cached per CCID
352  * device. While many commands will generate a single response, the commands
353  * also have the ability to generate time extensions, which means that we'll
354  * want to be able to schedule another Bulk-IN request immediately. If we run
355  * out, we will attempt to refill said cache and will not fail commands
356  * needlessly.
357  */
358 #define	CCID_BULK_NALLOCED		16
359 
360 /*
361  * These are the timeouts in seconds for Bulk-OUT and Bulk-IN commands to run and be submitted.
362  */
363 #define	CCID_BULK_OUT_TIMEOUT	5
364 #define	CCID_BULK_IN_TIMEOUT	5
365 
366 /*
367  * There are two different Interrupt-IN packets that we might receive. The
368  * first, RDR_to_PC_HardwareError, is a fixed four byte packet. However, the
369  * other one, RDR_to_PC_NotifySlotChange, varies in size as it has two bits per
370  * potential slot plus one byte that's always used. The maximum number of slots
371  * in a device is 256. This means there can be up to 64 bytes worth of data plus
372  * the extra byte, so 65 bytes.
373  */
374 #define	CCID_INTR_RESPONSE_SIZE	65
375 
376 /*
377  * Minimum and maximum minor ids. We treat the maximum valid 32-bit minor as
378  * what we can use due to issues in some file systems and the minors that they
379  * can use. We reserve zero as an invalid minor number to make it easier to
380  * tell if things have been initialized or not.
381  */
382 #define	CCID_MINOR_MIN		1
383 #define	CCID_MINOR_MAX		MAXMIN32
384 #define	CCID_MINOR_INVALID	0
385 
386 /*
387  * This value represents the minimum size value that we require in the CCID
388  * class descriptor's dwMaxCCIDMessageLength member. We arrived at 64 bytes
389  * based on the size of a full-speed bulk transfer packet, as many CCID
390  * devices operate at that speed. The specification does require that the
391  * minimum size of the dwMaxCCIDMessageLength member is at least the size of its
392  * bulk endpoint packet size.
393  */
394 #define	CCID_MIN_MESSAGE_LENGTH	64
395 
396 /*
397  * Required forward declarations.
398  */
399 struct ccid;
400 struct ccid_slot;
401 struct ccid_minor;
402 struct ccid_command;
403 
404 /*
405  * This structure is used to map between the global set of minor numbers and the
406  * things represented by them.
407  *
408  * We have two different kinds of minor nodes. The first are CCID slots. The
409  * second are cloned opens of those slots. Each of these items has a
410  * ccid_minor_idx_t embedded in them that is used to index them in an AVL tree.
411  * Given that the number of entries that should be present here is unlikely to
412  * be terribly large at any given time, it is hoped that an AVL tree will
413  * suffice for now.
414  */
415 typedef struct ccid_minor_idx {
416 	id_t cmi_minor;
417 	avl_node_t cmi_avl;
418 	boolean_t cmi_isslot;
419 	union {
420 		struct ccid_slot *cmi_slot;
421 		struct ccid_minor *cmi_user;
422 	} cmi_data;
423 } ccid_minor_idx_t;
424 
425 typedef enum ccid_minor_flags {
426 	CCID_MINOR_F_WAITING		= 1 << 0,
427 	CCID_MINOR_F_HAS_EXCL		= 1 << 1,
428 	CCID_MINOR_F_TXN_RESET		= 1 << 2,
429 	CCID_MINOR_F_READ_WAITING	= 1 << 3,
430 	CCID_MINOR_F_WRITABLE		= 1 << 4,
431 } ccid_minor_flags_t;
432 
433 typedef struct ccid_minor {
434 	ccid_minor_idx_t	cm_idx;		/* write-once */
435 	cred_t			*cm_opener;	/* write-once */
436 	struct ccid_slot	*cm_slot;	/* write-once */
437 	list_node_t		cm_minor_list;
438 	list_node_t		cm_excl_list;
439 	kcondvar_t		cm_read_cv;
440 	kcondvar_t		cm_iowait_cv;
441 	kcondvar_t		cm_excl_cv;
442 	ccid_minor_flags_t	cm_flags;
443 	struct pollhead		cm_pollhead;
444 } ccid_minor_t;
445 
446 typedef enum ccid_slot_flags {
447 	CCID_SLOT_F_CHANGED		= 1 << 0,
448 	CCID_SLOT_F_INTR_GONE		= 1 << 1,
449 	CCID_SLOT_F_INTR_ADD		= 1 << 2,
450 	CCID_SLOT_F_PRESENT		= 1 << 3,
451 	CCID_SLOT_F_ACTIVE		= 1 << 4,
452 	CCID_SLOT_F_NEED_TXN_RESET	= 1 << 5,
453 	CCID_SLOT_F_NEED_IO_TEARDOWN	= 1 << 6,
454 	CCID_SLOT_F_INTR_OVERCURRENT	= 1 << 7,
455 } ccid_slot_flags_t;
456 
457 #define	CCID_SLOT_F_INTR_MASK	(CCID_SLOT_F_CHANGED | CCID_SLOT_F_INTR_GONE | \
458     CCID_SLOT_F_INTR_ADD)
459 #define	CCID_SLOT_F_WORK_MASK	(CCID_SLOT_F_INTR_MASK | \
460     CCID_SLOT_F_NEED_TXN_RESET | CCID_SLOT_F_INTR_OVERCURRENT)
461 #define	CCID_SLOT_F_NOEXCL_MASK	(CCID_SLOT_F_NEED_TXN_RESET | \
462     CCID_SLOT_F_NEED_IO_TEARDOWN)
463 
464 typedef void (*icc_init_func_t)(struct ccid *, struct ccid_slot *);
465 typedef int (*icc_transmit_func_t)(struct ccid *, struct ccid_slot *);
466 typedef void (*icc_complete_func_t)(struct ccid *, struct ccid_slot *,
467     struct ccid_command *);
468 typedef void (*icc_teardown_func_t)(struct ccid *, struct ccid_slot *, int);
469 typedef void (*icc_fini_func_t)(struct ccid *, struct ccid_slot *);
470 
471 typedef struct ccid_icc {
472 	atr_data_t		*icc_atr_data;
473 	atr_protocol_t		icc_protocols;
474 	atr_protocol_t		icc_cur_protocol;
475 	ccid_params_t		icc_params;
476 	icc_init_func_t		icc_init;
477 	icc_transmit_func_t	icc_tx;
478 	icc_complete_func_t	icc_complete;
479 	icc_teardown_func_t	icc_teardown;
480 	icc_fini_func_t		icc_fini;
481 } ccid_icc_t;
482 
483 /*
484  * Structure used to track and map I/O requests. This may need to change as
485  * we develop the T=0 and T=1 code.
486  */
487 typedef enum ccid_io_flags {
488 	/*
489 	 * This flag is used during the period that a thread has started calling
490 	 * into ccid_write(9E), but before it has finished queuing up the write.
491 	 * This blocks POLLOUT and any other thread entering write.
492 	 */
493 	CCID_IO_F_PREPARING	= 1 << 0,
494 	/*
495 	 * This flag is used once a ccid_write() ICC tx function has
496 	 * successfully completed. While this is set, the device is not
497 	 * writable; however, it is legal to call ccid_read() and block. This
498 	 * flag will remain set until the actual write is done. This indicates
499 	 * that the transmission protocol has finished.
500 	 */
501 	CCID_IO_F_IN_PROGRESS	= 1 << 1,
502 	/*
503 	 * This flag is used to indicate that the logical I/O has completed in
504 	 * one way or the other and that a reader can consume data. When this
505 	 * flag is set, then POLLIN | POLLRDNORM should be signaled. Until the
506 	 * I/O is consumed via ccid_read(), calls to ccid_write() will fail with
507 	 * EBUSY. When this flag is set, the kernel protocol level should be
508 	 * idle and it should be safe to tear down.
509 	 */
510 	CCID_IO_F_DONE		= 1 << 2,
511 } ccid_io_flags_t;
512 
513 /*
514  * If any of the flags in the POLLOUT group are set, then the device is not
515  * writable. The same distinction isn't true for POLLIN. We are only readable
516  * if CCID_IO_F_DONE is set. However, you are allowed to call read as soon as
517  * CCID_IO_F_IN_PROGRESS is set.
518  */
519 #define	CCID_IO_F_POLLOUT_FLAGS	(CCID_IO_F_PREPARING | CCID_IO_F_IN_PROGRESS | \
520     CCID_IO_F_DONE)
521 #define	CCID_IO_F_ALL_FLAGS	(CCID_IO_F_PREPARING | CCID_IO_F_IN_PROGRESS | \
522     CCID_IO_F_DONE | CCID_IO_F_ABANDONED)
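
/*
 * A sketch of how these masks translate into poll events (the actual logic
 * lives in the driver's chpoll(9E) entry point, which also accounts for
 * exclusivity and device removal):
 *
 *	short revents = 0;
 *
 *	if ((io->ci_flags & CCID_IO_F_POLLOUT_FLAGS) == 0)
 *		revents |= POLLOUT;
 *	if ((io->ci_flags & CCID_IO_F_DONE) != 0)
 *		revents |= POLLIN | POLLRDNORM;
 */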
523 
524 typedef struct ccid_io {
525 	ccid_io_flags_t	ci_flags;
526 	size_t		ci_ilen;
527 	uint8_t		ci_ibuf[CCID_APDU_LEN_MAX];
528 	mblk_t		*ci_omp;
529 	kcondvar_t	ci_cv;
530 	struct ccid_command *ci_command;
531 	int		ci_errno;
532 	mblk_t		*ci_data;
533 } ccid_io_t;
534 
535 typedef struct ccid_slot {
536 	ccid_minor_idx_t	cs_idx;		/* WO */
537 	uint_t			cs_slotno;	/* WO */
538 	struct ccid		*cs_ccid;	/* WO */
539 	ccid_slot_flags_t	cs_flags;
540 	ccid_class_voltage_t	cs_voltage;
541 	mblk_t			*cs_atr;
542 	struct ccid_command	*cs_command;
543 	ccid_minor_t		*cs_excl_minor;
544 	list_t			cs_excl_waiters;
545 	list_t			cs_minors;
546 	ccid_icc_t		cs_icc;
547 	ccid_io_t		cs_io;
548 } ccid_slot_t;
549 
550 typedef enum ccid_attach_state {
551 	CCID_ATTACH_USB_CLIENT	= 1 << 0,
552 	CCID_ATTACH_MUTEX_INIT	= 1 << 1,
553 	CCID_ATTACH_TASKQ	= 1 << 2,
554 	CCID_ATTACH_CMD_LIST	= 1 << 3,
555 	CCID_ATTACH_OPEN_PIPES	= 1 << 4,
556 	CCID_ATTACH_SEQ_IDS	= 1 << 5,
557 	CCID_ATTACH_SLOTS	= 1 << 6,
558 	CCID_ATTACH_HOTPLUG_CB	= 1 << 7,
559 	CCID_ATTACH_INTR_ACTIVE	= 1 << 8,
560 	CCID_ATTACH_MINORS	= 1 << 9,
561 } ccid_attach_state_t;
562 
563 typedef enum ccid_flags {
564 	CCID_F_HAS_INTR		= 1 << 0,
565 	CCID_F_NEEDS_PPS	= 1 << 1,
566 	CCID_F_NEEDS_PARAMS	= 1 << 2,
567 	CCID_F_NEEDS_DATAFREQ	= 1 << 3,
568 	CCID_F_DETACHING	= 1 << 5,
569 	CCID_F_WORKER_REQUESTED	= 1 << 6,
570 	CCID_F_WORKER_RUNNING	= 1 << 7,
571 	CCID_F_DISCONNECTED	= 1 << 8
572 } ccid_flags_t;
573 
574 #define	CCID_F_DEV_GONE_MASK	(CCID_F_DETACHING | CCID_F_DISCONNECTED)
575 #define	CCID_F_WORKER_MASK	(CCID_F_WORKER_REQUESTED | \
576     CCID_F_WORKER_RUNNING)
577 
578 typedef struct ccid_stats {
579 	uint64_t	cst_intr_errs;
580 	uint64_t	cst_intr_restart;
581 	uint64_t	cst_intr_unknown;
582 	uint64_t	cst_intr_slot_change;
583 	uint64_t	cst_intr_hwerr;
584 	uint64_t	cst_intr_inval;
585 	uint64_t	cst_ndiscover;
586 	hrtime_t	cst_lastdiscover;
587 } ccid_stats_t;
588 
589 typedef struct ccid {
590 	dev_info_t		*ccid_dip;
591 	kmutex_t		ccid_mutex;
592 	ccid_attach_state_t	ccid_attach;
593 	ccid_flags_t		ccid_flags;
594 	id_space_t		*ccid_seqs;
595 	ddi_taskq_t		*ccid_taskq;
596 	usb_client_dev_data_t	*ccid_dev_data;
597 	ccid_class_descr_t	ccid_class;		/* WO */
598 	usb_ep_xdescr_t		ccid_bulkin_xdesc;	/* WO */
599 	usb_pipe_handle_t	ccid_bulkin_pipe;	/* WO */
600 	usb_ep_xdescr_t		ccid_bulkout_xdesc;	/* WO */
601 	usb_pipe_handle_t	ccid_bulkout_pipe;	/* WO */
602 	usb_ep_xdescr_t		ccid_intrin_xdesc;	/* WO */
603 	usb_pipe_handle_t	ccid_intrin_pipe;	/* WO */
604 	usb_pipe_handle_t	ccid_control_pipe;	/* WO */
605 	uint_t			ccid_nslots;		/* WO */
606 	size_t			ccid_bufsize;		/* WO */
607 	ccid_slot_t		*ccid_slots;
608 	timeout_id_t		ccid_poll_timeout;
609 	ccid_stats_t		ccid_stats;
610 	list_t			ccid_command_queue;
611 	list_t			ccid_complete_queue;
612 	usb_bulk_req_t		*ccid_bulkin_cache[CCID_BULK_NALLOCED];
613 	uint_t			ccid_bulkin_alloced;
614 	usb_bulk_req_t		*ccid_bulkin_dispatched;
615 } ccid_t;
616 
617 /*
618  * Command structure for an individual CCID command that we issue to a
619  * controller. Note that the command caches a copy of some of the data that's
620  * normally inside the CCID header in host-endian fashion.
621  */
622 typedef enum ccid_command_state {
623 	CCID_COMMAND_ALLOCATED	= 0x0,
624 	CCID_COMMAND_QUEUED,
625 	CCID_COMMAND_DISPATCHED,
626 	CCID_COMMAND_REPLYING,
627 	CCID_COMMAND_COMPLETE,
628 	CCID_COMMAND_TRANSPORT_ERROR,
629 	CCID_COMMAND_CCID_ABORTED
630 } ccid_command_state_t;
631 
632 typedef enum ccid_command_flags {
633 	CCID_COMMAND_F_USER	= 1 << 0,
634 } ccid_command_flags_t;
635 
636 typedef struct ccid_command {
637 	list_node_t		cc_list_node;
638 	kcondvar_t		cc_cv;
639 	uint8_t			cc_mtype;
640 	ccid_response_code_t	cc_rtype;
641 	uint8_t			cc_slot;
642 	ccid_command_state_t	cc_state;
643 	ccid_command_flags_t	cc_flags;
644 	int			cc_usb;
645 	usb_cr_t		cc_usbcr;
646 	size_t			cc_reqlen;
647 	id_t			cc_seq;
648 	usb_bulk_req_t		*cc_ubrp;
649 	ccid_t			*cc_ccid;
650 	hrtime_t		cc_queue_time;
651 	hrtime_t		cc_dispatch_time;
652 	hrtime_t		cc_dispatch_cb_time;
653 	hrtime_t		cc_response_time;
654 	hrtime_t		cc_completion_time;
655 	mblk_t			*cc_response;
656 } ccid_command_t;
657 
658 /*
659  * ddi_soft_state(9F) pointer. This is used for instances of a CCID controller.
660  */
661 static void *ccid_softstate;
662 
663 /*
664  * This is used to keep track of our minor nodes.
665  */
666 static kmutex_t ccid_idxlock;
667 static avl_tree_t ccid_idx;
668 static id_space_t *ccid_minors;
669 
670 /*
671  * Required Forwards
672  */
673 static void ccid_intr_poll_init(ccid_t *);
674 static void ccid_worker_request(ccid_t *);
675 static void ccid_command_dispatch(ccid_t *);
676 static void ccid_command_free(ccid_command_t *);
677 static int ccid_bulkin_schedule(ccid_t *);
678 static void ccid_command_bcopy(ccid_command_t *, const void *, size_t);
679 
680 static int ccid_write_apdu(ccid_t *, ccid_slot_t *);
681 static void ccid_complete_apdu(ccid_t *, ccid_slot_t *, ccid_command_t *);
682 static void ccid_teardown_apdu(ccid_t *, ccid_slot_t *, int);
683 
684 
685 static int
686 ccid_idx_comparator(const void *l, const void *r)
687 {
688 	const ccid_minor_idx_t *lc = l, *rc = r;
689 
690 	if (lc->cmi_minor > rc->cmi_minor)
691 		return (1);
692 	if (lc->cmi_minor < rc->cmi_minor)
693 		return (-1);
694 	return (0);
695 }
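
/*
 * The index tree and minor ID space above are initialized once at module
 * load, roughly as follows (a sketch; the real code lives in the module's
 * _init(9E) routine):
 *
 *	avl_create(&ccid_idx, ccid_idx_comparator, sizeof (ccid_minor_idx_t),
 *	    offsetof(ccid_minor_idx_t, cmi_avl));
 *	ccid_minors = id_space_create("ccid_minors", CCID_MINOR_MIN,
 *	    CCID_MINOR_MAX + 1);
 */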
696 
697 static void
698 ccid_error(ccid_t *ccid, const char *fmt, ...)
699 {
700 	va_list ap;
701 
702 	va_start(ap, fmt);
703 	if (ccid != NULL) {
704 		vdev_err(ccid->ccid_dip, CE_WARN, fmt, ap);
705 	} else {
706 		vcmn_err(CE_WARN, fmt, ap);
707 	}
708 	va_end(ap);
709 }
710 
711 static void
712 ccid_minor_idx_free(ccid_minor_idx_t *idx)
713 {
714 	ccid_minor_idx_t *ip;
715 
716 	VERIFY3S(idx->cmi_minor, !=, CCID_MINOR_INVALID);
717 	mutex_enter(&ccid_idxlock);
718 	ip = avl_find(&ccid_idx, idx, NULL);
719 	VERIFY3P(idx, ==, ip);
720 	avl_remove(&ccid_idx, idx);
721 	id_free(ccid_minors, idx->cmi_minor);
722 	idx->cmi_minor = CCID_MINOR_INVALID;
723 	mutex_exit(&ccid_idxlock);
724 }
725 
726 static boolean_t
727 ccid_minor_idx_alloc(ccid_minor_idx_t *idx, boolean_t sleep)
728 {
729 	id_t id;
730 
731 	mutex_enter(&ccid_idxlock);
732 	if (sleep) {
733 		id = id_alloc(ccid_minors);
734 	} else {
735 		id = id_alloc_nosleep(ccid_minors);
736 	}
737 	if (id == -1) {
738 		mutex_exit(&ccid_idxlock);
739 		return (B_FALSE);
740 	}
741 	idx->cmi_minor = id;
742 	avl_add(&ccid_idx, idx);
743 	mutex_exit(&ccid_idxlock);
744 
745 	return (B_TRUE);
746 }
747 
748 static ccid_minor_idx_t *
749 ccid_minor_find(minor_t m)
750 {
751 	ccid_minor_idx_t i = { 0 };
752 	ccid_minor_idx_t *ret;
753 
754 	i.cmi_minor = m;
755 	mutex_enter(&ccid_idxlock);
756 	ret = avl_find(&ccid_idx, &i, NULL);
757 	mutex_exit(&ccid_idxlock);
758 
759 	return (ret);
760 }
761 
762 static ccid_minor_idx_t *
763 ccid_minor_find_user(minor_t m)
764 {
765 	ccid_minor_idx_t *idx;
766 
767 	idx = ccid_minor_find(m);
768 	if (idx == NULL) {
769 		return (NULL);
770 	}
771 	VERIFY0(idx->cmi_isslot);
772 	return (idx);
773 }
774 
775 static void
776 ccid_clear_io(ccid_io_t *io)
777 {
778 	freemsg(io->ci_data);
779 	io->ci_data = NULL;
780 	io->ci_errno = 0;
781 	io->ci_flags &= ~CCID_IO_F_DONE;
782 	io->ci_ilen = 0;
783 	bzero(io->ci_ibuf, sizeof (io->ci_ibuf));
784 }
785 
786 /*
787  * Check if the conditions are met to signal the next exclusive holder. For this
788  * to be true, there should be no one holding it. In addition, there must be
789  * someone in the queue waiting. Finally, we want to make sure that the ICC, if
790  * present, is in a state where it could handle these kinds of issues. That
791  * means that we shouldn't have an outstanding I/O or warm reset
792  * ongoing. However, we must not block this on the condition of an ICC being
793  * present. But, if the reader has been disconnected, don't signal anyone.
794  */
795 static void
796 ccid_slot_excl_maybe_signal(ccid_slot_t *slot)
797 {
798 	ccid_minor_t *cmp;
799 
800 	VERIFY(MUTEX_HELD(&slot->cs_ccid->ccid_mutex));
801 
802 	if ((slot->cs_ccid->ccid_flags & CCID_F_DISCONNECTED) != 0)
803 		return;
804 	if (slot->cs_excl_minor != NULL)
805 		return;
806 	if ((slot->cs_flags & CCID_SLOT_F_NOEXCL_MASK) != 0)
807 		return;
808 	cmp = list_head(&slot->cs_excl_waiters);
809 	if (cmp == NULL)
810 		return;
811 	cv_signal(&cmp->cm_excl_cv);
812 }
813 
814 static void
815 ccid_slot_excl_rele(ccid_slot_t *slot)
816 {
817 	ccid_minor_t *cmp;
818 	ccid_t *ccid = slot->cs_ccid;
819 
820 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
821 	VERIFY3P(slot->cs_excl_minor, !=, NULL);
822 
823 	cmp = slot->cs_excl_minor;
824 
825 	/*
826 	 * If we have an outstanding command left by the user when they've
827 	 * closed the slot, we need to clean up this command. We need to call
828 	 * the protocol specific handler here to determine what to do. If the
829 	 * command has completed, but the user has never called read, then it
830 	 * will simply clean it up. Otherwise it will indicate that there is
831 	 * some amount of external state still ongoing to take care of and clean
832 	 * up later.
833 	 */
834 	if (slot->cs_icc.icc_teardown != NULL) {
835 		slot->cs_icc.icc_teardown(ccid, slot, ECANCELED);
836 	}
837 
838 	/*
839 	 * There may either be a thread blocked in read or in the process of
840 	 * preparing a write. In either case, we need to make sure that they're
841 	 * woken up or finish, before we finish tear down.
842 	 */
843 	while ((cmp->cm_flags & CCID_MINOR_F_READ_WAITING) != 0 ||
844 	    (slot->cs_io.ci_flags & CCID_IO_F_PREPARING) != 0) {
845 		cv_wait(&cmp->cm_iowait_cv, &ccid->ccid_mutex);
846 	}
847 
848 	/*
849 	 * At this point, we hold the lock and there should be no other threads
850 	 * that are past the basic sanity checks. So at this point, note that
851 	 * this minor no longer has exclusive access (causing other read/write
852 	 * calls to fail) and start the process of cleaning up the outstanding
853 	 * I/O on the slot. It is OK that at this point the thread may try to
854 	 * obtain exclusive access again. It will end up blocking on everything
855 	 * else.
856 	 */
857 	cmp->cm_flags &= ~CCID_MINOR_F_HAS_EXCL;
858 	slot->cs_excl_minor = NULL;
859 
860 	/*
861 	 * If at this point, we have an I/O that's noted as being done, but no
862 	 * one blocked in read, then we need to clean that up. The ICC teardown
863 	 * function is only designed to take care of in-flight I/Os.
864 	 */
865 	if ((slot->cs_io.ci_flags & CCID_IO_F_DONE) != 0)
866 		ccid_clear_io(&slot->cs_io);
867 
868 	/*
869 	 * Regardless of whether anyone is polling, we need to go through and
870 	 * signal an error.
871 	 */
872 	pollwakeup(&cmp->cm_pollhead, POLLERR);
873 
874 	/*
875 	 * If we've been asked to reset the card before handing it off, schedule
876 	 * that. Otherwise, allow the next entry in the queue to get woken up
877 	 * and given access to the card.
878 	 */
879 	if ((cmp->cm_flags & CCID_MINOR_F_TXN_RESET) != 0) {
880 		slot->cs_flags |= CCID_SLOT_F_NEED_TXN_RESET;
881 		ccid_worker_request(ccid);
882 		cmp->cm_flags &= ~CCID_MINOR_F_TXN_RESET;
883 	} else {
884 		ccid_slot_excl_maybe_signal(slot);
885 	}
886 }
887 
888 static int
889 ccid_slot_excl_req(ccid_slot_t *slot, ccid_minor_t *cmp, boolean_t nosleep)
890 {
891 	VERIFY(MUTEX_HELD(&slot->cs_ccid->ccid_mutex));
892 
893 	if (slot->cs_excl_minor == cmp) {
894 		VERIFY((cmp->cm_flags & CCID_MINOR_F_HAS_EXCL) != 0);
895 		return (EEXIST);
896 	}
897 
898 	if ((cmp->cm_flags & CCID_MINOR_F_WAITING) != 0) {
899 		return (EINPROGRESS);
900 	}
901 
902 	/*
903 	 * If we were asked to try and fail quickly, do that before the main
904 	 * loop.
905 	 */
906 	if (nosleep && slot->cs_excl_minor != NULL &&
907 	    (slot->cs_flags & CCID_SLOT_F_NOEXCL_MASK) == 0) {
908 		return (EBUSY);
909 	}
910 
911 	/*
912 	 * Mark that we're waiting in case we race with another thread trying to
913 	 * claim exclusive access for this. Insert ourselves on the wait list.
914 	 * If for some reason we get a signal, then we can't know for certain if
915 	 * we had a signal / cv race. In such a case, we always wake up the
916 	 * next person in the queue (potentially spuriously).
917 	 */
918 	cmp->cm_flags |= CCID_MINOR_F_WAITING;
919 	list_insert_tail(&slot->cs_excl_waiters, cmp);
920 	while (slot->cs_excl_minor != NULL ||
921 	    (slot->cs_flags & CCID_SLOT_F_NOEXCL_MASK) != 0) {
922 		if (cv_wait_sig(&cmp->cm_excl_cv, &slot->cs_ccid->ccid_mutex) ==
923 		    0) {
924 			/*
925 			 * Remove ourselves from the list and try to signal the
926 			 * next thread.
927 			 */
928 			list_remove(&slot->cs_excl_waiters, cmp);
929 			cmp->cm_flags &= ~CCID_MINOR_F_WAITING;
930 			ccid_slot_excl_maybe_signal(slot);
931 			return (EINTR);
932 		}
933 
934 		/*
935 		 * Check if the reader is going away. If so, then we're done
936 		 * here.
937 		 */
938 		if ((slot->cs_ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
939 			list_remove(&slot->cs_excl_waiters, cmp);
940 			cmp->cm_flags &= ~CCID_MINOR_F_WAITING;
941 			return (ENODEV);
942 		}
943 	}
944 
945 	VERIFY0(slot->cs_flags & CCID_SLOT_F_NOEXCL_MASK);
946 	list_remove(&slot->cs_excl_waiters, cmp);
947 
948 	cmp->cm_flags &= ~CCID_MINOR_F_WAITING;
949 	cmp->cm_flags |= CCID_MINOR_F_HAS_EXCL;
950 	slot->cs_excl_minor = cmp;
951 	return (0);
952 }
953 
954 /*
955  * Check whether or not we're in a state that we can signal a POLLIN. To be able
956  * to signal a POLLIN (meaning that we can read) the following must be true:
957  *
958  *   o There is a client that has an exclusive hold open
959  *   o There is data which is readable by the client (an I/O is done).
960  *
961  * Unlike with pollout, we don't care about the state of the ICC.
962  */
963 static void
964 ccid_slot_pollin_signal(ccid_slot_t *slot)
965 {
966 	ccid_t *ccid = slot->cs_ccid;
967 	ccid_minor_t *cmp = slot->cs_excl_minor;
968 
969 	if (cmp == NULL)
970 		return;
971 
972 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
973 
974 	if ((slot->cs_io.ci_flags & CCID_IO_F_DONE) == 0)
975 		return;
976 
977 	pollwakeup(&cmp->cm_pollhead, POLLIN | POLLRDNORM);
978 }
979 
980 /*
981  * Check whether or not we're in a state that we can signal a POLLOUT. To be
982  * able to signal a POLLOUT (meaning that we can write) the following must be
983  * true:
984  *
985  *   o There is a minor which has an exclusive hold on the device
986  *   o There is no outstanding I/O activity going on, meaning that there is no
987  *     operation in progress and any write data has been consumed.
988  *   o There is an ICC present
989  *   o There is no outstanding I/O cleanup being done, whether a T=1 abort, a
990  *     warm reset, or something else.
991  */
992 static void
993 ccid_slot_pollout_signal(ccid_slot_t *slot)
994 {
995 	ccid_t *ccid = slot->cs_ccid;
996 	ccid_minor_t *cmp = slot->cs_excl_minor;
997 
998 	if (cmp == NULL)
999 		return;
1000 
1001 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
1002 
1003 	if ((slot->cs_io.ci_flags & CCID_IO_F_POLLOUT_FLAGS) != 0 ||
1004 	    (slot->cs_flags & CCID_SLOT_F_ACTIVE) == 0 ||
1005 	    (ccid->ccid_flags & CCID_F_DEV_GONE_MASK) != 0 ||
1006 	    (slot->cs_flags & CCID_SLOT_F_NEED_IO_TEARDOWN) != 0)
1007 		return;
1008 
1009 	pollwakeup(&cmp->cm_pollhead, POLLOUT);
1010 }
1011 
1012 static void
1013 ccid_slot_io_teardown_done(ccid_slot_t *slot)
1014 {
1015 	ccid_t *ccid = slot->cs_ccid;
1016 
1017 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
1018 	slot->cs_flags &= ~CCID_SLOT_F_NEED_IO_TEARDOWN;
1019 	cv_broadcast(&slot->cs_io.ci_cv);
1020 
1021 	ccid_slot_pollout_signal(slot);
1022 }
1023 
1024 /*
1025  * This will probably need to change when we start doing TPDU processing.
1026  */
1027 static size_t
1028 ccid_command_resp_length(ccid_command_t *cc)
1029 {
1030 	uint32_t len;
1031 	const ccid_header_t *cch;
1032 
1033 	VERIFY3P(cc, !=, NULL);
1034 	VERIFY3P(cc->cc_response, !=, NULL);
1035 
1036 	/*
1037 	 * Fetch out an arbitrarily aligned LE uint32_t value from the header.
1038 	 */
1039 	cch = (ccid_header_t *)cc->cc_response->b_rptr;
1040 	bcopy(&cch->ch_length, &len, sizeof (len));
1041 	len = LE_32(len);
1042 	return (len);
1043 }
1044 
1045 static uint8_t
1046 ccid_command_resp_param2(ccid_command_t *cc)
1047 {
1048 	const ccid_header_t *cch;
1049 	uint8_t val;
1050 
1051 	VERIFY3P(cc, !=, NULL);
1052 	VERIFY3P(cc->cc_response, !=, NULL);
1053 
1054 	cch = (ccid_header_t *)cc->cc_response->b_rptr;
1055 	bcopy(&cch->ch_param2, &val, sizeof (val));
1056 	return (val);
1057 }
1058 
1059 /*
1060  * Complete a single command. The way that a command completes depends on the
1061  * kind of command that occurs. If this command is flagged as a user command,
1062  * that implies that it must be handled in a different way from administrative
1063  * commands. User commands are placed into the minor to consume via a read(9E).
1064  * Non-user commands are placed into a completion queue and must be picked up
1065  * via the ccid_command_poll() interface.
1066  */
1067 static void
1068 ccid_command_complete(ccid_command_t *cc)
1069 {
1070 	ccid_t *ccid = cc->cc_ccid;
1071 
1072 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
1073 	cc->cc_completion_time = gethrtime();
1074 	list_remove(&ccid->ccid_command_queue, cc);
1075 
1076 	if (cc->cc_flags & CCID_COMMAND_F_USER) {
1077 		ccid_slot_t *slot;
1078 
1079 		slot = &ccid->ccid_slots[cc->cc_slot];
1080 		ASSERT3P(slot->cs_icc.icc_complete, !=, NULL);
1081 		slot->cs_icc.icc_complete(ccid, slot, cc);
1082 	} else {
1083 		list_insert_tail(&ccid->ccid_complete_queue, cc);
1084 		cv_broadcast(&cc->cc_cv);
1085 	}
1086 
1087 	/*
1088 	 * Finally, we also need to kick off the next command.
1089 	 */
1090 	ccid_command_dispatch(ccid);
1091 }
1092 
1093 static void
1094 ccid_command_state_transition(ccid_command_t *cc, ccid_command_state_t state)
1095 {
1096 	VERIFY(MUTEX_HELD(&cc->cc_ccid->ccid_mutex));
1097 
1098 	cc->cc_state = state;
1099 	cv_broadcast(&cc->cc_cv);
1100 }
1101 
1102 static void
1103 ccid_command_transport_error(ccid_command_t *cc, int usb_status, usb_cr_t cr)
1104 {
1105 	VERIFY(MUTEX_HELD(&cc->cc_ccid->ccid_mutex));
1106 
1107 	ccid_command_state_transition(cc, CCID_COMMAND_TRANSPORT_ERROR);
1108 	cc->cc_usb = usb_status;
1109 	cc->cc_usbcr = cr;
1110 	cc->cc_response = NULL;
1111 
1112 	ccid_command_complete(cc);
1113 }
1114 
1115 static void
1116 ccid_command_status_decode(ccid_command_t *cc,
1117     ccid_reply_command_status_t *comp, ccid_reply_icc_status_t *iccp,
1118     ccid_command_err_t *errp)
1119 {
1120 	ccid_header_t cch;
1121 	size_t mblen;
1122 
1123 	VERIFY3S(cc->cc_state, ==, CCID_COMMAND_COMPLETE);
1124 	VERIFY3P(cc->cc_response, !=, NULL);
1125 	mblen = msgsize(cc->cc_response);
1126 	VERIFY3U(mblen, >=, sizeof (cch));
1127 
1128 	bcopy(cc->cc_response->b_rptr, &cch, sizeof (cch));
1129 	if (comp != NULL) {
1130 		*comp = CCID_REPLY_STATUS(cch.ch_param0);
1131 	}
1132 
1133 	if (iccp != NULL) {
1134 		*iccp = CCID_REPLY_ICC(cch.ch_param0);
1135 	}
1136 
1137 	if (errp != NULL) {
1138 		*errp = cch.ch_param1;
1139 	}
1140 }
1141 
1142 static void
1143 ccid_reply_bulk_cb(usb_pipe_handle_t ph, usb_bulk_req_t *ubrp)
1144 {
1145 	size_t mlen;
1146 	ccid_t *ccid;
1147 	ccid_slot_t *slot;
1148 	ccid_header_t cch;
1149 	ccid_command_t *cc;
1150 
1151 	boolean_t header_valid = B_FALSE;
1152 
1153 	VERIFY(ubrp->bulk_data != NULL);
1154 	mlen = msgsize(ubrp->bulk_data);
1155 	ccid = (ccid_t *)ubrp->bulk_client_private;
1156 	mutex_enter(&ccid->ccid_mutex);
1157 
1158 	/*
1159 	 * Before we do anything else, we should mark that this Bulk-IN request
1160 	 * is no longer being dispatched.
1161 	 */
1162 	VERIFY3P(ubrp, ==, ccid->ccid_bulkin_dispatched);
1163 	ccid->ccid_bulkin_dispatched = NULL;
1164 
1165 	if ((cc = list_head(&ccid->ccid_command_queue)) == NULL) {
1166 		/*
1167 		 * This is certainly an odd case. This means that we got some
1168 		 * response but there are no entries in the queue. Go ahead and
1169 		 * free this. We're done here.
1170 		 */
1171 		mutex_exit(&ccid->ccid_mutex);
1172 		usb_free_bulk_req(ubrp);
1173 		return;
1174 	}
1175 
1176 	if (mlen >= sizeof (ccid_header_t)) {
1177 		bcopy(ubrp->bulk_data->b_rptr, &cch, sizeof (cch));
1178 		header_valid = B_TRUE;
1179 	}
1180 
1181 	/*
1182 	 * If the current command isn't in the replying state, then something is
1183 	 * clearly wrong and this probably isn't intended for the current
1184 	 * command. That said, if we have enough bytes, let's check the sequence
1185 	 * number as that might be indicative of a bug otherwise.
1186 	 */
1187 	if (cc->cc_state != CCID_COMMAND_REPLYING) {
1188 		if (header_valid) {
1189 			VERIFY3S(cch.ch_seq, !=, cc->cc_seq);
1190 		}
1191 		mutex_exit(&ccid->ccid_mutex);
1192 		usb_free_bulk_req(ubrp);
1193 		return;
1194 	}
1195 
1196 	/*
1197 	 * CCID section 6.2.7 says that if we get a short or zero length packet,
1198 	 * then we need to treat that as though the running command was aborted
1199 	 * for some reason. However, section 3.1.3 talks about sending zero
1200 	 * length packets on general principle.  To further complicate things,
1201 	 * we don't have the sequence number.
1202 	 *
1203 	 * If we have an outstanding command still, then we opt to treat the
1204 	 * zero length packet as an abort.
1205 	 */
1206 	if (!header_valid) {
1207 		ccid_command_state_transition(cc, CCID_COMMAND_CCID_ABORTED);
1208 		ccid_command_complete(cc);
1209 		mutex_exit(&ccid->ccid_mutex);
1210 		usb_free_bulk_req(ubrp);
1211 		return;
1212 	}
1213 
1214 	slot = &ccid->ccid_slots[cc->cc_slot];
1215 
1216 	/*
1217 	 * If the sequence or slot number don't match the head of the list or
1218 	 * the response type is unexpected for this command then we should be
1219 	 * very suspect of the hardware at this point. At a minimum we should
1220 	 * fail this command and issue a reset.
1221 	 */
1222 	if (cch.ch_seq != cc->cc_seq ||
1223 	    cch.ch_slot != cc->cc_slot ||
1224 	    cch.ch_mtype != cc->cc_rtype) {
1225 		ccid_command_state_transition(cc, CCID_COMMAND_CCID_ABORTED);
1226 		ccid_command_complete(cc);
1227 		slot->cs_flags |= CCID_SLOT_F_NEED_TXN_RESET;
1228 		ccid_worker_request(ccid);
1229 		mutex_exit(&ccid->ccid_mutex);
1230 		usb_free_bulk_req(ubrp);
1231 		return;
1232 	}
1233 
1234 	/*
1235 	 * Check that we have all the bytes that we were told we'd have. If we
1236 	 * don't, simulate this as an aborted command and issue a reset.
1237 	 */
1238 	if (LE_32(cch.ch_length) + sizeof (ccid_header_t) > mlen) {
1239 		ccid_command_state_transition(cc, CCID_COMMAND_CCID_ABORTED);
1240 		ccid_command_complete(cc);
1241 		slot->cs_flags |= CCID_SLOT_F_NEED_TXN_RESET;
1242 		ccid_worker_request(ccid);
1243 		mutex_exit(&ccid->ccid_mutex);
1244 		usb_free_bulk_req(ubrp);
1245 		return;
1246 	}
1247 
1248 	/*
1249 	 * This response is for us. Before we complete the command check to see
1250 	 * what the state of the command is. If the command indicates that more
1251 	 * time has been requested, then we need to schedule a new Bulk-IN
1252 	 * request.
1253 	 */
1254 	if (CCID_REPLY_STATUS(cch.ch_param0) == CCID_REPLY_STATUS_MORE_TIME) {
1255 		int ret;
1256 
1257 		ret = ccid_bulkin_schedule(ccid);
1258 		if (ret != USB_SUCCESS) {
1259 			ccid_command_transport_error(cc, ret, USB_CR_OK);
1260 			slot->cs_flags |= CCID_SLOT_F_NEED_TXN_RESET;
1261 			ccid_worker_request(ccid);
1262 		}
1263 		mutex_exit(&ccid->ccid_mutex);
1264 		usb_free_bulk_req(ubrp);
1265 		return;
1266 	}
1267 
1268 	/*
1269 	 * Take the message block from the Bulk-IN request and store it on the
1270 	 * command. We want this regardless of whether it succeeded, failed, or we have
1271 	 * some unexpected status value.
1272 	 */
1273 	cc->cc_response = ubrp->bulk_data;
1274 	ubrp->bulk_data = NULL;
1275 	ccid_command_state_transition(cc, CCID_COMMAND_COMPLETE);
1276 	ccid_command_complete(cc);
1277 	mutex_exit(&ccid->ccid_mutex);
1278 	usb_free_bulk_req(ubrp);
1279 }
1280 
1281 static void
1282 ccid_reply_bulk_exc_cb(usb_pipe_handle_t ph, usb_bulk_req_t *ubrp)
1283 {
1284 	ccid_t *ccid;
1285 	ccid_command_t *cc;
1286 
1287 	ccid = (ccid_t *)ubrp->bulk_client_private;
1288 	mutex_enter(&ccid->ccid_mutex);
1289 
1290 	/*
1291 	 * Before we do anything else, we should mark that this Bulk-IN request
1292 	 * is no longer being dispatched.
1293 	 */
1294 	VERIFY3P(ubrp, ==, ccid->ccid_bulkin_dispatched);
1295 	ccid->ccid_bulkin_dispatched = NULL;
1296 
1297 	/*
1298 	 * While there are many different reasons that the Bulk-IN request could
1299 	 * have failed, each of these are treated as a transport error. If we
1300 	 * have a dispatched command, then we treat this as corresponding to
1301 	 * that command. Otherwise, we drop this.
1302 	 */
1303 	if ((cc = list_head(&ccid->ccid_command_queue)) != NULL) {
1304 		if (cc->cc_state == CCID_COMMAND_REPLYING) {
1305 			ccid_command_transport_error(cc, USB_SUCCESS,
1306 			    ubrp->bulk_completion_reason);
1307 		}
1308 	}
1309 	mutex_exit(&ccid->ccid_mutex);
1310 	usb_free_bulk_req(ubrp);
1311 }
1312 
1313 /*
1314  * Fill the Bulk-IN cache. If we do not entirely fill this, that's fine. If
1315  * we cannot allocate requests right now, we'll deal with that when we actually
1316  * get there.
1317  */
1318 static void
1319 ccid_bulkin_cache_refresh(ccid_t *ccid)
1320 {
1321 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
1322 	while (ccid->ccid_bulkin_alloced < CCID_BULK_NALLOCED) {
1323 		usb_bulk_req_t *ubrp;
1324 
1325 		if ((ubrp = usb_alloc_bulk_req(ccid->ccid_dip,
1326 		    ccid->ccid_bufsize, 0)) == NULL)
1327 			return;
1328 
1329 		ubrp->bulk_len = ccid->ccid_bufsize;
1330 		ubrp->bulk_timeout = CCID_BULK_IN_TIMEOUT;
1331 		ubrp->bulk_client_private = (usb_opaque_t)ccid;
1332 		ubrp->bulk_attributes = USB_ATTRS_SHORT_XFER_OK |
1333 		    USB_ATTRS_AUTOCLEARING;
1334 		ubrp->bulk_cb = ccid_reply_bulk_cb;
1335 		ubrp->bulk_exc_cb = ccid_reply_bulk_exc_cb;
1336 
1337 		ccid->ccid_bulkin_cache[ccid->ccid_bulkin_alloced] = ubrp;
1338 		ccid->ccid_bulkin_alloced++;
1339 	}
1340 
1341 }
1342 
1343 static usb_bulk_req_t *
1344 ccid_bulkin_cache_get(ccid_t *ccid)
1345 {
1346 	usb_bulk_req_t *ubrp;
1347 
1348 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
1349 
1350 	if (ccid->ccid_bulkin_alloced == 0) {
1351 		ccid_bulkin_cache_refresh(ccid);
1352 		if (ccid->ccid_bulkin_alloced == 0)
1353 			return (NULL);
1354 	}
1355 
1356 	ccid->ccid_bulkin_alloced--;
1357 	ubrp = ccid->ccid_bulkin_cache[ccid->ccid_bulkin_alloced];
1358 	VERIFY3P(ubrp, !=, NULL);
1359 	ccid->ccid_bulkin_cache[ccid->ccid_bulkin_alloced] = NULL;
1360 
1361 	return (ubrp);
1362 }
1363 
1364 /*
1365  * Attempt to schedule a Bulk-In request. Note that only one should ever be
1366  * scheduled at any time.
1367  */
1368 static int
1369 ccid_bulkin_schedule(ccid_t *ccid)
1370 {
1371 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
1372 	if (ccid->ccid_bulkin_dispatched == NULL) {
1373 		usb_bulk_req_t *ubrp;
1374 		int ret;
1375 
1376 		ubrp = ccid_bulkin_cache_get(ccid);
1377 		if (ubrp == NULL) {
1378 			return (USB_NO_RESOURCES);
1379 		}
1380 
1381 		if ((ret = usb_pipe_bulk_xfer(ccid->ccid_bulkin_pipe, ubrp,
1382 		    0)) != USB_SUCCESS) {
1383 			ccid_error(ccid,
1384 			    "!failed to schedule Bulk-In response: %d", ret);
1385 			usb_free_bulk_req(ubrp);
1386 			return (ret);
1387 		}
1388 
1389 		ccid->ccid_bulkin_dispatched = ubrp;
1390 	}
1391 
1392 	return (USB_SUCCESS);
1393 }
1394 
1395 /*
1396  * Make sure that the head of the queue has been dispatched. If a dispatch to
1397  * the device fails, fail the command and try the next one.
1398  */
1399 static void
1400 ccid_command_dispatch(ccid_t *ccid)
1401 {
1402 	ccid_command_t *cc;
1403 
1404 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
1405 	while ((cc = list_head(&ccid->ccid_command_queue)) != NULL) {
1406 		int ret;
1407 
1408 		if ((ccid->ccid_flags & CCID_F_DEV_GONE_MASK) != 0)
1409 			return;
1410 
1411 		/*
1412 		 * Head of the queue is already being processed. We're done
1413 		 * here.
1414 		 */
1415 		if (cc->cc_state > CCID_COMMAND_QUEUED) {
1416 			return;
1417 		}
1418 
1419 		/*
1420 		 * Mark the command as being dispatched to the device. This
1421 		 * prevents anyone else from getting in and confusing things.
1422 		 */
1423 		ccid_command_state_transition(cc, CCID_COMMAND_DISPATCHED);
1424 		cc->cc_dispatch_time = gethrtime();
1425 
1426 		/*
1427 		 * Drop the global lock while we schedule the USB I/O.
1428 		 */
1429 		mutex_exit(&ccid->ccid_mutex);
1430 
1431 		ret = usb_pipe_bulk_xfer(ccid->ccid_bulkout_pipe, cc->cc_ubrp,
1432 		    0);
1433 		mutex_enter(&ccid->ccid_mutex);
1434 		if (ret != USB_SUCCESS) {
1435 			/*
1436 			 * We don't need to free the usb_bulk_req_t here as it
1437 			 * will be taken care of when the command itself is
1438 			 * freed.
1439 			 */
1440 			ccid_error(ccid, "!Bulk pipe dispatch failed: %d\n",
1441 			    ret);
1442 			ccid_command_transport_error(cc, ret, USB_CR_OK);
1443 		}
1444 	}
1445 }
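
/*
 * Note the locking pattern above: the command is moved to
 * CCID_COMMAND_DISPATCHED while ccid_mutex is still held, and only then is
 * the lock dropped around the potentially blocking usb_pipe_bulk_xfer().
 * That ordering is what stops a concurrent caller of ccid_command_dispatch()
 * from re-issuing the same command: its state check at the head of the loop
 * will see cc_state > CCID_COMMAND_QUEUED and return. In sketch form:
 *
 *	ccid_command_state_transition(cc, CCID_COMMAND_DISPATCHED);
 *	mutex_exit(&ccid->ccid_mutex);
 *	(blocking USB I/O happens here)
 *	mutex_enter(&ccid->ccid_mutex);
 */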
1446 
1447 static int
1448 ccid_command_queue(ccid_t *ccid, ccid_command_t *cc)
1449 {
1450 	id_t seq;
1451 	ccid_header_t *cchead;
1452 
1453 	seq = id_alloc_nosleep(ccid->ccid_seqs);
1454 	if (seq == -1)
1455 		return (ENOMEM);
1456 	cc->cc_seq = seq;
1457 	VERIFY3U(seq, <=, UINT8_MAX);
1458 	cchead = (void *)cc->cc_ubrp->bulk_data->b_rptr;
1459 	cchead->ch_seq = (uint8_t)seq;
1460 
1461 	mutex_enter(&ccid->ccid_mutex);
1462 	/*
1463 	 * Take a shot at filling up our reply cache while we're submitting this
1464 	 * command.
1465 	 */
1466 	ccid_bulkin_cache_refresh(ccid);
1467 	list_insert_tail(&ccid->ccid_command_queue, cc);
1468 	ccid_command_state_transition(cc, CCID_COMMAND_QUEUED);
1469 	cc->cc_queue_time = gethrtime();
1470 	ccid_command_dispatch(ccid);
1471 	mutex_exit(&ccid->ccid_mutex);
1472 
1473 	return (0);
1474 }
1475 
1476 /*
1477  * Normal callback for Bulk-Out requests which represents commands issued to the
1478  * device.
1479  */
1480 static void
1481 ccid_dispatch_bulk_cb(usb_pipe_handle_t ph, usb_bulk_req_t *ubrp)
1482 {
1483 	int ret;
1484 	ccid_command_t *cc = (void *)ubrp->bulk_client_private;
1485 	ccid_t *ccid = cc->cc_ccid;
1486 
1487 	mutex_enter(&ccid->ccid_mutex);
1488 	VERIFY3S(cc->cc_state, ==, CCID_COMMAND_DISPATCHED);
1489 	ccid_command_state_transition(cc, CCID_COMMAND_REPLYING);
1490 	cc->cc_dispatch_cb_time = gethrtime();
1491 
1492 	/*
1493 	 * Since we have successfully sent the command, schedule a Bulk-In
1494 	 * request for the device to reply with. If that fails, we'll note a
1495 	 * transport error, which will kick off the next command if needed.
1496 	 */
1497 	ret = ccid_bulkin_schedule(ccid);
1498 	if (ret != USB_SUCCESS) {
1499 		ccid_command_transport_error(cc, ret, USB_CR_OK);
1500 	}
1501 	mutex_exit(&ccid->ccid_mutex);
1502 }
1503 
1504 /*
1505  * Exception callback for the Bulk-Out requests which represent commands issued
1506  * to the device.
1507  */
1508 static void
1509 ccid_dispatch_bulk_exc_cb(usb_pipe_handle_t ph, usb_bulk_req_t *ubrp)
1510 {
1511 	ccid_command_t *cc = (void *)ubrp->bulk_client_private;
1512 	ccid_t *ccid = cc->cc_ccid;
1513 
1514 	mutex_enter(&ccid->ccid_mutex);
1515 	ccid_command_transport_error(cc, USB_SUCCESS,
1516 	    ubrp->bulk_completion_reason);
1517 	mutex_exit(&ccid->ccid_mutex);
1518 }
1519 
1520 static void
1521 ccid_command_free(ccid_command_t *cc)
1522 {
1523 	VERIFY0(list_link_active(&cc->cc_list_node));
1524 	VERIFY(cc->cc_state == CCID_COMMAND_ALLOCATED ||
1525 	    cc->cc_state >= CCID_COMMAND_COMPLETE);
1526 
1527 	if (cc->cc_response != NULL) {
1528 		freemsgchain(cc->cc_response);
1529 		cc->cc_response = NULL;
1530 	}
1531 
1532 	if (cc->cc_ubrp != NULL) {
1533 		usb_free_bulk_req(cc->cc_ubrp);
1534 		cc->cc_ubrp = NULL;
1535 	}
1536 
1537 	if (cc->cc_seq != 0) {
1538 		id_free(cc->cc_ccid->ccid_seqs, cc->cc_seq);
1539 		cc->cc_seq = 0;
1540 	}
1541 
1542 	cv_destroy(&cc->cc_cv);
1543 	kmem_free(cc, sizeof (ccid_command_t));
1544 }
1545 
1546 /*
1547  * Copy len bytes of data from buf into the allocated message block.
1548  */
1549 static void
1550 ccid_command_bcopy(ccid_command_t *cc, const void *buf, size_t len)
1551 {
1552 	size_t mlen;
1553 
1554 	mlen = msgsize(cc->cc_ubrp->bulk_data);
1555 	VERIFY3U(mlen + len, >=, len);
1556 	VERIFY3U(mlen + len, >=, mlen);
1557 	mlen += len;
1558 	VERIFY3U(mlen, <=, cc->cc_ubrp->bulk_len);
1559 
1560 	bcopy(buf, cc->cc_ubrp->bulk_data->b_wptr, len);
1561 	cc->cc_ubrp->bulk_data->b_wptr += len;
1562 }
1563 
1564 /*
1565  * Allocate a command of a specific size and parameters. This will allocate a
1566  * USB bulk transfer that the caller will copy data to.
1567  */
1568 static int
1569 ccid_command_alloc(ccid_t *ccid, ccid_slot_t *slot, boolean_t block,
1570     mblk_t *datamp, size_t datasz, uint8_t mtype, uint8_t param0,
1571     uint8_t param1, uint8_t param2, ccid_command_t **ccp)
1572 {
1573 	size_t allocsz;
1574 	int kmflag, usbflag;
1575 	ccid_command_t *cc;
1576 	ccid_header_t *cchead;
1577 	ccid_response_code_t rtype;
1578 
1579 	switch (mtype) {
1580 	case CCID_REQUEST_POWER_ON:
1581 	case CCID_REQUEST_POWER_OFF:
1582 	case CCID_REQUEST_SLOT_STATUS:
1583 	case CCID_REQUEST_GET_PARAMS:
1584 	case CCID_REQUEST_RESET_PARAMS:
1585 	case CCID_REQUEST_ICC_CLOCK:
1586 	case CCID_REQUEST_T0APDU:
1587 	case CCID_REQUEST_MECHANICAL:
1588 	case CCID_REQEUST_ABORT:
1589 		if (datasz != 0)
1590 			return (EINVAL);
1591 		break;
1592 	case CCID_REQUEST_TRANSFER_BLOCK:
1593 	case CCID_REQUEST_ESCAPE:
1594 	case CCID_REQUEST_SECURE:
1595 	case CCID_REQUEST_SET_PARAMS:
1596 	case CCID_REQUEST_DATA_CLOCK:
1597 		break;
1598 	default:
1599 		return (EINVAL);
1600 	}
1601 
1602 	switch (mtype) {
1603 	case CCID_REQUEST_POWER_ON:
1604 	case CCID_REQUEST_SECURE:
1605 	case CCID_REQUEST_TRANSFER_BLOCK:
1606 		rtype = CCID_RESPONSE_DATA_BLOCK;
1607 		break;
1608 
1609 	case CCID_REQUEST_POWER_OFF:
1610 	case CCID_REQUEST_SLOT_STATUS:
1611 	case CCID_REQUEST_ICC_CLOCK:
1612 	case CCID_REQUEST_T0APDU:
1613 	case CCID_REQUEST_MECHANICAL:
1614 	case CCID_REQEUST_ABORT:
1615 		rtype = CCID_RESPONSE_SLOT_STATUS;
1616 		break;
1617 
1618 	case CCID_REQUEST_GET_PARAMS:
1619 	case CCID_REQUEST_RESET_PARAMS:
1620 	case CCID_REQUEST_SET_PARAMS:
1621 		rtype = CCID_RESPONSE_PARAMETERS;
1622 		break;
1623 
1624 	case CCID_REQUEST_ESCAPE:
1625 		rtype = CCID_RESPONSE_ESCAPE;
1626 		break;
1627 
1628 	case CCID_REQUEST_DATA_CLOCK:
1629 		rtype = CCID_RESPONSE_DATA_CLOCK;
1630 		break;
1631 	default:
1632 		return (EINVAL);
1633 	}
1634 
1635 	if (block) {
1636 		kmflag = KM_SLEEP;
1637 		usbflag = USB_FLAGS_SLEEP;
1638 	} else {
1639 		kmflag = KM_NOSLEEP_LAZY;
1640 		usbflag = 0;
1641 	}
1642 
1643 	if (datasz + sizeof (ccid_header_t) < datasz)
1644 		return (EINVAL);
1645 	if (datasz + sizeof (ccid_header_t) > ccid->ccid_bufsize)
1646 		return (EINVAL);
1647 
1648 	cc = kmem_zalloc(sizeof (ccid_command_t), kmflag);
1649 	if (cc == NULL)
1650 		return (ENOMEM);
1651 
1652 	allocsz = datasz + sizeof (ccid_header_t);
1653 	if (datamp == NULL) {
1654 		cc->cc_ubrp = usb_alloc_bulk_req(ccid->ccid_dip, allocsz,
1655 		    usbflag);
1656 	} else {
1657 		cc->cc_ubrp = usb_alloc_bulk_req(ccid->ccid_dip, 0, usbflag);
1658 	}
1659 	if (cc->cc_ubrp == NULL) {
1660 		kmem_free(cc, sizeof (ccid_command_t));
1661 		return (ENOMEM);
1662 	}
1663 
1664 	list_link_init(&cc->cc_list_node);
1665 	cv_init(&cc->cc_cv, NULL, CV_DRIVER, NULL);
1666 	cc->cc_mtype = mtype;
1667 	cc->cc_rtype = rtype;
1668 	cc->cc_slot = slot->cs_slotno;
1669 	cc->cc_reqlen = datasz;
1670 	cc->cc_ccid = ccid;
1671 	cc->cc_state = CCID_COMMAND_ALLOCATED;
1672 
1673 	/*
1674 	 * Fill in bulk request attributes. Note that short transfers out
1675 	 * are not OK.
1676 	 */
1677 	if (datamp != NULL) {
1678 		cc->cc_ubrp->bulk_data = datamp;
1679 	}
1680 	cc->cc_ubrp->bulk_len = allocsz;
1681 	cc->cc_ubrp->bulk_timeout = CCID_BULK_OUT_TIMEOUT;
1682 	cc->cc_ubrp->bulk_client_private = (usb_opaque_t)cc;
1683 	cc->cc_ubrp->bulk_attributes = USB_ATTRS_AUTOCLEARING;
1684 	cc->cc_ubrp->bulk_cb = ccid_dispatch_bulk_cb;
1685 	cc->cc_ubrp->bulk_exc_cb = ccid_dispatch_bulk_exc_cb;
1686 
1687 	/*
1688 	 * Fill in the command header. We fill in everything except the sequence
1689 	 * number, which is done by the actual dispatch code.
1690 	 */
1691 	cchead = (void *)cc->cc_ubrp->bulk_data->b_rptr;
1692 	cchead->ch_mtype = mtype;
1693 	cchead->ch_length = LE_32(datasz);
1694 	cchead->ch_slot = slot->cs_slotno;
1695 	cchead->ch_seq = 0;
1696 	cchead->ch_param0 = param0;
1697 	cchead->ch_param1 = param1;
1698 	cchead->ch_param2 = param2;
1699 	cc->cc_ubrp->bulk_data->b_wptr += sizeof (ccid_header_t);
1700 	*ccp = cc;
1701 
1702 	return (0);
1703 }
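
/*
 * Taken together with ccid_command_bcopy(), ccid_command_queue(), and
 * ccid_command_poll(), this allocation routine gives the synchronous
 * command pattern used by the helpers below (ccid_command_power_off() is a
 * real instance). As a rough sketch, with a hypothetical example payload
 * and error handling elided:
 *
 *	ccid_command_t *cc;
 *	uint8_t apdu[] = { 0x00, 0xA4, 0x04, 0x00 };
 *
 *	if (ccid_command_alloc(ccid, slot, B_TRUE, NULL, sizeof (apdu),
 *	    CCID_REQUEST_TRANSFER_BLOCK, 0, 0, 0, &cc) != 0)
 *		return;
 *	ccid_command_bcopy(cc, apdu, sizeof (apdu));
 *	if (ccid_command_queue(ccid, cc) != 0) {
 *		ccid_command_free(cc);
 *		return;
 *	}
 *	ccid_command_poll(ccid, cc);
 *	(check cc->cc_state and decode the response before using it)
 *	ccid_command_free(cc);
 */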
1704 
1705 /*
1706  * The rest of the stack is in charge of timing out commands and potentially
1707  * aborting them. At this point in time, there's no specific timeout aspect
1708  * here.
1709  */
1710 static void
1711 ccid_command_poll(ccid_t *ccid, ccid_command_t *cc)
1712 {
1713 	VERIFY0(cc->cc_flags & CCID_COMMAND_F_USER);
1714 
1715 	mutex_enter(&ccid->ccid_mutex);
1716 	while ((cc->cc_state < CCID_COMMAND_COMPLETE) &&
1717 	    (ccid->ccid_flags & CCID_F_DEV_GONE_MASK) == 0) {
1718 		cv_wait(&cc->cc_cv, &ccid->ccid_mutex);
1719 	}
1720 
1721 	/*
1722 	 * Treat this as a consumption and remove it from the completion list.
1723 	 */
1724 #ifdef DEBUG
1725 	ccid_command_t *check;
1726 	for (check = list_head(&ccid->ccid_complete_queue); check != NULL;
1727 	    check = list_next(&ccid->ccid_complete_queue, check)) {
1728 		if (cc == check)
1729 			break;
1730 	}
1731 	ASSERT3P(check, !=, NULL);
1732 #endif
1733 	VERIFY(list_link_active(&cc->cc_list_node));
1734 	list_remove(&ccid->ccid_complete_queue, cc);
1735 	mutex_exit(&ccid->ccid_mutex);
1736 }
1737 
1738 static int
1739 ccid_command_power_off(ccid_t *ccid, ccid_slot_t *cs)
1740 {
1741 	int ret;
1742 	ccid_command_t *cc;
1743 	ccid_reply_icc_status_t cis;
1744 	ccid_reply_command_status_t crs;
1745 
1746 	if ((ret = ccid_command_alloc(ccid, cs, B_TRUE, NULL, 0,
1747 	    CCID_REQUEST_POWER_OFF, 0, 0, 0, &cc)) != 0) {
1748 		return (ret);
1749 	}
1750 
1751 	if ((ret = ccid_command_queue(ccid, cc)) != 0) {
1752 		ccid_command_free(cc);
1753 		return (ret);
1754 	}
1755 
1756 	ccid_command_poll(ccid, cc);
1757 
1758 	if (cc->cc_state != CCID_COMMAND_COMPLETE) {
1759 		ret = EIO;
1760 		goto done;
1761 	}
1762 
1763 	ccid_command_status_decode(cc, &crs, &cis, NULL);
1764 	if (crs == CCID_REPLY_STATUS_FAILED) {
1765 		if (cis == CCID_REPLY_ICC_MISSING) {
1766 			ret = ENXIO;
1767 		} else {
1768 			ret = EIO;
1769 		}
1770 	} else {
1771 		ret = 0;
1772 	}
1773 done:
1774 	ccid_command_free(cc);
1775 	return (ret);
1776 }
1777 
1778 static int
1779 ccid_command_power_on(ccid_t *ccid, ccid_slot_t *cs, ccid_class_voltage_t volt,
1780     mblk_t **atrp)
1781 {
1782 	int ret;
1783 	ccid_command_t *cc;
1784 	ccid_reply_command_status_t crs;
1785 	ccid_reply_icc_status_t cis;
1786 	ccid_command_err_t cce;
1787 
1788 	if (atrp == NULL)
1789 		return (EINVAL);
1790 
1791 	*atrp = NULL;
1792 
1793 	switch (volt) {
1794 	case CCID_CLASS_VOLT_AUTO:
1795 	case CCID_CLASS_VOLT_5_0:
1796 	case CCID_CLASS_VOLT_3_0:
1797 	case CCID_CLASS_VOLT_1_8:
1798 		break;
1799 	default:
1800 		return (EINVAL);
1801 	}
1802 
1803 	if ((ret = ccid_command_alloc(ccid, cs, B_TRUE, NULL, 0,
1804 	    CCID_REQUEST_POWER_ON, volt, 0, 0, &cc)) != 0) {
1805 		return (ret);
1806 	}
1807 
1808 	if ((ret = ccid_command_queue(ccid, cc)) != 0) {
1809 		ccid_command_free(cc);
1810 		return (ret);
1811 	}
1812 
1813 	ccid_command_poll(ccid, cc);
1814 
1815 	if (cc->cc_state != CCID_COMMAND_COMPLETE) {
1816 		ret = EIO;
1817 		goto done;
1818 	}
1819 
1820 	/*
1821 	 * Look for a few specific errors here:
1822 	 *
1823 	 * - ICC_MUTE via a few potential ways
1824 	 * - Bad voltage
1825 	 */
1826 	ccid_command_status_decode(cc, &crs, &cis, &cce);
1827 	if (crs == CCID_REPLY_STATUS_FAILED) {
1828 		if (cis == CCID_REPLY_ICC_MISSING) {
1829 			ret = ENXIO;
1830 		} else if (cis == CCID_REPLY_ICC_INACTIVE &&
1831 		    cce == 7) {
1832 			/*
1833 			 * This means that byte 7 was invalid. In other words,
1834 			 * that the voltage wasn't correct. See Table 6.1-2
1835 			 * 'Errors' in the CCID r1.1.0 spec.
1836 			 */
1837 			ret = ENOTSUP;
1838 		} else {
1839 			ret = EIO;
1840 		}
1841 	} else {
1842 		size_t len;
1843 
1844 		len = ccid_command_resp_length(cc);
1845 		if (len == 0) {
1846 			ret = EINVAL;
1847 			goto done;
1848 		}
1849 
1850 #ifdef	DEBUG
1851 		/*
1852 		 * This should have already been checked by the response
1853 		 * framework, but sanity check this again.
1854 		 */
1855 		size_t mlen = msgsize(cc->cc_response);
1856 		VERIFY3U(mlen, >=, len + sizeof (ccid_header_t));
1857 #endif
1858 
1859 		/*
1860 		 * Munge the message block to have the ATR. We want to make sure
1861 		 * that the write pointer is set to the maximum length that we
1862 		 * got back from the driver (the message block could strictly
1863 		 * speaking be larger, because we got a larger transfer for some
1864 		 * reason).
1865 		 */
1866 		cc->cc_response->b_rptr += sizeof (ccid_header_t);
1867 		cc->cc_response->b_wptr = cc->cc_response->b_rptr + len;
1868 		*atrp = cc->cc_response;
1869 		cc->cc_response = NULL;
1870 		ret = 0;
1871 	}
1872 
1873 done:
1874 	ccid_command_free(cc);
1875 	return (ret);
1876 }
1877 
1878 static int
1879 ccid_command_get_parameters(ccid_t *ccid, ccid_slot_t *slot,
1880     atr_protocol_t *protp, ccid_params_t *paramsp)
1881 {
1882 	int ret;
1883 	uint8_t prot;
1884 	size_t mlen;
1885 	ccid_command_t *cc;
1886 	ccid_reply_command_status_t crs;
1887 	ccid_reply_icc_status_t cis;
1888 	const void *cpbuf;
1889 
1890 	if ((ret = ccid_command_alloc(ccid, slot, B_TRUE, NULL, 0,
1891 	    CCID_REQUEST_GET_PARAMS, 0, 0, 0, &cc)) != 0) {
1892 		return (ret);
1893 	}
1894 
1895 	if ((ret = ccid_command_queue(ccid, cc)) != 0)
1896 		goto done;
1897 
1898 	ccid_command_poll(ccid, cc);
1899 
1900 	if (cc->cc_state != CCID_COMMAND_COMPLETE) {
1901 		ret = EIO;
1902 		goto done;
1903 	}
1904 
1905 	ccid_command_status_decode(cc, &crs, &cis, NULL);
1906 	if (crs != CCID_REPLY_STATUS_COMPLETE) {
1907 		if (cis == CCID_REPLY_ICC_MISSING) {
1908 			ret = ENXIO;
1909 		} else {
1910 			ret = EIO;
1911 		}
1912 		goto done;
1913 	}
1914 
1915 	/*
1916 	 * The protocol is in ch_param2 of the header.
1917 	 */
1918 	prot = ccid_command_resp_param2(cc);
1919 	mlen = ccid_command_resp_length(cc);
1920 	cpbuf = cc->cc_response->b_rptr + sizeof (ccid_header_t);
1921 
1922 	ret = 0;
1923 	switch (prot) {
1924 	case 0:
1925 		if (mlen < sizeof (ccid_params_t0_t)) {
1926 			ret = EOVERFLOW;
1927 			goto done;
1928 		}
1929 		*protp = ATR_P_T0;
1930 		bcopy(cpbuf, &paramsp->ccp_t0, sizeof (ccid_params_t0_t));
1931 		break;
1932 	case 1:
1933 		if (mlen < sizeof (ccid_params_t1_t)) {
1934 			ret = EOVERFLOW;
1935 			goto done;
1936 		}
1937 		*protp = ATR_P_T1;
1938 		bcopy(cpbuf, &paramsp->ccp_t1, sizeof (ccid_params_t1_t));
1939 		break;
1940 	default:
1941 		ret = ECHRNG;
1942 		break;
1943 	}
1944 
1945 done:
1946 	ccid_command_free(cc);
1947 	return (ret);
1948 }
1949 
1950 static void
1951 ccid_hw_error(ccid_t *ccid, ccid_intr_hwerr_t *hwerr)
1952 {
1953 	ccid_slot_t *slot;
1954 
1955 	/* Make sure the slot number is within range. */
1956 	if (hwerr->cih_slot >= ccid->ccid_nslots) {
1957 		ccid->ccid_stats.cst_intr_inval++;
1958 		return;
1959 	}
1960 
1961 	slot = &ccid->ccid_slots[hwerr->cih_slot];
1962 
1963 	/* The only error condition defined by the spec is overcurrent. */
1964 	if (hwerr->cih_code != CCID_INTR_HWERR_OVERCURRENT) {
1965 		ccid->ccid_stats.cst_intr_inval++;
1966 		return;
1967 	}
1968 
1969 	/*
1970 	 * The worker thread will take care of this situation.
1971 	 */
1972 	slot->cs_flags |= CCID_SLOT_F_INTR_OVERCURRENT;
1973 	ccid_worker_request(ccid);
1974 }
1975 
1976 static void
1977 ccid_intr_pipe_cb(usb_pipe_handle_t ph, usb_intr_req_t *uirp)
1978 {
1979 	mblk_t *mp;
1980 	size_t msglen, explen;
1981 	uint_t i;
1982 	boolean_t change;
1983 	ccid_intr_hwerr_t ccid_hwerr;
1984 	ccid_t *ccid = (ccid_t *)uirp->intr_client_private;
1985 
1986 	mp = uirp->intr_data;
1987 	if (mp == NULL)
1988 		goto done;
1989 
1990 	msglen = msgsize(mp);
1991 	if (msglen == 0)
1992 		goto done;
1993 
1994 	switch (mp->b_rptr[0]) {
1995 	case CCID_INTR_CODE_SLOT_CHANGE:
1996 		mutex_enter(&ccid->ccid_mutex);
1997 		ccid->ccid_stats.cst_intr_slot_change++;
1998 
1999 		explen = 1 + ((2 * ccid->ccid_nslots + (NBBY-1)) / NBBY);
2000 		if (msglen < explen) {
2001 			ccid->ccid_stats.cst_intr_inval++;
2002 			mutex_exit(&ccid->ccid_mutex);
2003 			goto done;
2004 		}
2005 
2006 		change = B_FALSE;
2007 		for (i = 0; i < ccid->ccid_nslots; i++) {
2008 			uint_t byte = (i * 2 / NBBY) + 1;
2009 			uint_t shift = i * 2 % NBBY;
2010 			uint_t present = 1 << shift;
2011 			uint_t delta = 2 << shift;
2012 
2013 			if (mp->b_rptr[byte] & delta) {
2014 				ccid_slot_t *slot = &ccid->ccid_slots[i];
2015 
2016 				slot->cs_flags &= ~CCID_SLOT_F_INTR_MASK;
2017 				slot->cs_flags |= CCID_SLOT_F_CHANGED;
2018 				if (mp->b_rptr[byte] & present) {
2019 					slot->cs_flags |= CCID_SLOT_F_INTR_ADD;
2020 				} else {
2021 					slot->cs_flags |= CCID_SLOT_F_INTR_GONE;
2022 				}
2023 				change = B_TRUE;
2024 			}
2025 		}
2026 
2027 		if (change) {
2028 			ccid_worker_request(ccid);
2029 		}
2030 		mutex_exit(&ccid->ccid_mutex);
2031 		break;
2032 	case CCID_INTR_CODE_HW_ERROR:
2033 		mutex_enter(&ccid->ccid_mutex);
2034 		ccid->ccid_stats.cst_intr_hwerr++;
2035 
2036 		if (msglen < sizeof (ccid_intr_hwerr_t)) {
2037 			ccid->ccid_stats.cst_intr_inval++;
2038 			mutex_exit(&ccid->ccid_mutex);
2039 			goto done;
2040 		}
2041 
2042 		bcopy(mp->b_rptr, &ccid_hwerr, sizeof (ccid_intr_hwerr_t));
2043 		ccid_hw_error(ccid, &ccid_hwerr);
2044 
2045 		mutex_exit(&ccid->ccid_mutex);
2046 		break;
2047 	default:
2048 		mutex_enter(&ccid->ccid_mutex);
2049 		ccid->ccid_stats.cst_intr_unknown++;
2050 		mutex_exit(&ccid->ccid_mutex);
2051 		break;
2052 	}
2053 
2054 done:
2055 	usb_free_intr_req(uirp);
2056 }
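
/*
 * To make the bitmap math above concrete: the slot-change message
 * (RDR_to_PC_NotifySlotChange in CCID r1.1.0) carries two bits per slot
 * after the one-byte message code, so a three-slot reader has
 * explen = 1 + ((2 * 3 + 7) / 8) = 2 bytes. For slot 2, byte is
 * (2 * 2 / 8) + 1 = 1 and shift is 4, so bit 0x10 of b_rptr[1] is the
 * slot's current presence state and bit 0x20 is its "state changed" flag.
 */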
2057 
2058 static void
2059 ccid_intr_pipe_except_cb(usb_pipe_handle_t ph, usb_intr_req_t *uirp)
2060 {
2061 	ccid_t *ccid = (ccid_t *)uirp->intr_client_private;
2062 
2063 	ccid->ccid_stats.cst_intr_errs++;
2064 	switch (uirp->intr_completion_reason) {
2065 	case USB_CR_PIPE_RESET:
2066 	case USB_CR_NO_RESOURCES:
2067 		ccid->ccid_stats.cst_intr_restart++;
2068 		ccid_intr_poll_init(ccid);
2069 		break;
2070 	default:
2071 		break;
2072 	}
2073 	usb_free_intr_req(uirp);
2074 }
2075 
2076 /*
2077  * Clean up all the state associated with this slot and its ICC.
2078  */
2079 static void
2080 ccid_slot_teardown(ccid_t *ccid, ccid_slot_t *slot, boolean_t signal)
2081 {
2082 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2083 
2084 	if (slot->cs_icc.icc_fini != NULL) {
2085 		slot->cs_icc.icc_fini(ccid, slot);
2086 	}
2087 
2088 	atr_data_reset(slot->cs_icc.icc_atr_data);
2089 	slot->cs_icc.icc_protocols = ATR_P_NONE;
2090 	slot->cs_icc.icc_cur_protocol = ATR_P_NONE;
2091 	slot->cs_icc.icc_init = NULL;
2092 	slot->cs_icc.icc_tx = NULL;
2093 	slot->cs_icc.icc_complete = NULL;
2094 	slot->cs_icc.icc_teardown = NULL;
2095 	slot->cs_icc.icc_fini = NULL;
2096 
2097 	slot->cs_voltage = 0;
2098 	freemsgchain(slot->cs_atr);
2099 	slot->cs_atr = NULL;
2100 
2101 	if (signal && slot->cs_excl_minor != NULL) {
2102 		pollwakeup(&slot->cs_excl_minor->cm_pollhead, POLLHUP);
2103 	}
2104 }
2105 
2106 /*
2107  * Wait for teardown of outstanding user I/O.
2108  */
2109 static void
2110 ccid_slot_io_teardown(ccid_t *ccid, ccid_slot_t *slot)
2111 {
2112 	/*
2113 	 * If there is outstanding user I/O, then we need to go ahead and take
2114 	 * care of that. Once this function returns, the user I/O will have been
2115 	 * dealt with; however, before we can tear down things, we need to make
2116 	 * sure that the logical I/O has been completed.
2117 	 */
2118 	if (slot->cs_icc.icc_teardown != NULL) {
2119 		slot->cs_icc.icc_teardown(ccid, slot, ENXIO);
2120 	}
2121 
2122 	while ((slot->cs_flags & CCID_SLOT_F_NEED_IO_TEARDOWN) != 0) {
2123 		cv_wait(&slot->cs_io.ci_cv, &ccid->ccid_mutex);
2124 	}
2125 }
2126 
2127 /*
2128  * The given CCID slot has been inactivated. Clean up.
2129  */
2130 static void
2131 ccid_slot_inactive(ccid_t *ccid, ccid_slot_t *slot)
2132 {
2133 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2134 
2135 	slot->cs_flags &= ~CCID_SLOT_F_ACTIVE;
2136 
2137 	ccid_slot_io_teardown(ccid, slot);
2138 
2139 	/*
2140 	 * Now that we've finished completely waiting for the logical I/O to be
2141 	 * torn down, it's safe for us to proceed with the rest of the needed
2142 	 * tear down.
2143 	 */
2144 	ccid_slot_teardown(ccid, slot, B_TRUE);
2145 }
2146 
2147 /*
2148  * The given CCID slot has been removed. Clean up.
2149  */
2150 static void
2151 ccid_slot_removed(ccid_t *ccid, ccid_slot_t *slot, boolean_t notify)
2152 {
2153 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2154 	if ((slot->cs_flags & CCID_SLOT_F_PRESENT) == 0) {
2155 		VERIFY0(slot->cs_flags & CCID_SLOT_F_ACTIVE);
2156 		return;
2157 	}
2158 
2159 	/*
2160 	 * This slot is gone, mark the flags accordingly.
2161 	 */
2162 	slot->cs_flags &= ~CCID_SLOT_F_PRESENT;
2163 
2164 	ccid_slot_inactive(ccid, slot);
2165 }
2166 
2167 static void
2168 ccid_slot_setup_functions(ccid_t *ccid, ccid_slot_t *slot)
2169 {
2170 	uint_t bits = CCID_CLASS_F_SHORT_APDU_XCHG | CCID_CLASS_F_EXT_APDU_XCHG;
2171 
2172 	slot->cs_icc.icc_init = NULL;
2173 	slot->cs_icc.icc_tx = NULL;
2174 	slot->cs_icc.icc_complete = NULL;
2175 	slot->cs_icc.icc_teardown = NULL;
2176 	slot->cs_icc.icc_fini = NULL;
2177 
2178 	switch (ccid->ccid_class.ccd_dwFeatures & bits) {
2179 	case CCID_CLASS_F_SHORT_APDU_XCHG:
2180 	case CCID_CLASS_F_EXT_APDU_XCHG:
2181 		/*
2182 		 * Readers with extended APDU support always also support
2183 		 * short APDUs. We only ever use short APDUs.
2184 		 */
2185 		slot->cs_icc.icc_tx = ccid_write_apdu;
2186 		slot->cs_icc.icc_complete = ccid_complete_apdu;
2187 		slot->cs_icc.icc_teardown = ccid_teardown_apdu;
2188 		break;
2189 	default:
2190 		break;
2191 	}
2192 
2193 	/*
2194 	 * When we don't have a supported tx function, we don't want to end
2195 	 * up blocking attach. It's important we attach so that users can
2196 	 * still determine information about the ICC and reader.
2197 	 */
2198 	if (slot->cs_icc.icc_tx == NULL) {
2199 		ccid_error(ccid, "!CCID does not support I/O transfers to ICC");
2200 	}
2201 }
2202 
2203 /*
2204  * We have an ICC present in a slot. We require that the reader does all
2205  * protocol and parameter related initializations for us. Just parse the ATR
2206  * for our own use and use GET_PARAMS to query the parameters the reader set
2207  * up for us.
2208  */
2209 static boolean_t
2210 ccid_slot_params_init(ccid_t *ccid, ccid_slot_t *slot, mblk_t *atr)
2211 {
2212 	int ret;
2213 	atr_parsecode_t p;
2214 	atr_protocol_t prot;
2215 	atr_data_t *data;
2216 
2217 	/*
2218 	 * Use the slot's atr data structure. This is only used when we're in
2219 	 * the worker context, so it should be safe to access in a lockless
2220 	 * fashion.
2221 	 */
2222 	data = slot->cs_icc.icc_atr_data;
2223 	atr_data_reset(data);
2224 	if ((p = atr_parse(atr->b_rptr, msgsize(atr), data)) != ATR_CODE_OK) {
2225 		ccid_error(ccid, "!failed to parse ATR data from slot %d: %s",
2226 		    slot->cs_slotno, atr_strerror(p));
2227 		return (B_FALSE);
2228 	}
2229 
2230 	if ((ret = ccid_command_get_parameters(ccid, slot, &prot,
2231 	    &slot->cs_icc.icc_params)) != 0) {
2232 		ccid_error(ccid, "!failed to get parameters for slot %u: %d",
2233 		    slot->cs_slotno, ret);
2234 		return (B_FALSE);
2235 	}
2236 
2237 	slot->cs_icc.icc_protocols = atr_supported_protocols(data);
2238 	slot->cs_icc.icc_cur_protocol = prot;
2239 
2240 	if ((ccid->ccid_flags & (CCID_F_NEEDS_PPS | CCID_F_NEEDS_PARAMS |
2241 	    CCID_F_NEEDS_DATAFREQ)) != 0) {
2242 		ccid_error(ccid, "!CCID reader does not support required "
2243 		    "protocol/parameter setup automation");
2244 		return (B_FALSE);
2245 	}
2246 
2247 	return (B_TRUE);
2248 }
2249 
2250 /*
2251  * Set up the ICC function parameters and initialize the ICC engine.
2252  */
2253 static boolean_t
2254 ccid_slot_prot_init(ccid_t *ccid, ccid_slot_t *slot)
2255 {
2256 	ccid_slot_setup_functions(ccid, slot);
2257 
2258 	if (slot->cs_icc.icc_init != NULL) {
2259 		slot->cs_icc.icc_init(ccid, slot);
2260 	}
2261 
2262 	return (B_TRUE);
2263 }
2264 
2265 static int
2266 ccid_slot_power_on(ccid_t *ccid, ccid_slot_t *slot, ccid_class_voltage_t volts,
2267     mblk_t **atr)
2268 {
2269 	int ret;
2270 
2271 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2272 
2273 	mutex_exit(&ccid->ccid_mutex);
2274 	if ((ret = ccid_command_power_on(ccid, slot, volts, atr)) != 0) {
2275 		/*
2276 		 * If we got ENXIO, then we know that there is no ICC
2277 		 * present. This could happen for a number of reasons.
2278 		 * For example, we could have just started up and no
2279 		 * card was plugged in (we default to assuming that one
2280 		 * is). Also, some readers won't really tell us that
2281 		 * nothing is there until after the power on fails,
2282 		 * which is why we don't bother with a status check
2283 		 * and just try to power on.
2284 		 */
2285 		if (ret == ENXIO) {
2286 			mutex_enter(&ccid->ccid_mutex);
2287 			slot->cs_flags &= ~CCID_SLOT_F_PRESENT;
2288 			return (ret);
2289 		}
2290 
2291 		/*
2292 		 * If we fail to power off the card, check to make sure
2293 		 * it hasn't been removed.
2294 		 */
2295 		if (ccid_command_power_off(ccid, slot) == ENXIO) {
2296 			mutex_enter(&ccid->ccid_mutex);
2297 			slot->cs_flags &= ~CCID_SLOT_F_PRESENT;
2298 			return (ENXIO);
2299 		}
2300 
2301 		mutex_enter(&ccid->ccid_mutex);
2302 		return (ret);
2303 	}
2304 
2305 	if (!ccid_slot_params_init(ccid, slot, *atr)) {
2306 		ccid_error(ccid, "!failed to set slot paramters for ICC");
2307 		mutex_enter(&ccid->ccid_mutex);
2308 		return (ENOTSUP);
2309 	}
2310 
2311 	if (!ccid_slot_prot_init(ccid, slot)) {
2312 		ccid_error(ccid, "!failed to setup protocol for ICC");
2313 		mutex_enter(&ccid->ccid_mutex);
2314 		return (ENOTSUP);
2315 	}
2316 
2317 	mutex_enter(&ccid->ccid_mutex);
2318 	return (0);
2319 }
2320 
2321 static int
2322 ccid_slot_power_off(ccid_t *ccid, ccid_slot_t *slot)
2323 {
2324 	int ret;
2325 
2326 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2327 
2328 	ccid_slot_io_teardown(ccid, slot);
2329 
2330 	/*
2331 	 * Now that we've finished completely waiting for the logical I/O to be
2332 	 * torn down, try and power off the ICC.
2333 	 */
2334 	mutex_exit(&ccid->ccid_mutex);
2335 	ret = ccid_command_power_off(ccid, slot);
2336 	mutex_enter(&ccid->ccid_mutex);
2337 
2338 	if (ret != 0)
2339 		return (ret);
2340 
2341 	ccid_slot_inactive(ccid, slot);
2342 
2343 	return (ret);
2344 }
2345 
2346 static int
2347 ccid_slot_inserted(ccid_t *ccid, ccid_slot_t *slot)
2348 {
2349 	uint_t nvolts = 4;
2350 	uint_t cvolt = 0;
2351 	mblk_t *atr = NULL;
2352 	ccid_class_voltage_t volts[4] = { CCID_CLASS_VOLT_AUTO,
2353 	    CCID_CLASS_VOLT_5_0, CCID_CLASS_VOLT_3_0, CCID_CLASS_VOLT_1_8 };
2354 
2355 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2356 	if ((ccid->ccid_flags & CCID_F_DEV_GONE_MASK) != 0) {
2357 		return (0);
2358 	}
2359 
2360 	if ((slot->cs_flags & CCID_SLOT_F_ACTIVE) != 0) {
2361 		return (0);
2362 	}
2363 
2364 	slot->cs_flags |= CCID_SLOT_F_PRESENT;
2365 
2366 	/*
2367 	 * Now, we need to activate this ccid device before we can do anything
2368 	 * with it. First, power on the device. There are two hardware features
2369 	 * which may be at play. There may be automatic voltage detection and
2370 	 * automatic activation on insertion. In theory, when either of those
2371 	 * are present, we should always try to use the auto voltage.
2372 	 *
2373 	 * What's less clear in the specification is whether, when the
2374 	 * Auto-Voltage property is present, we should also try the manual
2375 	 * voltages. For the moment we do.
2376 	 */
2377 	if ((ccid->ccid_class.ccd_dwFeatures &
2378 	    (CCID_CLASS_F_AUTO_ICC_ACTIVATE | CCID_CLASS_F_AUTO_ICC_VOLTAGE)) ==
2379 	    0) {
2380 		/* Skip auto-voltage */
2381 		cvolt++;
2382 	}
2383 
2384 	for (; cvolt < nvolts; cvolt++) {
2385 		int ret;
2386 
2387 		if (volts[cvolt] != CCID_CLASS_VOLT_AUTO &&
2388 		    (ccid->ccid_class.ccd_bVoltageSupport & volts[cvolt]) ==
2389 		    0) {
2390 			continue;
2391 		}
2392 
2393 		ret = ccid_slot_power_on(ccid, slot, volts[cvolt], &atr);
2394 		if (ret != 0) {
2395 			freemsg(atr);
2396 			atr = NULL;
2397 			continue;
2398 		}
2399 
2400 		break;
2401 	}
2402 
2403 	if (cvolt >= nvolts) {
2404 		ccid_error(ccid, "!failed to activate and power on ICC, no "
2405 		    "supported voltages found");
2406 		goto notsup;
2407 	}
2408 
2409 	slot->cs_voltage = volts[cvolt];
2410 	slot->cs_atr = atr;
2411 	slot->cs_flags |= CCID_SLOT_F_ACTIVE;
2412 
2413 	ccid_slot_pollout_signal(slot);
2414 
2415 	return (0);
2416 
2417 notsup:
2418 	freemsg(atr);
2419 	ccid_slot_teardown(ccid, slot, B_FALSE);
2420 	return (ENOTSUP);
2421 }
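
/*
 * As a concrete example of the voltage walk above: a reader that
 * advertises neither CCID_CLASS_F_AUTO_ICC_ACTIVATE nor
 * CCID_CLASS_F_AUTO_ICC_VOLTAGE, and whose ccd_bVoltageSupport covers only
 * 5.0V and 3.0V, skips the automatic entry, attempts to power on at 5.0V,
 * falls back to 3.0V if that fails, and never tries 1.8V.
 */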
2422 
2423 static int
2424 ccid_slot_warm_reset(ccid_t *ccid, ccid_slot_t *slot)
2425 {
2426 	int ret;
2427 	mblk_t *atr;
2428 	ccid_class_voltage_t voltage;
2429 
2430 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2431 
2432 	ccid_slot_io_teardown(ccid, slot);
2433 
2434 	voltage = slot->cs_voltage;
2435 
2436 	ccid_slot_teardown(ccid, slot, B_FALSE);
2437 
2438 	ret = ccid_slot_power_on(ccid, slot, voltage, &atr);
2439 	if (ret != 0) {
2440 		freemsg(atr);
2441 		return (ret);
2442 	}
2443 
2444 	slot->cs_voltage = voltage;
2445 	slot->cs_atr = atr;
2446 
2447 	return (ret);
2448 }
2449 
2450 static boolean_t
2451 ccid_slot_reset(ccid_t *ccid, ccid_slot_t *slot)
2452 {
2453 	int ret;
2454 
2455 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2456 	VERIFY(slot->cs_flags & CCID_SLOT_F_NEED_TXN_RESET);
2457 	VERIFY(ccid->ccid_flags & CCID_F_WORKER_RUNNING);
2458 
2459 	if (ccid->ccid_flags & CCID_F_DEV_GONE_MASK)
2460 		return (B_TRUE);
2461 
2462 	/*
2463 	 * Power off the ICC. This will wait for logical I/O if needed.
2464 	 */
2465 	ret = ccid_slot_power_off(ccid, slot);
2466 
2467 	/*
2468 	 * If we failed to power off the ICC because the ICC is removed, then
2469 	 * just return that we failed, so that we can let the next lap clean
2470 	 * things up by noting that the ICC has been removed.
2471 	 */
2472 	if (ret != 0 && ret == ENXIO) {
2473 		return (B_FALSE);
2474 	}
2475 
2476 	if (ret != 0) {
2477 		ccid_error(ccid, "!failed to reset slot %d for next txn: %d; "
2478 		    "taking another lap", slot->cs_slotno, ret);
2479 		return (B_FALSE);
2480 	}
2481 
2482 	/*
2483 	 * Mimic a slot insertion to power this back on. Don't worry about
2484 	 * success or failure, because as far as we care for resetting it, we've
2485 	 * done our duty once we've powered it off successfully.
2486 	 */
2487 	(void) ccid_slot_inserted(ccid, slot);
2488 
2489 	return (B_TRUE);
2490 }
2491 
2492 /*
2493  * We've been asked to perform some amount of work on the various slots that we
2494  * have. This may be because the slot needs to be reset due to the completion of
2495  * a transaction or it may be because an ICC inside of the slot has been
2496  * removed.
2497  */
2498 static void
2499 ccid_worker(void *arg)
2500 {
2501 	uint_t i;
2502 	ccid_t *ccid = arg;
2503 
2504 	mutex_enter(&ccid->ccid_mutex);
2505 	ccid->ccid_stats.cst_ndiscover++;
2506 	ccid->ccid_stats.cst_lastdiscover = gethrtime();
2507 	ccid->ccid_flags |= CCID_F_WORKER_RUNNING;
2508 	ccid->ccid_flags &= ~CCID_F_WORKER_REQUESTED;
2509 
2510 	for (i = 0; i < ccid->ccid_nslots; i++) {
2511 		ccid_slot_t *slot = &ccid->ccid_slots[i];
2512 		uint_t flags;
2513 
2514 		VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2515 
2516 		if ((ccid->ccid_flags & CCID_F_DEV_GONE_MASK) != 0) {
2517 			ccid->ccid_flags &= ~CCID_F_WORKER_MASK;
2518 			mutex_exit(&ccid->ccid_mutex);
2519 			return;
2520 		}
2521 
2522 		/*
2523 		 * Snapshot the flags before we start processing the worker. At
2524 		 * this time we clear out all of the change flags as we'll be
2525 		 * operating on the device. We do not clear the
2526 		 * CCID_SLOT_F_NEED_TXN_RESET flag, as we want to make sure that
2527 		 * this is maintained until we're done here.
2528 		 */
2529 		flags = slot->cs_flags & CCID_SLOT_F_WORK_MASK;
2530 		slot->cs_flags &= ~CCID_SLOT_F_INTR_MASK;
2531 
2532 		if ((flags & CCID_SLOT_F_INTR_OVERCURRENT) != 0) {
2533 			ccid_slot_inactive(ccid, slot);
2534 		}
2535 
2536 		if ((flags & CCID_SLOT_F_CHANGED) != 0) {
2537 			if (flags & CCID_SLOT_F_INTR_GONE) {
2538 				ccid_slot_removed(ccid, slot, B_TRUE);
2539 			} else {
2540 				(void) ccid_slot_inserted(ccid, slot);
2541 				if ((slot->cs_flags & CCID_SLOT_F_ACTIVE) !=
2542 				    0) {
2543 					ccid_slot_excl_maybe_signal(slot);
2544 				}
2545 			}
2546 			VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2547 		}
2548 
2549 		if ((flags & CCID_SLOT_F_NEED_TXN_RESET) != 0) {
2550 			/*
2551 			 * If the CCID_SLOT_F_PRESENT flag is set, then we
2552 			 * should attempt to power off and power on the ICC in
2553 			 * an attempt to reset it. If this fails, trigger
2554 			 * another worker that needs to operate.
2555 			 */
2556 			if ((slot->cs_flags & CCID_SLOT_F_PRESENT) != 0) {
2557 				if (!ccid_slot_reset(ccid, slot)) {
2558 					ccid_worker_request(ccid);
2559 					continue;
2560 				}
2561 			}
2562 
2563 			VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2564 			slot->cs_flags &= ~CCID_SLOT_F_NEED_TXN_RESET;
2565 			/*
2566 			 * Try to signal the next thread waiting for exclusive
2567 			 * access.
2568 			 */
2569 			ccid_slot_excl_maybe_signal(slot);
2570 		}
2571 	}
2572 
2573 	/*
2574 	 * If we have a request to operate again, delay before we consider this,
2575 	 * to make sure we don't do too much work ourselves.
2576 	 */
2577 	if ((ccid->ccid_flags & CCID_F_WORKER_REQUESTED) != 0) {
2578 		mutex_exit(&ccid->ccid_mutex);
2579 		delay(drv_usectohz(1000) * 10);
2580 		mutex_enter(&ccid->ccid_mutex);
2581 	}
2582 
2583 	ccid->ccid_flags &= ~CCID_F_WORKER_RUNNING;
2584 	if ((ccid->ccid_flags & CCID_F_DEV_GONE_MASK) != 0) {
2585 		ccid->ccid_flags &= ~CCID_F_WORKER_REQUESTED;
2586 		mutex_exit(&ccid->ccid_mutex);
2587 		return;
2588 	}
2589 
2590 	if ((ccid->ccid_flags & CCID_F_WORKER_REQUESTED) != 0) {
2591 		(void) ddi_taskq_dispatch(ccid->ccid_taskq, ccid_worker, ccid,
2592 		    DDI_SLEEP);
2593 	}
2594 	mutex_exit(&ccid->ccid_mutex);
2595 }
2596 
2597 static void
2598 ccid_worker_request(ccid_t *ccid)
2599 {
2600 	boolean_t run;
2601 
2602 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
2603 	if ((ccid->ccid_flags & CCID_F_DEV_GONE_MASK) != 0) {
2604 		return;
2605 	}
2606 
2607 	run = (ccid->ccid_flags & CCID_F_WORKER_MASK) == 0;
2608 	ccid->ccid_flags |= CCID_F_WORKER_REQUESTED;
2609 	if (run) {
2610 		mutex_exit(&ccid->ccid_mutex);
2611 		(void) ddi_taskq_dispatch(ccid->ccid_taskq, ccid_worker, ccid,
2612 		    DDI_SLEEP);
2613 		mutex_enter(&ccid->ccid_mutex);
2614 	}
2615 }
2616 
2617 static void
2618 ccid_intr_restart_timeout(void *arg)
2619 {
2620 	ccid_t *ccid = arg;
2621 
2622 	mutex_enter(&ccid->ccid_mutex);
2623 	if ((ccid->ccid_flags & CCID_F_DEV_GONE_MASK) != 0) {
2624 		ccid->ccid_poll_timeout = NULL;
2625 		mutex_exit(&ccid->ccid_mutex);
		return;
2626 	}
2627 	mutex_exit(&ccid->ccid_mutex);
2628 
2629 	ccid_intr_poll_init(ccid);
2630 }
2631 
2632 /*
2633  * Search for the current class descriptor from the configuration cloud and
2634  * parse it for our use. We do this by first finding the current interface
2635  * descriptor and expecting it to be one of the next descriptors.
2636  */
2637 static boolean_t
2638 ccid_parse_class_desc(ccid_t *ccid)
2639 {
2640 	uint_t i;
2641 	size_t len, tlen;
2642 	usb_client_dev_data_t *dp;
2643 	usb_alt_if_data_t *alt;
2644 
2645 	/*
2646 	 * Establish the target length we're looking for from usb_parse_data().
2647 	 * Note that we cannot use the sizeof (ccid_class_descr_t) for this
2648 	 * because that function does not know how to account for the padding at
2649 	 * the end of the target structure (which is reasonable). So we manually
2650 	 * figure out the number of bytes it should in theory write.
2651 	 */
2652 	tlen = offsetof(ccid_class_descr_t, ccd_bMaxCCIDBusySlots) +
2653 	    sizeof (ccid->ccid_class.ccd_bMaxCCIDBusySlots);
2654 	dp = ccid->ccid_dev_data;
2655 	alt = &dp->dev_curr_cfg->cfg_if[dp->dev_curr_if].if_alt[0];
2656 	for (i = 0; i < alt->altif_n_cvs; i++) {
2657 		usb_cvs_data_t *cvs = &alt->altif_cvs[i];
2658 		if (cvs->cvs_buf == NULL)
2659 			continue;
2660 		if (cvs->cvs_buf_len != CCID_DESCR_LENGTH)
2661 			continue;
2662 		if (cvs->cvs_buf[1] != CCID_DESCR_TYPE)
2663 			continue;
2664 		if ((len = usb_parse_data("ccscc3lcllc5lccscc", cvs->cvs_buf,
2665 		    cvs->cvs_buf_len, &ccid->ccid_class,
2666 		    sizeof (ccid->ccid_class))) >= tlen) {
2667 			return (B_TRUE);
2668 		}
2669 		ccid_error(ccid, "!failed to parse CCID class descriptor from "
2670 		    "cvs %u, expected %lu bytes, received %lu", i, tlen, len);
2671 	}
2672 
2673 	ccid_error(ccid, "!failed to find matching CCID class descriptor");
2674 	return (B_FALSE);
2675 }
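
/*
 * To read the format string above: each 'c' consumes a one-byte field,
 * 's' a 16-bit word, 'l' a 32-bit word, and a leading digit repeats the
 * following character. "ccscc3lcllc5lccscc" thus walks the CCID class
 * descriptor in order: bLength, bDescriptorType, bcdCCID, bMaxSlotIndex,
 * bVoltageSupport, three dwords (dwProtocols, dwDefaultClock,
 * dwMaximumClock), bNumClockSupported, dwDataRate, dwMaxDataRate,
 * bNumDataRatesSupported, five dwords (dwMaxIFSD through
 * dwMaxCCIDMessageLength), bClassGetResponse, bClassEnvelope, wLcdLayout,
 * bPINSupport, and bMaxCCIDBusySlots.
 */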
2676 
2677 /*
2678  * Verify whether or not we can support this CCID reader.
2679  */
2680 static boolean_t
2681 ccid_supported(ccid_t *ccid)
2682 {
2683 	usb_client_dev_data_t *dp;
2684 	usb_alt_if_data_t *alt;
2685 	ccid_class_features_t feat;
2686 	uint_t bits;
2687 	uint16_t ver = ccid->ccid_class.ccd_bcdCCID;
2688 
2689 	if (CCID_VERSION_MAJOR(ver) != CCID_VERSION_ONE) {
2690 		ccid_error(ccid, "!refusing to attach to CCID with unsupported "
2691 		    "version %x.%2x", CCID_VERSION_MAJOR(ver),
2692 		    CCID_VERSION_MINOR(ver));
2693 		return (B_FALSE);
2694 	}
2695 
2696 	/*
2697 	 * Check the number of endpoints; there should be either two or three.
2698 	 * If three, that means we should expect an interrupt-IN endpoint.
2699 	 * Otherwise, we shouldn't. Any other value indicates something weird
2700 	 * that we should ignore.
2701 	 */
2702 	dp = ccid->ccid_dev_data;
2703 	alt = &dp->dev_curr_cfg->cfg_if[dp->dev_curr_if].if_alt[0];
2704 	switch (alt->altif_descr.bNumEndpoints) {
2705 	case 2:
2706 		ccid->ccid_flags &= ~CCID_F_HAS_INTR;
2707 		break;
2708 	case 3:
2709 		ccid->ccid_flags |= CCID_F_HAS_INTR;
2710 		break;
2711 	default:
2712 		ccid_error(ccid, "!refusing to attach to CCID with unsupported "
2713 		    "number of endpoints: %d", alt->altif_descr.bNumEndpoints);
2714 		return (B_FALSE);
2715 	}
2716 
2717 	/*
2718 	 * Try and determine the appropriate buffer size. This can be a little
2719 	 * tricky. The class descriptor tells us the maximum size that the
2720 	 * reader accepts. While it may be tempting to try and use a larger
2721 	 * value such as the maximum size, the readers really don't like
2722 	 * receiving bulk transfers that large. However, there are also reports
2723 	 * of readers that will overwrite to a fixed minimum size. Until we see
2724 	 * such a thing in the wild there's probably no point in trying to deal
2725 	 * with it here.
2726 	 */
2727 	ccid->ccid_bufsize = ccid->ccid_class.ccd_dwMaxCCIDMessageLength;
2728 	if (ccid->ccid_bufsize < CCID_MIN_MESSAGE_LENGTH) {
2729 		ccid_error(ccid, "!CCID reader maximum CCID message length (%u)"
2730 		    " is less than minimum packet length (%u)",
2731 		    ccid->ccid_bufsize, CCID_MIN_MESSAGE_LENGTH);
2732 		return (B_FALSE);
2733 	}
2734 
2735 	/*
2736 	 * At this time, we do not require that the reader support automatic
2737 	 * ICC activation or automatic ICC voltage selection; when those are
2738 	 * absent, the driver performs activation and voltage selection itself.
2739 	 */
2740 	feat = ccid->ccid_class.ccd_dwFeatures;
2741 
2742 	/*
2743 	 * Check the number of data rates that are supported by the reader. If
2744 	 * the reader reports a non-zero value but does not support automatic
2745 	 * baud negotiation, warn about that.
2746 	 */
2747 	if (ccid->ccid_class.ccd_bNumDataRatesSupported != 0 &&
2748 	    (feat & CCID_CLASS_F_AUTO_BAUD) == 0) {
2749 		ccid_error(ccid, "!CCID reader only supports fixed clock rates,"
2750 		    " data will be limited to default values");
2751 	}
2752 
2753 	/*
2754 	 * Check which automatic features the reader provides and which features
2755 	 * it does not. Missing features will require additional work before a
2756 	 * card can be activated. Note, this also applies to APDU based readers
2757 	 * which may need to have various aspects of the device negotiated.
2758 	 */
2759 
2760 	/*
2761 	 * The footnote for these two bits in CCID r1.1.0 indicates that
2762 	 * when neither is present we have to do the PPS negotiation
2763 	 * ourselves.
2764 	 */
2765 	bits = CCID_CLASS_F_AUTO_PARAM_NEG | CCID_CLASS_F_AUTO_PPS;
2766 	if ((feat & bits) == 0) {
2767 		ccid->ccid_flags |= CCID_F_NEEDS_PPS;
2768 	}
2769 
2770 	if ((feat & CCID_CLASS_F_AUTO_PARAM_NEG) == 0) {
2771 		ccid->ccid_flags |= CCID_F_NEEDS_PARAMS;
2772 	}
2773 
2774 	bits = CCID_CLASS_F_AUTO_BAUD | CCID_CLASS_F_AUTO_ICC_CLOCK;
2775 	if ((feat & bits) != bits) {
2776 		ccid->ccid_flags |= CCID_F_NEEDS_DATAFREQ;
2777 	}
2778 
2779 	return (B_TRUE);
2780 }
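
/*
 * For example, a reader whose ccd_dwFeatures includes
 * CCID_CLASS_F_AUTO_PPS, CCID_CLASS_F_AUTO_PARAM_NEG,
 * CCID_CLASS_F_AUTO_BAUD, and CCID_CLASS_F_AUTO_ICC_CLOCK ends up with
 * none of CCID_F_NEEDS_PPS, CCID_F_NEEDS_PARAMS, or CCID_F_NEEDS_DATAFREQ
 * set and passes the automation check in ccid_slot_params_init(). A reader
 * missing CCID_CLASS_F_AUTO_PARAM_NEG is flagged CCID_F_NEEDS_PARAMS and
 * will have ICC activation fail there instead.
 */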
2781 
2782 static boolean_t
2783 ccid_open_pipes(ccid_t *ccid)
2784 {
2785 	int ret;
2786 	usb_ep_data_t *ep;
2787 	usb_client_dev_data_t *data;
2788 	usb_pipe_policy_t policy;
2789 
2790 	data = ccid->ccid_dev_data;
2791 
2792 	/*
2793 	 * First fill all the descriptors.
2794 	 */
2795 	ep = usb_lookup_ep_data(ccid->ccid_dip, data, data->dev_curr_if, 0, 0,
2796 	    USB_EP_ATTR_BULK, USB_EP_DIR_IN);
2797 	if (ep == NULL) {
2798 		ccid_error(ccid, "!failed to find CCID Bulk-IN endpoint");
2799 		return (B_FALSE);
2800 	}
2801 
2802 	if ((ret = usb_ep_xdescr_fill(USB_EP_XDESCR_CURRENT_VERSION,
2803 	    ccid->ccid_dip, ep, &ccid->ccid_bulkin_xdesc)) != USB_SUCCESS) {
2804 		ccid_error(ccid, "!failed to fill Bulk-IN xdescr: %d", ret);
2805 		return (B_FALSE);
2806 	}
2807 
2808 	ep = usb_lookup_ep_data(ccid->ccid_dip, data, data->dev_curr_if, 0, 0,
2809 	    USB_EP_ATTR_BULK, USB_EP_DIR_OUT);
2810 	if (ep == NULL) {
2811 		ccid_error(ccid, "!failed to find CCID Bulk-OUT endpoint");
2812 		return (B_FALSE);
2813 	}
2814 
2815 	if ((ret = usb_ep_xdescr_fill(USB_EP_XDESCR_CURRENT_VERSION,
2816 	    ccid->ccid_dip, ep, &ccid->ccid_bulkout_xdesc)) != USB_SUCCESS) {
2817 		ccid_error(ccid, "!failed to fill Bulk-OUT xdescr: %d", ret);
2818 		return (B_FALSE);
2819 	}
2820 
2821 	if (ccid->ccid_flags & CCID_F_HAS_INTR) {
2822 		ep = usb_lookup_ep_data(ccid->ccid_dip, data, data->dev_curr_if,
2823 		    0, 0, USB_EP_ATTR_INTR, USB_EP_DIR_IN);
2824 		if (ep == NULL) {
2825 			ccid_error(ccid, "!failed to find CCID Intr-IN "
2826 			    "endpoint");
2827 			return (B_FALSE);
2828 		}
2829 
2830 		if ((ret = usb_ep_xdescr_fill(USB_EP_XDESCR_CURRENT_VERSION,
2831 		    ccid->ccid_dip, ep, &ccid->ccid_intrin_xdesc)) !=
2832 		    USB_SUCCESS) {
2833 			ccid_error(ccid, "!failed to fill Intr-OUT xdescr: %d",
2834 			    ret);
2835 			return (B_FALSE);
2836 		}
2837 	}
2838 
2839 	/*
2840 	 * Now open up the pipes.
2841 	 */
2842 	bzero(&policy, sizeof (policy));
2843 	policy.pp_max_async_reqs = CCID_NUM_ASYNC_REQS;
2844 
2845 	if ((ret = usb_pipe_xopen(ccid->ccid_dip, &ccid->ccid_bulkin_xdesc,
2846 	    &policy, USB_FLAGS_SLEEP, &ccid->ccid_bulkin_pipe)) !=
2847 	    USB_SUCCESS) {
2848 		ccid_error(ccid, "!failed to open Bulk-IN pipe: %d\n", ret);
2849 		return (B_FALSE);
2850 	}
2851 
2852 	if ((ret = usb_pipe_xopen(ccid->ccid_dip, &ccid->ccid_bulkout_xdesc,
2853 	    &policy, USB_FLAGS_SLEEP, &ccid->ccid_bulkout_pipe)) !=
2854 	    USB_SUCCESS) {
2855 		ccid_error(ccid, "!failed to open Bulk-OUT pipe: %d\n", ret);
2856 		usb_pipe_close(ccid->ccid_dip, ccid->ccid_bulkin_pipe,
2857 		    USB_FLAGS_SLEEP, NULL, NULL);
2858 		ccid->ccid_bulkin_pipe = NULL;
2859 		return (B_FALSE);
2860 	}
2861 
2862 	if (ccid->ccid_flags & CCID_F_HAS_INTR) {
2863 		if ((ret = usb_pipe_xopen(ccid->ccid_dip,
2864 		    &ccid->ccid_intrin_xdesc, &policy, USB_FLAGS_SLEEP,
2865 		    &ccid->ccid_intrin_pipe)) != USB_SUCCESS) {
2866 			ccid_error(ccid, "!failed to open Intr-IN pipe: %d\n",
2867 			    ret);
2868 			usb_pipe_close(ccid->ccid_dip, ccid->ccid_bulkin_pipe,
2869 			    USB_FLAGS_SLEEP, NULL, NULL);
2870 			ccid->ccid_bulkin_pipe = NULL;
2871 			usb_pipe_close(ccid->ccid_dip, ccid->ccid_bulkout_pipe,
2872 			    USB_FLAGS_SLEEP, NULL, NULL);
2873 			ccid->ccid_bulkout_pipe = NULL;
2874 			return (B_FALSE);
2875 		}
2876 	}
2877 
2878 	ccid->ccid_control_pipe = data->dev_default_ph;
2879 	return (B_TRUE);
2880 }
2881 
2882 static void
2883 ccid_slots_fini(ccid_t *ccid)
2884 {
2885 	uint_t i;
2886 
2887 	for (i = 0; i < ccid->ccid_nslots; i++) {
2888 		VERIFY3U(ccid->ccid_slots[i].cs_slotno, ==, i);
2889 
2890 		if (ccid->ccid_slots[i].cs_command != NULL) {
2891 			ccid_command_free(ccid->ccid_slots[i].cs_command);
2892 			ccid->ccid_slots[i].cs_command = NULL;
2893 		}
2894 
2895 		cv_destroy(&ccid->ccid_slots[i].cs_io.ci_cv);
2896 		freemsgchain(ccid->ccid_slots[i].cs_atr);
2897 		atr_data_free(ccid->ccid_slots[i].cs_icc.icc_atr_data);
2898 		list_destroy(&ccid->ccid_slots[i].cs_minors);
2899 		list_destroy(&ccid->ccid_slots[i].cs_excl_waiters);
2900 	}
2901 
2902 	ddi_remove_minor_node(ccid->ccid_dip, NULL);
2903 	kmem_free(ccid->ccid_slots, sizeof (ccid_slot_t) * ccid->ccid_nslots);
2904 	ccid->ccid_nslots = 0;
2905 	ccid->ccid_slots = NULL;
2906 }
2907 
2908 static boolean_t
2909 ccid_slots_init(ccid_t *ccid)
2910 {
2911 	uint_t i;
2912 
2913 	/*
2914 	 * The class descriptor reports the maximum valid slot index.
2915 	 * We therefore have to add one to determine the actual number of slots
2916 	 * that exist.
2917 	 */
2918 	ccid->ccid_nslots = ccid->ccid_class.ccd_bMaxSlotIndex + 1;
2919 	ccid->ccid_slots = kmem_zalloc(sizeof (ccid_slot_t) * ccid->ccid_nslots,
2920 	    KM_SLEEP);
2921 	for (i = 0; i < ccid->ccid_nslots; i++) {
2922 		ccid_slot_t *slot = &ccid->ccid_slots[i];
2923 
2924 		/*
2925 		 * We initialize every possible slot as having changed to make
2926 		 * sure that we have a chance to discover it. See the slot
2927 		 * detection section in the big theory statement for more info.
2928 		 */
2929 		slot->cs_flags |= CCID_SLOT_F_CHANGED;
2930 		slot->cs_slotno = i;
2931 		slot->cs_ccid = ccid;
2932 		slot->cs_icc.icc_atr_data = atr_data_alloc();
2933 		slot->cs_idx.cmi_minor = CCID_MINOR_INVALID;
2934 		slot->cs_idx.cmi_isslot = B_TRUE;
2935 		slot->cs_idx.cmi_data.cmi_slot = slot;
2936 		cv_init(&slot->cs_io.ci_cv, NULL, CV_DRIVER, NULL);
2937 		list_create(&slot->cs_minors, sizeof (ccid_minor_t),
2938 		    offsetof(ccid_minor_t, cm_minor_list));
2939 		list_create(&slot->cs_excl_waiters, sizeof (ccid_minor_t),
2940 		    offsetof(ccid_minor_t, cm_excl_list));
2941 	}
2942 
2943 	return (B_TRUE);
2944 }
2945 
2946 static void
2947 ccid_minors_fini(ccid_t *ccid)
2948 {
2949 	uint_t i;
2950 
2951 	ddi_remove_minor_node(ccid->ccid_dip, NULL);
2952 	for (i = 0; i < ccid->ccid_nslots; i++) {
2953 		if (ccid->ccid_slots[i].cs_idx.cmi_minor == CCID_MINOR_INVALID)
2954 			continue;
2955 		ccid_minor_idx_free(&ccid->ccid_slots[i].cs_idx);
2956 	}
2957 }
2958 
2959 static boolean_t
2960 ccid_minors_init(ccid_t *ccid)
2961 {
2962 	uint_t i;
2963 
2964 	for (i = 0; i < ccid->ccid_nslots; i++) {
2965 		char buf[32];
2966 
2967 		(void) ccid_minor_idx_alloc(&ccid->ccid_slots[i].cs_idx,
2968 		    B_TRUE);
2969 
2970 		(void) snprintf(buf, sizeof (buf), "slot%u", i);
2971 		if (ddi_create_minor_node(ccid->ccid_dip, buf, S_IFCHR,
2972 		    ccid->ccid_slots[i].cs_idx.cmi_minor,
2973 		    DDI_NT_CCID_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
2974 			ccid_minors_fini(ccid);
2975 			return (B_FALSE);
2976 		}
2977 	}
2978 
2979 	return (B_TRUE);
2980 }
2981 
2982 static void
2983 ccid_intr_poll_fini(ccid_t *ccid)
2984 {
2985 	if (ccid->ccid_flags & CCID_F_HAS_INTR) {
2986 		timeout_id_t tid;
2987 
2988 		mutex_enter(&ccid->ccid_mutex);
2989 		tid = ccid->ccid_poll_timeout;
2990 		ccid->ccid_poll_timeout = NULL;
2991 		mutex_exit(&ccid->ccid_mutex);
2992 		(void) untimeout(tid);
2993 		usb_pipe_stop_intr_polling(ccid->ccid_intrin_pipe,
2994 		    USB_FLAGS_SLEEP);
2995 	} else {
2996 		VERIFY3P(ccid->ccid_intrin_pipe, ==, NULL);
2997 	}
2998 }
2999 
3000 static void
3001 ccid_intr_poll_init(ccid_t *ccid)
3002 {
3003 	int ret;
3004 	usb_intr_req_t *uirp;
3005 
3006 	uirp = usb_alloc_intr_req(ccid->ccid_dip, 0, USB_FLAGS_SLEEP);
3007 	uirp->intr_client_private = (usb_opaque_t)ccid;
3008 	uirp->intr_attributes = USB_ATTRS_SHORT_XFER_OK |
3009 	    USB_ATTRS_AUTOCLEARING;
3010 	uirp->intr_len = CCID_INTR_RESPONSE_SIZE;
3011 	uirp->intr_cb = ccid_intr_pipe_cb;
3012 	uirp->intr_exc_cb = ccid_intr_pipe_except_cb;
3013 
3014 	mutex_enter(&ccid->ccid_mutex);
3015 	if (ccid->ccid_flags & CCID_F_DEV_GONE_MASK) {
3016 		mutex_exit(&ccid->ccid_mutex);
3017 		usb_free_intr_req(uirp);
3018 		return;
3019 	}
3020 
3021 	if ((ret = usb_pipe_intr_xfer(ccid->ccid_intrin_pipe, uirp,
3022 	    USB_FLAGS_SLEEP)) != USB_SUCCESS) {
3023 		ccid_error(ccid, "!failed to start polling on CCID Intr-IN "
3024 		    "pipe: %d", ret);
3025 		ccid->ccid_poll_timeout = timeout(ccid_intr_restart_timeout,
3026 		    ccid, drv_usectohz(1000000));
3027 		usb_free_intr_req(uirp);
3028 	}
3029 	mutex_exit(&ccid->ccid_mutex);
3030 }
3031 
3032 static void
3033 ccid_cleanup_bulkin(ccid_t *ccid)
3034 {
3035 	uint_t i;
3036 
3037 	VERIFY3P(ccid->ccid_bulkin_dispatched, ==, NULL);
3038 	for (i = 0; i < ccid->ccid_bulkin_alloced; i++) {
3039 		VERIFY3P(ccid->ccid_bulkin_cache[i], !=, NULL);
3040 		usb_free_bulk_req(ccid->ccid_bulkin_cache[i]);
3041 		ccid->ccid_bulkin_cache[i] = NULL;
3042 	}
3043 
3044 #ifdef	DEBUG
3045 	for (i = 0; i < CCID_BULK_NALLOCED; i++) {
3046 		VERIFY3P(ccid->ccid_bulkin_cache[i], ==, NULL);
3047 	}
3048 #endif
3049 	ccid->ccid_bulkin_alloced = 0;
3050 }
3051 
3052 static int
3053 ccid_disconnect_cb(dev_info_t *dip)
3054 {
3055 	int inst;
3056 	ccid_t *ccid;
3057 	uint_t i;
3058 
3059 	if (dip == NULL)
3060 		goto done;
3061 
3062 	inst = ddi_get_instance(dip);
3063 	ccid = ddi_get_soft_state(ccid_softstate, inst);
3064 	if (ccid == NULL)
3065 		goto done;
3066 	VERIFY3P(dip, ==, ccid->ccid_dip);
3067 
3068 	mutex_enter(&ccid->ccid_mutex);
3069 	/*
3070 	 * First, set the disconnected flag. This will make sure that anyone
3071 	 * that tries to make additional operations will be kicked out. This
3072 	 * flag is checked by detach and by users.
3073 	 */
3074 	ccid->ccid_flags |= CCID_F_DISCONNECTED;
3075 
3076 	/*
3077 	 * Now, go through any threads that are blocked on a minor for exclusive
3078 	 * access. They should be woken up and they'll fail due to the fact that
3079 	 * we've set the disconnected flag above.
3080 	 */
3081 	for (i = 0; i < ccid->ccid_nslots; i++) {
3082 		ccid_minor_t *cmp;
3083 		ccid_slot_t *slot = &ccid->ccid_slots[i];
3084 
3085 		for (cmp = list_head(&slot->cs_excl_waiters); cmp != NULL;
3086 		    cmp = list_next(&slot->cs_excl_waiters, cmp)) {
3087 			cv_signal(&cmp->cm_excl_cv);
3088 		}
3089 	}
3090 
3091 	/*
3092 	 * Finally, we need to basically wake up anyone blocked in read and make
3093 	 * sure that they don't wait there forever and make sure that anyone
3094 	 * polling gets a POLLHUP. We can't really distinguish between this and
3095 	 * an ICC being removed. It will be discovered when someone tries to do
3096 	 * an operation and they receive an ENODEV. We only need to do this on
3097 	 * minors that have exclusive access. Don't worry about them finishing
3098 	 * up, this'll be done as part of detach.
3099 	 */
3100 	for (i = 0; i < ccid->ccid_nslots; i++) {
3101 		ccid_slot_t *slot = &ccid->ccid_slots[i];
3102 		if (slot->cs_excl_minor == NULL)
3103 			continue;
3104 
3105 		pollwakeup(&slot->cs_excl_minor->cm_pollhead,
3106 		    POLLHUP | POLLERR);
3107 		cv_signal(&slot->cs_excl_minor->cm_read_cv);
3108 	}
3109 
3110 	/*
3111 	 * If there are outstanding commands, they will ultimately be cleaned
3112 	 * up as the USB commands themselves time out. We will get notified
3113 	 * through the various bulk xfer exception callbacks, which will induce
3114 	 * the cleanup through ccid_command_transport_error(). This will also
3115 	 * take care of commands waiting for I/O teardown.
3116 	 */
3117 	mutex_exit(&ccid->ccid_mutex);
3118 
3119 done:
3120 	return (USB_SUCCESS);
3121 }
3122 
3123 static usb_event_t ccid_usb_events = {
3124 	ccid_disconnect_cb,
3125 	NULL,
3126 	NULL,
3127 	NULL
3128 };
3129 
3130 static void
3131 ccid_cleanup(dev_info_t *dip)
3132 {
3133 	int inst;
3134 	ccid_t *ccid;
3135 
3136 	if (dip == NULL)
3137 		return;
3138 
3139 	inst = ddi_get_instance(dip);
3140 	ccid = ddi_get_soft_state(ccid_softstate, inst);
3141 	if (ccid == NULL)
3142 		return;
3143 	VERIFY3P(dip, ==, ccid->ccid_dip);
3144 
3145 	/*
3146 	 * Make sure we set the detaching flag so anything running in the
3147 	 * background knows to stop.
3148 	 */
3149 	mutex_enter(&ccid->ccid_mutex);
3150 	ccid->ccid_flags |= CCID_F_DETACHING;
3151 	mutex_exit(&ccid->ccid_mutex);
3152 
3153 	if ((ccid->ccid_attach & CCID_ATTACH_MINORS) != 0) {
3154 		ccid_minors_fini(ccid);
3155 		ccid->ccid_attach &= ~CCID_ATTACH_MINORS;
3156 	}
3157 
3158 	if ((ccid->ccid_attach & CCID_ATTACH_INTR_ACTIVE) != 0) {
3159 		ccid_intr_poll_fini(ccid);
3160 		ccid->ccid_attach &= ~CCID_ATTACH_INTR_ACTIVE;
3161 	}
3162 
3163 	/*
3164 	 * At this point, we have shut down the interrupt pipe, the last place
3165 	 * aside from a user that could have kicked off I/O. So finally wait for
3166 	 * any worker threads.
3167 	 */
3168 	if (ccid->ccid_taskq != NULL) {
3169 		ddi_taskq_wait(ccid->ccid_taskq);
3170 		mutex_enter(&ccid->ccid_mutex);
3171 		VERIFY0(ccid->ccid_flags & CCID_F_WORKER_MASK);
3172 		mutex_exit(&ccid->ccid_mutex);
3173 	}
3174 
3175 	if ((ccid->ccid_attach & CCID_ATTACH_HOTPLUG_CB) != 0) {
3176 		usb_unregister_event_cbs(dip, &ccid_usb_events);
3177 		ccid->ccid_attach &= ~CCID_ATTACH_HOTPLUG_CB;
3178 	}
3179 
3180 	if ((ccid->ccid_attach & CCID_ATTACH_SLOTS) != 0) {
3181 		ccid_slots_fini(ccid);
3182 		ccid->ccid_attach &= ~CCID_ATTACH_SLOTS;
3183 	}
3184 
3185 	if ((ccid->ccid_attach & CCID_ATTACH_SEQ_IDS) != 0) {
3186 		id_space_destroy(ccid->ccid_seqs);
3187 		ccid->ccid_seqs = NULL;
3188 		ccid->ccid_attach &= ~CCID_ATTACH_SEQ_IDS;
3189 	}
3190 
3191 	if ((ccid->ccid_attach & CCID_ATTACH_OPEN_PIPES) != 0) {
3192 		usb_pipe_close(dip, ccid->ccid_bulkin_pipe, USB_FLAGS_SLEEP,
3193 		    NULL, NULL);
3194 		ccid->ccid_bulkin_pipe = NULL;
3195 		usb_pipe_close(dip, ccid->ccid_bulkout_pipe, USB_FLAGS_SLEEP,
3196 		    NULL, NULL);
3197 		ccid->ccid_bulkout_pipe = NULL;
3198 		if ((ccid->ccid_flags & CCID_F_HAS_INTR) != 0) {
3199 			usb_pipe_close(dip, ccid->ccid_intrin_pipe,
3200 			    USB_FLAGS_SLEEP, NULL, NULL);
3201 			ccid->ccid_intrin_pipe = NULL;
3202 		} else {
3203 			VERIFY3P(ccid->ccid_intrin_pipe, ==, NULL);
3204 		}
3205 		ccid->ccid_control_pipe = NULL;
3206 		ccid->ccid_attach &= ~CCID_ATTACH_OPEN_PIPES;
3207 	}
3208 
3209 	/*
3210 	 * Now that all of the pipes are closed, free any cached bulk requests
3211 	 * we may still have.
3212 	 */
3213 	ccid_cleanup_bulkin(ccid);
3214 
3215 	if (ccid->ccid_attach & CCID_ATTACH_CMD_LIST) {
3216 		ccid_command_t *cc;
3217 
3218 		while ((cc = list_remove_head(&ccid->ccid_command_queue)) !=
3219 		    NULL) {
3220 			ccid_command_free(cc);
3221 		}
3222 		list_destroy(&ccid->ccid_command_queue);
3223 
3224 		while ((cc = list_remove_head(&ccid->ccid_complete_queue)) !=
3225 		    NULL) {
3226 			ccid_command_free(cc);
3227 		}
3228 		list_destroy(&ccid->ccid_complete_queue);
3229 	}
3230 
3231 	if ((ccid->ccid_attach & CCID_ATTACH_TASKQ) != 0) {
3232 		ddi_taskq_destroy(ccid->ccid_taskq);
3233 		ccid->ccid_taskq = NULL;
3234 		ccid->ccid_attach &= ~CCID_ATTACH_TASKQ;
3235 	}
3236 
3237 	if ((ccid->ccid_attach & CCID_ATTACH_MUTEX_INIT) != 0) {
3238 		mutex_destroy(&ccid->ccid_mutex);
3239 		ccid->ccid_attach &= ~CCID_ATTACH_MUTEX_INIT;
3240 	}
3241 
3242 	if ((ccid->ccid_attach & CCID_ATTACH_USB_CLIENT) != 0) {
3243 		usb_client_detach(dip, ccid->ccid_dev_data);
3244 		ccid->ccid_dev_data = NULL;
3245 		ccid->ccid_attach &= ~CCID_ATTACH_USB_CLIENT;
3246 	}
3247 
3248 	ASSERT0(ccid->ccid_attach);
3249 	ddi_soft_state_free(ccid_softstate, inst);
3250 }
3251 
3252 static int
3253 ccid_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3254 {
3255 	ccid_t *ccid;
3256 	int inst, ret;
3257 	char buf[64];
3258 
3259 	if (cmd != DDI_ATTACH)
3260 		return (DDI_FAILURE);
3261 
3262 	inst = ddi_get_instance(dip);
3263 	if (ddi_soft_state_zalloc(ccid_softstate, inst) != DDI_SUCCESS) {
3264 		ccid_error(NULL, "!failed to allocate soft state for ccid "
3265 		    "instance %d", inst);
3266 		return (DDI_FAILURE);
3267 	}
3268 
3269 	ccid = ddi_get_soft_state(ccid_softstate, inst);
3270 	ccid->ccid_dip = dip;
3271 
3272 	if ((ret = usb_client_attach(dip, USBDRV_VERSION, 0)) != USB_SUCCESS) {
3273 		ccid_error(ccid, "!failed to attach to usb client: %d", ret);
3274 		goto cleanup;
3275 	}
3276 	ccid->ccid_attach |= CCID_ATTACH_USB_CLIENT;
3277 
3278 	if ((ret = usb_get_dev_data(dip, &ccid->ccid_dev_data, USB_PARSE_LVL_IF,
3279 	    0)) != USB_SUCCESS) {
3280 		ccid_error(ccid, "!failed to get usb device data: %d", ret);
3281 		goto cleanup;
3282 	}
3283 
3284 	mutex_init(&ccid->ccid_mutex, NULL, MUTEX_DRIVER,
3285 	    ccid->ccid_dev_data->dev_iblock_cookie);
3286 	ccid->ccid_attach |= CCID_ATTACH_MUTEX_INIT;
3287 
3288 	(void) snprintf(buf, sizeof (buf), "ccid%d_taskq", inst);
3289 	ccid->ccid_taskq = ddi_taskq_create(dip, buf, 1, TASKQ_DEFAULTPRI, 0);
3290 	if (ccid->ccid_taskq == NULL) {
3291 		ccid_error(ccid, "!failed to create CCID taskq");
3292 		goto cleanup;
3293 	}
3294 	ccid->ccid_attach |= CCID_ATTACH_TASKQ;
3295 
3296 	list_create(&ccid->ccid_command_queue, sizeof (ccid_command_t),
3297 	    offsetof(ccid_command_t, cc_list_node));
3298 	list_create(&ccid->ccid_complete_queue, sizeof (ccid_command_t),
3299 	    offsetof(ccid_command_t, cc_list_node));
3300 
3301 	if (!ccid_parse_class_desc(ccid)) {
3302 		ccid_error(ccid, "!failed to parse CCID class descriptor");
3303 		goto cleanup;
3304 	}
3305 
3306 	if (!ccid_supported(ccid)) {
3307 		ccid_error(ccid,
3308 		    "!CCID reader is not supported, not attaching");
3309 		goto cleanup;
3310 	}
3311 
3312 	if (!ccid_open_pipes(ccid)) {
3313 		ccid_error(ccid, "!failed to open CCID pipes, not attaching");
3314 		goto cleanup;
3315 	}
3316 	ccid->ccid_attach |= CCID_ATTACH_OPEN_PIPES;
3317 
3318 	(void) snprintf(buf, sizeof (buf), "ccid%d_seqs", inst);
3319 	if ((ccid->ccid_seqs = id_space_create(buf, CCID_SEQ_MIN,
3320 	    CCID_SEQ_MAX + 1)) == NULL) {
3321 		ccid_error(ccid, "!failed to create CCID sequence id space");
3322 		goto cleanup;
3323 	}
3324 	ccid->ccid_attach |= CCID_ATTACH_SEQ_IDS;
3325 
3326 	if (!ccid_slots_init(ccid)) {
3327 		ccid_error(ccid, "!failed to initialize CCID slot structures");
3328 		goto cleanup;
3329 	}
3330 	ccid->ccid_attach |= CCID_ATTACH_SLOTS;
3331 
3332 	if (usb_register_event_cbs(dip, &ccid_usb_events, 0) != USB_SUCCESS) {
3333 		ccid_error(ccid, "!failed to register USB hotplug callbacks");
3334 		goto cleanup;
3335 	}
3336 	ccid->ccid_attach |= CCID_ATTACH_HOTPLUG_CB;
3337 
3338 	/*
3339 	 * Before we enable the interrupt pipe, take a shot at priming our
3340 	 * bulkin_cache.
3341 	 */
3342 	mutex_enter(&ccid->ccid_mutex);
3343 	ccid_bulkin_cache_refresh(ccid);
3344 	mutex_exit(&ccid->ccid_mutex);
3345 
3346 	if (ccid->ccid_flags & CCID_F_HAS_INTR) {
3347 		ccid_intr_poll_init(ccid);
3348 	}
3349 	ccid->ccid_attach |= CCID_ATTACH_INTR_ACTIVE;
3350 
3351 	/*
3352 	 * Create minor nodes for each slot.
3353 	 */
3354 	if (!ccid_minors_init(ccid)) {
3355 		ccid_error(ccid, "!failed to create minor nodes");
3356 		goto cleanup;
3357 	}
3358 	ccid->ccid_attach |= CCID_ATTACH_MINORS;
3359 
3360 	mutex_enter(&ccid->ccid_mutex);
3361 	ccid_worker_request(ccid);
3362 	mutex_exit(&ccid->ccid_mutex);
3363 
3364 	return (DDI_SUCCESS);
3365 
3366 cleanup:
3367 	ccid_cleanup(dip);
3368 	return (DDI_FAILURE);
3369 }
3370 
3371 static int
3372 ccid_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **outp)
3373 {
3374 	return (DDI_FAILURE);
3375 }
3376 
3377 static int
3378 ccid_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3379 {
3380 	int inst;
3381 	ccid_t *ccid;
3382 
3383 	if (cmd != DDI_DETACH)
3384 		return (DDI_FAILURE);
3385 
3386 	inst = ddi_get_instance(dip);
3387 	ccid = ddi_get_soft_state(ccid_softstate, inst);
3388 	VERIFY3P(ccid, !=, NULL);
3389 	VERIFY3P(dip, ==, ccid->ccid_dip);
3390 
3391 	mutex_enter(&ccid->ccid_mutex);
3392 
3393 	/*
3394 	 * If the device hasn't been disconnected in a USB sense, refuse to
3395 	 * detach. Otherwise, there's no way to guarantee that the ccid
3396 	 * driver will be attached when a user hotplugs an ICC.
3397 	 */
3398 	if ((ccid->ccid_flags & CCID_F_DISCONNECTED) == 0) {
3399 		mutex_exit(&ccid->ccid_mutex);
3400 		return (DDI_FAILURE);
3401 	}
3402 
3403 	if (!list_is_empty(&ccid->ccid_command_queue) ||
3404 	    !list_is_empty(&ccid->ccid_complete_queue)) {
3405 		mutex_exit(&ccid->ccid_mutex);
3406 		return (DDI_FAILURE);
3407 	}
3408 	mutex_exit(&ccid->ccid_mutex);
3409 
3410 	ccid_cleanup(dip);
3411 	return (DDI_SUCCESS);
3412 }
3413 
3414 static void
3415 ccid_minor_free(ccid_minor_t *cmp)
3416 {
3417 	VERIFY3U(cmp->cm_idx.cmi_minor, ==, CCID_MINOR_INVALID);
3418 	crfree(cmp->cm_opener);
3419 	cv_destroy(&cmp->cm_iowait_cv);
3420 	cv_destroy(&cmp->cm_read_cv);
3421 	cv_destroy(&cmp->cm_excl_cv);
3422 	kmem_free(cmp, sizeof (ccid_minor_t));
3423 
3424 }
3425 
3426 static int
3427 ccid_open(dev_t *devp, int flag, int otyp, cred_t *credp)
3428 {
3429 	ccid_minor_idx_t *idx;
3430 	ccid_minor_t *cmp;
3431 	ccid_slot_t *slot;
3432 
3433 	/*
3434 	 * Check the zone first, so we can deny our existence to non-global zones.
3435 	 */
3436 	if (crgetzoneid(credp) != GLOBAL_ZONEID)
3437 		return (ENOENT);
3438 
3439 	if ((flag & (FNDELAY | FEXCL)) != 0)
3440 		return (EINVAL);
3441 
3442 	if (drv_priv(credp) != 0)
3443 		return (EPERM);
3444 
3445 	if (otyp != OTYP_CHR)
3446 		return (ENOTSUP);
3447 
3448 	if ((flag & FREAD) != FREAD)
3449 		return (EINVAL);
3450 
3451 	idx = ccid_minor_find(getminor(*devp));
3452 	if (idx == NULL) {
3453 		return (ENOENT);
3454 	}
3455 
3456 	/*
3457 	 * We don't expect anyone to be able to get a non-slot related minor. If
3458 	 * that somehow happens, guard against it and error out.
3459 	 */
3460 	if (!idx->cmi_isslot) {
3461 		return (ENOENT);
3462 	}
3463 
3464 	slot = idx->cmi_data.cmi_slot;
3465 
3466 	mutex_enter(&slot->cs_ccid->ccid_mutex);
3467 	if ((slot->cs_ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
3468 		mutex_exit(&slot->cs_ccid->ccid_mutex);
3469 		return (ENODEV);
3470 	}
3471 	mutex_exit(&slot->cs_ccid->ccid_mutex);
3472 
3473 	cmp = kmem_zalloc(sizeof (ccid_minor_t), KM_SLEEP);
3474 
3475 	cmp->cm_idx.cmi_minor = CCID_MINOR_INVALID;
3476 	cmp->cm_idx.cmi_isslot = B_FALSE;
3477 	cmp->cm_idx.cmi_data.cmi_user = cmp;
3478 	if (!ccid_minor_idx_alloc(&cmp->cm_idx, B_FALSE)) {
3479 		kmem_free(cmp, sizeof (ccid_minor_t));
3480 		return (ENOSPC);
3481 	}
3482 	cv_init(&cmp->cm_excl_cv, NULL, CV_DRIVER, NULL);
3483 	cv_init(&cmp->cm_read_cv, NULL, CV_DRIVER, NULL);
3484 	cv_init(&cmp->cm_iowait_cv, NULL, CV_DRIVER, NULL);
3485 	cmp->cm_opener = crdup(credp);
3486 	cmp->cm_slot = slot;
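	/*
	 * Hand back a new dev_t built from our freshly allocated minor, so
	 * that each open(9E) gets its own cloned minor node state.
	 */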
3487 	*devp = makedevice(getmajor(*devp), cmp->cm_idx.cmi_minor);
3488 
3489 	if ((flag & FWRITE) == FWRITE) {
3490 		cmp->cm_flags |= CCID_MINOR_F_WRITABLE;
3491 	}
3492 
3493 	mutex_enter(&slot->cs_ccid->ccid_mutex);
3494 	list_insert_tail(&slot->cs_minors, cmp);
3495 	mutex_exit(&slot->cs_ccid->ccid_mutex);
3496 
3497 	return (0);
3498 }
3499 
3500 /*
3501  * Copy a command's data, which may be an mblk chain, out to the user.
3502  */
3503 static int
3504 ccid_read_copyout(struct uio *uiop, const mblk_t *mp)
3505 {
3506 	offset_t off;
3507 
3508 	off = uiop->uio_loffset;
3509 	VERIFY3P(mp->b_next, ==, NULL);
3510 
3511 	for (; mp != NULL; mp = mp->b_cont) {
3512 		int ret;
3513 
3514 		if (MBLKL(mp) == 0)
3515 			continue;
3516 
3517 		ret = uiomove(mp->b_rptr, MBLKL(mp), UIO_READ, uiop);
3518 		if (ret != 0) {
3519 			return (EFAULT);
3520 		}
3521 	}
3522 
3523 	uiop->uio_loffset = off;
3524 	return (0);
3525 }
3526 
3527 /*
3528  * Called to indicate that we are ready for a user to consume the I/O.
3529  */
3530 static void
3531 ccid_user_io_done(ccid_t *ccid, ccid_slot_t *slot)
3532 {
3533 	ccid_minor_t *cmp;
3534 
3535 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
3536 
3537 	slot->cs_io.ci_flags &= ~CCID_IO_F_IN_PROGRESS;
3538 	slot->cs_io.ci_flags |= CCID_IO_F_DONE;
3539 	cmp = slot->cs_excl_minor;
3540 	if (cmp != NULL) {
3541 		ccid_slot_pollin_signal(slot);
3542 		cv_signal(&cmp->cm_read_cv);
3543 	}
3544 }
3545 
3546 /*
3547  * This is called in a few different situations. It's called when an exclusive
3548  * hold is being released by a user on the slot. It's also called when the ICC
3549  * is removed, the reader has been unplugged, or the ICC is being reset. In all
3550  * these cases we need to make sure that I/O is taken care of and we won't be
3551  * leaving behind vestigial garbage.
3552  */
3553 static void
3554 ccid_teardown_apdu(ccid_t *ccid, ccid_slot_t *slot, int error)
3555 {
3556 
3557 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
3558 
3559 	/*
3560 	 * If no I/O is in progress, then there's nothing to do at our end.
3561 	 */
3562 	if ((slot->cs_io.ci_flags & CCID_IO_F_IN_PROGRESS) == 0) {
3563 		return;
3564 	}
3565 
3566 	slot->cs_io.ci_errno = error;
3567 	ccid_user_io_done(ccid, slot);
3568 
3569 	/*
3570 	 * There is still I/O going on. We need to mark this on the slot such
3571 	 * that no one can gain ownership of it or issue commands. This will
3572 	 * block hand off of a slot.
3573 	 */
3574 	slot->cs_flags |= CCID_SLOT_F_NEED_IO_TEARDOWN;
3575 }
3576 
3577 /*
3578  * This function is called in response to a CCID command completing.
3579  */
3580 static void
3581 ccid_complete_apdu(ccid_t *ccid, ccid_slot_t *slot, ccid_command_t *cc)
3582 {
3583 	ccid_reply_command_status_t crs;
3584 	ccid_reply_icc_status_t cis;
3585 	ccid_command_err_t cce;
3586 
3587 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
3588 	VERIFY3P(slot->cs_io.ci_command, ==, cc);
3589 
3590 	/*
3591 	 * This completion could be called because a user is no
3592 	 * longer present, but we still have outstanding work to do in the
3593 	 * stack. As such, we need to go through and check if the flag was set
3594 	 * on the slot during teardown and if so, clean it up now.
3595 	 */
3596 	if ((slot->cs_flags & CCID_SLOT_F_NEED_IO_TEARDOWN) != 0) {
3597 		ccid_command_free(cc);
3598 		slot->cs_io.ci_command = NULL;
3599 		ccid_slot_io_teardown_done(slot);
3600 		return;
3601 	}
3602 
3603 	/*
3604 	 * Process this command and figure out what we should logically be
3605 	 * returning to the user.
3606 	 */
3607 	if (cc->cc_state != CCID_COMMAND_COMPLETE) {
3608 		slot->cs_io.ci_errno = EIO;
3609 		slot->cs_flags |= CCID_SLOT_F_NEED_TXN_RESET;
3610 		ccid_worker_request(ccid);
3611 		goto consume;
3612 	}
3613 
3614 	ccid_command_status_decode(cc, &crs, &cis, &cce);
3615 	if (crs == CCID_REPLY_STATUS_COMPLETE) {
3616 		mblk_t *mp;
3617 
3618 		mp = cc->cc_response;
3619 		cc->cc_response = NULL;
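		/*
		 * Advance the read pointer past the CCID message header so
		 * the user only sees the response payload.
		 */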
3620 		mp->b_rptr += sizeof (ccid_header_t);
3621 		slot->cs_io.ci_errno = 0;
3622 		slot->cs_io.ci_data = mp;
3623 	} else if (cis == CCID_REPLY_ICC_MISSING) {
3624 		slot->cs_io.ci_errno = ENXIO;
3625 	} else {
3626 		/*
3627 		 * There is more semantic information in these errors
3628 		 * that we're throwing away by lumping them all
3629 		 * together as EIO. Oh well.
3630 		 */
3631 		slot->cs_io.ci_errno = EIO;
3632 	}
3633 
3634 	/*
3635 	 * Now, we can go ahead and wake up a reader to process this command.
3636 	 */
3637 consume:
3638 	slot->cs_io.ci_command = NULL;
3639 	ccid_command_free(cc);
3640 	ccid_user_io_done(ccid, slot);
3641 }
3642 
3643 /*
3644  * We have the user buffer in the CCID slot. Given that, transform it into
3645  * something that we can send to the device. For APDUs this simply means creating
3646  * a transfer command and copying it into that buffer.
3647  */
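/*
 * For reference, the transfer command built below is a PC_to_RDR_XfrBlock
 * message, which per the CCID specification is a 10-byte header followed by
 * the APDU payload (a sketch of the wire layout, not a structure we define
 * here):
 *
 *	offset 0	bMessageType (0x6f, XfrBlock)
 *	offset 1	dwLength (payload length, little-endian)
 *	offset 5	bSlot
 *	offset 6	bSeq
 *	offset 7	bBWI
 *	offset 8	wLevelParameter
 *
 * ccid_command_alloc() is responsible for the header; ccid_command_bcopy()
 * appends the user's APDU bytes as the payload.
 */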
3648 static int
3649 ccid_write_apdu(ccid_t *ccid, ccid_slot_t *slot)
3650 {
3651 	int ret;
3652 	ccid_command_t *cc;
3653 
3654 	VERIFY(MUTEX_HELD(&ccid->ccid_mutex));
3655 
3656 	if ((ret = ccid_command_alloc(ccid, slot, B_FALSE, NULL,
3657 	    slot->cs_io.ci_ilen, CCID_REQUEST_TRANSFER_BLOCK, 0, 0, 0,
3658 	    &cc)) != 0) {
3659 		return (ret);
3660 	}
3661 
3662 	cc->cc_flags |= CCID_COMMAND_F_USER;
3663 	ccid_command_bcopy(cc, slot->cs_io.ci_ibuf, slot->cs_io.ci_ilen);
3664 
3665 	slot->cs_io.ci_command = cc;
3666 	mutex_exit(&ccid->ccid_mutex);
3667 
3668 	if ((ret = ccid_command_queue(ccid, cc)) != 0) {
3669 		mutex_enter(&ccid->ccid_mutex);
3670 		slot->cs_io.ci_command = NULL;
3671 		ccid_command_free(cc);
3672 		return (ret);
3673 	}
3674 
3675 	mutex_enter(&ccid->ccid_mutex);
3676 
3677 	return (0);
3678 }
3679 
3680 static int
3681 ccid_read(dev_t dev, struct uio *uiop, cred_t *credp)
3682 {
3683 	int ret;
3684 	ccid_minor_idx_t *idx;
3685 	ccid_minor_t *cmp;
3686 	ccid_slot_t *slot;
3687 	ccid_t *ccid;
3688 	boolean_t done;
3689 
3690 	if (uiop->uio_resid <= 0) {
3691 		return (EINVAL);
3692 	}
3693 
3694 	if ((idx = ccid_minor_find_user(getminor(dev))) == NULL) {
3695 		return (ENOENT);
3696 	}
3697 
3698 	cmp = idx->cmi_data.cmi_user;
3699 	slot = cmp->cm_slot;
3700 	ccid = slot->cs_ccid;
3701 
3702 	mutex_enter(&ccid->ccid_mutex);
3703 	if ((ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
3704 		mutex_exit(&ccid->ccid_mutex);
3705 		return (ENODEV);
3706 	}
3707 
3708 	/*
3709 	 * First, check if we have exclusive access. If not, we're done.
3710 	 */
3711 	if ((cmp->cm_flags & CCID_MINOR_F_HAS_EXCL) == 0) {
3712 		mutex_exit(&ccid->ccid_mutex);
3713 		return (EACCES);
3714 	}
3715 
3716 	/*
3717 	 * While it's tempting to mirror ccid_write() here and check if we have
3718 	 * a tx or rx function, that actually has no bearing on read. The only
3719 	 * thing that matters is whether or not we actually have an I/O.
3720 	 */
3721 
3722 	/*
3723 	 * If there's been no write I/O issued, then this read is not allowed.
3724 	 * While this may seem like a silly constraint, it certainly simplifies
3725 	 * a lot of the surrounding logic and fits with the current consumer
3726 	 * model.
3727 	 */
3728 	if ((slot->cs_io.ci_flags & (CCID_IO_F_IN_PROGRESS | CCID_IO_F_DONE)) ==
3729 	    0) {
3730 		mutex_exit(&ccid->ccid_mutex);
3731 		return (ENODATA);
3732 	}
3733 
3734 	/*
3735 	 * If another thread is already blocked in read, then don't allow us
3736 	 * in. We only want to allow one thread to attempt to consume a read,
3737 	 * just as we only allow one thread to initiate a write.
3738 	 */
3739 	if ((cmp->cm_flags & CCID_MINOR_F_READ_WAITING) != 0) {
3740 		mutex_exit(&ccid->ccid_mutex);
3741 		return (EBUSY);
3742 	}
3743 
3744 	/*
3745 	 * Check if an I/O has completed. Once it has, call the protocol
3746 	 * specific code. Note that the lock may be dropped after polling. In
3747 	 * such a case we will have to logically recheck several conditions.
3748 	 *
3749 	 * Note, we don't really care if the slot is active or not as I/O could
3750 	 * have been in flight while the slot was inactive.
3751 	 */
3752 	while ((slot->cs_io.ci_flags & CCID_IO_F_DONE) == 0) {
3753 		if (uiop->uio_fmode & FNONBLOCK) {
3754 			mutex_exit(&ccid->ccid_mutex);
3755 			return (EWOULDBLOCK);
3756 		}
3757 
3758 		/*
3759 		 * While we perform a cv_wait_sig() we'll end up dropping the
3760 		 * CCID mutex. This means that we need to notify the rest of the
3761 		 * driver that a thread is blocked in read. This is used not
3762 		 * only for excluding multiple threads trying to read from the
3763 		 * device, but more importantly so that we know to wake up
3764 		 * this thread if the ICC or reader is removed.
3765 		 */
3766 		cmp->cm_flags |= CCID_MINOR_F_READ_WAITING;
3767 		ret = cv_wait_sig(&cmp->cm_read_cv, &ccid->ccid_mutex);
3768 		cmp->cm_flags &= ~CCID_MINOR_F_READ_WAITING;
3769 		cv_signal(&cmp->cm_iowait_cv);
3770 
3771 		if (ret == 0) {
3772 			mutex_exit(&ccid->ccid_mutex);
3773 			return (EINTR);
3774 		}
3775 
3776 		/*
3777 		 * Check if the reader has been removed. We do not need to check
3778 		 * for other conditions, as we'll end up being told that the I/O
3779 		 * is done and that the error has been set.
3780 		 */
3781 		if ((ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
3782 			mutex_exit(&ccid->ccid_mutex);
3783 			return (ENODEV);
3784 		}
3785 	}
3786 
3787 	/*
3788 	 * We'll either have an error or data available for the user at this
3789 	 * point that we can copy out. We need to make sure that it's not too
3790 	 * large. The data should have already been adjusted such that we only
3791 	 * have data payloads.
3792 	 */
3793 	done = B_FALSE;
3794 	if (slot->cs_io.ci_errno == 0) {
3795 		size_t mlen;
3796 
3797 		mlen = msgsize(slot->cs_io.ci_data);
3798 		if (mlen > uiop->uio_resid) {
3799 			ret = EOVERFLOW;
3800 		} else {
3801 			ret = ccid_read_copyout(uiop, slot->cs_io.ci_data);
3802 			if (ret == 0) {
3803 				done = B_TRUE;
3804 			}
3805 		}
3806 	} else {
3807 		ret = slot->cs_io.ci_errno;
3808 		done = B_TRUE;
3809 	}
3810 
3811 	if (done) {
3812 		ccid_clear_io(&slot->cs_io);
3813 		ccid_slot_pollout_signal(slot);
3814 	}
3815 
3816 	mutex_exit(&ccid->ccid_mutex);
3817 
3818 	return (ret);
3819 }
3820 
3821 static int
3822 ccid_write(dev_t dev, struct uio *uiop, cred_t *credp)
3823 {
3824 	int ret;
3825 	ccid_minor_idx_t *idx;
3826 	ccid_minor_t *cmp;
3827 	ccid_slot_t *slot;
3828 	ccid_t *ccid;
3829 	size_t len, cbytes;
3830 
3831 	if (uiop->uio_resid > CCID_APDU_LEN_MAX) {
3832 		return (E2BIG);
3833 	}
3834 
3835 	if (uiop->uio_resid <= 0) {
3836 		return (EINVAL);
3837 	}
3838 
3839 	len = uiop->uio_resid;
3840 	idx = ccid_minor_find_user(getminor(dev));
3841 	if (idx == NULL) {
3842 		return (ENOENT);
3843 	}
3844 
3845 	cmp = idx->cmi_data.cmi_user;
3846 	slot = cmp->cm_slot;
3847 	ccid = slot->cs_ccid;
3848 
3849 	/*
3850 	 * Now that we have the slot, verify whether or not we can perform this
3851 	 * I/O.
3852 	 */
3853 	mutex_enter(&ccid->ccid_mutex);
3854 	if ((ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
3855 		mutex_exit(&ccid->ccid_mutex);
3856 		return (ENODEV);
3857 	}
3858 
3859 	/*
3860 	 * Check that we are open for writing, have exclusive access, and
3861 	 * there's a card present. If not, error out.
3862 	 */
3863 	if ((cmp->cm_flags & (CCID_MINOR_F_WRITABLE | CCID_MINOR_F_HAS_EXCL)) !=
3864 	    (CCID_MINOR_F_WRITABLE | CCID_MINOR_F_HAS_EXCL)) {
3865 		mutex_exit(&ccid->ccid_mutex);
3866 		return (EACCES);
3867 	}
3868 
3869 	if ((slot->cs_flags & CCID_SLOT_F_ACTIVE) == 0) {
3870 		mutex_exit(&ccid->ccid_mutex);
3871 		return (ENXIO);
3872 	}
3873 
3874 	/*
3875 	 * Make sure that we have a supported transmit function.
3876 	 */
3877 	if (slot->cs_icc.icc_tx == NULL) {
3878 		mutex_exit(&ccid->ccid_mutex);
3879 		return (ENOTSUP);
3880 	}
3881 
3882 	/*
3883 	 * See if another user I/O is already outstanding. If so, fail with
3884 	 * EBUSY. Note, we only fail for commands that are
3885 	 * user initiated. There may be other commands that are ongoing in the
3886 	 * system.
3887 	 */
3888 	if ((slot->cs_io.ci_flags & CCID_IO_F_POLLOUT_FLAGS) != 0) {
3889 		mutex_exit(&ccid->ccid_mutex);
3890 		return (EBUSY);
3891 	}
3892 
3893 	/*
3894 	 * Use uiocopy and not uiomove. This way if we fail for whatever reason,
3895 	 * we don't have to worry about restoring the original buffer.
3896 	 */
3897 	if (uiocopy(slot->cs_io.ci_ibuf, len, UIO_WRITE, uiop, &cbytes) != 0) {
3898 		mutex_exit(&ccid->ccid_mutex);
3899 		return (EFAULT);
3900 	}
3901 
3902 	slot->cs_io.ci_ilen = len;
3903 	slot->cs_io.ci_flags |= CCID_IO_F_PREPARING;
3904 	slot->cs_io.ci_omp = NULL;
3905 
3906 	/*
3907 	 * Now that we're here, go ahead and call the actual tx function.
3908 	 */
3909 	if ((ret = slot->cs_icc.icc_tx(ccid, slot)) != 0) {
3910 		/*
3911 		 * The command wasn't actually transmitted. In this case we need
3912 		 * to reset the copied in data and signal anyone who is polling
3913 		 * that this is writeable again. We don't have to worry about
3914 		 * readers at this point, as they won't get in unless
3915 		 * CCID_IO_F_IN_PROGRESS has been set.
3916 		 */
3917 		slot->cs_io.ci_ilen = 0;
3918 		bzero(slot->cs_io.ci_ibuf, sizeof (slot->cs_io.ci_ibuf));
3919 		slot->cs_io.ci_flags &= ~CCID_IO_F_PREPARING;
3920 
3921 		ccid_slot_pollout_signal(slot);
3922 	} else {
3923 		slot->cs_io.ci_flags &= ~CCID_IO_F_PREPARING;
3924 		slot->cs_io.ci_flags |= CCID_IO_F_IN_PROGRESS;
3925 		uiop->uio_resid -= cbytes;
3926 	}
3927 	/*
3928 	 * Notify a waiter that we've moved on.
3929 	 */
3930 	cv_signal(&slot->cs_excl_minor->cm_iowait_cv);
3931 	mutex_exit(&ccid->ccid_mutex);
3932 
3933 	return (ret);
3934 }
3935 
3936 static int
3937 ccid_ioctl_status(ccid_slot_t *slot, intptr_t arg, int mode)
3938 {
3939 	uccid_cmd_status_t ucs;
3940 	ccid_t *ccid = slot->cs_ccid;
3941 
3942 	if (ddi_copyin((void *)arg, &ucs, sizeof (ucs), mode & FKIOCTL) != 0)
3943 		return (EFAULT);
3944 
3945 	if (ucs.ucs_version != UCCID_VERSION_ONE)
3946 		return (EINVAL);
3947 
3948 	ucs.ucs_status = 0;
3949 	ucs.ucs_instance = ddi_get_instance(slot->cs_ccid->ccid_dip);
3950 	ucs.ucs_slot = slot->cs_slotno;
3951 
3952 	mutex_enter(&slot->cs_ccid->ccid_mutex);
3953 	if ((slot->cs_ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
3954 		mutex_exit(&slot->cs_ccid->ccid_mutex);
3955 		return (ENODEV);
3956 	}
3957 
3958 	if ((slot->cs_flags & CCID_SLOT_F_PRESENT) != 0)
3959 		ucs.ucs_status |= UCCID_STATUS_F_CARD_PRESENT;
3960 	if ((slot->cs_flags & CCID_SLOT_F_ACTIVE) != 0)
3961 		ucs.ucs_status |= UCCID_STATUS_F_CARD_ACTIVE;
3962 
3963 	if (slot->cs_atr != NULL) {
3964 		ucs.ucs_atrlen = MIN(UCCID_ATR_MAX, MBLKL(slot->cs_atr));
3965 		bcopy(slot->cs_atr->b_rptr, ucs.ucs_atr, ucs.ucs_atrlen);
3966 	} else {
3967 		bzero(ucs.ucs_atr, sizeof (ucs.ucs_atr));
3968 		ucs.ucs_atrlen = 0;
3969 	}
3970 
3971 	bcopy(&ccid->ccid_class, &ucs.ucs_class, sizeof (ucs.ucs_class));
3972 
3973 	if (ccid->ccid_dev_data->dev_product != NULL) {
3974 		(void) strlcpy(ucs.ucs_product,
3975 		    ccid->ccid_dev_data->dev_product, sizeof (ucs.ucs_product));
3976 		ucs.ucs_status |= UCCID_STATUS_F_PRODUCT_VALID;
3977 	} else {
3978 		ucs.ucs_product[0] = '\0';
3979 	}
3980 
3981 	if (ccid->ccid_dev_data->dev_serial != NULL) {
3982 		(void) strlcpy(ucs.ucs_serial, ccid->ccid_dev_data->dev_serial,
3983 		    sizeof (ucs.ucs_serial));
3984 		ucs.ucs_status |= UCCID_STATUS_F_SERIAL_VALID;
3985 	} else {
3986 		ucs.ucs_serial[0] = '\0';
3987 	}
3988 	mutex_exit(&slot->cs_ccid->ccid_mutex);
3989 
3990 	if ((slot->cs_flags & CCID_SLOT_F_ACTIVE) != 0) {
3991 		ucs.ucs_status |= UCCID_STATUS_F_PARAMS_VALID;
3992 		ucs.ucs_prot = (uccid_prot_t)slot->cs_icc.icc_cur_protocol;
3993 		ucs.ucs_params = slot->cs_icc.icc_params;
3994 	}
3995 
3996 	if (ddi_copyout(&ucs, (void *)arg, sizeof (ucs), mode & FKIOCTL) != 0)
3997 		return (EFAULT);
3998 
3999 	return (0);
4000 }
4001 
4002 static int
4003 ccid_ioctl_txn_begin(ccid_slot_t *slot, ccid_minor_t *cmp, intptr_t arg,
4004     int mode)
4005 {
4006 	int ret;
4007 	uccid_cmd_txn_begin_t uct;
4008 	boolean_t nowait;
4009 
4010 	if (ddi_copyin((void *)arg, &uct, sizeof (uct), mode & FKIOCTL) != 0)
4011 		return (EFAULT);
4012 
4013 	if (uct.uct_version != UCCID_VERSION_ONE)
4014 		return (EINVAL);
4015 
4016 	if ((uct.uct_flags & ~UCCID_TXN_DONT_BLOCK) != 0)
4017 		return (EINVAL);
4018 	nowait = (uct.uct_flags & UCCID_TXN_DONT_BLOCK) != 0;
4019 
4020 	mutex_enter(&slot->cs_ccid->ccid_mutex);
4021 	if ((slot->cs_ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
4022 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4023 		return (ENODEV);
4024 	}
4025 
4026 	if ((cmp->cm_flags & CCID_MINOR_F_WRITABLE) == 0) {
4027 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4028 		return (EBADF);
4029 	}
4030 
4031 	ret = ccid_slot_excl_req(slot, cmp, nowait);
4032 	mutex_exit(&slot->cs_ccid->ccid_mutex);
4033 
4034 	return (ret);
4035 }
4036 
4037 static int
4038 ccid_ioctl_txn_end(ccid_slot_t *slot, ccid_minor_t *cmp, intptr_t arg, int mode)
4039 {
4040 	uccid_cmd_txn_end_t uct;
4041 
4042 	if (ddi_copyin((void *)arg, &uct, sizeof (uct), mode & FKIOCTL) != 0) {
4043 		return (EFAULT);
4044 	}
4045 
4046 	if (uct.uct_version != UCCID_VERSION_ONE) {
4047 		return (EINVAL);
4048 	}
4049 
4050 	mutex_enter(&slot->cs_ccid->ccid_mutex);
4051 	if ((slot->cs_ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
4052 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4053 		return (ENODEV);
4054 	}
4055 
4056 	if (slot->cs_excl_minor != cmp) {
4057 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4058 		return (EINVAL);
4059 	}
4060 	VERIFY3S(cmp->cm_flags & CCID_MINOR_F_HAS_EXCL, !=, 0);
4061 
4062 	/*
4063 	 * Require exactly one of the flags to be set.
4064 	 */
4065 	switch (uct.uct_flags) {
4066 	case UCCID_TXN_END_RESET:
4067 		cmp->cm_flags |= CCID_MINOR_F_TXN_RESET;
4068 
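		/* FALLTHROUGH */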
4069 	case UCCID_TXN_END_RELEASE:
4070 		break;
4071 
4072 	default:
4073 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4074 		return (EINVAL);
4075 	}
4076 
4077 	ccid_slot_excl_rele(slot);
4078 	mutex_exit(&slot->cs_ccid->ccid_mutex);
4079 
4080 	return (0);
4081 }
4082 
4083 static int
4084 ccid_ioctl_fionread(ccid_slot_t *slot, ccid_minor_t *cmp, intptr_t arg,
4085     int mode)
4086 {
4087 	int data;
4088 
4089 	mutex_enter(&slot->cs_ccid->ccid_mutex);
4090 	if ((slot->cs_ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
4091 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4092 		return (ENODEV);
4093 	}
4094 
4095 	if ((cmp->cm_flags & CCID_MINOR_F_HAS_EXCL) == 0) {
4096 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4097 		return (EACCES);
4098 	}
4099 
4100 	if ((cmp->cm_flags & CCID_MINOR_F_WRITABLE) == 0) {
4101 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4102 		return (EBADF);
4103 	}
4104 
4105 	if ((slot->cs_io.ci_flags & CCID_IO_F_DONE) == 0) {
4106 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4107 		return (ENODATA);
4108 	}
4109 
4110 	/*
4111 	 * If there's an error, claim that there's at least one byte to read
4112 	 * even if it means we'll get the error and consume it. FIONREAD only
4113 	 * allows up to an int of data. Realistically, because we don't allow
4114 	 * extended APDUs, the amount of data here should always be less than
4115 	 * INT_MAX.
4116 	 */
4117 	if (slot->cs_io.ci_errno != 0) {
4118 		data = 1;
4119 	} else {
4120 		size_t s = msgsize(slot->cs_io.ci_data);
4121 		data = MIN(s, INT_MAX);
4122 	}
4123 
4124 	if (ddi_copyout(&data, (void *)arg, sizeof (data), mode & FKIOCTL) !=
4125 	    0) {
4126 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4127 		return (EFAULT);
4128 	}
4129 
4130 	mutex_exit(&slot->cs_ccid->ccid_mutex);
4131 	return (0);
4132 }
4133 
4134 static int
4135 ccid_ioctl_icc_modify(ccid_slot_t *slot, ccid_minor_t *cmp, intptr_t arg,
4136     int mode)
4137 {
4138 	int ret = 0;
4139 	uccid_cmd_icc_modify_t uci;
4140 	ccid_t *ccid;
4141 
4142 	if (ddi_copyin((void *)arg, &uci, sizeof (uci), mode & FKIOCTL) != 0) {
4143 		return (EFAULT);
4144 	}
4145 
4146 	if (uci.uci_version != UCCID_VERSION_ONE) {
4147 		return (EINVAL);
4148 	}
4149 
4150 	switch (uci.uci_action) {
4151 	case UCCID_ICC_POWER_ON:
4152 	case UCCID_ICC_POWER_OFF:
4153 	case UCCID_ICC_WARM_RESET:
4154 		break;
4155 	default:
4156 		return (EINVAL);
4157 	}
4158 
4159 	ccid = slot->cs_ccid;
4160 	mutex_enter(&ccid->ccid_mutex);
4161 	if ((slot->cs_ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
4162 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4163 		return (ENODEV);
4164 	}
4165 
4166 	if ((cmp->cm_flags & CCID_MINOR_F_WRITABLE) == 0) {
4167 		mutex_exit(&slot->cs_ccid->ccid_mutex);
4168 		return (EBADF);
4169 	}
4170 
4171 	switch (uci.uci_action) {
4172 	case UCCID_ICC_WARM_RESET:
4173 		ret = ccid_slot_warm_reset(ccid, slot);
4174 		break;
4175 
4176 	case UCCID_ICC_POWER_OFF:
4177 		ret = ccid_slot_power_off(ccid, slot);
4178 		break;
4179 
4180 	case UCCID_ICC_POWER_ON:
4181 		ret = ccid_slot_inserted(ccid, slot);
4182 		break;
4183 	}
4184 
4185 	mutex_exit(&ccid->ccid_mutex);
4186 
4187 	return (ret);
4188 }
4189 
4190 static int
4191 ccid_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
4192     int *rvalp)
4193 {
4194 	ccid_minor_idx_t *idx;
4195 	ccid_slot_t *slot;
4196 	ccid_minor_t *cmp;
4197 
4198 	idx = ccid_minor_find_user(getminor(dev));
4199 	if (idx == NULL) {
4200 		return (ENOENT);
4201 	}
4202 
4203 	cmp = idx->cmi_data.cmi_user;
4204 	slot = cmp->cm_slot;
4205 
4206 	switch (cmd) {
4207 	case UCCID_CMD_TXN_BEGIN:
4208 		return (ccid_ioctl_txn_begin(slot, cmp, arg, mode));
4209 	case UCCID_CMD_TXN_END:
4210 		return (ccid_ioctl_txn_end(slot, cmp, arg, mode));
4211 	case UCCID_CMD_STATUS:
4212 		return (ccid_ioctl_status(slot, arg, mode));
4213 	case FIONREAD:
4214 		return (ccid_ioctl_fionread(slot, cmp, arg, mode));
4215 	case UCCID_CMD_ICC_MODIFY:
4216 		return (ccid_ioctl_icc_modify(slot, cmp, arg, mode));
4217 	default:
4218 		break;
4219 	}
4220 
4221 	return (ENOTTY);
4222 }
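
/*
 * A sketch of the expected userland flow over this ioctl interface. The
 * device path and APDU contents below are illustrative only; the ioctls and
 * structures are the ones handled above:
 *
 *	int fd = open("/dev/ccid0/slot0", O_RDWR);
 *	uccid_cmd_txn_begin_t begin = { .uct_version = UCCID_VERSION_ONE };
 *	(void) ioctl(fd, UCCID_CMD_TXN_BEGIN, &begin);
 *	(void) write(fd, apdu, apdulen);
 *	(void) read(fd, resp, sizeof (resp));
 *	uccid_cmd_txn_end_t end = { .uct_version = UCCID_VERSION_ONE,
 *	    .uct_flags = UCCID_TXN_END_RELEASE };
 *	(void) ioctl(fd, UCCID_CMD_TXN_END, &end);
 *	(void) close(fd);
 */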
4223 
4224 static int
4225 ccid_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
4226     struct pollhead **phpp)
4227 {
4228 	short ready = 0;
4229 	ccid_minor_idx_t *idx;
4230 	ccid_minor_t *cmp;
4231 	ccid_slot_t *slot;
4232 	ccid_t *ccid;
4233 
4234 	idx = ccid_minor_find_user(getminor(dev));
4235 	if (idx == NULL) {
4236 		return (ENOENT);
4237 	}
4238 
4239 	cmp = idx->cmi_data.cmi_user;
4240 	slot = cmp->cm_slot;
4241 	ccid = slot->cs_ccid;
4242 
4243 	mutex_enter(&ccid->ccid_mutex);
4244 	if ((ccid->ccid_flags & CCID_F_DISCONNECTED) != 0) {
4245 		mutex_exit(&ccid->ccid_mutex);
4246 		return (ENODEV);
4247 	}
4248 
4249 	if ((cmp->cm_flags & CCID_MINOR_F_HAS_EXCL) == 0) {
4250 		mutex_exit(&ccid->ccid_mutex);
4251 		return (EACCES);
4252 	}
4253 
4254 	/*
4255 	 * If the CCID_IO_F_DONE flag is set, then we're always
4256 	 * readable. Being writeable, however, requires more than just these flags.
4257 	 */
4258 	if ((slot->cs_io.ci_flags & CCID_IO_F_DONE) != 0) {
4259 		ready |= POLLIN | POLLRDNORM;
4260 	} else if ((slot->cs_flags & CCID_SLOT_F_ACTIVE) != 0 &&
4261 	    (slot->cs_io.ci_flags & CCID_IO_F_POLLOUT_FLAGS) == 0 &&
4262 	    slot->cs_icc.icc_tx != NULL) {
4263 		ready |= POLLOUT;
4264 	}
4265 
4266 	if ((slot->cs_flags & CCID_SLOT_F_PRESENT) == 0) {
4267 		ready |= POLLHUP;
4268 	}
4269 
4270 	*reventsp = ready & events;
4271 	if ((*reventsp == 0 && !anyyet) || (events & POLLET)) {
4272 		*phpp = &cmp->cm_pollhead;
4273 	}
4274 
4275 	mutex_exit(&ccid->ccid_mutex);
4276 
4277 	return (0);
4278 }
4279 
4280 static int
4281 ccid_close(dev_t dev, int flag, int otyp, cred_t *credp)
4282 {
4283 	ccid_minor_idx_t *idx;
4284 	ccid_minor_t *cmp;
4285 	ccid_slot_t *slot;
4286 
4287 	idx = ccid_minor_find_user(getminor(dev));
4288 	if (idx == NULL) {
4289 		return (ENOENT);
4290 	}
4291 
4292 	/*
4293 	 * First tear down the global index entry.
4294 	 */
4295 	cmp = idx->cmi_data.cmi_user;
4296 	slot = cmp->cm_slot;
4297 	ccid_minor_idx_free(idx);
4298 
4299 	/*
4300 	 * If the minor node was closed without an explicit transaction end,
4301 	 * then we need to assume that the reader's ICC is in an arbitrary
4302 	 * state. For example, the ICC could have a specific PIV applet
4303 	 * selected. In such a case, the only safe thing to do is to force a
4304 	 * reset.
4305 	 */
4306 	mutex_enter(&slot->cs_ccid->ccid_mutex);
4307 	if ((cmp->cm_flags & CCID_MINOR_F_HAS_EXCL) != 0) {
4308 		cmp->cm_flags |= CCID_MINOR_F_TXN_RESET;
4309 		ccid_slot_excl_rele(slot);
4310 	}
4311 
4312 	list_remove(&slot->cs_minors, cmp);
4313 	mutex_exit(&slot->cs_ccid->ccid_mutex);
4314 
4315 	pollhead_clean(&cmp->cm_pollhead);
4316 	ccid_minor_free(cmp);
4317 
4318 	return (0);
4319 }
4320 
4321 static struct cb_ops ccid_cb_ops = {
4322 	ccid_open,		/* cb_open */
4323 	ccid_close,		/* cb_close */
4324 	nodev,			/* cb_strategy */
4325 	nodev,			/* cb_print */
4326 	nodev,			/* cb_dump */
4327 	ccid_read,		/* cb_read */
4328 	ccid_write,		/* cb_write */
4329 	ccid_ioctl,		/* cb_ioctl */
4330 	nodev,			/* cb_devmap */
4331 	nodev,			/* cb_mmap */
4332 	nodev,			/* cb_segmap */
4333 	ccid_chpoll,		/* cb_chpoll */
4334 	ddi_prop_op,		/* cb_prop_op */
4335 	NULL,			/* cb_stream */
4336 	D_MP,			/* cb_flag */
4337 	CB_REV,			/* cb_rev */
4338 	nodev,			/* cb_aread */
4339 	nodev			/* cb_awrite */
4340 };
4341 
4342 static struct dev_ops ccid_dev_ops = {
4343 	DEVO_REV,		/* devo_rev */
4344 	0,			/* devo_refcnt */
4345 	ccid_getinfo,		/* devo_getinfo */
4346 	nulldev,		/* devo_identify */
4347 	nulldev,		/* devo_probe */
4348 	ccid_attach,		/* devo_attach */
4349 	ccid_detach,		/* devo_detach */
4350 	nodev,			/* devo_reset */
4351 	&ccid_cb_ops,		/* devo_cb_ops */
4352 	NULL,			/* devo_bus_ops */
4353 	NULL,			/* devo_power */
4354 	ddi_quiesce_not_supported /* devo_quiesce */
4355 };
4356 
4357 static struct modldrv ccid_modldrv = {
4358 	&mod_driverops,
4359 	"USB CCID",
4360 	&ccid_dev_ops
4361 };
4362 
4363 static struct modlinkage ccid_modlinkage = {
4364 	MODREV_1,
4365 	{ &ccid_modldrv, NULL }
4366 };
4367 
4368 int
4369 _init(void)
4370 {
4371 	int ret;
4372 
4373 	if ((ret = ddi_soft_state_init(&ccid_softstate, sizeof (ccid_t),
4374 	    0)) != 0) {
4375 		return (ret);
4376 	}
4377 
4378 	if ((ccid_minors = id_space_create("ccid_minors", CCID_MINOR_MIN,
4379 	    INT_MAX)) == NULL) {
4380 		ddi_soft_state_fini(&ccid_softstate);
4381 		return (ENOMEM);
4382 	}
4383 
4384 	if ((ret = mod_install(&ccid_modlinkage)) != 0) {
4385 		id_space_destroy(ccid_minors);
4386 		ccid_minors = NULL;
4387 		ddi_soft_state_fini(&ccid_softstate);
4388 		return (ret);
4389 	}
4390 
4391 	mutex_init(&ccid_idxlock, NULL, MUTEX_DRIVER, NULL);
4392 	avl_create(&ccid_idx, ccid_idx_comparator, sizeof (ccid_minor_idx_t),
4393 	    offsetof(ccid_minor_idx_t, cmi_avl));
4394 
4395 	return (ret);
4396 }
4397 
4398 int
4399 _info(struct modinfo *modinfop)
4400 {
4401 	return (mod_info(&ccid_modlinkage, modinfop));
4402 }
4403 
4404 int
4405 _fini(void)
4406 {
4407 	int ret;
4408 
4409 	if ((ret = mod_remove(&ccid_modlinkage)) != 0) {
4410 		return (ret);
4411 	}
4412 
4413 	avl_destroy(&ccid_idx);
4414 	mutex_destroy(&ccid_idxlock);
4415 	id_space_destroy(ccid_minors);
4416 	ccid_minors = NULL;
4417 	ddi_soft_state_fini(&ccid_softstate);
4418 
4419 	return (ret);
4420 }
4421