xref: /titanic_52/usr/src/uts/common/os/ddi_periodic.c (revision a288e5a9793fdffe5e842d7e61ab45263e75eaca)
1*a288e5a9SJoshua M. Clulow /*
2*a288e5a9SJoshua M. Clulow  * This file and its contents are supplied under the terms of the
3*a288e5a9SJoshua M. Clulow  * Common Development and Distribution License ("CDDL"), version 1.0.
4*a288e5a9SJoshua M. Clulow  * You may only use this file in accordance with the terms of version
5*a288e5a9SJoshua M. Clulow  * 1.0 of the CDDL.
6*a288e5a9SJoshua M. Clulow  *
7*a288e5a9SJoshua M. Clulow  * A full copy of the text of the CDDL should have accompanied this
8*a288e5a9SJoshua M. Clulow  * source.  A copy of the CDDL is also available via the Internet at
9*a288e5a9SJoshua M. Clulow  * http://www.illumos.org/license/CDDL.
10*a288e5a9SJoshua M. Clulow  */
11*a288e5a9SJoshua M. Clulow /*
12*a288e5a9SJoshua M. Clulow  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
13*a288e5a9SJoshua M. Clulow  */
14*a288e5a9SJoshua M. Clulow 
15*a288e5a9SJoshua M. Clulow #include <sys/cmn_err.h>
16*a288e5a9SJoshua M. Clulow #include <sys/ddi_periodic.h>
17*a288e5a9SJoshua M. Clulow #include <sys/id_space.h>
18*a288e5a9SJoshua M. Clulow #include <sys/kobj.h>
19*a288e5a9SJoshua M. Clulow #include <sys/sysmacros.h>
20*a288e5a9SJoshua M. Clulow #include <sys/systm.h>
21*a288e5a9SJoshua M. Clulow #include <sys/taskq.h>
22*a288e5a9SJoshua M. Clulow #include <sys/taskq_impl.h>
23*a288e5a9SJoshua M. Clulow #include <sys/time.h>
24*a288e5a9SJoshua M. Clulow #include <sys/types.h>
25*a288e5a9SJoshua M. Clulow #include <sys/sdt.h>
26*a288e5a9SJoshua M. Clulow 
27*a288e5a9SJoshua M. Clulow extern void sir_on(int);
28*a288e5a9SJoshua M. Clulow 
29*a288e5a9SJoshua M. Clulow /*
30*a288e5a9SJoshua M. Clulow  * The ddi_periodic_add(9F) Implementation
31*a288e5a9SJoshua M. Clulow  *
32*a288e5a9SJoshua M. Clulow  * This file contains the implementation of the ddi_periodic_add(9F) interface.
33*a288e5a9SJoshua M. Clulow  * It is a thin wrapper around the cyclic subsystem (see documentation in
34*a288e5a9SJoshua M. Clulow  * uts/common/os/cyclic.c), providing a DDI interface for registering
35*a288e5a9SJoshua M. Clulow  * (and unregistering) callbacks for periodic invocation at arbitrary
36*a288e5a9SJoshua M. Clulow  * interrupt levels, or in kernel context.
37*a288e5a9SJoshua M. Clulow  *
38*a288e5a9SJoshua M. Clulow  * Each call to ddi_periodic_add will result in a new opaque handle, as
39*a288e5a9SJoshua M. Clulow  * allocated from an id_space, a new "periodic" object (ddi_periodic_impl_t)
40*a288e5a9SJoshua M. Clulow  * and a registered cyclic.
41*a288e5a9SJoshua M. Clulow  *
42*a288e5a9SJoshua M. Clulow  * Operation
43*a288e5a9SJoshua M. Clulow  *
44*a288e5a9SJoshua M. Clulow  * Whenever the cyclic fires, our cyclic handler checks that the particular
45*a288e5a9SJoshua M. Clulow  * periodic is not dispatched already (we do not support overlapping execution
46*a288e5a9SJoshua M. Clulow  * of the consumer's handler function), and not yet cancelled.  If both of
47*a288e5a9SJoshua M. Clulow  * these conditions hold, we mark the periodic as DPF_DISPATCHED and enqueue it
48*a288e5a9SJoshua M. Clulow  * to either the taskq (for DDI_IPL_0) or to one of the soft interrupt queues
49*a288e5a9SJoshua M. Clulow  * (DDI_IPL_1 to DDI_IPL_10).
50*a288e5a9SJoshua M. Clulow  *
51*a288e5a9SJoshua M. Clulow  * While the taskq (or soft interrupt handler) is handling a particular
52*a288e5a9SJoshua M. Clulow  * periodic, we mark it as DPF_EXECUTING.  When complete, we reset both
53*a288e5a9SJoshua M. Clulow  * DPF_DISPATCHED and DPF_EXECUTING.
54*a288e5a9SJoshua M. Clulow  *
55*a288e5a9SJoshua M. Clulow  * Cancellation
56*a288e5a9SJoshua M. Clulow  *
57*a288e5a9SJoshua M. Clulow  * ddi_periodic_delete(9F) historically had spectacularly loose semantics with
58*a288e5a9SJoshua M. Clulow  * respect to cancellation concurrent with handler execution.  These semantics
59*a288e5a9SJoshua M. Clulow  * are now tighter:
60*a288e5a9SJoshua M. Clulow  *
61*a288e5a9SJoshua M. Clulow  *   1. At most one invocation of ddi_periodic_delete(9F) will actually
62*a288e5a9SJoshua M. Clulow  *      perform the deletion, all others will return immediately.
63*a288e5a9SJoshua M. Clulow  *   2. The invocation that performs the deletion will _block_ until
64*a288e5a9SJoshua M. Clulow  *      the handler is no longer running, and all resources have been
65*a288e5a9SJoshua M. Clulow  *      released.
66*a288e5a9SJoshua M. Clulow  *
67*a288e5a9SJoshua M. Clulow  * We effect this model by removing the cancelling periodic from the
68*a288e5a9SJoshua M. Clulow  * global list and marking it DPF_CANCELLED.  This will prevent further
69*a288e5a9SJoshua M. Clulow  * execution of the handler.  We then wait on a CV until the DPF_EXECUTING
70*a288e5a9SJoshua M. Clulow  * and DPF_DISPATCHED flags are clear, which means the periodic is removed
71*a288e5a9SJoshua M. Clulow  * from all request queues, is no longer executing, and may be freed.  At this
72*a288e5a9SJoshua M. Clulow  * point we return the opaque ID to the id_space and free the memory.
73*a288e5a9SJoshua M. Clulow  *
74*a288e5a9SJoshua M. Clulow  * NOTE:
75*a288e5a9SJoshua M. Clulow  * The ddi_periodic_add(9F) interface is presently limited to a minimum period
76*a288e5a9SJoshua M. Clulow  * of 10ms between firings.
77*a288e5a9SJoshua M. Clulow  */
78*a288e5a9SJoshua M. Clulow 
/*
 * Tuneables:
 */
/* Maximum number of concurrently registered periodics (id_space bound). */
int ddi_periodic_max_id = 1024;
/* Worker threads servicing DDI_IPL_0 (kernel context) handlers. */
int ddi_periodic_taskq_threadcount = 4;
/* Minimum firing resolution, in nanoseconds (10ms); see i_timeout(). */
hrtime_t ddi_periodic_resolution = 10000000;

/*
 * Globals:
 */
static kmem_cache_t *periodic_cache;	/* allocator for ddi_periodic_impl_t */
static id_space_t *periodic_id_space;	/* source of opaque timeout_t ids */
static taskq_t *periodic_taskq;		/* runs PERI_IPL_0 handlers */

/*
 * periodics_lock protects the list of all periodics (periodics), and
 * each of the soft interrupt request queues (periodic_softint_queue).
 *
 * Do not hold an individual periodic's lock while obtaining periodics_lock.
 * While in the periodic_softint_queue list, the periodic will be marked
 * DPF_DISPATCHED, and thus safe from frees.  Only the invocation of
 * i_untimeout() that removes the periodic from the global list is allowed
 * to free it.
 */
static kmutex_t periodics_lock;
static list_t periodics;
static list_t periodic_softint_queue[10]; /* for IPL1 up to IPL10 */
106*a288e5a9SJoshua M. Clulow 
/*
 * Interrupt priority levels at which a periodic handler may be invoked.
 * PERI_IPL_0 means kernel (taskq) context; PERI_IPL_1 through PERI_IPL_10
 * correspond to the soft interrupt queues in periodic_softint_queue[].
 */
typedef enum periodic_ipl {
	PERI_IPL_0 = 0,
	PERI_IPL_1,
	PERI_IPL_2,
	PERI_IPL_3,
	PERI_IPL_4,
	PERI_IPL_5,
	PERI_IPL_6,
	PERI_IPL_7,
	PERI_IPL_8,
	PERI_IPL_9,
	PERI_IPL_10
} periodic_ipl_t;
120*a288e5a9SJoshua M. Clulow 
121*a288e5a9SJoshua M. Clulow static char *
122*a288e5a9SJoshua M. Clulow periodic_handler_symbol(ddi_periodic_impl_t *dpr)
123*a288e5a9SJoshua M. Clulow {
124*a288e5a9SJoshua M. Clulow 	ulong_t off;
125*a288e5a9SJoshua M. Clulow 
126*a288e5a9SJoshua M. Clulow 	return (kobj_getsymname((uintptr_t)dpr->dpr_handler, &off));
127*a288e5a9SJoshua M. Clulow }
128*a288e5a9SJoshua M. Clulow 
129*a288e5a9SJoshua M. Clulow /*
130*a288e5a9SJoshua M. Clulow  * This function may be called either from a soft interrupt handler
131*a288e5a9SJoshua M. Clulow  * (ddi_periodic_softintr), or as a taskq worker function.
132*a288e5a9SJoshua M. Clulow  */
/*
 * Execute one dispatched periodic: run the consumer's handler (unless the
 * periodic has been cancelled since dispatch), then clear the busy flags and
 * wake any waiter in i_untimeout().
 *
 * This function may be called either from a soft interrupt handler
 * (ddi_periodic_softintr), or as a taskq worker function.
 */
static void
periodic_execute(void *arg)
{
	ddi_periodic_impl_t *dpr = arg;
	mutex_enter(&dpr->dpr_lock);

	/*
	 * We must be DISPATCHED, but not yet EXECUTING:
	 */
	VERIFY((dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) ==
	    DPF_DISPATCHED);
	VERIFY(dpr->dpr_thread == NULL);

	if (!(dpr->dpr_flags & DPF_CANCELLED)) {
		/*
		 * Snapshot level and fire count for the DTrace probes below;
		 * we cannot touch dpr fields once we drop dpr_lock.
		 */
		int level = dpr->dpr_level;
		uint64_t count = dpr->dpr_fire_count;
		/*
		 * If we have not yet been cancelled, then
		 * mark us executing:
		 */
		dpr->dpr_flags |= DPF_EXECUTING;
		/*
		 * Record the executing thread so that i_untimeout() can
		 * detect (and panic on) deletion from within the handler.
		 */
		dpr->dpr_thread = curthread;
		mutex_exit(&dpr->dpr_lock);

		/*
		 * Execute the handler, without holding locks:
		 */
		DTRACE_PROBE4(ddi__periodic__execute, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);
		(*dpr->dpr_handler)(dpr->dpr_arg);
		DTRACE_PROBE4(ddi__periodic__done, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);

		mutex_enter(&dpr->dpr_lock);
		dpr->dpr_thread = NULL;
		dpr->dpr_fire_count++;
	}

	/*
	 * We're done with this periodic for now, so release it and
	 * wake anybody that was waiting for us to be finished:
	 */
	dpr->dpr_flags &= ~(DPF_DISPATCHED | DPF_EXECUTING);
	cv_broadcast(&dpr->dpr_cv);
	mutex_exit(&dpr->dpr_lock);
}
179*a288e5a9SJoshua M. Clulow 
180*a288e5a9SJoshua M. Clulow void
181*a288e5a9SJoshua M. Clulow ddi_periodic_softintr(int level)
182*a288e5a9SJoshua M. Clulow {
183*a288e5a9SJoshua M. Clulow 	ddi_periodic_impl_t *dpr;
184*a288e5a9SJoshua M. Clulow 	VERIFY(level >= PERI_IPL_1 && level <= PERI_IPL_10);
185*a288e5a9SJoshua M. Clulow 
186*a288e5a9SJoshua M. Clulow 	mutex_enter(&periodics_lock);
187*a288e5a9SJoshua M. Clulow 	/*
188*a288e5a9SJoshua M. Clulow 	 * Pull the first scheduled periodic off the queue for this priority
189*a288e5a9SJoshua M. Clulow 	 * level:
190*a288e5a9SJoshua M. Clulow 	 */
191*a288e5a9SJoshua M. Clulow 	while ((dpr = list_remove_head(&periodic_softint_queue[level - 1])) !=
192*a288e5a9SJoshua M. Clulow 	    NULL) {
193*a288e5a9SJoshua M. Clulow 		mutex_exit(&periodics_lock);
194*a288e5a9SJoshua M. Clulow 		/*
195*a288e5a9SJoshua M. Clulow 		 * And execute it:
196*a288e5a9SJoshua M. Clulow 		 */
197*a288e5a9SJoshua M. Clulow 		periodic_execute(dpr);
198*a288e5a9SJoshua M. Clulow 		mutex_enter(&periodics_lock);
199*a288e5a9SJoshua M. Clulow 	}
200*a288e5a9SJoshua M. Clulow 	mutex_exit(&periodics_lock);
201*a288e5a9SJoshua M. Clulow }
202*a288e5a9SJoshua M. Clulow 
203*a288e5a9SJoshua M. Clulow void
204*a288e5a9SJoshua M. Clulow ddi_periodic_init(void)
205*a288e5a9SJoshua M. Clulow {
206*a288e5a9SJoshua M. Clulow 	int i;
207*a288e5a9SJoshua M. Clulow 
208*a288e5a9SJoshua M. Clulow 	/*
209*a288e5a9SJoshua M. Clulow 	 * Create a kmem_cache for request tracking objects, and a list
210*a288e5a9SJoshua M. Clulow 	 * to store them in so we can later delete based on opaque handles:
211*a288e5a9SJoshua M. Clulow 	 */
212*a288e5a9SJoshua M. Clulow 	periodic_cache = kmem_cache_create("ddi_periodic",
213*a288e5a9SJoshua M. Clulow 	    sizeof (ddi_periodic_impl_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
214*a288e5a9SJoshua M. Clulow 	list_create(&periodics, sizeof (ddi_periodic_impl_t),
215*a288e5a9SJoshua M. Clulow 	    offsetof(ddi_periodic_impl_t, dpr_link));
216*a288e5a9SJoshua M. Clulow 
217*a288e5a9SJoshua M. Clulow 	/*
218*a288e5a9SJoshua M. Clulow 	 * Initialise the identifier space for ddi_periodic_add(9F):
219*a288e5a9SJoshua M. Clulow 	 */
220*a288e5a9SJoshua M. Clulow 	periodic_id_space = id_space_create("ddi_periodic", 1,
221*a288e5a9SJoshua M. Clulow 	    ddi_periodic_max_id);
222*a288e5a9SJoshua M. Clulow 
223*a288e5a9SJoshua M. Clulow 	/*
224*a288e5a9SJoshua M. Clulow 	 * Initialise the request queue for each soft interrupt level:
225*a288e5a9SJoshua M. Clulow 	 */
226*a288e5a9SJoshua M. Clulow 	for (i = PERI_IPL_1; i <= PERI_IPL_10; i++) {
227*a288e5a9SJoshua M. Clulow 		list_create(&periodic_softint_queue[i - 1],
228*a288e5a9SJoshua M. Clulow 		    sizeof (ddi_periodic_impl_t), offsetof(ddi_periodic_impl_t,
229*a288e5a9SJoshua M. Clulow 		    dpr_softint_link));
230*a288e5a9SJoshua M. Clulow 	}
231*a288e5a9SJoshua M. Clulow 
232*a288e5a9SJoshua M. Clulow 	/*
233*a288e5a9SJoshua M. Clulow 	 * Create the taskq for running PERI_IPL_0 handlers.  This taskq will
234*a288e5a9SJoshua M. Clulow 	 * _only_ be used with taskq_dispatch_ent(), and a taskq_ent_t
235*a288e5a9SJoshua M. Clulow 	 * pre-allocated with the ddi_periodic_impl_t.
236*a288e5a9SJoshua M. Clulow 	 */
237*a288e5a9SJoshua M. Clulow 	periodic_taskq = taskq_create_instance("ddi_periodic_taskq", -1,
238*a288e5a9SJoshua M. Clulow 	    ddi_periodic_taskq_threadcount, maxclsyspri, 0, 0, 0);
239*a288e5a9SJoshua M. Clulow 
240*a288e5a9SJoshua M. Clulow 	/*
241*a288e5a9SJoshua M. Clulow 	 * Initialize the mutex lock used for the soft interrupt request
242*a288e5a9SJoshua M. Clulow 	 * queues.
243*a288e5a9SJoshua M. Clulow 	 */
244*a288e5a9SJoshua M. Clulow 	mutex_init(&periodics_lock, NULL, MUTEX_ADAPTIVE, NULL);
245*a288e5a9SJoshua M. Clulow }
246*a288e5a9SJoshua M. Clulow 
/*
 * Tear down the ddi_periodic_add(9F) subsystem: forcibly delete any
 * periodics that were leaked by their consumers, then destroy the taskq,
 * id space, kmem cache, lists and lock created by ddi_periodic_init().
 */
void
ddi_periodic_fini(void)
{
	int i;
	ddi_periodic_impl_t *dpr;

	/*
	 * Find all periodics that have not yet been unregistered and,
	 * on DEBUG bits, print a warning about this resource leak.
	 */
	mutex_enter(&periodics_lock);
	while ((dpr = list_head(&periodics)) != NULL) {
#ifdef	DEBUG
		printf("DDI periodic handler not deleted (id=%lx, hdlr=%s)\n",
		    (unsigned long)dpr->dpr_id, periodic_handler_symbol(dpr));
#endif

		/*
		 * Drop periodics_lock before calling i_untimeout(): it takes
		 * the lock itself (to remove dpr from the list) and may block
		 * waiting for the handler to finish.
		 */
		mutex_exit(&periodics_lock);
		/*
		 * Delete the periodic ourselves:
		 */
		i_untimeout((timeout_t)(uintptr_t)dpr->dpr_id);
		/*
		 * Re-take the lock and re-check the list head; i_untimeout()
		 * removed and freed dpr.
		 */
		mutex_enter(&periodics_lock);
	}
	mutex_exit(&periodics_lock);

	/*
	 * At this point there are no remaining cyclics, so clean up the
	 * remaining resources:
	 */
	taskq_destroy(periodic_taskq);
	periodic_taskq = NULL;

	id_space_destroy(periodic_id_space);
	periodic_id_space = NULL;

	kmem_cache_destroy(periodic_cache);
	periodic_cache = NULL;

	list_destroy(&periodics);
	for (i = PERI_IPL_1; i <= PERI_IPL_10; i++)
		list_destroy(&periodic_softint_queue[i - 1]);

	mutex_destroy(&periodics_lock);
}
292*a288e5a9SJoshua M. Clulow 
/*
 * Cyclic handler, fired once per interval for each registered periodic.
 * It does not run the consumer's handler itself; it merely marks the
 * periodic DPF_DISPATCHED and hands it to the taskq (PERI_IPL_0) or to the
 * appropriate soft interrupt queue (PERI_IPL_1..10) for execution.
 * Runs at CY_LOCK_LEVEL (see i_timeout()).
 */
static void
periodic_cyclic_handler(void *arg)
{
	ddi_periodic_impl_t *dpr = arg;

	mutex_enter(&dpr->dpr_lock);
	/*
	 * If we've been cancelled, or we're already dispatched, then exit
	 * immediately:
	 */
	if (dpr->dpr_flags & (DPF_CANCELLED | DPF_DISPATCHED)) {
		mutex_exit(&dpr->dpr_lock);
		return;
	}
	/* A periodic cannot be EXECUTING unless it is also DISPATCHED. */
	VERIFY(!(dpr->dpr_flags & DPF_EXECUTING));

	/*
	 * This periodic is not presently dispatched, so dispatch it now:
	 */
	dpr->dpr_flags |= DPF_DISPATCHED;
	mutex_exit(&dpr->dpr_lock);

	if (dpr->dpr_level == PERI_IPL_0) {
		/*
		 * DDI_IPL_0 periodics are dispatched onto the taskq:
		 */
		taskq_dispatch_ent(periodic_taskq, periodic_execute,
		    dpr, 0, &dpr->dpr_taskq_ent);
	} else {
		/*
		 * Higher priority periodics are handled by a soft interrupt
		 * handler.  Enqueue us for processing by the handler:
		 */
		mutex_enter(&periodics_lock);
		list_insert_tail(&periodic_softint_queue[dpr->dpr_level - 1],
		    dpr);
		mutex_exit(&periodics_lock);

		/*
		 * Request the execution of the soft interrupt handler for this
		 * periodic's priority level.
		 */
		sir_on(dpr->dpr_level);
	}
}
338*a288e5a9SJoshua M. Clulow 
339*a288e5a9SJoshua M. Clulow static void
340*a288e5a9SJoshua M. Clulow periodic_destroy(ddi_periodic_impl_t *dpr)
341*a288e5a9SJoshua M. Clulow {
342*a288e5a9SJoshua M. Clulow 	if (dpr == NULL)
343*a288e5a9SJoshua M. Clulow 		return;
344*a288e5a9SJoshua M. Clulow 
345*a288e5a9SJoshua M. Clulow 	/*
346*a288e5a9SJoshua M. Clulow 	 * By now, we should have a periodic that is not busy, and has been
347*a288e5a9SJoshua M. Clulow 	 * cancelled:
348*a288e5a9SJoshua M. Clulow 	 */
349*a288e5a9SJoshua M. Clulow 	VERIFY(dpr->dpr_flags == DPF_CANCELLED);
350*a288e5a9SJoshua M. Clulow 	VERIFY(dpr->dpr_thread == NULL);
351*a288e5a9SJoshua M. Clulow 
352*a288e5a9SJoshua M. Clulow 	id_free(periodic_id_space, dpr->dpr_id);
353*a288e5a9SJoshua M. Clulow 	cv_destroy(&dpr->dpr_cv);
354*a288e5a9SJoshua M. Clulow 	mutex_destroy(&dpr->dpr_lock);
355*a288e5a9SJoshua M. Clulow 	kmem_cache_free(periodic_cache, dpr);
356*a288e5a9SJoshua M. Clulow }
357*a288e5a9SJoshua M. Clulow 
358*a288e5a9SJoshua M. Clulow static ddi_periodic_impl_t *
359*a288e5a9SJoshua M. Clulow periodic_create(void)
360*a288e5a9SJoshua M. Clulow {
361*a288e5a9SJoshua M. Clulow 	ddi_periodic_impl_t *dpr;
362*a288e5a9SJoshua M. Clulow 
363*a288e5a9SJoshua M. Clulow 	dpr = kmem_cache_alloc(periodic_cache, KM_SLEEP);
364*a288e5a9SJoshua M. Clulow 	bzero(dpr, sizeof (*dpr));
365*a288e5a9SJoshua M. Clulow 	dpr->dpr_id = id_alloc(periodic_id_space);
366*a288e5a9SJoshua M. Clulow 	mutex_init(&dpr->dpr_lock, NULL, MUTEX_ADAPTIVE, NULL);
367*a288e5a9SJoshua M. Clulow 	cv_init(&dpr->dpr_cv, NULL, CV_DEFAULT, NULL);
368*a288e5a9SJoshua M. Clulow 
369*a288e5a9SJoshua M. Clulow 	return (dpr);
370*a288e5a9SJoshua M. Clulow }
371*a288e5a9SJoshua M. Clulow 
372*a288e5a9SJoshua M. Clulow /*
373*a288e5a9SJoshua M. Clulow  * This function provides the implementation for the ddi_periodic_add(9F)
374*a288e5a9SJoshua M. Clulow  * interface.  It registers a periodic handler and returns an opaque identifier
375*a288e5a9SJoshua M. Clulow  * that can be unregistered via ddi_periodic_delete(9F)/i_untimeout().
376*a288e5a9SJoshua M. Clulow  *
377*a288e5a9SJoshua M. Clulow  * It may be called in user or kernel context, provided cpu_lock is not held.
378*a288e5a9SJoshua M. Clulow  */
/*
 * This function provides the implementation for the ddi_periodic_add(9F)
 * interface.  It registers a periodic handler and returns an opaque identifier
 * that can be unregistered via ddi_periodic_delete(9F)/i_untimeout().
 *
 * It may be called in user or kernel context, provided cpu_lock is not held.
 *
 *   func/arg:  the consumer's handler and its argument
 *   interval:  requested firing interval in nanoseconds (rounded up to a
 *              multiple of ddi_periodic_resolution)
 *   level:     0 for kernel (taskq) context, or soft interrupt level 1-10
 */
timeout_t
i_timeout(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	cyc_handler_t cyh;
	cyc_time_t cyt;
	ddi_periodic_impl_t *dpr;

	VERIFY(func != NULL);
	VERIFY(level >= 0 && level <= 10);

	/*
	 * Allocate object to track this periodic:
	 */
	dpr = periodic_create();
	dpr->dpr_level = level;
	dpr->dpr_handler = func;
	dpr->dpr_arg = arg;

	/*
	 * The minimum supported interval between firings of the periodic
	 * handler is 10ms; see ddi_periodic_add(9F) for more details.  If a
	 * shorter interval is requested, round up.
	 */
	if (ddi_periodic_resolution > interval) {
		cmn_err(CE_WARN,
		    "The periodic timeout (handler=%s, interval=%lld) "
		    "requests a finer interval than the supported resolution. "
		    "It rounds up to %lld\n", periodic_handler_symbol(dpr),
		    interval, ddi_periodic_resolution);
		interval = ddi_periodic_resolution;
	}

	/*
	 * Ensure that the interval is an even multiple of the base resolution
	 * that is at least as long as the requested interval.
	 */
	dpr->dpr_interval = roundup(interval, ddi_periodic_resolution);

	/*
	 * Create the underlying cyclic:
	 */
	cyh.cyh_func = periodic_cyclic_handler;
	cyh.cyh_arg = dpr;
	cyh.cyh_level = CY_LOCK_LEVEL;

	/* cyt_when == 0 requests the first firing as soon as possible. */
	cyt.cyt_when = 0;
	cyt.cyt_interval = dpr->dpr_interval;

	/* cyclic_add() requires cpu_lock; see cyclic.c. */
	mutex_enter(&cpu_lock);
	dpr->dpr_cyclic_id = cyclic_add(&cyh, &cyt);
	mutex_exit(&cpu_lock);

	/*
	 * Make the id visible to ddi_periodic_delete(9F) before we
	 * return it:
	 */
	mutex_enter(&periodics_lock);
	list_insert_tail(&periodics, dpr);
	mutex_exit(&periodics_lock);

	return ((timeout_t)(uintptr_t)dpr->dpr_id);
}
441*a288e5a9SJoshua M. Clulow 
442*a288e5a9SJoshua M. Clulow /*
443*a288e5a9SJoshua M. Clulow  * This function provides the implementation for the ddi_periodic_delete(9F)
444*a288e5a9SJoshua M. Clulow  * interface.  It cancels a periodic handler previously registered through
445*a288e5a9SJoshua M. Clulow  * ddi_periodic_add(9F)/i_timeout().
446*a288e5a9SJoshua M. Clulow  *
447*a288e5a9SJoshua M. Clulow  * It may be called in user or kernel context, provided cpu_lock is not held.
448*a288e5a9SJoshua M. Clulow  * It may NOT be called from within a periodic handler.
449*a288e5a9SJoshua M. Clulow  */
/*
 * This function provides the implementation for the ddi_periodic_delete(9F)
 * interface.  It cancels a periodic handler previously registered through
 * ddi_periodic_add(9F)/i_timeout().
 *
 * It may be called in user or kernel context, provided cpu_lock is not held.
 * It may NOT be called from within a periodic handler.
 */
void
i_untimeout(timeout_t id)
{
	ddi_periodic_impl_t *dpr;

	/*
	 * Find the periodic in the list of all periodics and remove it.
	 * If we find it in (and remove it from) the global list, we have
	 * license to free it once it is no longer busy.
	 */
	mutex_enter(&periodics_lock);
	for (dpr = list_head(&periodics); dpr != NULL; dpr =
	    list_next(&periodics, dpr)) {
		if (dpr->dpr_id == (id_t)(uintptr_t)id) {
			list_remove(&periodics, dpr);
			break;
		}
	}
	mutex_exit(&periodics_lock);

	/*
	 * We could not find a periodic for this id, so bail out:
	 */
	if (dpr == NULL)
		return;

	mutex_enter(&dpr->dpr_lock);
	/*
	 * We should be the only one trying to cancel this periodic:
	 * any concurrent deleter would have removed it from the global
	 * list before us and we would not have found it above.
	 */
	VERIFY(!(dpr->dpr_flags & DPF_CANCELLED));
	/*
	 * Removing a periodic from within its own handler function will
	 * cause a deadlock, so panic explicitly.
	 */
	if (dpr->dpr_thread == curthread) {
		panic("ddi_periodic_delete(%lx) called from its own handler\n",
		    (unsigned long)dpr->dpr_id);
	}
	/*
	 * Mark the periodic as cancelled:
	 */
	dpr->dpr_flags |= DPF_CANCELLED;
	mutex_exit(&dpr->dpr_lock);

	/*
	 * Cancel our cyclic.  cyclic_remove() guarantees that the cyclic
	 * handler will not run again after it returns.  Note that the cyclic
	 * handler merely _dispatches_ the periodic, so this does _not_ mean
	 * the periodic handler is also finished running.
	 */
	mutex_enter(&cpu_lock);
	cyclic_remove(dpr->dpr_cyclic_id);
	mutex_exit(&cpu_lock);

	/*
	 * Wait until the periodic handler is no longer running:
	 * periodic_execute() clears both flags and broadcasts on dpr_cv.
	 */
	mutex_enter(&dpr->dpr_lock);
	while (dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) {
		cv_wait(&dpr->dpr_cv, &dpr->dpr_lock);
	}
	mutex_exit(&dpr->dpr_lock);

	periodic_destroy(dpr);
}
516