/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Standard module for handling DLPI Style 2 attach/detach
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/esunddi.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/dlpi.h>
#include <sys/strsun.h>
#include <sys/policy.h>

static struct streamtab drstab;

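/*
 * STREAMS module function switch: registers this module under
 * DRMODNAME as an MT-safe (D_MP) module.
 */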
static struct fmodsw fsw = {
	DRMODNAME,
	&drstab,
	D_MP
};


/*
 * Module linkage information for the kernel.
 */

static struct modlstrmod modlstrmod = {
	&mod_strmodops, "dr compatibility for DLPI style 2 drivers %I%", &fsw
};


static struct modlinkage modlinkage = {
	MODREV_1, &modlstrmod, NULL
};


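/*
 * Loadable module entry points; all real work is delegated to the
 * mod_install/mod_remove/mod_info framework.
 */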
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


static int	dropen(queue_t *, dev_t *, int, int, cred_t *);
static int	drclose(queue_t *, int, cred_t *);
static int	drrput(queue_t *, mblk_t *);
static int	drwput(queue_t *, mblk_t *);

static struct module_info drinfo = {
	0,
	DRMODNAME,
	0,
	INFPSZ,
	1,
	0
};

static struct qinit drrinit = {
	(int (*)())drrput,
	NULL,
	dropen,
	drclose,
	NULL,
	&drinfo
};

static struct qinit drwinit = {
	(int (*)())drwput,
	NULL,
	NULL,
	NULL,
	NULL,
	&drinfo
};

static struct streamtab drstab = {
	&drrinit,
	&drwinit,
	NULL,
	NULL
};

/*
 * This module is pushed directly on top of the bottom driver
 * in a DLPI style-2 stream by stropen(). It intercepts
 * DL_ATTACH_REQ/DL_DETACH_REQ messages on the write side
 * and acks on the read side, calling ddi_assoc_queue_with_devi()
 * where needed. The primary purpose is to work around a DR race
 * condition affecting non-DDI compliant DLPI style 2 drivers,
 * which may cause the system to panic.
 *
 * The following action is taken:
 * Write side (drwput):
 *	attach request:	hold driver instance assuming ppa == instance.
 *		This way, the instance cannot be detached while the
 *		driver is processing DL_ATTACH_REQ.
 *
 *		On a successful hold, store the dip in a ring buffer
 *		to be processed later by the read side.
 *		If the hold fails (most likely ppa != instance), we store
 *		NULL in the ring buffer and the read side won't take
 *		any action on the ack.
 *
 * Read side (drrput):
 *	attach success:	if (dip held on write side) associate queue with dip
 *	attach failure:	if (dip held on write side) release hold on dip
 *	detach success:	associate queue with NULL
 *	detach failure:	do nothing
 *
 * The module assumes that incoming DL_ATTACH_REQ/DL_DETACH_REQ
 * messages are ordered (non-concurrent) and that the bottom
 * driver processes them and sends acknowledgements in the same
 * order. This assumption is reasonable because concurrent
 * association results in non-deterministic queue behavior.
 * The module is coded carefully such that unordered messages
 * do not result in a system panic.
 *
 * The module handles multiple outstanding messages queued
 * in the bottom driver. Messages processed on the write side
 * but not yet arrived at the read side are placed in the ring
 * buffer dr_dip[], between dr_nfirst and dr_nlast. The write side
 * is the producer and the read side is the consumer. The buffer is
 * full when dr_nfirst == dr_nlast.
 *
 * The current size of the ring buffer is 64 (MAX_DLREQS) per stream.
 * During normal testing, we have not seen outstanding messages
 * above 10.
 */

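/*
 * Ring buffer capacity and index-advance macro. INCR() bumps an
 * index and wraps it back to 0 at MAX_DLREQS; for example, the
 * write side advances dr_nfirst from 63 back to 0 after storing
 * the 64th outstanding dip.
 */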
#define	MAX_DLREQS	64
#define	INCR(x)		{(x)++; if ((x) >= MAX_DLREQS) (x) = 0; }

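/*
 * Per-stream state, shared by the read and write queues:
 * dr_lock protects the ring buffer indices, dr_major is the major
 * number of the bottom driver (used to hold instances by ppa),
 * dr_nfirst is the producer index (write side), dr_nlast is the
 * consumer index (read side), and dr_dip[] holds the dips between
 * the two indices.
 */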
struct drstate {
	kmutex_t dr_lock;
	major_t dr_major;
	int dr_nfirst;
	int dr_nlast;
	dev_info_t *dr_dip[MAX_DLREQS];
};

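/*
 * Open routine, called when the module is pushed. Allocates the
 * per-stream state, records the bottom driver's major number, and
 * starts the stream with no device association.
 */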
/* ARGSUSED1 */
static int
dropen(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *crp)
{
	struct drstate *dsp;

	if (sflag != MODOPEN) {	/* must be a pushed module */
		return (EINVAL);
	}

	if (secpolicy_net_rawaccess(crp) != 0) {
		return (EPERM);
	}

	if (q->q_ptr != NULL) {
		return (0);	/* already open */
	}

	dsp = kmem_zalloc(sizeof (*dsp), KM_SLEEP);
	dsp->dr_major = getmajor(*devp);
	mutex_init(&dsp->dr_lock, NULL, MUTEX_DEFAULT, NULL);
	q->q_ptr = OTHERQ(q)->q_ptr = dsp;
	qprocson(q);
	ddi_assoc_queue_with_devi(q, NULL);
	return (0);
}

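/*
 * Close routine. Drops any queue/devinfo association before turning
 * off the put/service procedures, then frees the per-stream state.
 */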
/* ARGSUSED1 */
static int
drclose(queue_t *q, int cflag, cred_t *crp)
{
	struct drstate *dsp = q->q_ptr;

	ASSERT(dsp);
	ddi_assoc_queue_with_devi(q, NULL);
	qprocsoff(q);

	mutex_destroy(&dsp->dr_lock);
	kmem_free(dsp, sizeof (*dsp));
	q->q_ptr = NULL;

	return (0);
}

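/*
 * Read-side put procedure: consumes one ring buffer entry for each
 * DL_OK_ACK/DL_ERROR_ACK that acknowledges a DL_ATTACH_REQ, and
 * clears the queue association on a successful DL_DETACH_REQ.
 * All messages are passed upstream unmodified.
 */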
static int
drrput(queue_t *q, mblk_t *mp)
{
	struct drstate *dsp;
	union DL_primitives *dlp;
	dev_info_t *dip;

	switch (DB_TYPE(mp)) {
	case M_PROTO:
	case M_PCPROTO:
		break;
	default:
		putnext(q, mp);
		return (0);
	}

	/* make sure size is sufficient for dl_primitive */
	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
		putnext(q, mp);
		return (0);
	}

	dlp = (union DL_primitives *)mp->b_rptr;
	switch (dlp->dl_primitive) {
	case DL_OK_ACK: {
		/* check for proper size, let upper layer deal with error */
		if (MBLKL(mp) < DL_OK_ACK_SIZE) {
			putnext(q, mp);
			return (0);
		}

		dsp = q->q_ptr;
		switch (dlp->ok_ack.dl_correct_primitive) {
		case DL_ATTACH_REQ:
			/*
			 * ddi_assoc_queue_with_devi() will hold the dip,
			 * so release our hold after the association.
			 *
			 * A NULL dip means we didn't hold the dip on the
			 * write side (unlikely, but possible), so we do
			 * nothing.
			 */
			mutex_enter(&dsp->dr_lock);
			dip = dsp->dr_dip[dsp->dr_nlast];
			dsp->dr_dip[dsp->dr_nlast] = NULL;
			INCR(dsp->dr_nlast);
			mutex_exit(&dsp->dr_lock);
			if (dip) {
				ddi_assoc_queue_with_devi(q, dip);
				ddi_release_devi(dip);
			}
			break;

		case DL_DETACH_REQ:
			ddi_assoc_queue_with_devi(q, NULL);
			break;
		default:
			break;
		}
		break;
	}
	case DL_ERROR_ACK:
		if (dlp->error_ack.dl_error_primitive != DL_ATTACH_REQ)
			break;

		dsp = q->q_ptr;
		mutex_enter(&dsp->dr_lock);
		dip = dsp->dr_dip[dsp->dr_nlast];
		dsp->dr_dip[dsp->dr_nlast] = NULL;
		INCR(dsp->dr_nlast);
		mutex_exit(&dsp->dr_lock);
		/*
		 * Release dip on attach failure
		 */
		if (dip) {
			ddi_release_devi(dip);
		}
		break;
	default:
		break;
	}

	putnext(q, mp);
	return (0);
}

/*
 * Write-side put procedure: detect DL_ATTACH_REQ and hold the dip
 * so the instance cannot detach while the request is outstanding.
 */
static int
drwput(queue_t *q, mblk_t *mp)
{
	struct drstate *dsp;
	union DL_primitives *dlp;
	dev_info_t *dip;

	switch (DB_TYPE(mp)) {
	case M_PROTO:
	case M_PCPROTO:
		break;
	default:
		putnext(q, mp);
		return (0);
	}

	/* make sure size is sufficient for dl_primitive */
	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
		putnext(q, mp);
		return (0);
	}

	dlp = (union DL_primitives *)mp->b_rptr;
	switch (dlp->dl_primitive) {
	case DL_ATTACH_REQ:
		/*
		 * Check for proper size of the message.
		 *
		 * If the size is correct, get the ppa and attempt to
		 * hold the device, assuming ppa is the instance.
		 *
		 * If the size is wrong, we can't get the ppa, but
		 * still increment dr_nfirst because the read side
		 * will get an error ack on DL_ATTACH_REQ.
		 */
		dip = NULL;
		dsp = q->q_ptr;
		if (MBLKL(mp) >= DL_ATTACH_REQ_SIZE) {
			dip = ddi_hold_devi_by_instance(dsp->dr_major,
			    dlp->attach_req.dl_ppa, E_DDI_HOLD_DEVI_NOATTACH);
		}

		mutex_enter(&dsp->dr_lock);
		dsp->dr_dip[dsp->dr_nfirst] = dip;
		INCR(dsp->dr_nfirst);
		/*
		 * Check if the ring buffer is full. If so, assert in a
		 * debug kernel and produce a warning in a non-debug kernel.
		 */
		ASSERT(dsp->dr_nfirst != dsp->dr_nlast);
		if (dsp->dr_nfirst == dsp->dr_nlast) {
			cmn_err(CE_WARN, "drcompat: internal buffer full");
		}
		mutex_exit(&dsp->dr_lock);
		break;
	default:
		break;
	}

	putnext(q, mp);
	return (0);
}