/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 *	Device Strategy
 */
#include <sys/dktp/cm.h>
#include <sys/kstat.h>

#include <sys/dktp/quetypes.h>
#include <sys/dktp/queue.h>
#include <sys/dktp/tgcom.h>
#include <sys/dktp/fctypes.h>
#include <sys/dktp/flowctrl.h>
#include <sys/param.h>
#include <vm/page.h>
#include <sys/modctl.h>

/*
 *	Object Management
 */

static struct buf *qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge,
    int *can_merge);

static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"Device Strategy Objects %I%"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlmisc,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 *	Common Flow Control functions
 */

/*
 * Local static data
 */
#ifdef	FLC_DEBUG
#define	DENT	0x0001
#define	DERR	0x0002
#define	DIO	0x0004
static	int	flc_debug = DENT|DERR|DIO;

#include <sys/thread.h>
static	int	flc_malloc_intr = 0;
#endif	/* FLC_DEBUG */

static	int	flc_kstat = 1;

static struct flc_obj *fc_create(struct flc_objops *fcopsp);
static int fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
    void *lkarg);
static int fc_free(struct flc_obj *flcobjp);
static int fc_start_kstat(opaque_t queuep, char *devtype, int instance);
static int fc_stop_kstat(opaque_t queuep);

static struct flc_obj *
fc_create(struct flc_objops *fcopsp)
{
	struct	flc_obj *flcobjp;
	struct	fc_data *fcdp;

	flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
	if (!flcobjp)
		return (NULL);

	fcdp = (struct fc_data *)(flcobjp+1);
	flcobjp->flc_data = (opaque_t)fcdp;
	flcobjp->flc_ops  = fcopsp;

	return ((opaque_t)flcobjp);
}
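
/*
 * Layout note (illustrative): fc_create() makes a single zeroed
 * allocation that holds the object header immediately followed by its
 * private data, so the one kmem_free() in fc_free() releases both:
 *
 *	+-------------------+ <- flcobjp
 *	| struct flc_obj    |    flc_data points just past the header
 *	+-------------------+ <- flcobjp + 1
 *	| struct fc_data    |
 *	+-------------------+
 */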

static int dmult_maxcnt = DMULT_MAXCNT;

static int
fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
{
	struct fc_data *fcdp = (struct fc_data *)queuep;

	mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);

	fcdp->ds_queobjp   = que_objp;
	fcdp->ds_tgcomobjp = tgcom_objp;
	fcdp->ds_waitcnt   = dmult_maxcnt;

	QUE_INIT(que_objp, lkarg);
	TGCOM_INIT(tgcom_objp);
	return (DDI_SUCCESS);
}

static int
fc_free(struct flc_obj *flcobjp)
{
	struct fc_data *fcdp;

	fcdp = (struct fc_data *)flcobjp->flc_data;
	if (fcdp->ds_queobjp)
		QUE_FREE(fcdp->ds_queobjp);
	if (fcdp->ds_tgcomobjp) {
		TGCOM_FREE(fcdp->ds_tgcomobjp);
		mutex_destroy(&fcdp->ds_mutex);
	}
	kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
	return (0);
}

/*ARGSUSED*/
static int
fc_start_kstat(opaque_t queuep, char *devtype, int instance)
{
	struct fc_data *fcdp = (struct fc_data *)queuep;

	if (!flc_kstat)
		return (0);

	if (!fcdp->ds_kstat) {
		if ((fcdp->ds_kstat = kstat_create("cmdk", instance, NULL,
		    "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT)) != NULL) {
			kstat_install(fcdp->ds_kstat);
		}
	}
	return (0);
}

static int
fc_stop_kstat(opaque_t queuep)
{
	struct fc_data *fcdp = (struct fc_data *)queuep;

	if (fcdp->ds_kstat) {
		kstat_delete(fcdp->ds_kstat);
		fcdp->ds_kstat = NULL;
	}
	return (0);
}
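
/*
 * Usage sketch (illustrative, not a definitive interface): a target
 * driver such as cmdk is assumed to drive these objects through FLC_*
 * wrapper macros from <sys/dktp/flowctrl.h> (assumed names, mirroring
 * the QUE_* and TGCOM_* wrappers this file itself uses):
 *
 *	struct flc_obj *fobjp = dmult_create();
 *	FLC_INIT(fobjp, tgcom_objp, que_objp, lkarg);
 *	FLC_START_KSTAT(fobjp, "disk", instance);
 *	FLC_ENQUE(fobjp, bp);		(once per I/O request)
 *	...
 *	FLC_STOP_KSTAT(fobjp);
 *	FLC_FREE(fobjp);
 *
 * The "cmdk:<instance>:disk" KSTAT_TYPE_IO counters maintained in the
 * enque/deque paths below (waitq/runq transitions, operation and byte
 * counts) are the per-disk statistics reported by iostat(1M).
 */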

/*
 *	Single Command per Device
 */
/*
 * Local Function Prototypes
 */
static int dsngl_restart(struct fc_data *dsnglp);

static int dsngl_enque(opaque_t, struct buf *);
static int dsngl_deque(opaque_t, struct buf *);

struct flc_objops dsngl_ops = {
	fc_init,
	fc_free,
	dsngl_enque,
	dsngl_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};

struct flc_obj *
dsngl_create()
{
	return (fc_create((struct flc_objops *)&dsngl_ops));
}

static int
dsngl_enque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dsnglp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;

	que_objp   = dsnglp->ds_queobjp;
	tgcom_objp = dsnglp->ds_tgcomobjp;

	if (!in_bp)
		return (0);
	mutex_enter(&dsnglp->ds_mutex);
	if (dsnglp->ds_bp || dsnglp->ds_outcnt) {
		QUE_ADD(que_objp, in_bp);
		if (dsnglp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
		}
		mutex_exit(&dsnglp->ds_mutex);
		return (0);
	}
	if (dsnglp->ds_kstat) {
		kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
	}
	if (TGCOM_PKT(tgcom_objp, in_bp, dsngl_restart,
	    (caddr_t)dsnglp) != DDI_SUCCESS) {
		dsnglp->ds_bp = in_bp;
		mutex_exit(&dsnglp->ds_mutex);
		return (0);
	}
	dsnglp->ds_outcnt++;
	if (dsnglp->ds_kstat)
		kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
	mutex_exit(&dsnglp->ds_mutex);
	TGCOM_TRANSPORT(tgcom_objp, in_bp);
	return (0);
}

static int
dsngl_deque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dsnglp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;
	struct	 buf *bp;

	que_objp   = dsnglp->ds_queobjp;
	tgcom_objp = dsnglp->ds_tgcomobjp;

	mutex_enter(&dsnglp->ds_mutex);
	if (in_bp) {
		dsnglp->ds_outcnt--;
		if (dsnglp->ds_kstat) {
			if (in_bp->b_flags & B_READ) {
				KSTAT_IO_PTR(dsnglp->ds_kstat)->reads++;
				KSTAT_IO_PTR(dsnglp->ds_kstat)->nread +=
				    (in_bp->b_bcount - in_bp->b_resid);
			} else {
				KSTAT_IO_PTR(dsnglp->ds_kstat)->writes++;
				KSTAT_IO_PTR(dsnglp->ds_kstat)->nwritten +=
				    (in_bp->b_bcount - in_bp->b_resid);
			}
			kstat_runq_exit(KSTAT_IO_PTR(dsnglp->ds_kstat));
		}
	}
	for (;;) {
		if (!dsnglp->ds_bp)
			dsnglp->ds_bp = QUE_DEL(que_objp);
		if (!dsnglp->ds_bp ||
		    (TGCOM_PKT(tgcom_objp, dsnglp->ds_bp, dsngl_restart,
		    (caddr_t)dsnglp) != DDI_SUCCESS) ||
		    dsnglp->ds_outcnt) {
			mutex_exit(&dsnglp->ds_mutex);
			return (0);
		}
		dsnglp->ds_outcnt++;
		bp = dsnglp->ds_bp;
		dsnglp->ds_bp = QUE_DEL(que_objp);
		if (dsnglp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
		mutex_exit(&dsnglp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		if (!mutex_tryenter(&dsnglp->ds_mutex))
			return (0);
	}
}

static int
dsngl_restart(struct fc_data *dsnglp)
{
	(void) dsngl_deque(dsnglp, NULL);
	return (-1);
}
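
/*
 * Flow sketch for the single-command discipline (derived from the code
 * above): dsngl_enque() transports at most one request at a time; a
 * request arriving while ds_outcnt is nonzero, or while ds_bp holds a
 * request still waiting for packet resources, sits on the underlying
 * queue object.  When TGCOM_PKT() cannot allocate resources it arranges
 * for dsngl_restart() to be called back later, which simply re-enters
 * dsngl_deque() to retry:
 *
 *	dsngl_enque(bp)			on command completion:
 *	    TGCOM_PKT() ok? --- yes --->    dsngl_deque(done_bp)
 *	    |	no			    next bp from QUE_DEL()
 *	    v				    TGCOM_TRANSPORT(...)
 *	    ds_bp = bp; wait for the
 *	    dsngl_restart() callback
 */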

/*
 *	Multiple Commands per Device
 */
/*
 * Local Function Prototypes
 */
static int dmult_restart(struct fc_data *dmultp);

static int dmult_enque(opaque_t, struct buf *);
static int dmult_deque(opaque_t, struct buf *);

struct flc_objops dmult_ops = {
	fc_init,
	fc_free,
	dmult_enque,
	dmult_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};

struct flc_obj *
dmult_create()
{
	return (fc_create((struct flc_objops *)&dmult_ops));
}

/*
 * Some of the object management functions, such as QUE_ADD() and
 * QUE_DEL(), do not acquire any locks themselves; they rely on
 * dmult_enque() and dmult_deque() to do all of the locking.
 * If that changes, locks will have to be taken in qmerge_add() and
 * qmerge_del().
 */
static int
dmult_enque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dmultp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;

	que_objp   = dmultp->ds_queobjp;
	tgcom_objp = dmultp->ds_tgcomobjp;

	if (!in_bp)
		return (0);
	mutex_enter(&dmultp->ds_mutex);
	if ((dmultp->ds_outcnt >= dmultp->ds_waitcnt) || dmultp->ds_bp) {
		QUE_ADD(que_objp, in_bp);
		if (dmultp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
		}
		mutex_exit(&dmultp->ds_mutex);
		return (0);
	}
	if (dmultp->ds_kstat) {
		kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
	}

	if (TGCOM_PKT(tgcom_objp, in_bp, dmult_restart,
	    (caddr_t)dmultp) != DDI_SUCCESS) {
		dmultp->ds_bp = in_bp;
		mutex_exit(&dmultp->ds_mutex);
		return (0);
	}
	dmultp->ds_outcnt++;
	if (dmultp->ds_kstat)
		kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));
	mutex_exit(&dmultp->ds_mutex);

	TGCOM_TRANSPORT(tgcom_objp, in_bp);
	return (0);
}

static int
dmult_deque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dmultp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;
	struct	 buf *bp;

	que_objp = dmultp->ds_queobjp;
	tgcom_objp = dmultp->ds_tgcomobjp;

	mutex_enter(&dmultp->ds_mutex);
	if (in_bp) {
		dmultp->ds_outcnt--;
		if (dmultp->ds_kstat) {
			if (in_bp->b_flags & B_READ) {
				KSTAT_IO_PTR(dmultp->ds_kstat)->reads++;
				KSTAT_IO_PTR(dmultp->ds_kstat)->nread +=
				    (in_bp->b_bcount - in_bp->b_resid);
			} else {
				KSTAT_IO_PTR(dmultp->ds_kstat)->writes++;
				KSTAT_IO_PTR(dmultp->ds_kstat)->nwritten +=
				    (in_bp->b_bcount - in_bp->b_resid);
			}
			kstat_runq_exit(KSTAT_IO_PTR(dmultp->ds_kstat));
		}
	}

	for (;;) {

#ifdef	FLC_DEBUG
		if ((curthread->t_intr) && (!dmultp->ds_bp) &&
		    (!dmultp->ds_outcnt))
			flc_malloc_intr++;
#endif

		if (!dmultp->ds_bp)
			dmultp->ds_bp = QUE_DEL(que_objp);
		if (!dmultp->ds_bp ||
		    (TGCOM_PKT(tgcom_objp, dmultp->ds_bp, dmult_restart,
		    (caddr_t)dmultp) != DDI_SUCCESS) ||
		    (dmultp->ds_outcnt >= dmultp->ds_waitcnt)) {
			mutex_exit(&dmultp->ds_mutex);
			return (0);
		}
		dmultp->ds_outcnt++;
		bp = dmultp->ds_bp;
		dmultp->ds_bp = QUE_DEL(que_objp);

		if (dmultp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));

		mutex_exit(&dmultp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		if (!mutex_tryenter(&dmultp->ds_mutex))
			return (0);
	}
}

static int
dmult_restart(struct fc_data *dmultp)
{
	(void) dmult_deque(dmultp, NULL);
	return (-1);
}
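
/*
 * Worked example for the multiple-command discipline (numbers are
 * illustrative): with ds_waitcnt at, say, four and ten requests
 * enqueued, dmult_enque()/dmult_deque() keep four commands outstanding
 * at the target.  Each completion decrements ds_outcnt and pulls the
 * next request off the queue object, so the window stays full without
 * ever exceeding ds_waitcnt.  As with dsngl, a TGCOM_PKT() resource
 * failure parks the request in ds_bp until the dmult_restart()
 * callback retries it.
 */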

/*
 *	Duplexed Commands per Device: Read Queue and Write Queue
 */
/*
 * Local Function Prototypes
 */
static int duplx_restart(struct duplx_data *duplxp);

static int duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
    void *lkarg);
static int duplx_free(struct flc_obj *flcobjp);
static int duplx_enque(opaque_t queuep, struct buf *bp);
static int duplx_deque(opaque_t queuep, struct buf *bp);

struct flc_objops duplx_ops = {
	duplx_init,
	duplx_free,
	duplx_enque,
	duplx_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};

struct flc_obj *
duplx_create()
{
	struct	flc_obj *flcobjp;
	struct	duplx_data *fcdp;

	flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
	if (!flcobjp)
		return (NULL);

	fcdp = (struct duplx_data *)(flcobjp+1);
	flcobjp->flc_data = (opaque_t)fcdp;
	flcobjp->flc_ops  = &duplx_ops;

	/* create the write-side FIFO queue object exactly once */
	fcdp->ds_writeq.fc_qobjp = qfifo_create();
	if (!fcdp->ds_writeq.fc_qobjp) {
		kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
		return (NULL);
	}
	return (flcobjp);
}

static int
duplx_free(struct flc_obj *flcobjp)
{
	struct duplx_data *fcdp;

	fcdp = (struct duplx_data *)flcobjp->flc_data;
	if (fcdp->ds_writeq.fc_qobjp) {
		QUE_FREE(fcdp->ds_writeq.fc_qobjp);
	}
	if (fcdp->ds_readq.fc_qobjp)
		QUE_FREE(fcdp->ds_readq.fc_qobjp);
	if (fcdp->ds_tgcomobjp) {
		TGCOM_FREE(fcdp->ds_tgcomobjp);
		mutex_destroy(&fcdp->ds_mutex);
	}
	kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
	return (0);
}

static int
duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
{
	struct duplx_data *fcdp = (struct duplx_data *)queuep;

	fcdp->ds_tgcomobjp = tgcom_objp;
	fcdp->ds_readq.fc_qobjp = que_objp;

	QUE_INIT(que_objp, lkarg);
	QUE_INIT(fcdp->ds_writeq.fc_qobjp, lkarg);
	TGCOM_INIT(tgcom_objp);

	mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);

	fcdp->ds_writeq.fc_maxcnt = DUPLX_MAXCNT;
	fcdp->ds_readq.fc_maxcnt  = DUPLX_MAXCNT;

	/* queues point to each other for round robin */
	fcdp->ds_readq.next = &fcdp->ds_writeq;
	fcdp->ds_writeq.next = &fcdp->ds_readq;

	return (DDI_SUCCESS);
}

static int
duplx_enque(opaque_t queuep, struct buf *in_bp)
{
	struct duplx_data *duplxp = (struct duplx_data *)queuep;
	opaque_t tgcom_objp;
	struct fc_que *activeq;
	struct buf *bp;

	mutex_enter(&duplxp->ds_mutex);
	if (in_bp) {
		if (duplxp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(duplxp->ds_kstat));
		}
		if (in_bp->b_flags & B_READ)
			activeq = &duplxp->ds_readq;
		else
			activeq = &duplxp->ds_writeq;

		QUE_ADD(activeq->fc_qobjp, in_bp);
	} else {
		activeq = &duplxp->ds_readq;
	}

	tgcom_objp = duplxp->ds_tgcomobjp;

	for (;;) {
		if (!activeq->fc_bp)
			activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
		if (!activeq->fc_bp ||
		    (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
		    (caddr_t)duplxp) != DDI_SUCCESS) ||
		    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {

			/* switch read/write queues */
			activeq = activeq->next;
			if (!activeq->fc_bp)
				activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
			if (!activeq->fc_bp ||
			    (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
			    duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
			    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
				mutex_exit(&duplxp->ds_mutex);
				return (0);
			}
		}

		activeq->fc_outcnt++;
		bp = activeq->fc_bp;
		activeq->fc_bp = NULL;

		if (duplxp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));
		mutex_exit(&duplxp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		if (!mutex_tryenter(&duplxp->ds_mutex))
			return (0);

		activeq = activeq->next;
	}
}

static int
duplx_deque(opaque_t queuep, struct buf *in_bp)
{
	struct duplx_data *duplxp = (struct duplx_data *)queuep;
	opaque_t tgcom_objp;
	struct fc_que *activeq;
	struct buf *bp;

	mutex_enter(&duplxp->ds_mutex);

	tgcom_objp = duplxp->ds_tgcomobjp;

	if (in_bp->b_flags & B_READ)
		activeq = &duplxp->ds_readq;
	else
		activeq = &duplxp->ds_writeq;
	activeq->fc_outcnt--;

	if (duplxp->ds_kstat) {
		if (in_bp->b_flags & B_READ) {
			KSTAT_IO_PTR(duplxp->ds_kstat)->reads++;
			KSTAT_IO_PTR(duplxp->ds_kstat)->nread +=
			    (in_bp->b_bcount - in_bp->b_resid);
		} else {
			KSTAT_IO_PTR(duplxp->ds_kstat)->writes++;
			KSTAT_IO_PTR(duplxp->ds_kstat)->nwritten +=
			    (in_bp->b_bcount - in_bp->b_resid);
		}
		kstat_runq_exit(KSTAT_IO_PTR(duplxp->ds_kstat));
	}

	for (;;) {

		/* if needed, try to pull request off a queue */
		if (!activeq->fc_bp)
			activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);

		if (!activeq->fc_bp ||
		    (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
		    (caddr_t)duplxp) != DDI_SUCCESS) ||
		    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {

			activeq = activeq->next;
			if (!activeq->fc_bp)
				activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);

			if (!activeq->fc_bp ||
			    (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
			    duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
			    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
				mutex_exit(&duplxp->ds_mutex);
				return (0);
			}
		}

		activeq->fc_outcnt++;
		bp = activeq->fc_bp;
		activeq->fc_bp = NULL;

		if (duplxp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));

		mutex_exit(&duplxp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		if (!mutex_tryenter(&duplxp->ds_mutex))
			return (0);

		activeq = activeq->next;
	}
}

static int
duplx_restart(struct duplx_data *duplxp)
{
	(void) duplx_enque(duplxp, NULL);
	return (-1);
}
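
/*
 * Round-robin note (derived from the code above): reads land on
 * ds_readq and writes on ds_writeq, each with its own
 * fc_outcnt/fc_maxcnt window (DUPLX_MAXCNT).  Because duplx_init()
 * points the two queues at each other through ->next, the transport
 * loops in duplx_enque() and duplx_deque() alternate queues after
 * every command issued, so a long stream of writes cannot starve
 * reads, and vice versa.
 */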

/*
 *	Tagged queueing flow control
 */
/*
 * Local Function Prototypes
 */

struct flc_objops adapt_ops = {
	fc_init,
	fc_free,
	dmult_enque,
	dmult_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};

struct flc_obj *
adapt_create()
{
	return (fc_create((struct flc_objops *)&adapt_ops));
}

/*
 *	Common Queue functions
 */

/*
 *	Local static data
 */
#ifdef	Q_DEBUG
#define	DENT	0x0001
#define	DERR	0x0002
#define	DIO	0x0004
static	int	que_debug = DENT|DERR|DIO;

#endif	/* Q_DEBUG */
/*
 *	Local Function Prototypes
 */
static struct que_obj *que_create(struct que_objops *qopsp);
static int que_init(struct que_data *qfp, void *lkarg);
static int que_free(struct que_obj *queobjp);
static struct buf *que_del(struct que_data *qfp);

static struct que_obj *
que_create(struct que_objops *qopsp)
{
	struct	que_data *qfp;
	struct	que_obj *queobjp;

	queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
	if (!queobjp)
		return (NULL);

	queobjp->que_ops = qopsp;
	qfp = (struct que_data *)(queobjp+1);
	queobjp->que_data = (opaque_t)qfp;

	return ((opaque_t)queobjp);
}

static int
que_init(struct que_data *qfp, void *lkarg)
{
	mutex_init(&qfp->q_mutex, NULL, MUTEX_DRIVER, lkarg);
	return (DDI_SUCCESS);
}

static int
que_free(struct que_obj *queobjp)
{
	struct	que_data *qfp;

	qfp = (struct que_data *)queobjp->que_data;
	mutex_destroy(&qfp->q_mutex);
	kmem_free(queobjp, (sizeof (*queobjp) + sizeof (struct que_data)));
	return (0);
}

static struct buf *
que_del(struct que_data *qfp)
{
	struct buf *bp;

	bp = qfp->q_tab.b_actf;
	if (bp) {
		qfp->q_tab.b_actf = bp->av_forw;
		if (!qfp->q_tab.b_actf)
			qfp->q_tab.b_actl = NULL;
		bp->av_forw = 0;
	}
	return (bp);
}

/*
 *	Qmerge
 *	Local Function Prototypes
 */
static int qmerge_add(struct que_data *qfp, struct buf *bp);
static int qmerge_free(struct que_obj *queobjp);
static struct buf *qmerge_del(struct que_data *qfp);

struct que_objops qmerge_ops = {
	que_init,
	qmerge_free,
	qmerge_add,
	qmerge_del,
	0, 0
};

/* fields in diskhd */
#define	hd_cnt			b_back
#define	hd_private		b_forw
#define	hd_flags		b_flags
#define	hd_sync_next		av_forw
#define	hd_async_next		av_back

#define	hd_sync2async		sync_async_ratio

#define	QNEAR_FORWARD		0x01
#define	QNEAR_BACKWARD		0x02
#define	QNEAR_ASYNCONLY		0x04
#define	QNEAR_ASYNCALSO		0x08

#define	DBLK(bp) ((unsigned long)(bp)->b_private)

#define	BP_LT_BP(a, b) (DBLK(a) < DBLK(b))
#define	BP_GT_BP(a, b) (DBLK(a) > DBLK(b))
#define	BP_LT_HD(a, b) (DBLK(a) < (unsigned long)((b)->hd_private))
#define	BP_GT_HD(a, b) (DBLK(a) > (unsigned long)((b)->hd_private))
#define	QNEAR_ASYNC	(QNEAR_ASYNCONLY|QNEAR_ASYNCALSO)

#define	SYNC2ASYNC(a) ((a)->q_tab.hd_cnt)
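
/*
 * Interpretation note (derived from the macros above): the queueing
 * code stashes a request's starting disk block number in b_private and
 * reads it back with DBLK(); hd_private tracks the block number of the
 * current "head position" used by the elevator.  For example, with
 * DEV_BSIZE of 512, an 8K buf whose DBLK() is 96 covers blocks 96..111,
 * so a second buf with DBLK() == 112 starts exactly where it ends
 * (this adjacency is the test qmerge_can_merge() applies below).
 */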

/*
 * qmerge implements a two-priority queue: the low priority queue holds
 * ASYNC write requests, while all other requests are queued on the high
 * priority sync queue.  Requests on the async queue are merged when
 * possible.
 * By default qmerge2wayscan is 1, selecting an elevator algorithm.  When
 * this variable is set to zero, it has the following side effects:
 * 1. We assume fairness is the number one issue.
 * 2. The next request to be picked indicates the current head position.
 *
 * qmerge_sync2async indicates the ratio of scans of the high priority
 * sync queue to the low priority async queue.
 *
 * When the qmerge variables have the following values, qmerge degenerates
 * to qsort behavior:
 *
 * qmerge1pri = 1, qmerge2wayscan = 0, qmerge_max_merge = 0
 */
static int	qmerge_max_merge = 128 * 1024;
static intptr_t	qmerge_sync2async = 4;
static int	qmerge2wayscan = 1;
static int	qmerge1pri = 0;
static int	qmerge_merge = 0;

/*
 *	Local static data
 */
struct que_obj *
qmerge_create()
{
	struct que_data *qfp;
	struct que_obj *queobjp;

	queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
	if (!queobjp)
		return (NULL);

	queobjp->que_ops = &qmerge_ops;
	qfp = (struct que_data *)(queobjp+1);
	qfp->q_tab.hd_private = 0;
	qfp->q_tab.hd_sync_next = qfp->q_tab.hd_async_next = NULL;
	qfp->q_tab.hd_cnt = (void *)qmerge_sync2async;
	queobjp->que_data = (opaque_t)qfp;

	return ((opaque_t)queobjp);
}

static int
qmerge_free(struct que_obj *queobjp)
{
	struct	que_data *qfp;

	qfp = (struct que_data *)queobjp->que_data;
	mutex_destroy(&qfp->q_mutex);
	kmem_free(queobjp, (sizeof (*queobjp) + sizeof (*qfp)));
	return (0);
}

static int
qmerge_can_merge(struct buf *bp1, struct buf *bp2)
{
	const int paw_flags = B_PAGEIO | B_ASYNC | B_WRITE;

	if ((bp1->b_un.b_addr != 0) || (bp2->b_un.b_addr != 0) ||
	    ((bp1->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
	    ((bp2->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
	    (bp1->b_bcount & PAGEOFFSET) || (bp2->b_bcount & PAGEOFFSET) ||
	    (bp1->b_bcount + bp2->b_bcount > qmerge_max_merge))
		return (0);

	if ((DBLK(bp2) + bp2->b_bcount / DEV_BSIZE == DBLK(bp1)) ||
	    (DBLK(bp1) + bp1->b_bcount / DEV_BSIZE == DBLK(bp2)))
		return (1);
	else
		return (0);
}
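
/*
 * Worked example (illustrative): two page-aligned 4K bufs, both marked
 * B_PAGEIO|B_ASYNC|B_WRITE, not kernel-mapped, with DBLK() values 96
 * and 104.  Since 96 + 4096/DEV_BSIZE = 96 + 8 = 104, the second buf
 * starts exactly where the first ends, and their combined size (8K) is
 * well under qmerge_max_merge (128K), so qmerge_can_merge() returns 1.
 * Had either buf been a read, synchronous, mapped in (B_REMAPPED or a
 * non-null b_addr), or not a whole number of pages, it would return 0.
 */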

static void
qmerge_mergesetup(struct buf *bp_merge, struct buf *bp)
{
	struct	buf *bp1;
	struct	page *pp, *pp_merge, *pp_merge_prev;
	int	forward;

	qmerge_merge++;
	forward = DBLK(bp_merge) < DBLK(bp);

	bp_merge->b_bcount += bp->b_bcount;

	pp = bp->b_pages;
	pp_merge = bp_merge->b_pages;

	pp_merge_prev = pp_merge->p_prev;

	pp_merge->p_prev->p_next = pp;
	pp_merge->p_prev = pp->p_prev;
	pp->p_prev->p_next = pp_merge;
	pp->p_prev = pp_merge_prev;

	bp1 = bp_merge->b_forw;

	bp1->av_back->av_forw = bp;
	bp->av_back = bp1->av_back;
	bp1->av_back = bp;
	bp->av_forw = bp1;

	if (!forward) {
		bp_merge->b_forw = bp;
		bp_merge->b_pages = pp;
		bp_merge->b_private = bp->b_private;
	}
}

static void
que_insert(struct que_data *qfp, struct buf *bp)
{
	struct buf	*bp1, *bp_start, *lowest_bp, *highest_bp;
	uintptr_t	highest_blk, lowest_blk;
	struct buf	**async_bpp, **sync_bpp, **bpp;
	struct diskhd	*dp = &qfp->q_tab;

	sync_bpp = &dp->hd_sync_next;
	async_bpp = &dp->hd_async_next;
	/*
	 * The ioctl used by the format utility requires that bp->av_back be
	 * preserved.
	 */
	if (bp->av_back)
		bp->b_error = (intptr_t)bp->av_back;
	if (!qmerge1pri &&
	    ((bp->b_flags & (B_ASYNC|B_READ|B_FREE)) == B_ASYNC)) {
		bpp = &dp->hd_async_next;
	} else {
		bpp = &dp->hd_sync_next;
	}

	if ((bp1 = *bpp) == NULL) {
		*bpp = bp;
		bp->av_forw = bp->av_back = bp;
		if ((bpp == async_bpp) && (*sync_bpp == NULL)) {
			dp->hd_flags |= QNEAR_ASYNCONLY;
		} else if (bpp == sync_bpp) {
			dp->hd_flags &= ~QNEAR_ASYNCONLY;
			if (*async_bpp) {
				dp->hd_flags |= QNEAR_ASYNCALSO;
			}
		}
		return;
	}
	bp_start = bp1;
	if (DBLK(bp) < DBLK(bp1)) {
		lowest_blk = DBLK(bp1);
		lowest_bp = bp1;
		do {
			if (DBLK(bp) > DBLK(bp1)) {
				bp->av_forw = bp1->av_forw;
				bp1->av_forw->av_back = bp;
				bp1->av_forw = bp;
				bp->av_back = bp1;

				if (((bpp == async_bpp) &&
				    (dp->hd_flags & QNEAR_ASYNC)) ||
				    (bpp == sync_bpp)) {
					if (!(dp->hd_flags & QNEAR_BACKWARD) &&
					    BP_GT_HD(bp, dp)) {
						*bpp = bp;
					}
				}
				return;
			} else if (DBLK(bp1) < lowest_blk) {
				lowest_bp = bp1;
				lowest_blk = DBLK(bp1);
			}
		} while ((DBLK(bp1->av_back) < DBLK(bp1)) &&
		    ((bp1 = bp1->av_back) != bp_start));
		bp->av_forw = lowest_bp;
		lowest_bp->av_back->av_forw = bp;
		bp->av_back = lowest_bp->av_back;
		lowest_bp->av_back = bp;
		if ((bpp == async_bpp) && !(dp->hd_flags & QNEAR_ASYNC)) {
			*bpp = bp;
		} else if (!(dp->hd_flags & QNEAR_BACKWARD) &&
		    BP_GT_HD(bp, dp)) {
			*bpp = bp;
		}
	} else {
		highest_blk = DBLK(bp1);
		highest_bp = bp1;
		do {
			if (DBLK(bp) < DBLK(bp1)) {
				bp->av_forw = bp1;
				bp1->av_back->av_forw = bp;
				bp->av_back = bp1->av_back;
				bp1->av_back = bp;
				if (((bpp == async_bpp) &&
				    (dp->hd_flags & QNEAR_ASYNC)) ||
				    (bpp == sync_bpp)) {
					if ((dp->hd_flags & QNEAR_BACKWARD) &&
					    BP_LT_HD(bp, dp)) {
						*bpp = bp;
					}
				}
				return;
			} else if (DBLK(bp1) > highest_blk) {
				highest_bp = bp1;
				highest_blk = DBLK(bp1);
			}
		} while ((DBLK(bp1->av_forw) > DBLK(bp1)) &&
		    ((bp1 = bp1->av_forw) != bp_start));
		bp->av_back = highest_bp;
		highest_bp->av_forw->av_back = bp;
		bp->av_forw = highest_bp->av_forw;
		highest_bp->av_forw = bp;

		if (((bpp == sync_bpp) ||
		    ((bpp == async_bpp) && (dp->hd_flags & QNEAR_ASYNC))) &&
		    (dp->hd_flags & QNEAR_BACKWARD) && (BP_LT_HD(bp, dp)))
			*bpp = bp;
	}
}
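
/*
 * Insertion example (illustrative): each queue is kept as a circular,
 * doubly linked list sorted by ascending DBLK(), with the list head
 * pointer (*bpp) left on the request the elevator should service next.
 * Starting from an empty sync queue, inserting requests at blocks 50,
 * 10, and 30 yields the ring 10 -> 30 -> 50 (av_forw order); a later
 * arrival at block 20 is spliced between 10 and 30.  If the head
 * position (hd_private) has already swept past block 20 on a forward
 * scan, *bpp is left alone, so the new request waits for the next
 * sweep rather than forcing a backward seek.
 */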

/*
 * dmult_enque() holds dmultp->ds_mutex, so we don't grab the
 * lock here.  If dmult_enque() changes, we will have to revisit
 * this function.
 */
static int
qmerge_add(struct que_data *qfp, struct buf *bp)
{
	que_insert(qfp, bp);
	return (++qfp->q_cnt);
}

static int
qmerge_iodone(struct buf *bp)
{
	struct buf *bp1;
	struct	page *pp, *pp1, *tmp_pp;

	if (bp->b_flags & B_REMAPPED)
		bp_mapout(bp);

	bp1 = bp->b_forw;
	do {
		bp->b_forw = bp1->av_forw;
		bp1->av_forw->av_back = bp1->av_back;
		bp1->av_back->av_forw = bp1->av_forw;
		pp = (page_t *)bp1->b_pages;
		pp1 = bp->b_forw->b_pages;

		tmp_pp = pp->p_prev;
		pp->p_prev = pp1->p_prev;
		pp->p_prev->p_next = pp;

		pp1->p_prev = tmp_pp;
		pp1->p_prev->p_next = pp1;

		if (bp->b_flags & B_ERROR) {
			bp1->b_error = bp->b_error;
			bp1->b_flags |= B_ERROR;
		}

		biodone(bp1);
	} while ((bp1 = bp->b_forw) != bp->b_forw->av_forw);

	biodone(bp1);
	kmem_free(bp, sizeof (*bp));
	return (0);
}

static struct buf *
qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge, int *can_merge)
{
	intptr_t	private, cnt;
	int		flags;
	struct		buf *sync_bp, *async_bp, *bp;
	struct		buf **sync_bpp, **async_bpp, **bpp;
	struct		diskhd *dp = &qfp->q_tab;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}
	flags = qfp->q_tab.hd_flags;
	sync_bpp = &qfp->q_tab.hd_sync_next;
	async_bpp = &qfp->q_tab.hd_async_next;

begin_nextbp:
	if (flags & QNEAR_ASYNCONLY) {
		bp = *async_bpp;
		private = DBLK(bp);
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			bp->av_forw = bp->av_back = NULL;
			flags &= ~(QNEAR_ASYNCONLY | QNEAR_BACKWARD);
			private = 0;
		} else if (flags & QNEAR_BACKWARD) {
			if (DBLK(bp) < DBLK(bp->av_back)) {
				flags &= ~QNEAR_BACKWARD;
				private = 0;
			}
		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
			if (qmerge2wayscan) {
				flags |= QNEAR_BACKWARD;
			} else {
				private = 0;
			}
		} else if (qmerge2wayscan == 0) {
			private = DBLK(bp->av_forw);
		}
		bpp = async_bpp;

	} else if (flags & QNEAR_ASYNCALSO) {
		sync_bp = *sync_bpp;
		async_bp = *async_bpp;
		if (flags & QNEAR_BACKWARD) {
			if (BP_GT_HD(sync_bp, dp) && BP_GT_HD(async_bp, dp)) {
				flags &= ~(QNEAR_BACKWARD|QNEAR_ASYNCALSO);
				*sync_bpp = sync_bp->av_forw;
				*async_bpp = async_bp->av_forw;
				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
				qfp->q_tab.hd_private = 0;
				goto begin_nextbp;
			}
			if (BP_LT_HD(async_bp, dp) && BP_LT_HD(sync_bp, dp)) {
				if (BP_GT_BP(async_bp, sync_bp)) {
					bpp = async_bpp;
					bp = *async_bpp;
				} else {
					bpp = sync_bpp;
					bp = *sync_bpp;
				}
			} else if (BP_LT_HD(async_bp, dp)) {
				bpp = async_bpp;
				bp = *async_bpp;
			} else {
				bpp = sync_bpp;
				bp = *sync_bpp;
			}
		} else {
			if (BP_LT_HD(sync_bp, dp) && BP_LT_HD(async_bp, dp)) {
				if (qmerge2wayscan) {
					flags |= QNEAR_BACKWARD;
					*sync_bpp = sync_bp->av_back;
					*async_bpp = async_bp->av_back;
					goto begin_nextbp;
				} else {
					flags &= ~QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
					qfp->q_tab.hd_private = 0;
					goto begin_nextbp;
				}
			}
			if (BP_GT_HD(async_bp, dp) && BP_GT_HD(sync_bp, dp)) {
				if (BP_LT_BP(async_bp, sync_bp)) {
					bpp = async_bpp;
					bp = *async_bpp;
				} else {
					bpp = sync_bpp;
					bp = *sync_bpp;
				}
			} else if (BP_GT_HD(async_bp, dp)) {
				bpp = async_bpp;
				bp = *async_bpp;
			} else {
				bpp = sync_bpp;
				bp = *sync_bpp;
			}
		}
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			bp->av_forw = bp->av_back = NULL;
			flags &= ~QNEAR_ASYNCALSO;
			if (bpp == async_bpp) {
				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
			} else {
				flags |= QNEAR_ASYNCONLY;
			}
		}
		private = DBLK(bp);
	} else {
		bp = *sync_bpp;
		private = DBLK(bp);
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			private = 0;
			SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
			bp->av_forw = bp->av_back = NULL;
			flags &= ~QNEAR_BACKWARD;
			if (*async_bpp)
				flags |= QNEAR_ASYNCONLY;
		} else if (flags & QNEAR_BACKWARD) {
			if (DBLK(bp) < DBLK(bp->av_back)) {
				flags &= ~QNEAR_BACKWARD;
				cnt = (intptr_t)SYNC2ASYNC(qfp);
				if (cnt > 0) {
					cnt--;
					SYNC2ASYNC(qfp) = (void *)cnt;
				} else {
					if (*async_bpp)
						flags |= QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
				}
				private = 0;
			}
		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
			private = 0;
			if (qmerge2wayscan) {
				flags |= QNEAR_BACKWARD;
				private = DBLK(bp);
			} else {
				cnt = (intptr_t)SYNC2ASYNC(qfp);
				if (cnt > 0) {
					cnt--;
					SYNC2ASYNC(qfp) = (void *)cnt;
				} else {
					if (*async_bpp)
						flags |= QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
				}
			}
		} else if (qmerge2wayscan == 0) {
			private = DBLK(bp->av_forw);
		}
		bpp = sync_bpp;
	}

	if (bp->av_forw) {
		*can_merge = !(bp->b_flags & B_READ);
		if (flags & QNEAR_BACKWARD) {
			*bpp = bp->av_back;
			if ((DBLK(bp->av_back) +
			    bp->av_back->b_bcount / DEV_BSIZE) != DBLK(bp))
				*can_merge = 0;
		} else {
			*bpp = bp->av_forw;
			if ((DBLK(bp) + bp->b_bcount / DEV_BSIZE) !=
			    DBLK(bp->av_forw))
				*can_merge = 0;
		}
		bp->av_forw->av_back = bp->av_back;
		bp->av_back->av_forw = bp->av_forw;
		bp->av_forw = bp->av_back = NULL;
	} else {
		*bpp = NULL;
		*can_merge = 0;
	}
	qfp->q_tab.hd_private = (void *)private;
	qfp->q_cnt--;
	qfp->q_tab.hd_flags = flags;
	if (bp->b_error) {
		bp->av_back = (void *)(intptr_t)bp->b_error;
		bp->b_error = 0;
	}
	return (bp);
}

static struct buf *
qmerge_del(struct que_data *qfp)
{
	struct	buf *bp, *next_bp, *bp_merge;
	int	alloc_mergebp, merge;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}

	bp_merge = bp = qmerge_nextbp(qfp, NULL, &merge);
	alloc_mergebp = 1;
	while (merge && (next_bp = qmerge_nextbp(qfp, bp_merge, &merge))) {
		if (alloc_mergebp) {
			bp_merge = kmem_alloc(sizeof (*bp_merge), KM_NOSLEEP);
			if (bp_merge == NULL) {
				mutex_exit(&qfp->q_mutex);
				return (bp);
			}
			bcopy(bp, bp_merge, sizeof (*bp_merge));
			bp_merge->b_iodone = qmerge_iodone;
			bp_merge->b_forw = bp;
			bp_merge->b_back = (struct buf *)qfp;
			bp->av_forw = bp->av_back = bp;
			alloc_mergebp = 0;
		}
		qmerge_mergesetup(bp_merge, next_bp);
	}
	return (bp_merge);
}
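
/*
 * Merge flow (derived from the code above): the first buf pulled by
 * qmerge_nextbp() is returned as-is unless the following buf is
 * physically adjacent and mergeable.  In that case a pseudo buf is
 * cloned from the first one, its b_iodone is pointed at
 * qmerge_iodone(), and each additional buf is chained onto it by
 * qmerge_mergesetup(), which concatenates the page lists and grows
 * b_bcount.  When the combined I/O completes, qmerge_iodone() splits
 * the page lists back apart, propagates any B_ERROR, calls biodone()
 * on every original buf, and frees the pseudo buf.
 */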

/*
 *	FIFO Queue functions
 */
/*
 *	Local Function Prototypes
 */
static int qfifo_add(struct que_data *qfp, struct buf *bp);

struct que_objops qfifo_ops = {
	que_init,
	que_free,
	qfifo_add,
	que_del,
	0, 0
};

/*
 *	Local static data
 */
struct que_obj *
qfifo_create()
{
	return (que_create((struct que_objops *)&qfifo_ops));
}

static int
qfifo_add(struct que_data *qfp, struct buf *bp)
{
	if (!qfp->q_tab.b_actf)
		qfp->q_tab.b_actf = bp;
	else
		qfp->q_tab.b_actl->av_forw = bp;
	qfp->q_tab.b_actl = bp;
	bp->av_forw = NULL;
	return (0);
}

/*
 *	One-Way-Scan Queue functions
 */
/*
 *	Local Function Prototypes
 */
static int qsort_add(struct que_data *qfp, struct buf *bp);
static struct buf *qsort_del(struct que_data *qfp);
static void oneway_scan_binary(struct diskhd *dp, struct buf *bp);

struct que_objops qsort_ops = {
	que_init,
	que_free,
	qsort_add,
	qsort_del,
	0, 0
};

/*
 *	Local static data
 */
struct que_obj *
qsort_create()
{
	return (que_create((struct que_objops *)&qsort_ops));
}

static int
qsort_add(struct que_data *qfp, struct buf *bp)
{
	qfp->q_cnt++;
	oneway_scan_binary(&qfp->q_tab, bp);
	return (0);
}

#define	b_pasf	b_forw
#define	b_pasl	b_back
static void
oneway_scan_binary(struct diskhd *dp, struct buf *bp)
{
	struct buf *ap;

	ap = dp->b_actf;
	if (ap == NULL) {
		dp->b_actf = bp;
		bp->av_forw = NULL;
		return;
	}
	if (DBLK(bp) < DBLK(ap)) {
		ap = dp->b_pasf;
		if ((ap == NULL) || (DBLK(bp) < DBLK(ap))) {
			dp->b_pasf = bp;
			bp->av_forw = ap;
			return;
		}
	}
	while (ap->av_forw) {
		if (DBLK(bp) < DBLK(ap->av_forw))
			break;
		ap = ap->av_forw;
	}
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;
}

static struct buf *
qsort_del(struct que_data *qfp)
{
	struct buf *bp;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}
	qfp->q_cnt--;
	bp = qfp->q_tab.b_actf;
	qfp->q_tab.b_actf = bp->av_forw;
	bp->av_forw = 0;
	if (!qfp->q_tab.b_actf && qfp->q_tab.b_pasf) {
		qfp->q_tab.b_actf = qfp->q_tab.b_pasf;
		qfp->q_tab.b_pasf = NULL;
	}
	return (bp);
}
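
/*
 * One-way-scan example (illustrative): suppose the active list
 * (b_actf) holds requests at blocks 40 and 60 and the head is sweeping
 * upward.  New arrivals at blocks 50 and 70 are sorted into the active
 * list, but an arrival at block 10, which lies behind every active
 * request, goes onto the passive list (b_pasf) instead.  qsort_del()
 * services the active list in ascending block order; when it empties,
 * the passive list is promoted to active and the sweep starts over
 * from the lowest block.
 */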

/*
 *	Tagged queueing
 */
/*
 *	Local Function Prototypes
 */

struct que_objops qtag_ops = {
	que_init,
	que_free,
	qsort_add,
	qsort_del,
	0, 0
};

/*
 *	Local static data
 */
struct que_obj *
qtag_create()
{
	return (que_create((struct que_objops *)&qtag_ops));
}