1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *	Device Strategy
29  */
30 #include <sys/dktp/cm.h>
31 #include <sys/kstat.h>
32 
33 #include <sys/dktp/quetypes.h>
34 #include <sys/dktp/queue.h>
35 #include <sys/dktp/tgcom.h>
36 #include <sys/dktp/fctypes.h>
37 #include <sys/dktp/flowctrl.h>
38 #include <sys/param.h>
39 #include <vm/page.h>
40 #include <sys/modctl.h>
41 
42 /*
43  *	Object Management
44  */
45 
46 static struct buf *qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge,
47     int *can_merge);
48 
49 static struct modlmisc modlmisc = {
50 	&mod_miscops,	/* Type of module */
51 	"Device Strategy Objects"
52 };
53 
54 static struct modlinkage modlinkage = {
55 	MODREV_1,
56 	&modlmisc,
57 	NULL
58 };
59 
60 int
61 _init(void)
62 {
63 	return (mod_install(&modlinkage));
64 }
65 
66 int
67 _fini(void)
68 {
69 	return (mod_remove(&modlinkage));
70 }
71 
72 int
73 _info(struct modinfo *modinfop)
74 {
75 	return (mod_info(&modlinkage, modinfop));
76 }
77 
78 
79 /*
80  *	Common Flow Control functions
81  */
82 
83 /*
84  * Local static data
85  */
86 #ifdef	FLC_DEBUG
87 #define	DENT	0x0001
88 #define	DERR	0x0002
89 #define	DIO	0x0004
90 static	int	flc_debug = DENT|DERR|DIO;
91 
92 #include <sys/thread.h>
93 static 	int	flc_malloc_intr = 0;
94 #endif	/* FLC_DEBUG */
95 
96 static	int	flc_kstat = 1;
97 
98 static struct flc_obj *fc_create(struct flc_objops *fcopsp);
99 static int fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
100     void *lkarg);
101 static int fc_free(struct flc_obj *flcobjp);
102 static int fc_start_kstat(opaque_t queuep, char *devtype, int instance);
103 static int fc_stop_kstat(opaque_t queuep);
104 
105 static struct flc_obj *
106 fc_create(struct flc_objops *fcopsp)
107 {
108 	struct	flc_obj *flcobjp;
109 	struct	fc_data *fcdp;
110 
111 	flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
112 	if (!flcobjp)
113 		return (NULL);
114 
115 	fcdp = (struct fc_data *)(flcobjp+1);
116 	flcobjp->flc_data = (opaque_t)fcdp;
117 	flcobjp->flc_ops  = fcopsp;
118 
	return (flcobjp);
120 }
121 
122 static int dmult_maxcnt = DMULT_MAXCNT;
123 
124 static int
125 fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
126 {
127 	struct fc_data *fcdp = (struct fc_data *)queuep;
128 
129 	mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);
130 
131 	fcdp->ds_queobjp   = que_objp;
132 	fcdp->ds_tgcomobjp = tgcom_objp;
133 	fcdp->ds_waitcnt   = dmult_maxcnt;
134 
135 	QUE_INIT(que_objp, lkarg);
136 	TGCOM_INIT(tgcom_objp);
137 	return (DDI_SUCCESS);
138 }
139 
140 static int
141 fc_free(struct flc_obj *flcobjp)
142 {
143 	struct fc_data *fcdp;
144 
145 	fcdp = (struct fc_data *)flcobjp->flc_data;
146 	if (fcdp->ds_queobjp)
147 		QUE_FREE(fcdp->ds_queobjp);
148 	if (fcdp->ds_tgcomobjp) {
149 		TGCOM_FREE(fcdp->ds_tgcomobjp);
150 		mutex_destroy(&fcdp->ds_mutex);
151 	}
152 	kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
153 	return (0);
154 }
155 
156 /*ARGSUSED*/
157 static int
158 fc_start_kstat(opaque_t queuep, char *devtype, int instance)
159 {
160 	struct fc_data *fcdp = (struct fc_data *)queuep;
161 	if (!flc_kstat)
162 		return (0);
163 
164 	if (!fcdp->ds_kstat) {
		if ((fcdp->ds_kstat = kstat_create("cmdk", instance, NULL,
		    "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT)) != NULL) {
167 			kstat_install(fcdp->ds_kstat);
168 		}
169 	}
170 	return (0);
171 }
172 
173 static int
174 fc_stop_kstat(opaque_t queuep)
175 {
176 	struct fc_data *fcdp = (struct fc_data *)queuep;
177 
178 	if (fcdp->ds_kstat) {
179 		kstat_delete(fcdp->ds_kstat);
180 		fcdp->ds_kstat = NULL;
181 	}
182 	return (0);
183 }
184 
185 
186 /*
187  *	Single Command per Device
188  */
189 /*
190  * Local Function Prototypes
191  */
192 static int dsngl_restart();
193 
194 static int dsngl_enque(opaque_t, struct buf *);
195 static int dsngl_deque(opaque_t, struct buf *);
196 
197 struct 	flc_objops dsngl_ops = {
198 	fc_init,
199 	fc_free,
200 	dsngl_enque,
201 	dsngl_deque,
202 	fc_start_kstat,
203 	fc_stop_kstat,
204 	0, 0
205 };
206 
207 struct flc_obj *
208 dsngl_create()
209 {
210 	return (fc_create((struct flc_objops *)&dsngl_ops));
211 }
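
/*
 * Usage sketch (hypothetical, not part of this module): a target driver
 * pairs one of these flow-control objects with a queue object and a tgcom
 * object, then drives it through FLC_* wrapper macros, which
 * <sys/dktp/flowctrl.h> is assumed to supply around the flc_objops
 * entries above (names and exact signatures here are illustrative):
 *
 *	struct flc_obj *fobjp = dsngl_create();
 *
 *	if (fobjp != NULL) {
 *		(void) FLC_INIT(fobjp, tgcom_objp, qfifo_create(), lkarg);
 *		(void) FLC_START_KSTAT(fobjp, "disk", instance);
 *		(void) FLC_ENQUE(fobjp, bp);	(queue or transport one buf)
 *		(void) FLC_STOP_KSTAT(fobjp);
 *		(void) FLC_FREE(fobjp);
 *	}
 */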
212 
213 static int
214 dsngl_enque(opaque_t queuep, struct buf *in_bp)
215 {
216 	struct fc_data *dsnglp = (struct fc_data *)queuep;
217 	opaque_t tgcom_objp;
218 	opaque_t que_objp;
219 
220 	que_objp   = dsnglp->ds_queobjp;
221 	tgcom_objp = dsnglp->ds_tgcomobjp;
222 
223 	if (!in_bp)
224 		return (0);
225 	mutex_enter(&dsnglp->ds_mutex);
226 	if (dsnglp->ds_bp || dsnglp->ds_outcnt) {
227 		QUE_ADD(que_objp, in_bp);
228 		if (dsnglp->ds_kstat) {
229 			kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
230 		}
231 		mutex_exit(&dsnglp->ds_mutex);
232 		return (0);
233 	}
234 	if (dsnglp->ds_kstat) {
235 		kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
236 	}
	if (TGCOM_PKT(tgcom_objp, in_bp, dsngl_restart,
	    (caddr_t)dsnglp) != DDI_SUCCESS) {
240 		dsnglp->ds_bp = in_bp;
241 		mutex_exit(&dsnglp->ds_mutex);
242 		return (0);
243 	}
244 	dsnglp->ds_outcnt++;
245 	if (dsnglp->ds_kstat)
246 		kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
247 	mutex_exit(&dsnglp->ds_mutex);
248 	TGCOM_TRANSPORT(tgcom_objp, in_bp);
249 	return (0);
250 }
251 
252 static int
253 dsngl_deque(opaque_t queuep, struct buf *in_bp)
254 {
255 	struct fc_data *dsnglp = (struct fc_data *)queuep;
256 	opaque_t tgcom_objp;
257 	opaque_t que_objp;
258 	struct	 buf *bp;
259 
260 	que_objp   = dsnglp->ds_queobjp;
261 	tgcom_objp = dsnglp->ds_tgcomobjp;
262 
263 	mutex_enter(&dsnglp->ds_mutex);
264 	if (in_bp) {
265 		dsnglp->ds_outcnt--;
266 		if (dsnglp->ds_kstat) {
267 			if (in_bp->b_flags & B_READ) {
268 				KSTAT_IO_PTR(dsnglp->ds_kstat)->reads++;
269 				KSTAT_IO_PTR(dsnglp->ds_kstat)->nread +=
270 				    (in_bp->b_bcount - in_bp->b_resid);
271 			} else {
272 				KSTAT_IO_PTR(dsnglp->ds_kstat)->writes++;
273 				KSTAT_IO_PTR(dsnglp->ds_kstat)->nwritten +=
274 				    (in_bp->b_bcount - in_bp->b_resid);
275 			}
276 			kstat_runq_exit(KSTAT_IO_PTR(dsnglp->ds_kstat));
277 		}
278 	}
279 	for (;;) {
280 		if (!dsnglp->ds_bp)
281 			dsnglp->ds_bp = QUE_DEL(que_objp);
282 		if (!dsnglp->ds_bp ||
283 		    (TGCOM_PKT(tgcom_objp, dsnglp->ds_bp, dsngl_restart,
284 		    (caddr_t)dsnglp) != DDI_SUCCESS) ||
285 		    dsnglp->ds_outcnt) {
286 			mutex_exit(&dsnglp->ds_mutex);
287 			return (0);
288 		}
289 		dsnglp->ds_outcnt++;
290 		bp = dsnglp->ds_bp;
291 		dsnglp->ds_bp = QUE_DEL(que_objp);
292 		if (dsnglp->ds_kstat)
293 			kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
294 		mutex_exit(&dsnglp->ds_mutex);
295 
296 		TGCOM_TRANSPORT(tgcom_objp, bp);
297 
298 		if (!mutex_tryenter(&dsnglp->ds_mutex))
299 			return (0);
300 	}
301 }
302 
303 static int
304 dsngl_restart(struct fc_data *dsnglp)
305 {
306 	(void) dsngl_deque(dsnglp, NULL);
307 	return (-1);
308 }
309 
310 
311 /*
312  *	Multiple Commands per Device
313  */
314 /*
315  * Local Function Prototypes
316  */
317 static int dmult_restart();
318 
319 static int dmult_enque(opaque_t, struct buf *);
320 static int dmult_deque(opaque_t, struct buf *);
321 
322 struct 	flc_objops dmult_ops = {
323 	fc_init,
324 	fc_free,
325 	dmult_enque,
326 	dmult_deque,
327 	fc_start_kstat,
328 	fc_stop_kstat,
329 	0, 0
330 };
331 
332 struct flc_obj *
333 dmult_create()
334 {
	return (fc_create((struct flc_objops *)&dmult_ops));
}
338 
339 
/*
 * Some of the queue management functions, such as QUE_ADD() and QUE_DEL(),
 * do not acquire their own lock; they rely on dmult_enque() and
 * dmult_deque() to do all locking.  If this changes, we will have to grab
 * locks in qmerge_add() and qmerge_del().
 */
346 static int
347 dmult_enque(opaque_t queuep, struct buf *in_bp)
348 {
349 	struct fc_data *dmultp = (struct fc_data *)queuep;
350 	opaque_t tgcom_objp;
351 	opaque_t que_objp;
352 
353 	que_objp   = dmultp->ds_queobjp;
354 	tgcom_objp = dmultp->ds_tgcomobjp;
355 
356 	if (!in_bp)
357 		return (0);
358 	mutex_enter(&dmultp->ds_mutex);
359 	if ((dmultp->ds_outcnt >= dmultp->ds_waitcnt) || dmultp->ds_bp) {
360 		QUE_ADD(que_objp, in_bp);
361 		if (dmultp->ds_kstat) {
362 			kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
363 		}
364 		mutex_exit(&dmultp->ds_mutex);
365 		return (0);
366 	}
367 	if (dmultp->ds_kstat) {
368 		kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
369 	}
370 
	if (TGCOM_PKT(tgcom_objp, in_bp, dmult_restart,
	    (caddr_t)dmultp) != DDI_SUCCESS) {
374 		dmultp->ds_bp = in_bp;
375 		mutex_exit(&dmultp->ds_mutex);
376 		return (0);
377 	}
378 	dmultp->ds_outcnt++;
379 	if (dmultp->ds_kstat)
380 		kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));
381 	mutex_exit(&dmultp->ds_mutex);
382 
383 	TGCOM_TRANSPORT(tgcom_objp, in_bp);
384 	return (0);
385 }
386 
387 static int
388 dmult_deque(opaque_t queuep, struct buf *in_bp)
389 {
390 	struct fc_data *dmultp = (struct fc_data *)queuep;
391 	opaque_t tgcom_objp;
392 	opaque_t que_objp;
393 	struct	 buf *bp;
394 
395 	que_objp = dmultp->ds_queobjp;
396 	tgcom_objp = dmultp->ds_tgcomobjp;
397 
398 	mutex_enter(&dmultp->ds_mutex);
399 	if (in_bp) {
400 		dmultp->ds_outcnt--;
401 		if (dmultp->ds_kstat) {
402 			if (in_bp->b_flags & B_READ) {
403 				KSTAT_IO_PTR(dmultp->ds_kstat)->reads++;
404 				KSTAT_IO_PTR(dmultp->ds_kstat)->nread +=
405 				    (in_bp->b_bcount - in_bp->b_resid);
406 			} else {
407 				KSTAT_IO_PTR(dmultp->ds_kstat)->writes++;
408 				KSTAT_IO_PTR(dmultp->ds_kstat)->nwritten +=
409 				    (in_bp->b_bcount - in_bp->b_resid);
410 			}
411 			kstat_runq_exit(KSTAT_IO_PTR(dmultp->ds_kstat));
412 		}
413 	}
414 
415 	for (;;) {
416 
417 #ifdef	FLC_DEBUG
418 		if ((curthread->t_intr) && (!dmultp->ds_bp) &&
419 		    (!dmultp->ds_outcnt))
420 			flc_malloc_intr++;
421 #endif
422 
423 		if (!dmultp->ds_bp)
424 			dmultp->ds_bp = QUE_DEL(que_objp);
425 		if (!dmultp->ds_bp ||
426 		    (TGCOM_PKT(tgcom_objp, dmultp->ds_bp, dmult_restart,
427 		    (caddr_t)dmultp) != DDI_SUCCESS) ||
428 		    (dmultp->ds_outcnt >= dmultp->ds_waitcnt)) {
429 			mutex_exit(&dmultp->ds_mutex);
430 			return (0);
431 		}
432 		dmultp->ds_outcnt++;
433 		bp = dmultp->ds_bp;
434 		dmultp->ds_bp = QUE_DEL(que_objp);
435 
436 		if (dmultp->ds_kstat)
437 			kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));
438 
439 		mutex_exit(&dmultp->ds_mutex);
440 
441 		TGCOM_TRANSPORT(tgcom_objp, bp);
442 
443 		if (!mutex_tryenter(&dmultp->ds_mutex))
444 			return (0);
445 	}
446 }
447 
448 static int
449 dmult_restart(struct fc_data *dmultp)
450 {
451 	(void) dmult_deque(dmultp, NULL);
452 	return (-1);
453 }
454 
455 /*
456  *	Duplexed Commands per Device: Read Queue and Write Queue
457  */
458 /*
459  * Local Function Prototypes
460  */
461 static int duplx_restart();
462 
463 static int duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
464     void *lkarg);
465 static int duplx_free(struct flc_obj *flcobjp);
466 static int duplx_enque(opaque_t queuep, struct buf *bp);
467 static int duplx_deque(opaque_t queuep, struct buf *bp);
468 
469 struct 	flc_objops duplx_ops = {
470 	duplx_init,
471 	duplx_free,
472 	duplx_enque,
473 	duplx_deque,
474 	fc_start_kstat,
475 	fc_stop_kstat,
476 	0, 0
477 };
478 
479 struct flc_obj *
480 duplx_create()
481 {
482 	struct	flc_obj *flcobjp;
483 	struct	duplx_data *fcdp;
484 
485 	flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
486 	if (!flcobjp)
487 		return (NULL);
488 
489 	fcdp = (struct duplx_data *)(flcobjp+1);
490 	flcobjp->flc_data = (opaque_t)fcdp;
491 	flcobjp->flc_ops  = &duplx_ops;
492 
	fcdp->ds_writeq.fc_qobjp = qfifo_create();
	if (!fcdp->ds_writeq.fc_qobjp) {
495 		kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
496 		return (NULL);
497 	}
498 	return (flcobjp);
499 }
500 
501 static int
502 duplx_free(struct flc_obj *flcobjp)
503 {
504 	struct duplx_data *fcdp;
505 
506 	fcdp = (struct duplx_data *)flcobjp->flc_data;
507 	if (fcdp->ds_writeq.fc_qobjp) {
508 		QUE_FREE(fcdp->ds_writeq.fc_qobjp);
509 	}
510 	if (fcdp->ds_readq.fc_qobjp)
511 		QUE_FREE(fcdp->ds_readq.fc_qobjp);
512 	if (fcdp->ds_tgcomobjp) {
513 		TGCOM_FREE(fcdp->ds_tgcomobjp);
514 		mutex_destroy(&fcdp->ds_mutex);
515 	}
516 	kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
517 	return (0);
518 }
519 
520 static int
521 duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
522 {
523 	struct duplx_data *fcdp = (struct duplx_data *)queuep;
524 	fcdp->ds_tgcomobjp = tgcom_objp;
525 	fcdp->ds_readq.fc_qobjp = que_objp;
526 
527 	QUE_INIT(que_objp, lkarg);
528 	QUE_INIT(fcdp->ds_writeq.fc_qobjp, lkarg);
529 	TGCOM_INIT(tgcom_objp);
530 
531 	mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);
532 
533 	fcdp->ds_writeq.fc_maxcnt = DUPLX_MAXCNT;
534 	fcdp->ds_readq.fc_maxcnt  = DUPLX_MAXCNT;
535 
536 	/* queues point to each other for round robin */
537 	fcdp->ds_readq.next = &fcdp->ds_writeq;
538 	fcdp->ds_writeq.next = &fcdp->ds_readq;
539 
540 	return (DDI_SUCCESS);
541 }
542 
543 static int
544 duplx_enque(opaque_t queuep, struct buf *in_bp)
545 {
546 	struct duplx_data *duplxp = (struct duplx_data *)queuep;
547 	opaque_t tgcom_objp;
548 	struct fc_que *activeq;
549 	struct buf *bp;
550 
551 	mutex_enter(&duplxp->ds_mutex);
552 	if (in_bp) {
553 		if (duplxp->ds_kstat) {
554 			kstat_waitq_enter(KSTAT_IO_PTR(duplxp->ds_kstat));
555 		}
556 		if (in_bp->b_flags & B_READ)
557 			activeq = &duplxp->ds_readq;
558 		else
559 			activeq = &duplxp->ds_writeq;
560 
561 		QUE_ADD(activeq->fc_qobjp, in_bp);
562 	} else {
563 		activeq = &duplxp->ds_readq;
564 	}
565 
566 	tgcom_objp = duplxp->ds_tgcomobjp;
567 
568 	for (;;) {
569 		if (!activeq->fc_bp)
570 			activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
571 		if (!activeq->fc_bp ||
572 		    (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
573 		    (caddr_t)duplxp) != DDI_SUCCESS) ||
574 		    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
575 
576 			/* switch read/write queues */
577 			activeq = activeq->next;
578 			if (!activeq->fc_bp)
579 				activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
580 			if (!activeq->fc_bp ||
581 			    (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
582 			    duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
583 			    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
584 				mutex_exit(&duplxp->ds_mutex);
585 				return (0);
586 			}
587 		}
588 
589 		activeq->fc_outcnt++;
590 		bp = activeq->fc_bp;
591 		activeq->fc_bp = NULL;
592 
593 		if (duplxp->ds_kstat)
594 			kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));
595 		mutex_exit(&duplxp->ds_mutex);
596 
597 		TGCOM_TRANSPORT(tgcom_objp, bp);
598 
599 		if (!mutex_tryenter(&duplxp->ds_mutex))
600 			return (0);
601 
602 		activeq = activeq->next;
603 	}
604 }
605 
606 static int
607 duplx_deque(opaque_t queuep, struct buf *in_bp)
608 {
609 	struct duplx_data *duplxp = (struct duplx_data *)queuep;
610 	opaque_t tgcom_objp;
611 	struct fc_que *activeq;
612 	struct buf *bp;
613 
614 	mutex_enter(&duplxp->ds_mutex);
615 
616 	tgcom_objp = duplxp->ds_tgcomobjp;
617 
618 	if (in_bp->b_flags & B_READ)
619 		activeq = &duplxp->ds_readq;
620 	else
621 		activeq = &duplxp->ds_writeq;
622 	activeq->fc_outcnt--;
623 
624 	if (duplxp->ds_kstat) {
625 		if (in_bp->b_flags & B_READ) {
626 			KSTAT_IO_PTR(duplxp->ds_kstat)->reads++;
627 			KSTAT_IO_PTR(duplxp->ds_kstat)->nread +=
628 			    (in_bp->b_bcount - in_bp->b_resid);
629 		} else {
630 			KSTAT_IO_PTR(duplxp->ds_kstat)->writes++;
631 			KSTAT_IO_PTR(duplxp->ds_kstat)->nwritten +=
632 			    (in_bp->b_bcount - in_bp->b_resid);
633 		}
634 		kstat_runq_exit(KSTAT_IO_PTR(duplxp->ds_kstat));
635 	}
636 
637 	for (;;) {
638 
639 		/* if needed, try to pull request off a queue */
640 		if (!activeq->fc_bp)
641 			activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
642 
643 		if (!activeq->fc_bp ||
644 		    (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
645 		    (caddr_t)duplxp) != DDI_SUCCESS) ||
646 		    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
647 
648 			activeq = activeq->next;
649 			if (!activeq->fc_bp)
650 				activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
651 
652 			if (!activeq->fc_bp ||
653 			    (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
654 			    duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
655 			    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
656 				mutex_exit(&duplxp->ds_mutex);
657 				return (0);
658 			}
659 		}
660 
661 		activeq->fc_outcnt++;
662 		bp = activeq->fc_bp;
663 		activeq->fc_bp = NULL;
664 
665 		if (duplxp->ds_kstat)
666 			kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));
667 
668 		mutex_exit(&duplxp->ds_mutex);
669 
670 		TGCOM_TRANSPORT(tgcom_objp, bp);
671 
672 		if (!mutex_tryenter(&duplxp->ds_mutex))
673 			return (0);
674 
675 		activeq = activeq->next;
676 	}
677 }
678 
679 static int
680 duplx_restart(struct duplx_data *duplxp)
681 {
682 	(void) duplx_enque(duplxp, NULL);
683 	return (-1);
684 }
685 
686 /*
687  *	Tagged queueing flow control
688  */
689 /*
690  * Local Function Prototypes
691  */
692 
693 struct 	flc_objops adapt_ops = {
694 	fc_init,
695 	fc_free,
696 	dmult_enque,
697 	dmult_deque,
698 	fc_start_kstat,
699 	fc_stop_kstat,
700 	0, 0
701 };
702 
703 struct flc_obj *
704 adapt_create()
705 {
	return (fc_create((struct flc_objops *)&adapt_ops));
}
709 
710 /*
711  *	Common Queue functions
712  */
713 
714 /*
715  * 	Local static data
716  */
717 #ifdef	Q_DEBUG
718 #define	DENT	0x0001
719 #define	DERR	0x0002
720 #define	DIO	0x0004
721 static	int	que_debug = DENT|DERR|DIO;
722 
723 #endif	/* Q_DEBUG */
724 /*
725  * 	Local Function Prototypes
726  */
727 static struct que_obj *que_create(struct que_objops *qopsp);
728 static int que_init(struct que_data *qfp, void *lkarg);
729 static int que_free(struct que_obj *queobjp);
730 static struct buf *que_del(struct que_data *qfp);
731 
732 static struct que_obj *
733 que_create(struct que_objops *qopsp)
734 {
735 	struct	que_data *qfp;
736 	struct	que_obj *queobjp;
737 
738 	queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
739 	if (!queobjp)
740 		return (NULL);
741 
742 	queobjp->que_ops = qopsp;
743 	qfp = (struct que_data *)(queobjp+1);
744 	queobjp->que_data = (opaque_t)qfp;
745 
746 	return ((opaque_t)queobjp);
	return (queobjp);
748 
749 static int
750 que_init(struct que_data *qfp, void *lkarg)
751 {
752 	mutex_init(&qfp->q_mutex, NULL, MUTEX_DRIVER, lkarg);
753 	return (DDI_SUCCESS);
754 }
755 
756 static int
757 que_free(struct que_obj *queobjp)
758 {
759 	struct	que_data *qfp;
760 
761 	qfp = (struct que_data *)queobjp->que_data;
762 	mutex_destroy(&qfp->q_mutex);
763 	kmem_free(queobjp, (sizeof (*queobjp) + sizeof (struct que_data)));
764 	return (0);
765 }
766 
767 static struct buf *
768 que_del(struct que_data *qfp)
769 {
770 	struct buf *bp;
771 
772 	bp = qfp->q_tab.b_actf;
773 	if (bp) {
774 		qfp->q_tab.b_actf = bp->av_forw;
775 		if (!qfp->q_tab.b_actf)
776 			qfp->q_tab.b_actl = NULL;
777 		bp->av_forw = 0;
778 	}
779 	return (bp);
780 }
781 
782 
783 
784 /*
785  *	Qmerge
786  * 	Local Function Prototypes
787  */
788 static int qmerge_add(), qmerge_free();
789 static struct buf *qmerge_del(struct que_data *qfp);
790 
791 struct 	que_objops qmerge_ops = {
792 	que_init,
793 	qmerge_free,
794 	qmerge_add,
795 	qmerge_del,
796 	0, 0
797 };
798 
799 /* fields in diskhd */
800 #define	hd_cnt			b_back
801 #define	hd_private		b_forw
802 #define	hd_flags		b_flags
803 #define	hd_sync_next		av_forw
804 #define	hd_async_next		av_back
805 
806 #define	hd_sync2async		sync_async_ratio
807 
808 #define	QNEAR_FORWARD		0x01
809 #define	QNEAR_BACKWARD		0x02
810 #define	QNEAR_ASYNCONLY		0x04
811 #define	QNEAR_ASYNCALSO		0x08
812 
813 #define	DBLK(bp) ((unsigned long)(bp)->b_private)
814 
815 #define	BP_LT_BP(a, b) (DBLK(a) < DBLK(b))
816 #define	BP_GT_BP(a, b) (DBLK(a) > DBLK(b))
817 #define	BP_LT_HD(a, b) (DBLK(a) < (unsigned long)((b)->hd_private))
818 #define	BP_GT_HD(a, b) (DBLK(a) > (unsigned long)((b)->hd_private))
819 #define	QNEAR_ASYNC	(QNEAR_ASYNCONLY|QNEAR_ASYNCALSO)
820 
821 #define	SYNC2ASYNC(a) ((a)->q_tab.hd_cnt)
822 
823 
/*
 * qmerge implements a two-priority queue: the low priority queue holds
 * ASYNC write requests, while all other requests are queued on the high
 * priority sync queue.  Requests on the async queue are merged when
 * possible.  By default qmerge2wayscan is 1, selecting an elevator
 * algorithm.  Setting it to zero has the following side effects:
 * 1. Fairness is assumed to be the number one issue.
 * 2. The next request to be picked indicates the current head position.
 *
 * qmerge_sync2async is the ratio of scans of the high priority sync
 * queue to scans of the low priority async queue.
 *
 * With the following settings, qmerge degenerates to plain qsort behavior:
 *
 * qmerge1pri = 1, qmerge2wayscan = 0, qmerge_max_merge = 0
 */
841 static int	qmerge_max_merge = 128 * 1024;
842 static intptr_t	qmerge_sync2async = 4;
843 static int	qmerge2wayscan = 1;
844 static int	qmerge1pri = 0;
845 static int	qmerge_merge = 0;
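
/*
 * Tuning sketch: the variables above are patchable from /etc/system with
 * the usual module:variable syntax (assuming this misc module loads under
 * the name "strategy"; the module name here is an assumption):
 *
 *	set strategy:qmerge2wayscan = 0		(one-way scan, favor fairness)
 *	set strategy:qmerge_max_merge = 0x40000	(allow up to 256KB merges)
 */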
846 
847 /*
848  * 	Local static data
849  */
850 struct que_obj *
851 qmerge_create()
852 {
853 	struct que_data *qfp;
854 	struct que_obj *queobjp;
855 
856 	queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
857 	if (!queobjp)
858 		return (NULL);
859 
860 	queobjp->que_ops = &qmerge_ops;
861 	qfp = (struct que_data *)(queobjp+1);
	qfp->q_tab.hd_private = 0;
863 	qfp->q_tab.hd_sync_next = qfp->q_tab.hd_async_next = NULL;
864 	qfp->q_tab.hd_cnt = (void *)qmerge_sync2async;
865 	queobjp->que_data = (opaque_t)qfp;
866 
	return (queobjp);
868 }
869 
870 static int
871 qmerge_free(struct que_obj *queobjp)
872 {
873 	struct	que_data *qfp;
874 
875 	qfp = (struct que_data *)queobjp->que_data;
876 	mutex_destroy(&qfp->q_mutex);
877 	kmem_free(queobjp, (sizeof (*queobjp) + sizeof (*qfp)));
878 	return (0);
879 }
880 
881 static int
qmerge_can_merge(struct buf *bp1, struct buf *bp2)
884 {
885 	const int paw_flags = B_PAGEIO | B_ASYNC | B_WRITE;
886 
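	/*
	 * Only unmapped, page-aligned, asynchronous page-I/O writes are
	 * merge candidates, and the combined transfer must not exceed
	 * qmerge_max_merge bytes.
	 */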
887 	if ((bp1->b_un.b_addr != 0) || (bp2->b_un.b_addr != 0) ||
888 	    ((bp1->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
889 	    ((bp2->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
890 	    (bp1->b_bcount & PAGEOFFSET) || (bp2->b_bcount & PAGEOFFSET) ||
891 	    (bp1->b_bcount + bp2->b_bcount > qmerge_max_merge))
892 		return (0);
893 
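	/*
	 * The two requests must also be adjacent on disk, in either order.
	 */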
894 	if ((DBLK(bp2) + bp2->b_bcount / DEV_BSIZE == DBLK(bp1)) ||
895 	    (DBLK(bp1) + bp1->b_bcount / DEV_BSIZE == DBLK(bp2)))
896 		return (1);
897 	else
898 		return (0);
899 }
900 
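/*
 * Splice bp into the merged request bp_merge: extend the byte count, join
 * the two circular page lists, and link bp onto the ring of constituent
 * bufs (headed by bp_merge->b_forw) that qmerge_iodone() later takes
 * apart.  When bp precedes bp_merge on disk, bp also becomes the new
 * front of the merged request.
 */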
901 static void
qmerge_mergesetup(struct buf *bp_merge, struct buf *bp)
904 {
905 	struct	buf *bp1;
906 	struct	page *pp, *pp_merge, *pp_merge_prev;
907 	int	forward;
908 
909 	qmerge_merge++;
910 	forward = DBLK(bp_merge) < DBLK(bp);
911 
912 	bp_merge->b_bcount += bp->b_bcount;
913 
914 	pp = bp->b_pages;
915 	pp_merge = bp_merge->b_pages;
916 
917 	pp_merge_prev = pp_merge->p_prev;
918 
919 	pp_merge->p_prev->p_next = pp;
920 	pp_merge->p_prev = pp->p_prev;
921 	pp->p_prev->p_next = pp_merge;
922 	pp->p_prev = pp_merge_prev;
923 
924 	bp1 = bp_merge->b_forw;
925 
926 	bp1->av_back->av_forw = bp;
927 	bp->av_back = bp1->av_back;
928 	bp1->av_back = bp;
929 	bp->av_forw = bp1;
930 
931 	if (!forward) {
932 		bp_merge->b_forw = bp;
933 		bp_merge->b_pages = pp;
934 		bp_merge->b_private = bp->b_private;
935 	}
936 }
937 
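/*
 * Insert bp into the appropriate circular queue (the async queue for
 * ASYNC writes unless qmerge1pri is set, the sync queue otherwise),
 * keeping the queue sorted by starting block number.  The queue head
 * pointer (*bpp) may be repointed at bp when the request lands ahead of
 * the simulated head position, so the current sweep picks it up.
 */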
938 static void
939 que_insert(struct que_data *qfp, struct buf *bp)
940 {
941 	struct buf	*bp1, *bp_start, *lowest_bp, *highest_bp;
942 	uintptr_t	highest_blk, lowest_blk;
943 	struct buf	**async_bpp, **sync_bpp, **bpp;
944 	struct diskhd	*dp = &qfp->q_tab;
945 
946 	sync_bpp = &dp->hd_sync_next;
947 	async_bpp = &dp->hd_async_next;
948 	/*
949 	 * The ioctl used by the format utility requires that bp->av_back be
950 	 * preserved.
951 	 */
952 	if (bp->av_back)
953 		bp->b_error = (intptr_t)bp->av_back;
954 	if (!qmerge1pri &&
955 	    ((bp->b_flags & (B_ASYNC|B_READ|B_FREE)) == B_ASYNC)) {
956 		bpp = &dp->hd_async_next;
957 	} else {
958 		bpp = &dp->hd_sync_next;
959 	}
960 
961 
962 	if ((bp1 = *bpp) == NULL) {
963 		*bpp = bp;
964 		bp->av_forw = bp->av_back = bp;
965 		if ((bpp == async_bpp) && (*sync_bpp == NULL)) {
966 			dp->hd_flags |= QNEAR_ASYNCONLY;
967 		} else if (bpp == sync_bpp) {
968 			dp->hd_flags &= ~QNEAR_ASYNCONLY;
969 			if (*async_bpp) {
970 				dp->hd_flags |= QNEAR_ASYNCALSO;
971 			}
972 		}
973 		return;
974 	}
975 	bp_start = bp1;
976 	if (DBLK(bp) < DBLK(bp1)) {
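		/*
		 * bp sorts below the current head: walk backward for the
		 * pair of neighbors to insert between, remembering the
		 * lowest block seen in case bp becomes the new low end.
		 */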
977 		lowest_blk = DBLK(bp1);
978 		lowest_bp = bp1;
979 		do {
980 			if (DBLK(bp) > DBLK(bp1)) {
981 				bp->av_forw = bp1->av_forw;
982 				bp1->av_forw->av_back = bp;
983 				bp1->av_forw = bp;
984 				bp->av_back = bp1;
985 
986 				if (((bpp == async_bpp) &&
987 				    (dp->hd_flags & QNEAR_ASYNC)) ||
988 				    (bpp == sync_bpp)) {
989 					if (!(dp->hd_flags & QNEAR_BACKWARD) &&
990 					    BP_GT_HD(bp, dp)) {
991 						*bpp = bp;
992 					}
993 				}
994 				return;
995 			} else if (DBLK(bp1) < lowest_blk) {
996 				lowest_bp = bp1;
997 				lowest_blk = DBLK(bp1);
998 			}
999 		} while ((DBLK(bp1->av_back) < DBLK(bp1)) &&
1000 		    ((bp1 = bp1->av_back) != bp_start));
1001 		bp->av_forw = lowest_bp;
1002 		lowest_bp->av_back->av_forw = bp;
1003 		bp->av_back = lowest_bp->av_back;
1004 		lowest_bp->av_back = bp;
1005 		if ((bpp == async_bpp) && !(dp->hd_flags & QNEAR_ASYNC)) {
1006 			*bpp = bp;
1007 		} else if (!(dp->hd_flags & QNEAR_BACKWARD) &&
1008 		    BP_GT_HD(bp, dp)) {
1009 			*bpp = bp;
1010 		}
1011 	} else {
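		/*
		 * bp sorts at or above the current head: walk forward for
		 * the insertion point, tracking the highest block seen in
		 * case bp becomes the new high end.
		 */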
1012 		highest_blk = DBLK(bp1);
1013 		highest_bp = bp1;
1014 		do {
1015 			if (DBLK(bp) < DBLK(bp1)) {
1016 				bp->av_forw = bp1;
1017 				bp1->av_back->av_forw = bp;
1018 				bp->av_back = bp1->av_back;
1019 				bp1->av_back = bp;
1020 				if (((bpp == async_bpp) &&
1021 				    (dp->hd_flags & QNEAR_ASYNC)) ||
1022 				    (bpp == sync_bpp)) {
1023 					if ((dp->hd_flags & QNEAR_BACKWARD) &&
1024 					    BP_LT_HD(bp, dp)) {
1025 						*bpp = bp;
1026 					}
1027 				}
1028 				return;
1029 			} else if (DBLK(bp1) > highest_blk) {
1030 				highest_bp = bp1;
1031 				highest_blk = DBLK(bp1);
1032 			}
1033 		} while ((DBLK(bp1->av_forw) > DBLK(bp1)) &&
1034 		    ((bp1 = bp1->av_forw) != bp_start));
1035 		bp->av_back = highest_bp;
1036 		highest_bp->av_forw->av_back = bp;
1037 		bp->av_forw = highest_bp->av_forw;
1038 		highest_bp->av_forw = bp;
1039 
1040 		if (((bpp == sync_bpp) ||
1041 		    ((bpp == async_bpp) && (dp->hd_flags & QNEAR_ASYNC))) &&
1042 		    (dp->hd_flags & QNEAR_BACKWARD) && (BP_LT_HD(bp, dp)))
1043 			*bpp = bp;
1044 	}
1045 }
1046 
/*
 * dmult_enque() holds the dmultp->ds_mutex lock when it calls us, so we
 * don't grab a lock here.  If dmult_enque() changes, we will have to
 * revisit this function.
 */
1052 static int
1053 qmerge_add(struct que_data *qfp, struct buf *bp)
{
1056 	que_insert(qfp, bp);
1057 	return (++qfp->q_cnt);
1058 }
1059 
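/*
 * Completion handler for a merged request: unhook each constituent buf
 * from the merge ring, restore its original page list, propagate any
 * error from the merged transfer, and biodone() it, then free the merge
 * header itself.
 */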
1060 static int
1061 qmerge_iodone(struct buf *bp)
1062 {
1063 	struct buf *bp1;
1064 	struct	page *pp, *pp1, *tmp_pp;
1065 
1066 	if (bp->b_flags & B_REMAPPED)
1067 		bp_mapout(bp);
1068 
1069 	bp1 = bp->b_forw;
1070 	do {
1071 		bp->b_forw = bp1->av_forw;
1072 		bp1->av_forw->av_back = bp1->av_back;
1073 		bp1->av_back->av_forw = bp1->av_forw;
1074 		pp = (page_t *)bp1->b_pages;
1075 		pp1 = bp->b_forw->b_pages;
1076 
1077 		tmp_pp = pp->p_prev;
1078 		pp->p_prev = pp1->p_prev;
1079 		pp->p_prev->p_next = pp;
1080 
1081 		pp1->p_prev = tmp_pp;
1082 		pp1->p_prev->p_next = pp1;
1083 
1084 		if (bp->b_flags & B_ERROR) {
1085 			bp1->b_error = bp->b_error;
1086 			bp1->b_flags |= B_ERROR;
1087 		}
1088 
1089 		biodone(bp1);
1090 	} while ((bp1 = bp->b_forw) != bp->b_forw->av_forw);
1091 
1092 	biodone(bp1);
1093 	kmem_free(bp, sizeof (*bp));
1094 	return (0);
1095 }
1096 
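/*
 * Hand back the next buf to issue, honoring the current scan direction,
 * the sync/async scan ratio, and the simulated head position kept in
 * hd_private.  When bp_merge is non-NULL, only a buf that can merge with
 * it is returned.
 */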
1100 static struct buf *
1101 qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge, int *can_merge)
1102 {
1103 	intptr_t	private, cnt;
1104 	int		flags;
1105 	struct		buf *sync_bp, *async_bp, *bp;
1106 	struct		buf **sync_bpp, **async_bpp, **bpp;
1107 	struct		diskhd *dp = &qfp->q_tab;
1108 
1109 	if (qfp->q_cnt == 0) {
1110 		return (NULL);
1111 	}
1112 	flags = qfp->q_tab.hd_flags;
1113 	sync_bpp = &qfp->q_tab.hd_sync_next;
1114 	async_bpp = &qfp->q_tab.hd_async_next;
1115 
1116 begin_nextbp:
1117 	if (flags & QNEAR_ASYNCONLY) {
1118 		bp = *async_bpp;
1119 		private = DBLK(bp);
1120 		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
1121 			return (NULL);
1122 		} else if (bp->av_forw == bp) {
1123 			bp->av_forw = bp->av_back = NULL;
1124 			flags &= ~(QNEAR_ASYNCONLY | QNEAR_BACKWARD);
1125 			private = 0;
1126 		} else if (flags & QNEAR_BACKWARD) {
1127 			if (DBLK(bp) < DBLK(bp->av_back)) {
1128 				flags &= ~QNEAR_BACKWARD;
1129 				private = 0;
1130 			}
1131 		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
1132 			if (qmerge2wayscan) {
1133 				flags |= QNEAR_BACKWARD;
1134 			} else {
1135 				private = 0;
1136 			}
1137 		} else if (qmerge2wayscan == 0) {
1138 			private = DBLK(bp->av_forw);
1139 		}
1140 		bpp = async_bpp;
1141 
1142 	} else if (flags & QNEAR_ASYNCALSO) {
1143 		sync_bp = *sync_bpp;
1144 		async_bp = *async_bpp;
1145 		if (flags & QNEAR_BACKWARD) {
1146 			if (BP_GT_HD(sync_bp, dp) && BP_GT_HD(async_bp, dp)) {
1147 				flags &= ~(QNEAR_BACKWARD|QNEAR_ASYNCALSO);
1148 				*sync_bpp = sync_bp->av_forw;
1149 				*async_bpp = async_bp->av_forw;
1150 				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
1151 				qfp->q_tab.hd_private = 0;
1152 				goto begin_nextbp;
1153 			}
1154 			if (BP_LT_HD(async_bp, dp) && BP_LT_HD(sync_bp, dp)) {
1155 				if (BP_GT_BP(async_bp, sync_bp)) {
1156 					bpp = async_bpp;
1157 					bp = *async_bpp;
1158 				} else {
1159 					bpp = sync_bpp;
1160 					bp = *sync_bpp;
1161 				}
1162 			} else if (BP_LT_HD(async_bp, dp)) {
1163 				bpp = async_bpp;
1164 				bp = *async_bpp;
1165 			} else {
1166 				bpp = sync_bpp;
1167 				bp = *sync_bpp;
1168 			}
1169 		} else {
1170 			if (BP_LT_HD(sync_bp, dp) && BP_LT_HD(async_bp, dp)) {
1171 				if (qmerge2wayscan) {
1172 					flags |= QNEAR_BACKWARD;
1173 					*sync_bpp = sync_bp->av_back;
1174 					*async_bpp = async_bp->av_back;
1175 					goto begin_nextbp;
1176 				} else {
1177 					flags &= ~QNEAR_ASYNCALSO;
1178 					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
1180 					qfp->q_tab.hd_private = 0;
1181 					goto begin_nextbp;
1182 				}
1183 			}
1184 			if (BP_GT_HD(async_bp, dp) && BP_GT_HD(sync_bp, dp)) {
1185 				if (BP_LT_BP(async_bp, sync_bp)) {
1186 					bpp = async_bpp;
1187 					bp = *async_bpp;
1188 				} else {
1189 					bpp = sync_bpp;
1190 					bp = *sync_bpp;
1191 				}
1192 			} else if (BP_GT_HD(async_bp, dp)) {
1193 				bpp = async_bpp;
1194 				bp = *async_bpp;
1195 			} else {
1196 				bpp = sync_bpp;
1197 				bp = *sync_bpp;
1198 			}
1199 		}
1200 		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
1201 			return (NULL);
1202 		} else if (bp->av_forw == bp) {
1203 			bp->av_forw = bp->av_back = NULL;
1204 			flags &= ~QNEAR_ASYNCALSO;
1205 			if (bpp == async_bpp) {
1206 				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
1207 			} else {
1208 				flags |= QNEAR_ASYNCONLY;
1209 			}
1210 		}
1211 		private = DBLK(bp);
1212 	} else {
1213 		bp = *sync_bpp;
1214 		private = DBLK(bp);
1215 		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
1216 			return (NULL);
1217 		} else if (bp->av_forw == bp) {
1218 			private = 0;
1219 			SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
1220 			bp->av_forw = bp->av_back = NULL;
1221 			flags &= ~QNEAR_BACKWARD;
1222 			if (*async_bpp)
1223 				flags |= QNEAR_ASYNCONLY;
1224 		} else if (flags & QNEAR_BACKWARD) {
1225 			if (DBLK(bp) < DBLK(bp->av_back)) {
1226 				flags &= ~QNEAR_BACKWARD;
1227 				cnt = (intptr_t)SYNC2ASYNC(qfp);
1228 				if (cnt > 0) {
1229 					cnt--;
1230 					SYNC2ASYNC(qfp) = (void *)cnt;
1231 				} else {
1232 					if (*async_bpp)
1233 						flags |= QNEAR_ASYNCALSO;
1234 					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
1236 				}
1237 				private = 0;
1238 			}
1239 		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
1240 			private = 0;
1241 			if (qmerge2wayscan) {
1242 				flags |= QNEAR_BACKWARD;
1243 				private = DBLK(bp);
1244 			} else {
1245 				cnt = (intptr_t)SYNC2ASYNC(qfp);
1246 				if (cnt > 0) {
1247 					cnt--;
1248 					SYNC2ASYNC(qfp) = (void *)cnt;
1249 				} else {
1250 					if (*async_bpp)
1251 						flags |= QNEAR_ASYNCALSO;
1252 					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
1254 				}
1255 			}
1256 		} else if (qmerge2wayscan == 0) {
1257 			private = DBLK(bp->av_forw);
1258 		}
1259 		bpp = sync_bpp;
1260 	}
1261 
1262 	if (bp->av_forw) {
1263 		*can_merge = !(bp->b_flags & B_READ);
1264 		if (flags & QNEAR_BACKWARD) {
1265 			*bpp = bp->av_back;
1266 			if ((DBLK(bp->av_back) +
1267 			    bp->av_back->b_bcount / DEV_BSIZE) != DBLK(bp))
1268 				*can_merge = 0;
1269 		} else {
1270 			*bpp = bp->av_forw;
1271 			if ((DBLK(bp) + bp->b_bcount / DEV_BSIZE) !=
1272 			    DBLK(bp->av_forw))
1273 				*can_merge = 0;
1274 		}
1275 		bp->av_forw->av_back = bp->av_back;
1276 		bp->av_back->av_forw = bp->av_forw;
1277 		bp->av_forw = bp->av_back = NULL;
1278 	} else {
1279 		*bpp = NULL;
1280 		*can_merge = 0;
1281 	}
1282 	qfp->q_tab.hd_private = (void *)private;
1283 	qfp->q_cnt--;
1284 	qfp->q_tab.hd_flags = flags;
1285 	if (bp->b_error) {
1286 		bp->av_back = (void *)(intptr_t)bp->b_error;
1287 		bp->b_error = 0;
1288 	}
1289 	return (bp);
1290 }
1291 
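/*
 * Dequeue the next request.  While the scheduler keeps handing back
 * mergeable neighbors, clone the first buf into a separate merge header
 * and chain the rest onto it; qmerge_iodone() takes the merged request
 * apart again at completion time.
 */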
1292 static struct buf *
1293 qmerge_del(struct que_data *qfp)
1294 {
1295 	struct	buf *bp, *next_bp, *bp_merge;
1296 	int	alloc_mergebp, merge;
1297 
1298 	if (qfp->q_cnt == 0) {
1299 		return (NULL);
1300 	}
1301 
1302 	bp_merge = bp = qmerge_nextbp(qfp, NULL, &merge);
1303 	alloc_mergebp = 1;
1304 	while (merge && (next_bp = qmerge_nextbp(qfp, bp_merge, &merge))) {
1305 		if (alloc_mergebp) {
1306 			bp_merge = kmem_alloc(sizeof (*bp_merge), KM_NOSLEEP);
			if (bp_merge == NULL) {
				/* can't merge; just issue bp by itself */
				return (bp);
			}
1311 			bcopy(bp, bp_merge, sizeof (*bp_merge));
1312 			bp_merge->b_iodone = qmerge_iodone;
1313 			bp_merge->b_forw = bp;
1314 			bp_merge->b_back = (struct buf *)qfp;
1315 			bp->av_forw = bp->av_back = bp;
1316 			alloc_mergebp = 0;
1317 		}
1318 		qmerge_mergesetup(bp_merge, next_bp);
1319 	}
1320 	return (bp_merge);
1321 }
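
/*
 * Usage sketch (hypothetical): a flow-control object drives one of these
 * queue objects purely through the wrapper macros already used in this
 * file, so a qmerge queue slots in wherever a qfifo or qsort queue does:
 *
 *	struct que_obj *qobjp = qmerge_create();
 *
 *	QUE_INIT(qobjp, lkarg);
 *	QUE_ADD(qobjp, bp);		(called under the caller's lock)
 *	bp = QUE_DEL(qobjp);		(may return a merged request)
 *	QUE_FREE(qobjp);
 */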
1322 
1323 
1324 /*
1325  *	FIFO Queue functions
1326  */
1327 /*
1328  * 	Local Function Prototypes
1329  */
1330 static int qfifo_add();
1331 
1332 struct 	que_objops qfifo_ops = {
1333 	que_init,
1334 	que_free,
1335 	qfifo_add,
1336 	que_del,
1337 	0, 0
1338 };
1339 
1340 /*
1341  * 	Local static data
1342  */
1343 struct que_obj *
1344 qfifo_create()
1345 {
1346 	return (que_create((struct que_objops *)&qfifo_ops));
1347 }
1348 
1349 static int
1350 qfifo_add(struct que_data *qfp, struct buf *bp)
{
1353 	if (!qfp->q_tab.b_actf)
1354 		qfp->q_tab.b_actf = bp;
1355 	else
1356 		qfp->q_tab.b_actl->av_forw = bp;
1357 	qfp->q_tab.b_actl = bp;
1358 	bp->av_forw = NULL;
1359 	return (0);
1360 }
1361 
1362 /*
1363  *	One-Way-Scan Queue functions
1364  */
1365 /*
1366  * 	Local Function Prototypes
1367  */
1368 static int qsort_add();
1369 static struct buf *qsort_del();
1370 static void oneway_scan_binary(struct diskhd *dp, struct buf *bp);
1371 
1372 struct 	que_objops qsort_ops = {
1373 	que_init,
1374 	que_free,
1375 	qsort_add,
1376 	qsort_del,
1377 	0, 0
1378 };
1379 
1380 /*
1381  * 	Local static data
1382  */
1383 struct que_obj *
1384 qsort_create()
1385 {
1386 	return (que_create((struct que_objops *)&qsort_ops));
1387 }
1388 
1389 static int
1390 qsort_add(struct que_data *qfp, struct buf *bp)
1391 {
1392 	qfp->q_cnt++;
1393 	oneway_scan_binary(&qfp->q_tab, bp);
1394 	return (0);
1395 }
1396 
1397 
1398 #define	b_pasf	b_forw
1399 #define	b_pasl	b_back
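
/*
 * One-way scan (C-SCAN style) insertion: requests that fall behind the
 * head of the active list go onto a sorted "pass" list (b_pasf); when the
 * current sweep drains, qsort_del() promotes the pass list to the active
 * list.
 */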
1400 static void
1401 oneway_scan_binary(struct diskhd *dp, struct buf *bp)
1402 {
1403 	struct buf *ap;
1404 
1405 	ap = dp->b_actf;
1406 	if (ap == NULL) {
1407 		dp->b_actf = bp;
1408 		bp->av_forw = NULL;
1409 		return;
1410 	}
1411 	if (DBLK(bp) < DBLK(ap)) {
1412 		ap = dp->b_pasf;
1413 		if ((ap == NULL) || (DBLK(bp) < DBLK(ap))) {
1414 			dp->b_pasf = bp;
1415 			bp->av_forw = ap;
1416 			return;
1417 		}
1418 	}
1419 	while (ap->av_forw) {
1420 		if (DBLK(bp) < DBLK(ap->av_forw))
1421 			break;
1422 		ap = ap->av_forw;
1423 	}
1424 	bp->av_forw = ap->av_forw;
1425 	ap->av_forw = bp;
1426 }
1427 
1428 static struct buf *
1429 qsort_del(struct que_data *qfp)
1430 {
1431 	struct buf *bp;
1432 
1433 	if (qfp->q_cnt == 0) {
1434 		return (NULL);
1435 	}
1436 	qfp->q_cnt--;
1437 	bp = qfp->q_tab.b_actf;
1438 	qfp->q_tab.b_actf = bp->av_forw;
1439 	bp->av_forw = 0;
1440 	if (!qfp->q_tab.b_actf && qfp->q_tab.b_pasf) {
1441 		qfp->q_tab.b_actf = qfp->q_tab.b_pasf;
1442 		qfp->q_tab.b_pasf = NULL;
1443 	}
1444 	return (bp);
1445 }
1446 
1447 /*
1448  *	Tagged queueing
1449  */
1450 /*
1451  * 	Local Function Prototypes
1452  */
1453 
1454 struct 	que_objops qtag_ops = {
1455 	que_init,
1456 	que_free,
1457 	qsort_add,
1458 	qsort_del,
1459 	0, 0
1460 };
1461 
1462 /*
1463  * 	Local static data
1464  */
1465 struct que_obj *
1466 qtag_create()
1467 {
1468 	return (que_create((struct que_objops *)&qtag_ops));
1469 }
1470